id
stringlengths
28
33
content
stringlengths
14
265k
max_stars_repo_path
stringlengths
49
55
crossvul-python_data_bad_2105_1
404: Not Found
./CrossVul/dataset_final_sorted/CWE-79/py/bad_2105_1
crossvul-python_data_bad_2940_0
# coding: utf-8 """ mistune ~~~~~~~ The fastest markdown parser in pure Python with renderer feature. :copyright: (c) 2014 - 2017 by Hsiaoming Yang. """ import re import inspect __version__ = '0.8' __author__ = 'Hsiaoming Yang <me@lepture.com>' __all__ = [ 'BlockGrammar', 'BlockLexer', 'InlineGrammar', 'InlineLexer', 'Renderer', 'Markdown', 'markdown', 'escape', ] _key_pattern = re.compile(r'\s+') _nonalpha_pattern = re.compile(r'\W') _escape_pattern = re.compile(r'&(?!#?\w+;)') _newline_pattern = re.compile(r'\r\n|\r') _block_quote_leading_pattern = re.compile(r'^ *> ?', flags=re.M) _block_code_leading_pattern = re.compile(r'^ {4}', re.M) _inline_tags = [ 'a', 'em', 'strong', 'small', 's', 'cite', 'q', 'dfn', 'abbr', 'data', 'time', 'code', 'var', 'samp', 'kbd', 'sub', 'sup', 'i', 'b', 'u', 'mark', 'ruby', 'rt', 'rp', 'bdi', 'bdo', 'span', 'br', 'wbr', 'ins', 'del', 'img', 'font', ] _pre_tags = ['pre', 'script', 'style'] _valid_end = r'(?!:/|[^\w\s@]*@)\b' _valid_attr = r'''\s*[a-zA-Z\-](?:\=(?:"[^"]*"|'[^']*'|[^\s'">]+))?''' _block_tag = r'(?!(?:%s)\b)\w+%s' % ('|'.join(_inline_tags), _valid_end) _scheme_blacklist = ('javascript:', 'vbscript:') def _pure_pattern(regex): pattern = regex.pattern if pattern.startswith('^'): pattern = pattern[1:] return pattern def _keyify(key): return _key_pattern.sub(' ', key.lower()) def escape(text, quote=False, smart_amp=True): """Replace special characters "&", "<" and ">" to HTML-safe sequences. The original cgi.escape will always escape "&", but you can control this one for a smart escape amp. :param quote: if set to True, " and ' will be escaped. :param smart_amp: if set to False, & will always be escaped. 
""" if smart_amp: text = _escape_pattern.sub('&amp;', text) else: text = text.replace('&', '&amp;') text = text.replace('<', '&lt;') text = text.replace('>', '&gt;') if quote: text = text.replace('"', '&quot;') text = text.replace("'", '&#39;') return text def escape_link(url): """Remove dangerous URL schemes like javascript: and escape afterwards.""" lower_url = url.lower().strip('\x00\x1a \n\r\t') for scheme in _scheme_blacklist: if re.sub(r'[^A-Za-z0-9\/:]+', '', lower_url).startswith(scheme): return '' return escape(url, quote=True, smart_amp=False) def preprocessing(text, tab=4): text = _newline_pattern.sub('\n', text) text = text.expandtabs(tab) text = text.replace('\u2424', '\n') pattern = re.compile(r'^ +$', re.M) return pattern.sub('', text) class BlockGrammar(object): """Grammars for block level tokens.""" def_links = re.compile( r'^ *\[([^^\]]+)\]: *' # [key]: r'<?([^\s>]+)>?' # <link> or link r'(?: +["(]([^\n]+)[")])? *(?:\n+|$)' ) def_footnotes = re.compile( r'^\[\^([^\]]+)\]: *(' r'[^\n]*(?:\n+|$)' # [^key]: r'(?: {1,}[^\n]*(?:\n+|$))*' r')' ) newline = re.compile(r'^\n+') block_code = re.compile(r'^( {4}[^\n]+\n*)+') fences = re.compile( r'^ *(`{3,}|~{3,}) *(\S+)? *\n' # ```lang r'([\s\S]+?)\s*' r'\1 *(?:\n+|$)' # ``` ) hrule = re.compile(r'^ {0,3}[-*_](?: *[-*_]){2,} *(?:\n+|$)') heading = re.compile(r'^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)') lheading = re.compile(r'^([^\n]+)\n *(=|-)+ *(?:\n+|$)') block_quote = re.compile(r'^( *>[^\n]+(\n[^\n]+)*\n*)+') list_block = re.compile( r'^( *)([*+-]|\d+\.) [\s\S]+?' r'(?:' r'\n+(?=\1?(?:[-*_] *){3,}(?:\n+|$))' # hrule r'|\n+(?=%s)' # def links r'|\n+(?=%s)' # def footnotes r'|\n{2,}' r'(?! )' r'(?!\1(?:[*+-]|\d+\.) )\n*' r'|' r'\s*$)' % ( _pure_pattern(def_links), _pure_pattern(def_footnotes), ) ) list_item = re.compile( r'^(( *)(?:[*+-]|\d+\.) [^\n]*' r'(?:\n(?!\2(?:[*+-]|\d+\.) )[^\n]*)*)', flags=re.M ) list_bullet = re.compile(r'^ *(?:[*+-]|\d+\.) +') paragraph = re.compile( r'^((?:[^\n]+\n?(?!' 
r'%s|%s|%s|%s|%s|%s|%s|%s|%s' r'))+)\n*' % ( _pure_pattern(fences).replace(r'\1', r'\2'), _pure_pattern(list_block).replace(r'\1', r'\3'), _pure_pattern(hrule), _pure_pattern(heading), _pure_pattern(lheading), _pure_pattern(block_quote), _pure_pattern(def_links), _pure_pattern(def_footnotes), '<' + _block_tag, ) ) block_html = re.compile( r'^ *(?:%s|%s|%s) *(?:\n{2,}|\s*$)' % ( r'<!--[\s\S]*?-->', r'<(%s)((?:%s)*?)>([\s\S]*?)<\/\1>' % (_block_tag, _valid_attr), r'<%s(?:%s)*?\s*\/?>' % (_block_tag, _valid_attr), ) ) table = re.compile( r'^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*' ) nptable = re.compile( r'^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*' ) text = re.compile(r'^[^\n]+') class BlockLexer(object): """Block level lexer for block grammars.""" grammar_class = BlockGrammar default_rules = [ 'newline', 'hrule', 'block_code', 'fences', 'heading', 'nptable', 'lheading', 'block_quote', 'list_block', 'block_html', 'def_links', 'def_footnotes', 'table', 'paragraph', 'text' ] list_rules = ( 'newline', 'block_code', 'fences', 'lheading', 'hrule', 'block_quote', 'list_block', 'block_html', 'text', ) footnote_rules = ( 'newline', 'block_code', 'fences', 'heading', 'nptable', 'lheading', 'hrule', 'block_quote', 'list_block', 'block_html', 'table', 'paragraph', 'text' ) def __init__(self, rules=None, **kwargs): self.tokens = [] self.def_links = {} self.def_footnotes = {} if not rules: rules = self.grammar_class() self.rules = rules def __call__(self, text, rules=None): return self.parse(text, rules) def parse(self, text, rules=None): text = text.rstrip('\n') if not rules: rules = self.default_rules def manipulate(text): for key in rules: rule = getattr(self.rules, key) m = rule.match(text) if not m: continue getattr(self, 'parse_%s' % key)(m) return m return False # pragma: no cover while text: m = manipulate(text) if m is not False: text = text[len(m.group(0)):] continue if text: # pragma: no cover raise RuntimeError('Infinite loop at: 
%s' % text) return self.tokens def parse_newline(self, m): length = len(m.group(0)) if length > 1: self.tokens.append({'type': 'newline'}) def parse_block_code(self, m): # clean leading whitespace code = _block_code_leading_pattern.sub('', m.group(0)) self.tokens.append({ 'type': 'code', 'lang': None, 'text': code, }) def parse_fences(self, m): self.tokens.append({ 'type': 'code', 'lang': m.group(2), 'text': m.group(3), }) def parse_heading(self, m): self.tokens.append({ 'type': 'heading', 'level': len(m.group(1)), 'text': m.group(2), }) def parse_lheading(self, m): """Parse setext heading.""" self.tokens.append({ 'type': 'heading', 'level': 1 if m.group(2) == '=' else 2, 'text': m.group(1), }) def parse_hrule(self, m): self.tokens.append({'type': 'hrule'}) def parse_list_block(self, m): bull = m.group(2) self.tokens.append({ 'type': 'list_start', 'ordered': '.' in bull, }) cap = m.group(0) self._process_list_item(cap, bull) self.tokens.append({'type': 'list_end'}) def _process_list_item(self, cap, bull): cap = self.rules.list_item.findall(cap) _next = False length = len(cap) for i in range(length): item = cap[i][0] # remove the bullet space = len(item) item = self.rules.list_bullet.sub('', item) # outdent if '\n ' in item: space = space - len(item) pattern = re.compile(r'^ {1,%d}' % space, flags=re.M) item = pattern.sub('', item) # determine whether item is loose or not loose = _next if not loose and re.search(r'\n\n(?!\s*$)', item): loose = True rest = len(item) if i != length - 1 and rest: _next = item[rest-1] == '\n' if not loose: loose = _next if loose: t = 'loose_item_start' else: t = 'list_item_start' self.tokens.append({'type': t}) # recurse self.parse(item, self.list_rules) self.tokens.append({'type': 'list_item_end'}) def parse_block_quote(self, m): self.tokens.append({'type': 'block_quote_start'}) # clean leading > cap = _block_quote_leading_pattern.sub('', m.group(0)) self.parse(cap) self.tokens.append({'type': 'block_quote_end'}) def 
parse_def_links(self, m): key = _keyify(m.group(1)) self.def_links[key] = { 'link': m.group(2), 'title': m.group(3), } def parse_def_footnotes(self, m): key = _keyify(m.group(1)) if key in self.def_footnotes: # footnote is already defined return self.def_footnotes[key] = 0 self.tokens.append({ 'type': 'footnote_start', 'key': key, }) text = m.group(2) if '\n' in text: lines = text.split('\n') whitespace = None for line in lines[1:]: space = len(line) - len(line.lstrip()) if space and (not whitespace or space < whitespace): whitespace = space newlines = [lines[0]] for line in lines[1:]: newlines.append(line[whitespace:]) text = '\n'.join(newlines) self.parse(text, self.footnote_rules) self.tokens.append({ 'type': 'footnote_end', 'key': key, }) def parse_table(self, m): item = self._process_table(m) cells = re.sub(r'(?: *\| *)?\n$', '', m.group(3)) cells = cells.split('\n') for i, v in enumerate(cells): v = re.sub(r'^ *\| *| *\| *$', '', v) cells[i] = re.split(r' *\| *', v) item['cells'] = cells self.tokens.append(item) def parse_nptable(self, m): item = self._process_table(m) cells = re.sub(r'\n$', '', m.group(3)) cells = cells.split('\n') for i, v in enumerate(cells): cells[i] = re.split(r' *\| *', v) item['cells'] = cells self.tokens.append(item) def _process_table(self, m): header = re.sub(r'^ *| *\| *$', '', m.group(1)) header = re.split(r' *\| *', header) align = re.sub(r' *|\| *$', '', m.group(2)) align = re.split(r' *\| *', align) for i, v in enumerate(align): if re.search(r'^ *-+: *$', v): align[i] = 'right' elif re.search(r'^ *:-+: *$', v): align[i] = 'center' elif re.search(r'^ *:-+ *$', v): align[i] = 'left' else: align[i] = None item = { 'type': 'table', 'header': header, 'align': align, } return item def parse_block_html(self, m): tag = m.group(1) if not tag: text = m.group(0) self.tokens.append({ 'type': 'close_html', 'text': text }) else: attr = m.group(2) text = m.group(3) self.tokens.append({ 'type': 'open_html', 'tag': tag, 'extra': attr, 'text': 
text }) def parse_paragraph(self, m): text = m.group(1).rstrip('\n') self.tokens.append({'type': 'paragraph', 'text': text}) def parse_text(self, m): text = m.group(0) self.tokens.append({'type': 'text', 'text': text}) class InlineGrammar(object): """Grammars for inline level tokens.""" escape = re.compile(r'^\\([\\`*{}\[\]()#+\-.!_>~|])') # \* \+ \! .... inline_html = re.compile( r'^(?:%s|%s|%s)' % ( r'<!--[\s\S]*?-->', r'<(\w+%s)((?:%s)*?)\s*>([\s\S]*?)<\/\1>' % (_valid_end, _valid_attr), r'<\w+%s(?:%s)*?\s*\/?>' % (_valid_end, _valid_attr), ) ) autolink = re.compile(r'^<([^ >]+(@|:)[^ >]+)>') link = re.compile( r'^!?\[(' r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*' r')\]\(' r'''\s*(<)?([\s\S]*?)(?(2)>)(?:\s+['"]([\s\S]*?)['"])?\s*''' r'\)' ) reflink = re.compile( r'^!?\[(' r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*' r')\]\s*\[([^^\]]*)\]' ) nolink = re.compile(r'^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]') url = re.compile(r'''^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])''') double_emphasis = re.compile( r'^_{2}([\s\S]+?)_{2}(?!_)' # __word__ r'|' r'^\*{2}([\s\S]+?)\*{2}(?!\*)' # **word** ) emphasis = re.compile( r'^\b_((?:__|[^_])+?)_\b' # _word_ r'|' r'^\*((?:\*\*|[^\*])+?)\*(?!\*)' # *word* ) code = re.compile(r'^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)') # `code` linebreak = re.compile(r'^ {2,}\n(?!\s*$)') strikethrough = re.compile(r'^~~(?=\S)([\s\S]*?\S)~~') # ~~word~~ footnote = re.compile(r'^\[\^([^\]]+)\]') text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| {2,}\n|$)') def hard_wrap(self): """Grammar for hard wrap linebreak. You don't need to add two spaces at the end of a line. 
""" self.linebreak = re.compile(r'^ *\n(?!\s*$)') self.text = re.compile( r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| *\n|$)' ) class InlineLexer(object): """Inline level lexer for inline grammars.""" grammar_class = InlineGrammar default_rules = [ 'escape', 'inline_html', 'autolink', 'url', 'footnote', 'link', 'reflink', 'nolink', 'double_emphasis', 'emphasis', 'code', 'linebreak', 'strikethrough', 'text', ] inline_html_rules = [ 'escape', 'autolink', 'url', 'link', 'reflink', 'nolink', 'double_emphasis', 'emphasis', 'code', 'linebreak', 'strikethrough', 'text', ] def __init__(self, renderer, rules=None, **kwargs): self.renderer = renderer self.links = {} self.footnotes = {} self.footnote_index = 0 if not rules: rules = self.grammar_class() kwargs.update(self.renderer.options) if kwargs.get('hard_wrap'): rules.hard_wrap() self.rules = rules self._in_link = False self._in_footnote = False self._parse_inline_html = kwargs.get('parse_inline_html') def __call__(self, text, rules=None): return self.output(text, rules) def setup(self, links, footnotes): self.footnote_index = 0 self.links = links or {} self.footnotes = footnotes or {} def output(self, text, rules=None): text = text.rstrip('\n') if not rules: rules = list(self.default_rules) if self._in_footnote and 'footnote' in rules: rules.remove('footnote') output = self.renderer.placeholder() def manipulate(text): for key in rules: pattern = getattr(self.rules, key) m = pattern.match(text) if not m: continue self.line_match = m out = getattr(self, 'output_%s' % key)(m) if out is not None: return m, out return False # pragma: no cover while text: ret = manipulate(text) if ret is not False: m, out = ret output += out text = text[len(m.group(0)):] continue if text: # pragma: no cover raise RuntimeError('Infinite loop at: %s' % text) return output def output_escape(self, m): text = m.group(1) return self.renderer.escape(text) def output_autolink(self, m): link = m.group(1) if m.group(2) == '@': is_email = True else: is_email = 
False return self.renderer.autolink(link, is_email) def output_url(self, m): link = m.group(1) if self._in_link: return self.renderer.text(link) return self.renderer.autolink(link, False) def output_inline_html(self, m): tag = m.group(1) if self._parse_inline_html and tag in _inline_tags: text = m.group(3) if tag == 'a': self._in_link = True text = self.output(text, rules=self.inline_html_rules) self._in_link = False else: text = self.output(text, rules=self.inline_html_rules) extra = m.group(2) or '' html = '<%s%s>%s</%s>' % (tag, extra, text, tag) else: html = m.group(0) return self.renderer.inline_html(html) def output_footnote(self, m): key = _keyify(m.group(1)) if key not in self.footnotes: return None if self.footnotes[key]: return None self.footnote_index += 1 self.footnotes[key] = self.footnote_index return self.renderer.footnote_ref(key, self.footnote_index) def output_link(self, m): return self._process_link(m, m.group(3), m.group(4)) def output_reflink(self, m): key = _keyify(m.group(2) or m.group(1)) if key not in self.links: return None ret = self.links[key] return self._process_link(m, ret['link'], ret['title']) def output_nolink(self, m): key = _keyify(m.group(1)) if key not in self.links: return None ret = self.links[key] return self._process_link(m, ret['link'], ret['title']) def _process_link(self, m, link, title=None): line = m.group(0) text = m.group(1) if line[0] == '!': return self.renderer.image(link, title, text) self._in_link = True text = self.output(text) self._in_link = False return self.renderer.link(link, title, text) def output_double_emphasis(self, m): text = m.group(2) or m.group(1) text = self.output(text) return self.renderer.double_emphasis(text) def output_emphasis(self, m): text = m.group(2) or m.group(1) text = self.output(text) return self.renderer.emphasis(text) def output_code(self, m): text = m.group(2) return self.renderer.codespan(text) def output_linebreak(self, m): return self.renderer.linebreak() def 
output_strikethrough(self, m): text = self.output(m.group(1)) return self.renderer.strikethrough(text) def output_text(self, m): text = m.group(0) return self.renderer.text(text) class Renderer(object): """The default HTML renderer for rendering Markdown. """ def __init__(self, **kwargs): self.options = kwargs def placeholder(self): """Returns the default, empty output value for the renderer. All renderer methods use the '+=' operator to append to this value. Default is a string so rendering HTML can build up a result string with the rendered Markdown. Can be overridden by Renderer subclasses to be types like an empty list, allowing the renderer to create a tree-like structure to represent the document (which can then be reprocessed later into a separate format like docx or pdf). """ return '' def block_code(self, code, lang=None): """Rendering block level code. ``pre > code``. :param code: text content of the code block. :param lang: language of the given code. """ code = code.rstrip('\n') if not lang: code = escape(code, smart_amp=False) return '<pre><code>%s\n</code></pre>\n' % code code = escape(code, quote=True, smart_amp=False) return '<pre><code class="lang-%s">%s\n</code></pre>\n' % (lang, code) def block_quote(self, text): """Rendering <blockquote> with the given text. :param text: text content of the blockquote. """ return '<blockquote>%s\n</blockquote>\n' % text.rstrip('\n') def block_html(self, html): """Rendering block level pure html content. :param html: text content of the html snippet. """ if self.options.get('skip_style') and \ html.lower().startswith('<style'): return '' if self.options.get('escape'): return escape(html) return html def header(self, text, level, raw=None): """Rendering header/heading tags like ``<h1>`` ``<h2>``. :param text: rendered text content for the header. :param level: a number for the header level, for example: 1. :param raw: raw text content of the header. 
""" return '<h%d>%s</h%d>\n' % (level, text, level) def hrule(self): """Rendering method for ``<hr>`` tag.""" if self.options.get('use_xhtml'): return '<hr />\n' return '<hr>\n' def list(self, body, ordered=True): """Rendering list tags like ``<ul>`` and ``<ol>``. :param body: body contents of the list. :param ordered: whether this list is ordered or not. """ tag = 'ul' if ordered: tag = 'ol' return '<%s>\n%s</%s>\n' % (tag, body, tag) def list_item(self, text): """Rendering list item snippet. Like ``<li>``.""" return '<li>%s</li>\n' % text def paragraph(self, text): """Rendering paragraph tags. Like ``<p>``.""" return '<p>%s</p>\n' % text.strip(' ') def table(self, header, body): """Rendering table element. Wrap header and body in it. :param header: header part of the table. :param body: body part of the table. """ return ( '<table>\n<thead>%s</thead>\n' '<tbody>\n%s</tbody>\n</table>\n' ) % (header, body) def table_row(self, content): """Rendering a table row. Like ``<tr>``. :param content: content of current table row. """ return '<tr>\n%s</tr>\n' % content def table_cell(self, content, **flags): """Rendering a table cell. Like ``<th>`` ``<td>``. :param content: content of current table cell. :param header: whether this is header or not. :param align: align of current table cell. """ if flags['header']: tag = 'th' else: tag = 'td' align = flags['align'] if not align: return '<%s>%s</%s>\n' % (tag, content, tag) return '<%s style="text-align:%s">%s</%s>\n' % ( tag, align, content, tag ) def double_emphasis(self, text): """Rendering **strong** text. :param text: text content for emphasis. """ return '<strong>%s</strong>' % text def emphasis(self, text): """Rendering *emphasis* text. :param text: text content for emphasis. """ return '<em>%s</em>' % text def codespan(self, text): """Rendering inline `code` text. :param text: text content for inline code. 
""" text = escape(text.rstrip(), smart_amp=False) return '<code>%s</code>' % text def linebreak(self): """Rendering line break like ``<br>``.""" if self.options.get('use_xhtml'): return '<br />\n' return '<br>\n' def strikethrough(self, text): """Rendering ~~strikethrough~~ text. :param text: text content for strikethrough. """ return '<del>%s</del>' % text def text(self, text): """Rendering unformatted text. :param text: text content. """ if self.options.get('parse_block_html'): return text return escape(text) def escape(self, text): """Rendering escape sequence. :param text: text content. """ return escape(text) def autolink(self, link, is_email=False): """Rendering a given link or email address. :param link: link content or email address. :param is_email: whether this is an email or not. """ text = link = escape_link(link) if is_email: link = 'mailto:%s' % link return '<a href="%s">%s</a>' % (link, text) def link(self, link, title, text): """Rendering a given link with content and title. :param link: href link for ``<a>`` tag. :param title: title content for `title` attribute. :param text: text content for description. """ link = escape_link(link) if not title: return '<a href="%s">%s</a>' % (link, text) title = escape(title, quote=True) return '<a href="%s" title="%s">%s</a>' % (link, title, text) def image(self, src, title, text): """Rendering a image with title and text. :param src: source link of the image. :param title: title text of the image. :param text: alt text of the image. """ src = escape_link(src) text = escape(text, quote=True) if title: title = escape(title, quote=True) html = '<img src="%s" alt="%s" title="%s"' % (src, text, title) else: html = '<img src="%s" alt="%s"' % (src, text) if self.options.get('use_xhtml'): return '%s />' % html return '%s>' % html def inline_html(self, html): """Rendering span level pure html content. :param html: text content of the html snippet. 
""" if self.options.get('escape'): return escape(html) return html def newline(self): """Rendering newline element.""" return '' def footnote_ref(self, key, index): """Rendering the ref anchor of a footnote. :param key: identity key for the footnote. :param index: the index count of current footnote. """ html = ( '<sup class="footnote-ref" id="fnref-%s">' '<a href="#fn-%s">%d</a></sup>' ) % (escape(key), escape(key), index) return html def footnote_item(self, key, text): """Rendering a footnote item. :param key: identity key for the footnote. :param text: text content of the footnote. """ back = ( '<a href="#fnref-%s" class="footnote">&#8617;</a>' ) % escape(key) text = text.rstrip() if text.endswith('</p>'): text = re.sub(r'<\/p>$', r'%s</p>' % back, text) else: text = '%s<p>%s</p>' % (text, back) html = '<li id="fn-%s">%s</li>\n' % (escape(key), text) return html def footnotes(self, text): """Wrapper for all footnotes. :param text: contents of all footnotes. """ html = '<div class="footnotes">\n%s<ol>%s</ol>\n</div>\n' return html % (self.hrule(), text) class Markdown(object): """The Markdown parser. :param renderer: An instance of ``Renderer``. :param inline: An inline lexer class or instance. :param block: A block lexer class or instance. """ def __init__(self, renderer=None, inline=None, block=None, **kwargs): if not renderer: renderer = Renderer(**kwargs) else: kwargs.update(renderer.options) self.renderer = renderer if inline and inspect.isclass(inline): inline = inline(renderer, **kwargs) if block and inspect.isclass(block): block = block(**kwargs) if inline: self.inline = inline else: self.inline = InlineLexer(renderer, **kwargs) self.block = block or BlockLexer(BlockGrammar()) self.footnotes = [] self.tokens = [] # detect if it should parse text in block html self._parse_block_html = kwargs.get('parse_block_html') def __call__(self, text): return self.parse(text) def render(self, text): """Render the Markdown text. 
:param text: markdown formatted text content. """ return self.parse(text) def parse(self, text): out = self.output(preprocessing(text)) keys = self.block.def_footnotes # reset block self.block.def_links = {} self.block.def_footnotes = {} # reset inline self.inline.links = {} self.inline.footnotes = {} if not self.footnotes: return out footnotes = filter(lambda o: keys.get(o['key']), self.footnotes) self.footnotes = sorted( footnotes, key=lambda o: keys.get(o['key']), reverse=True ) body = self.renderer.placeholder() while self.footnotes: note = self.footnotes.pop() body += self.renderer.footnote_item( note['key'], note['text'] ) out += self.renderer.footnotes(body) return out def pop(self): if not self.tokens: return None self.token = self.tokens.pop() return self.token def peek(self): if self.tokens: return self.tokens[-1] return None # pragma: no cover def output(self, text, rules=None): self.tokens = self.block(text, rules) self.tokens.reverse() self.inline.setup(self.block.def_links, self.block.def_footnotes) out = self.renderer.placeholder() while self.pop(): out += self.tok() return out def tok(self): t = self.token['type'] # sepcial cases if t.endswith('_start'): t = t[:-6] return getattr(self, 'output_%s' % t)() def tok_text(self): text = self.token['text'] while self.peek()['type'] == 'text': text += '\n' + self.pop()['text'] return self.inline(text) def output_newline(self): return self.renderer.newline() def output_hrule(self): return self.renderer.hrule() def output_heading(self): return self.renderer.header( self.inline(self.token['text']), self.token['level'], self.token['text'], ) def output_code(self): return self.renderer.block_code( self.token['text'], self.token['lang'] ) def output_table(self): aligns = self.token['align'] aligns_length = len(aligns) cell = self.renderer.placeholder() # header part header = self.renderer.placeholder() for i, value in enumerate(self.token['header']): align = aligns[i] if i < aligns_length else None flags = 
{'header': True, 'align': align} cell += self.renderer.table_cell(self.inline(value), **flags) header += self.renderer.table_row(cell) # body part body = self.renderer.placeholder() for i, row in enumerate(self.token['cells']): cell = self.renderer.placeholder() for j, value in enumerate(row): align = aligns[j] if j < aligns_length else None flags = {'header': False, 'align': align} cell += self.renderer.table_cell(self.inline(value), **flags) body += self.renderer.table_row(cell) return self.renderer.table(header, body) def output_block_quote(self): body = self.renderer.placeholder() while self.pop()['type'] != 'block_quote_end': body += self.tok() return self.renderer.block_quote(body) def output_list(self): ordered = self.token['ordered'] body = self.renderer.placeholder() while self.pop()['type'] != 'list_end': body += self.tok() return self.renderer.list(body, ordered) def output_list_item(self): body = self.renderer.placeholder() while self.pop()['type'] != 'list_item_end': if self.token['type'] == 'text': body += self.tok_text() else: body += self.tok() return self.renderer.list_item(body) def output_loose_item(self): body = self.renderer.placeholder() while self.pop()['type'] != 'list_item_end': body += self.tok() return self.renderer.list_item(body) def output_footnote(self): self.inline._in_footnote = True body = self.renderer.placeholder() key = self.token['key'] while self.pop()['type'] != 'footnote_end': body += self.tok() self.footnotes.append({'key': key, 'text': body}) self.inline._in_footnote = False return self.renderer.placeholder() def output_close_html(self): text = self.token['text'] return self.renderer.block_html(text) def output_open_html(self): text = self.token['text'] tag = self.token['tag'] if self._parse_block_html and tag not in _pre_tags: text = self.inline(text, rules=self.inline.inline_html_rules) extra = self.token.get('extra') or '' html = '<%s%s>%s</%s>' % (tag, extra, text, tag) return self.renderer.block_html(html) def 
output_paragraph(self): return self.renderer.paragraph(self.inline(self.token['text'])) def output_text(self): return self.renderer.paragraph(self.tok_text()) def markdown(text, escape=True, **kwargs): """Render markdown formatted text to html. :param text: markdown formatted text content. :param escape: if set to False, all html tags will not be escaped. :param use_xhtml: output with xhtml tags. :param hard_wrap: if set to True, it will use the GFM line breaks feature. :param parse_block_html: parse text only in block level html. :param parse_inline_html: parse text only in inline level html. """ return Markdown(escape=escape, **kwargs)(text)
./CrossVul/dataset_final_sorted/CWE-79/py/bad_2940_0
crossvul-python_data_good_3890_2
# Generated by Django 3.0.4 on 2020-04-06 09:46 from django.db import migrations import wagtail.core.blocks import wagtail.core.fields import wagtail.tests.testapp.models class Migration(migrations.Migration): dependencies = [ ('tests', '0046_personpage'), ] operations = [ migrations.AlterField( model_name='streampage', name='body', field=wagtail.core.fields.StreamField([('text', wagtail.core.blocks.CharBlock()), ('rich_text', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.tests.testapp.models.ExtendedImageChooserBlock()), ('product', wagtail.core.blocks.StructBlock([('name', wagtail.core.blocks.CharBlock()), ('price', wagtail.core.blocks.CharBlock())])), ('raw_html', wagtail.core.blocks.RawHTMLBlock())]), ), ]
./CrossVul/dataset_final_sorted/CWE-79/py/good_3890_2
crossvul-python_data_good_2940_0
# coding: utf-8 """ mistune ~~~~~~~ The fastest markdown parser in pure Python with renderer feature. :copyright: (c) 2014 - 2017 by Hsiaoming Yang. """ import re import inspect __version__ = '0.8.1' __author__ = 'Hsiaoming Yang <me@lepture.com>' __all__ = [ 'BlockGrammar', 'BlockLexer', 'InlineGrammar', 'InlineLexer', 'Renderer', 'Markdown', 'markdown', 'escape', ] _key_pattern = re.compile(r'\s+') _nonalpha_pattern = re.compile(r'\W') _escape_pattern = re.compile(r'&(?!#?\w+;)') _newline_pattern = re.compile(r'\r\n|\r') _block_quote_leading_pattern = re.compile(r'^ *> ?', flags=re.M) _block_code_leading_pattern = re.compile(r'^ {4}', re.M) _inline_tags = [ 'a', 'em', 'strong', 'small', 's', 'cite', 'q', 'dfn', 'abbr', 'data', 'time', 'code', 'var', 'samp', 'kbd', 'sub', 'sup', 'i', 'b', 'u', 'mark', 'ruby', 'rt', 'rp', 'bdi', 'bdo', 'span', 'br', 'wbr', 'ins', 'del', 'img', 'font', ] _pre_tags = ['pre', 'script', 'style'] _valid_end = r'(?!:/|[^\w\s@]*@)\b' _valid_attr = r'''\s*[a-zA-Z\-](?:\=(?:"[^"]*"|'[^']*'|[^\s'">]+))?''' _block_tag = r'(?!(?:%s)\b)\w+%s' % ('|'.join(_inline_tags), _valid_end) _scheme_blacklist = ('javascript:', 'vbscript:') def _pure_pattern(regex): pattern = regex.pattern if pattern.startswith('^'): pattern = pattern[1:] return pattern def _keyify(key): key = escape(key.lower(), quote=True) return _key_pattern.sub(' ', key) def escape(text, quote=False, smart_amp=True): """Replace special characters "&", "<" and ">" to HTML-safe sequences. The original cgi.escape will always escape "&", but you can control this one for a smart escape amp. :param quote: if set to True, " and ' will be escaped. :param smart_amp: if set to False, & will always be escaped. 
""" if smart_amp: text = _escape_pattern.sub('&amp;', text) else: text = text.replace('&', '&amp;') text = text.replace('<', '&lt;') text = text.replace('>', '&gt;') if quote: text = text.replace('"', '&quot;') text = text.replace("'", '&#39;') return text def escape_link(url): """Remove dangerous URL schemes like javascript: and escape afterwards.""" lower_url = url.lower().strip('\x00\x1a \n\r\t') for scheme in _scheme_blacklist: if re.sub(r'[^A-Za-z0-9\/:]+', '', lower_url).startswith(scheme): return '' return escape(url, quote=True, smart_amp=False) def preprocessing(text, tab=4): text = _newline_pattern.sub('\n', text) text = text.expandtabs(tab) text = text.replace('\u2424', '\n') pattern = re.compile(r'^ +$', re.M) return pattern.sub('', text) class BlockGrammar(object): """Grammars for block level tokens.""" def_links = re.compile( r'^ *\[([^^\]]+)\]: *' # [key]: r'<?([^\s>]+)>?' # <link> or link r'(?: +["(]([^\n]+)[")])? *(?:\n+|$)' ) def_footnotes = re.compile( r'^\[\^([^\]]+)\]: *(' r'[^\n]*(?:\n+|$)' # [^key]: r'(?: {1,}[^\n]*(?:\n+|$))*' r')' ) newline = re.compile(r'^\n+') block_code = re.compile(r'^( {4}[^\n]+\n*)+') fences = re.compile( r'^ *(`{3,}|~{3,}) *(\S+)? *\n' # ```lang r'([\s\S]+?)\s*' r'\1 *(?:\n+|$)' # ``` ) hrule = re.compile(r'^ {0,3}[-*_](?: *[-*_]){2,} *(?:\n+|$)') heading = re.compile(r'^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)') lheading = re.compile(r'^([^\n]+)\n *(=|-)+ *(?:\n+|$)') block_quote = re.compile(r'^( *>[^\n]+(\n[^\n]+)*\n*)+') list_block = re.compile( r'^( *)([*+-]|\d+\.) [\s\S]+?' r'(?:' r'\n+(?=\1?(?:[-*_] *){3,}(?:\n+|$))' # hrule r'|\n+(?=%s)' # def links r'|\n+(?=%s)' # def footnotes r'|\n{2,}' r'(?! )' r'(?!\1(?:[*+-]|\d+\.) )\n*' r'|' r'\s*$)' % ( _pure_pattern(def_links), _pure_pattern(def_footnotes), ) ) list_item = re.compile( r'^(( *)(?:[*+-]|\d+\.) [^\n]*' r'(?:\n(?!\2(?:[*+-]|\d+\.) )[^\n]*)*)', flags=re.M ) list_bullet = re.compile(r'^ *(?:[*+-]|\d+\.) +') paragraph = re.compile( r'^((?:[^\n]+\n?(?!' 
r'%s|%s|%s|%s|%s|%s|%s|%s|%s' r'))+)\n*' % ( _pure_pattern(fences).replace(r'\1', r'\2'), _pure_pattern(list_block).replace(r'\1', r'\3'), _pure_pattern(hrule), _pure_pattern(heading), _pure_pattern(lheading), _pure_pattern(block_quote), _pure_pattern(def_links), _pure_pattern(def_footnotes), '<' + _block_tag, ) ) block_html = re.compile( r'^ *(?:%s|%s|%s) *(?:\n{2,}|\s*$)' % ( r'<!--[\s\S]*?-->', r'<(%s)((?:%s)*?)>([\s\S]*?)<\/\1>' % (_block_tag, _valid_attr), r'<%s(?:%s)*?\s*\/?>' % (_block_tag, _valid_attr), ) ) table = re.compile( r'^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*' ) nptable = re.compile( r'^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*' ) text = re.compile(r'^[^\n]+') class BlockLexer(object): """Block level lexer for block grammars.""" grammar_class = BlockGrammar default_rules = [ 'newline', 'hrule', 'block_code', 'fences', 'heading', 'nptable', 'lheading', 'block_quote', 'list_block', 'block_html', 'def_links', 'def_footnotes', 'table', 'paragraph', 'text' ] list_rules = ( 'newline', 'block_code', 'fences', 'lheading', 'hrule', 'block_quote', 'list_block', 'block_html', 'text', ) footnote_rules = ( 'newline', 'block_code', 'fences', 'heading', 'nptable', 'lheading', 'hrule', 'block_quote', 'list_block', 'block_html', 'table', 'paragraph', 'text' ) def __init__(self, rules=None, **kwargs): self.tokens = [] self.def_links = {} self.def_footnotes = {} if not rules: rules = self.grammar_class() self.rules = rules def __call__(self, text, rules=None): return self.parse(text, rules) def parse(self, text, rules=None): text = text.rstrip('\n') if not rules: rules = self.default_rules def manipulate(text): for key in rules: rule = getattr(self.rules, key) m = rule.match(text) if not m: continue getattr(self, 'parse_%s' % key)(m) return m return False # pragma: no cover while text: m = manipulate(text) if m is not False: text = text[len(m.group(0)):] continue if text: # pragma: no cover raise RuntimeError('Infinite loop at: 
%s' % text) return self.tokens def parse_newline(self, m): length = len(m.group(0)) if length > 1: self.tokens.append({'type': 'newline'}) def parse_block_code(self, m): # clean leading whitespace code = _block_code_leading_pattern.sub('', m.group(0)) self.tokens.append({ 'type': 'code', 'lang': None, 'text': code, }) def parse_fences(self, m): self.tokens.append({ 'type': 'code', 'lang': m.group(2), 'text': m.group(3), }) def parse_heading(self, m): self.tokens.append({ 'type': 'heading', 'level': len(m.group(1)), 'text': m.group(2), }) def parse_lheading(self, m): """Parse setext heading.""" self.tokens.append({ 'type': 'heading', 'level': 1 if m.group(2) == '=' else 2, 'text': m.group(1), }) def parse_hrule(self, m): self.tokens.append({'type': 'hrule'}) def parse_list_block(self, m): bull = m.group(2) self.tokens.append({ 'type': 'list_start', 'ordered': '.' in bull, }) cap = m.group(0) self._process_list_item(cap, bull) self.tokens.append({'type': 'list_end'}) def _process_list_item(self, cap, bull): cap = self.rules.list_item.findall(cap) _next = False length = len(cap) for i in range(length): item = cap[i][0] # remove the bullet space = len(item) item = self.rules.list_bullet.sub('', item) # outdent if '\n ' in item: space = space - len(item) pattern = re.compile(r'^ {1,%d}' % space, flags=re.M) item = pattern.sub('', item) # determine whether item is loose or not loose = _next if not loose and re.search(r'\n\n(?!\s*$)', item): loose = True rest = len(item) if i != length - 1 and rest: _next = item[rest-1] == '\n' if not loose: loose = _next if loose: t = 'loose_item_start' else: t = 'list_item_start' self.tokens.append({'type': t}) # recurse self.parse(item, self.list_rules) self.tokens.append({'type': 'list_item_end'}) def parse_block_quote(self, m): self.tokens.append({'type': 'block_quote_start'}) # clean leading > cap = _block_quote_leading_pattern.sub('', m.group(0)) self.parse(cap) self.tokens.append({'type': 'block_quote_end'}) def 
parse_def_links(self, m): key = _keyify(m.group(1)) self.def_links[key] = { 'link': m.group(2), 'title': m.group(3), } def parse_def_footnotes(self, m): key = _keyify(m.group(1)) if key in self.def_footnotes: # footnote is already defined return self.def_footnotes[key] = 0 self.tokens.append({ 'type': 'footnote_start', 'key': key, }) text = m.group(2) if '\n' in text: lines = text.split('\n') whitespace = None for line in lines[1:]: space = len(line) - len(line.lstrip()) if space and (not whitespace or space < whitespace): whitespace = space newlines = [lines[0]] for line in lines[1:]: newlines.append(line[whitespace:]) text = '\n'.join(newlines) self.parse(text, self.footnote_rules) self.tokens.append({ 'type': 'footnote_end', 'key': key, }) def parse_table(self, m): item = self._process_table(m) cells = re.sub(r'(?: *\| *)?\n$', '', m.group(3)) cells = cells.split('\n') for i, v in enumerate(cells): v = re.sub(r'^ *\| *| *\| *$', '', v) cells[i] = re.split(r' *\| *', v) item['cells'] = cells self.tokens.append(item) def parse_nptable(self, m): item = self._process_table(m) cells = re.sub(r'\n$', '', m.group(3)) cells = cells.split('\n') for i, v in enumerate(cells): cells[i] = re.split(r' *\| *', v) item['cells'] = cells self.tokens.append(item) def _process_table(self, m): header = re.sub(r'^ *| *\| *$', '', m.group(1)) header = re.split(r' *\| *', header) align = re.sub(r' *|\| *$', '', m.group(2)) align = re.split(r' *\| *', align) for i, v in enumerate(align): if re.search(r'^ *-+: *$', v): align[i] = 'right' elif re.search(r'^ *:-+: *$', v): align[i] = 'center' elif re.search(r'^ *:-+ *$', v): align[i] = 'left' else: align[i] = None item = { 'type': 'table', 'header': header, 'align': align, } return item def parse_block_html(self, m): tag = m.group(1) if not tag: text = m.group(0) self.tokens.append({ 'type': 'close_html', 'text': text }) else: attr = m.group(2) text = m.group(3) self.tokens.append({ 'type': 'open_html', 'tag': tag, 'extra': attr, 'text': 
text }) def parse_paragraph(self, m): text = m.group(1).rstrip('\n') self.tokens.append({'type': 'paragraph', 'text': text}) def parse_text(self, m): text = m.group(0) self.tokens.append({'type': 'text', 'text': text}) class InlineGrammar(object): """Grammars for inline level tokens.""" escape = re.compile(r'^\\([\\`*{}\[\]()#+\-.!_>~|])') # \* \+ \! .... inline_html = re.compile( r'^(?:%s|%s|%s)' % ( r'<!--[\s\S]*?-->', r'<(\w+%s)((?:%s)*?)\s*>([\s\S]*?)<\/\1>' % ( _valid_end, _valid_attr), r'<\w+%s(?:%s)*?\s*\/?>' % (_valid_end, _valid_attr), ) ) autolink = re.compile(r'^<([^ >]+(@|:)[^ >]+)>') link = re.compile( r'^!?\[(' r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*' r')\]\(' r'''\s*(<)?([\s\S]*?)(?(2)>)(?:\s+['"]([\s\S]*?)['"])?\s*''' r'\)' ) reflink = re.compile( r'^!?\[(' r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*' r')\]\s*\[([^^\]]*)\]' ) nolink = re.compile(r'^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]') url = re.compile(r'''^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])''') double_emphasis = re.compile( r'^_{2}([\s\S]+?)_{2}(?!_)' # __word__ r'|' r'^\*{2}([\s\S]+?)\*{2}(?!\*)' # **word** ) emphasis = re.compile( r'^\b_((?:__|[^_])+?)_\b' # _word_ r'|' r'^\*((?:\*\*|[^\*])+?)\*(?!\*)' # *word* ) code = re.compile(r'^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)') # `code` linebreak = re.compile(r'^ {2,}\n(?!\s*$)') strikethrough = re.compile(r'^~~(?=\S)([\s\S]*?\S)~~') # ~~word~~ footnote = re.compile(r'^\[\^([^\]]+)\]') text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| {2,}\n|$)') def hard_wrap(self): """Grammar for hard wrap linebreak. You don't need to add two spaces at the end of a line. 
""" self.linebreak = re.compile(r'^ *\n(?!\s*$)') self.text = re.compile( r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| *\n|$)' ) class InlineLexer(object): """Inline level lexer for inline grammars.""" grammar_class = InlineGrammar default_rules = [ 'escape', 'inline_html', 'autolink', 'url', 'footnote', 'link', 'reflink', 'nolink', 'double_emphasis', 'emphasis', 'code', 'linebreak', 'strikethrough', 'text', ] inline_html_rules = [ 'escape', 'autolink', 'url', 'link', 'reflink', 'nolink', 'double_emphasis', 'emphasis', 'code', 'linebreak', 'strikethrough', 'text', ] def __init__(self, renderer, rules=None, **kwargs): self.renderer = renderer self.links = {} self.footnotes = {} self.footnote_index = 0 if not rules: rules = self.grammar_class() kwargs.update(self.renderer.options) if kwargs.get('hard_wrap'): rules.hard_wrap() self.rules = rules self._in_link = False self._in_footnote = False self._parse_inline_html = kwargs.get('parse_inline_html') def __call__(self, text, rules=None): return self.output(text, rules) def setup(self, links, footnotes): self.footnote_index = 0 self.links = links or {} self.footnotes = footnotes or {} def output(self, text, rules=None): text = text.rstrip('\n') if not rules: rules = list(self.default_rules) if self._in_footnote and 'footnote' in rules: rules.remove('footnote') output = self.renderer.placeholder() def manipulate(text): for key in rules: pattern = getattr(self.rules, key) m = pattern.match(text) if not m: continue self.line_match = m out = getattr(self, 'output_%s' % key)(m) if out is not None: return m, out return False # pragma: no cover while text: ret = manipulate(text) if ret is not False: m, out = ret output += out text = text[len(m.group(0)):] continue if text: # pragma: no cover raise RuntimeError('Infinite loop at: %s' % text) return output def output_escape(self, m): text = m.group(1) return self.renderer.escape(text) def output_autolink(self, m): link = m.group(1) if m.group(2) == '@': is_email = True else: is_email = 
False return self.renderer.autolink(link, is_email) def output_url(self, m): link = m.group(1) if self._in_link: return self.renderer.text(link) return self.renderer.autolink(link, False) def output_inline_html(self, m): tag = m.group(1) if self._parse_inline_html and tag in _inline_tags: text = m.group(3) if tag == 'a': self._in_link = True text = self.output(text, rules=self.inline_html_rules) self._in_link = False else: text = self.output(text, rules=self.inline_html_rules) extra = m.group(2) or '' html = '<%s%s>%s</%s>' % (tag, extra, text, tag) else: html = m.group(0) return self.renderer.inline_html(html) def output_footnote(self, m): key = _keyify(m.group(1)) if key not in self.footnotes: return None if self.footnotes[key]: return None self.footnote_index += 1 self.footnotes[key] = self.footnote_index return self.renderer.footnote_ref(key, self.footnote_index) def output_link(self, m): return self._process_link(m, m.group(3), m.group(4)) def output_reflink(self, m): key = _keyify(m.group(2) or m.group(1)) if key not in self.links: return None ret = self.links[key] return self._process_link(m, ret['link'], ret['title']) def output_nolink(self, m): key = _keyify(m.group(1)) if key not in self.links: return None ret = self.links[key] return self._process_link(m, ret['link'], ret['title']) def _process_link(self, m, link, title=None): line = m.group(0) text = m.group(1) if line[0] == '!': return self.renderer.image(link, title, text) self._in_link = True text = self.output(text) self._in_link = False return self.renderer.link(link, title, text) def output_double_emphasis(self, m): text = m.group(2) or m.group(1) text = self.output(text) return self.renderer.double_emphasis(text) def output_emphasis(self, m): text = m.group(2) or m.group(1) text = self.output(text) return self.renderer.emphasis(text) def output_code(self, m): text = m.group(2) return self.renderer.codespan(text) def output_linebreak(self, m): return self.renderer.linebreak() def 
output_strikethrough(self, m): text = self.output(m.group(1)) return self.renderer.strikethrough(text) def output_text(self, m): text = m.group(0) return self.renderer.text(text) class Renderer(object): """The default HTML renderer for rendering Markdown. """ def __init__(self, **kwargs): self.options = kwargs def placeholder(self): """Returns the default, empty output value for the renderer. All renderer methods use the '+=' operator to append to this value. Default is a string so rendering HTML can build up a result string with the rendered Markdown. Can be overridden by Renderer subclasses to be types like an empty list, allowing the renderer to create a tree-like structure to represent the document (which can then be reprocessed later into a separate format like docx or pdf). """ return '' def block_code(self, code, lang=None): """Rendering block level code. ``pre > code``. :param code: text content of the code block. :param lang: language of the given code. """ code = code.rstrip('\n') if not lang: code = escape(code, smart_amp=False) return '<pre><code>%s\n</code></pre>\n' % code code = escape(code, quote=True, smart_amp=False) return '<pre><code class="lang-%s">%s\n</code></pre>\n' % (lang, code) def block_quote(self, text): """Rendering <blockquote> with the given text. :param text: text content of the blockquote. """ return '<blockquote>%s\n</blockquote>\n' % text.rstrip('\n') def block_html(self, html): """Rendering block level pure html content. :param html: text content of the html snippet. """ if self.options.get('skip_style') and \ html.lower().startswith('<style'): return '' if self.options.get('escape'): return escape(html) return html def header(self, text, level, raw=None): """Rendering header/heading tags like ``<h1>`` ``<h2>``. :param text: rendered text content for the header. :param level: a number for the header level, for example: 1. :param raw: raw text content of the header. 
""" return '<h%d>%s</h%d>\n' % (level, text, level) def hrule(self): """Rendering method for ``<hr>`` tag.""" if self.options.get('use_xhtml'): return '<hr />\n' return '<hr>\n' def list(self, body, ordered=True): """Rendering list tags like ``<ul>`` and ``<ol>``. :param body: body contents of the list. :param ordered: whether this list is ordered or not. """ tag = 'ul' if ordered: tag = 'ol' return '<%s>\n%s</%s>\n' % (tag, body, tag) def list_item(self, text): """Rendering list item snippet. Like ``<li>``.""" return '<li>%s</li>\n' % text def paragraph(self, text): """Rendering paragraph tags. Like ``<p>``.""" return '<p>%s</p>\n' % text.strip(' ') def table(self, header, body): """Rendering table element. Wrap header and body in it. :param header: header part of the table. :param body: body part of the table. """ return ( '<table>\n<thead>%s</thead>\n' '<tbody>\n%s</tbody>\n</table>\n' ) % (header, body) def table_row(self, content): """Rendering a table row. Like ``<tr>``. :param content: content of current table row. """ return '<tr>\n%s</tr>\n' % content def table_cell(self, content, **flags): """Rendering a table cell. Like ``<th>`` ``<td>``. :param content: content of current table cell. :param header: whether this is header or not. :param align: align of current table cell. """ if flags['header']: tag = 'th' else: tag = 'td' align = flags['align'] if not align: return '<%s>%s</%s>\n' % (tag, content, tag) return '<%s style="text-align:%s">%s</%s>\n' % ( tag, align, content, tag ) def double_emphasis(self, text): """Rendering **strong** text. :param text: text content for emphasis. """ return '<strong>%s</strong>' % text def emphasis(self, text): """Rendering *emphasis* text. :param text: text content for emphasis. """ return '<em>%s</em>' % text def codespan(self, text): """Rendering inline `code` text. :param text: text content for inline code. 
""" text = escape(text.rstrip(), smart_amp=False) return '<code>%s</code>' % text def linebreak(self): """Rendering line break like ``<br>``.""" if self.options.get('use_xhtml'): return '<br />\n' return '<br>\n' def strikethrough(self, text): """Rendering ~~strikethrough~~ text. :param text: text content for strikethrough. """ return '<del>%s</del>' % text def text(self, text): """Rendering unformatted text. :param text: text content. """ if self.options.get('parse_block_html'): return text return escape(text) def escape(self, text): """Rendering escape sequence. :param text: text content. """ return escape(text) def autolink(self, link, is_email=False): """Rendering a given link or email address. :param link: link content or email address. :param is_email: whether this is an email or not. """ text = link = escape_link(link) if is_email: link = 'mailto:%s' % link return '<a href="%s">%s</a>' % (link, text) def link(self, link, title, text): """Rendering a given link with content and title. :param link: href link for ``<a>`` tag. :param title: title content for `title` attribute. :param text: text content for description. """ link = escape_link(link) if not title: return '<a href="%s">%s</a>' % (link, text) title = escape(title, quote=True) return '<a href="%s" title="%s">%s</a>' % (link, title, text) def image(self, src, title, text): """Rendering a image with title and text. :param src: source link of the image. :param title: title text of the image. :param text: alt text of the image. """ src = escape_link(src) text = escape(text, quote=True) if title: title = escape(title, quote=True) html = '<img src="%s" alt="%s" title="%s"' % (src, text, title) else: html = '<img src="%s" alt="%s"' % (src, text) if self.options.get('use_xhtml'): return '%s />' % html return '%s>' % html def inline_html(self, html): """Rendering span level pure html content. :param html: text content of the html snippet. 
""" if self.options.get('escape'): return escape(html) return html def newline(self): """Rendering newline element.""" return '' def footnote_ref(self, key, index): """Rendering the ref anchor of a footnote. :param key: identity key for the footnote. :param index: the index count of current footnote. """ html = ( '<sup class="footnote-ref" id="fnref-%s">' '<a href="#fn-%s">%d</a></sup>' ) % (escape(key), escape(key), index) return html def footnote_item(self, key, text): """Rendering a footnote item. :param key: identity key for the footnote. :param text: text content of the footnote. """ back = ( '<a href="#fnref-%s" class="footnote">&#8617;</a>' ) % escape(key) text = text.rstrip() if text.endswith('</p>'): text = re.sub(r'<\/p>$', r'%s</p>' % back, text) else: text = '%s<p>%s</p>' % (text, back) html = '<li id="fn-%s">%s</li>\n' % (escape(key), text) return html def footnotes(self, text): """Wrapper for all footnotes. :param text: contents of all footnotes. """ html = '<div class="footnotes">\n%s<ol>%s</ol>\n</div>\n' return html % (self.hrule(), text) class Markdown(object): """The Markdown parser. :param renderer: An instance of ``Renderer``. :param inline: An inline lexer class or instance. :param block: A block lexer class or instance. """ def __init__(self, renderer=None, inline=None, block=None, **kwargs): if not renderer: renderer = Renderer(**kwargs) else: kwargs.update(renderer.options) self.renderer = renderer if inline and inspect.isclass(inline): inline = inline(renderer, **kwargs) if block and inspect.isclass(block): block = block(**kwargs) if inline: self.inline = inline else: self.inline = InlineLexer(renderer, **kwargs) self.block = block or BlockLexer(BlockGrammar()) self.footnotes = [] self.tokens = [] # detect if it should parse text in block html self._parse_block_html = kwargs.get('parse_block_html') def __call__(self, text): return self.parse(text) def render(self, text): """Render the Markdown text. 
:param text: markdown formatted text content. """ return self.parse(text) def parse(self, text): out = self.output(preprocessing(text)) keys = self.block.def_footnotes # reset block self.block.def_links = {} self.block.def_footnotes = {} # reset inline self.inline.links = {} self.inline.footnotes = {} if not self.footnotes: return out footnotes = filter(lambda o: keys.get(o['key']), self.footnotes) self.footnotes = sorted( footnotes, key=lambda o: keys.get(o['key']), reverse=True ) body = self.renderer.placeholder() while self.footnotes: note = self.footnotes.pop() body += self.renderer.footnote_item( note['key'], note['text'] ) out += self.renderer.footnotes(body) return out def pop(self): if not self.tokens: return None self.token = self.tokens.pop() return self.token def peek(self): if self.tokens: return self.tokens[-1] return None # pragma: no cover def output(self, text, rules=None): self.tokens = self.block(text, rules) self.tokens.reverse() self.inline.setup(self.block.def_links, self.block.def_footnotes) out = self.renderer.placeholder() while self.pop(): out += self.tok() return out def tok(self): t = self.token['type'] # sepcial cases if t.endswith('_start'): t = t[:-6] return getattr(self, 'output_%s' % t)() def tok_text(self): text = self.token['text'] while self.peek()['type'] == 'text': text += '\n' + self.pop()['text'] return self.inline(text) def output_newline(self): return self.renderer.newline() def output_hrule(self): return self.renderer.hrule() def output_heading(self): return self.renderer.header( self.inline(self.token['text']), self.token['level'], self.token['text'], ) def output_code(self): return self.renderer.block_code( self.token['text'], self.token['lang'] ) def output_table(self): aligns = self.token['align'] aligns_length = len(aligns) cell = self.renderer.placeholder() # header part header = self.renderer.placeholder() for i, value in enumerate(self.token['header']): align = aligns[i] if i < aligns_length else None flags = 
{'header': True, 'align': align} cell += self.renderer.table_cell(self.inline(value), **flags) header += self.renderer.table_row(cell) # body part body = self.renderer.placeholder() for i, row in enumerate(self.token['cells']): cell = self.renderer.placeholder() for j, value in enumerate(row): align = aligns[j] if j < aligns_length else None flags = {'header': False, 'align': align} cell += self.renderer.table_cell(self.inline(value), **flags) body += self.renderer.table_row(cell) return self.renderer.table(header, body) def output_block_quote(self): body = self.renderer.placeholder() while self.pop()['type'] != 'block_quote_end': body += self.tok() return self.renderer.block_quote(body) def output_list(self): ordered = self.token['ordered'] body = self.renderer.placeholder() while self.pop()['type'] != 'list_end': body += self.tok() return self.renderer.list(body, ordered) def output_list_item(self): body = self.renderer.placeholder() while self.pop()['type'] != 'list_item_end': if self.token['type'] == 'text': body += self.tok_text() else: body += self.tok() return self.renderer.list_item(body) def output_loose_item(self): body = self.renderer.placeholder() while self.pop()['type'] != 'list_item_end': body += self.tok() return self.renderer.list_item(body) def output_footnote(self): self.inline._in_footnote = True body = self.renderer.placeholder() key = self.token['key'] while self.pop()['type'] != 'footnote_end': body += self.tok() self.footnotes.append({'key': key, 'text': body}) self.inline._in_footnote = False return self.renderer.placeholder() def output_close_html(self): text = self.token['text'] return self.renderer.block_html(text) def output_open_html(self): text = self.token['text'] tag = self.token['tag'] if self._parse_block_html and tag not in _pre_tags: text = self.inline(text, rules=self.inline.inline_html_rules) extra = self.token.get('extra') or '' html = '<%s%s>%s</%s>' % (tag, extra, text, tag) return self.renderer.block_html(html) def 
output_paragraph(self): return self.renderer.paragraph(self.inline(self.token['text'])) def output_text(self): return self.renderer.paragraph(self.tok_text()) def markdown(text, escape=True, **kwargs): """Render markdown formatted text to html. :param text: markdown formatted text content. :param escape: if set to False, all html tags will not be escaped. :param use_xhtml: output with xhtml tags. :param hard_wrap: if set to True, it will use the GFM line breaks feature. :param parse_block_html: parse text only in block level html. :param parse_inline_html: parse text only in inline level html. """ return Markdown(escape=escape, **kwargs)(text)
./CrossVul/dataset_final_sorted/CWE-79/py/good_2940_0
crossvul-python_data_good_1735_1
from Products.CMFCore.URLTool import URLTool as BaseTool from Products.CMFCore.utils import getToolByName from AccessControl import ClassSecurityInfo from App.class_init import InitializeClass from Products.CMFPlone.PloneBaseTool import PloneBaseTool from posixpath import normpath from urlparse import urlparse, urljoin import re class URLTool(PloneBaseTool, BaseTool): meta_type = 'Plone URL Tool' security = ClassSecurityInfo() toolicon = 'skins/plone_images/link_icon.png' security.declarePublic('isURLInPortal') def isURLInPortal(self, url, context=None): """ Check if a given url is on the same host and contains the portal path. Used to ensure that login forms can determine relevant referrers (i.e. in portal). Also return true for some relative urls if context is passed in to allow for url parsing. When context is not provided, assume that relative urls are in the portal. It is assumed that http://portal is the same portal as https://portal. External sites listed in 'allow_external_login_sites' of site_properties are also considered within the portal to allow for single sign on. 
""" # sanitize url url = re.sub('^[\x00-\x20]+', '', url).strip() if ('<script' in url or '%3Cscript' in url or 'javascript:' in url or 'javascript%3A' in url): return False p_url = self() _, u_host, u_path, _, _, _ = urlparse(url) if not u_host and not u_path.startswith('/'): if context is None: return True # old behavior if not context.isPrincipiaFolderish: useurl = context.aq_parent.absolute_url() else: useurl = context.absolute_url() else: useurl = p_url # when u_path.startswith('/') if not useurl.endswith('/'): useurl += '/' # urljoin to current url to get an absolute path _, u_host, u_path, _, _, _ = urlparse(urljoin(useurl, url)) # normalise to end with a '/' so /foobar is not considered within /foo if not u_path: u_path = '/' else: u_path = normpath(u_path) if not u_path.endswith('/'): u_path += '/' _, host, path, _, _, _ = urlparse(p_url) if not path.endswith('/'): path += '/' if host == u_host and u_path.startswith(path): return True props = getToolByName(self, 'portal_properties').site_properties for external_site in props.getProperty('allow_external_login_sites', []): _, host, path, _, _, _ = urlparse(external_site) if not path.endswith('/'): path += '/' if host == u_host and u_path.startswith(path): return True return False URLTool.__doc__ = BaseTool.__doc__ InitializeClass(URLTool)
./CrossVul/dataset_final_sorted/CWE-79/py/good_1735_1
crossvul-python_data_bad_5788_0
import os import re from django.conf import global_settings, settings from django.contrib.sites.models import Site, RequestSite from django.contrib.auth.models import User from django.core import mail from django.core.exceptions import SuspiciousOperation from django.core.urlresolvers import reverse, NoReverseMatch from django.http import QueryDict, HttpRequest from django.utils.encoding import force_text from django.utils.html import escape from django.utils.http import urlquote from django.utils._os import upath from django.test import TestCase from django.test.utils import override_settings from django.middleware.csrf import CsrfViewMiddleware from django.contrib.sessions.middleware import SessionMiddleware from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm, SetPasswordForm, PasswordResetForm) from django.contrib.auth.tests.utils import skipIfCustomUser from django.contrib.auth.views import login as login_view @override_settings( LANGUAGES=( ('en', 'English'), ), LANGUAGE_CODE='en', TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS, TEMPLATE_DIRS=( os.path.join(os.path.dirname(upath(__file__)), 'templates'), ), USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',), ) class AuthViewsTestCase(TestCase): """ Helper base class for all the follow test cases. 
""" fixtures = ['authtestdata.json'] urls = 'django.contrib.auth.tests.urls' def login(self, password='password'): response = self.client.post('/login/', { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL)) self.assertTrue(SESSION_KEY in self.client.session) def assertContainsEscaped(self, response, text, **kwargs): return self.assertContains(response, escape(force_text(text)), **kwargs) @skipIfCustomUser class AuthViewNamedURLTests(AuthViewsTestCase): urls = 'django.contrib.auth.urls' def test_named_urls(self): "Named URLs should be reversible" expected_named_urls = [ ('login', [], {}), ('logout', [], {}), ('password_change', [], {}), ('password_change_done', [], {}), ('password_reset', [], {}), ('password_reset_done', [], {}), ('password_reset_confirm', [], { 'uidb36': 'aaaaaaa', 'token': '1111-aaaaa', }), ('password_reset_complete', [], {}), ] for name, args, kwargs in expected_named_urls: try: reverse(name, args=args, kwargs=kwargs) except NoReverseMatch: self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name) @skipIfCustomUser class PasswordResetTest(AuthViewsTestCase): def test_email_not_found(self): "Error is raised if the provided email address isn't currently registered" response = self.client.get('/password_reset/') self.assertEqual(response.status_code, 200) response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'}) self.assertContainsEscaped(response, PasswordResetForm.error_messages['unknown']) self.assertEqual(len(mail.outbox), 0) def test_email_found(self): "Email is sent if a valid email address is provided for password reset" response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertTrue("http://" in mail.outbox[0].body) 
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email) def test_email_found_custom_from(self): "Email is sent if a valid email address is provided for password reset when a custom from_email is provided." response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertEqual("staffmember@example.com", mail.outbox[0].from_email) @override_settings(ALLOWED_HOSTS=['adminsite.com']) def test_admin_reset(self): "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override." response = self.client.post('/admin_password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='adminsite.com' ) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertTrue("http://adminsite.com" in mail.outbox[0].body) self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email) # Skip any 500 handler action (like sending more mail...) @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host(self): "Poisoned HTTP_HOST headers can't be used for reset emails" # This attack is based on the way browsers handle URLs. The colon # should be used to separate the port, but if the URL contains an @, # the colon is interpreted as part of a username for login purposes, # making 'evil.com' the request domain. Since HTTP_HOST is used to # produce a meaningful reset URL, we need to be certain that the # HTTP_HOST header isn't poisoned. This is done as a check when get_host() # is invoked, but we check here as a practical consequence. with self.assertRaises(SuspiciousOperation): self.client.post('/password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='www.example:dr.frankenstein@evil.tld' ) self.assertEqual(len(mail.outbox), 0) # Skip any 500 handler action (like sending more mail...) 
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host_admin_site(self): "Poisoned HTTP_HOST headers can't be used for reset emails on admin views" with self.assertRaises(SuspiciousOperation): self.client.post('/admin_password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='www.example:dr.frankenstein@evil.tld' ) self.assertEqual(len(mail.outbox), 0) def _test_confirm_start(self): # Start by creating the email response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) return self._read_signup_email(mail.outbox[0]) def _read_signup_email(self, email): urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body) self.assertTrue(urlmatch is not None, "No URL found in sent email") return urlmatch.group(), urlmatch.groups()[0] def test_confirm_valid(self): url, path = self._test_confirm_start() response = self.client.get(path) # redirect to a 'complete' page: self.assertContains(response, "Please enter your new password") def test_confirm_invalid(self): url, path = self._test_confirm_start() # Let's munge the token in the path, but keep the same length, # in case the URLconf will reject a different length. path = path[:-5] + ("0" * 4) + path[-1] response = self.client.get(path) self.assertContains(response, "The password reset link was invalid") def test_confirm_invalid_user(self): # Ensure that we get a 200 response for a non-existant user, not a 404 response = self.client.get('/reset/123456-1-1/') self.assertContains(response, "The password reset link was invalid") def test_confirm_overflow_user(self): # Ensure that we get a 200 response for a base36 user id that overflows int response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/') self.assertContains(response, "The password reset link was invalid") def test_confirm_invalid_post(self): # Same as test_confirm_invalid, but trying # to do a POST instead. 
url, path = self._test_confirm_start() path = path[:-5] + ("0" * 4) + path[-1] self.client.post(path, { 'new_password1': 'anewpassword', 'new_password2': ' anewpassword', }) # Check the password has not been changed u = User.objects.get(email='staffmember@example.com') self.assertTrue(not u.check_password("anewpassword")) def test_confirm_complete(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'}) # It redirects us to a 'complete' page: self.assertEqual(response.status_code, 302) # Check the password has been changed u = User.objects.get(email='staffmember@example.com') self.assertTrue(u.check_password("anewpassword")) # Check we can't use the link again response = self.client.get(path) self.assertContains(response, "The password reset link was invalid") def test_confirm_different_passwords(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'x'}) self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch']) @override_settings(AUTH_USER_MODEL='auth.CustomUser') class CustomUserPasswordResetTest(AuthViewsTestCase): fixtures = ['custom_user.json'] def _test_confirm_start(self): # Start by creating the email response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) return self._read_signup_email(mail.outbox[0]) def _read_signup_email(self, email): urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body) self.assertTrue(urlmatch is not None, "No URL found in sent email") return urlmatch.group(), urlmatch.groups()[0] def test_confirm_valid_custom_user(self): url, path = self._test_confirm_start() response = self.client.get(path) # redirect to a 'complete' page: self.assertContains(response, "Please enter your new password") @skipIfCustomUser class 
ChangePasswordTest(AuthViewsTestCase): def fail_login(self, password='password'): response = self.client.post('/login/', { 'username': 'testclient', 'password': password, }) self.assertContainsEscaped(response, AuthenticationForm.error_messages['invalid_login'] % { 'username': User._meta.get_field('username').verbose_name }) def logout(self): response = self.client.get('/logout/') def test_password_change_fails_with_invalid_old_password(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'donuts', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertContainsEscaped(response, PasswordChangeForm.error_messages['password_incorrect']) def test_password_change_fails_with_mismatched_passwords(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'donuts', }) self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch']) def test_password_change_succeeds(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/password_change/done/')) self.fail_login() self.login(password='password1') def test_password_change_done_succeeds(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/password_change/done/')) def test_password_change_done_fails(self): with self.settings(LOGIN_URL='/login/'): response = self.client.get('/password_change/done/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/?next=/password_change/done/')) @skipIfCustomUser class 
LoginTest(AuthViewsTestCase): def test_current_site_in_context_after_login(self): response = self.client.get(reverse('django.contrib.auth.views.login')) self.assertEqual(response.status_code, 200) if Site._meta.installed: site = Site.objects.get_current() self.assertEqual(response.context['site'], site) self.assertEqual(response.context['site_name'], site.name) else: self.assertIsInstance(response.context['site'], RequestSite) self.assertTrue(isinstance(response.context['form'], AuthenticationForm), 'Login form is not an AuthenticationForm') def test_security_check(self, password='password'): login_url = reverse('django.contrib.auth.views.login') # Those URLs should not pass the security check for bad_url in ('http://example.com', 'https://example.com', 'ftp://exampel.com', '//example.com'): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': urlquote(bad_url), } response = self.client.post(nasty_url, { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertFalse(bad_url in response['Location'], "%s should be blocked" % bad_url) # These URLs *should* still pass the security check for good_url in ('/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://exampel.com', 'view/?param=//example.com', 'https:///', '//testserver/', '/url%20with%20spaces/'): # see ticket #12534 safe_url = '%(url)s?%(next)s=%(good_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'good_url': urlquote(good_url), } response = self.client.post(safe_url, { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertTrue(good_url in response['Location'], "%s should be allowed" % good_url) def test_login_csrf_rotate(self, password='password'): """ Makes sure that a login rotates the currently-used CSRF token. 
""" # Do a GET to establish a CSRF token # TestClient isn't used here as we're testing middleware, essentially. req = HttpRequest() CsrfViewMiddleware().process_view(req, login_view, (), {}) req.META["CSRF_COOKIE_USED"] = True resp = login_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None) token1 = csrf_cookie.coded_value # Prepare the POST request req = HttpRequest() req.COOKIES[settings.CSRF_COOKIE_NAME] = token1 req.method = "POST" req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1} req.REQUEST = req.POST # Use POST request to log in SessionMiddleware().process_request(req) CsrfViewMiddleware().process_view(req, login_view, (), {}) req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view req.META["SERVER_PORT"] = 80 req.META["CSRF_COOKIE_USED"] = True resp = login_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None) token2 = csrf_cookie.coded_value # Check the CSRF token switched self.assertNotEqual(token1, token2) @skipIfCustomUser class LoginURLSettings(AuthViewsTestCase): def setUp(self): super(LoginURLSettings, self).setUp() self.old_LOGIN_URL = settings.LOGIN_URL def tearDown(self): super(LoginURLSettings, self).tearDown() settings.LOGIN_URL = self.old_LOGIN_URL def get_login_required_url(self, login_url): settings.LOGIN_URL = login_url response = self.client.get('/login_required/') self.assertEqual(response.status_code, 302) return response['Location'] def test_standard_login_url(self): login_url = '/login/' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = '/login_required/' self.assertEqual(login_required_url, 'http://testserver%s?%s' % (login_url, querystring.urlencode('/'))) def test_remote_login_url(self): login_url = 'http://remote.example.com/login' 
login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) def test_https_login_url(self): login_url = 'https:///login/' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) def test_login_url_with_querystring(self): login_url = '/login/?pretty=1' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('pretty=1', mutable=True) querystring['next'] = '/login_required/' self.assertEqual(login_required_url, 'http://testserver/login/?%s' % querystring.urlencode('/')) def test_remote_login_url_with_next_querystring(self): login_url = 'http://remote.example.com/login/' login_required_url = self.get_login_required_url('%s?next=/default/' % login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) @skipIfCustomUser class LogoutTest(AuthViewsTestCase): def confirm_logged_out(self): self.assertTrue(SESSION_KEY not in self.client.session) def test_logout_default(self): "Logout without next_page option renders the default template" self.login() response = self.client.get('/logout/') self.assertContains(response, 'Logged out') self.confirm_logged_out() def test_14377(self): # Bug 14377 self.login() response = self.client.get('/logout/') self.assertTrue('site' in response.context) def test_logout_with_overridden_redirect_url(self): # Bug 11223 self.login() response = self.client.get('/logout/next_page/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) response = 
self.client.get('/logout/next_page/?next=/login/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/')) self.confirm_logged_out() def test_logout_with_next_page_specified(self): "Logout with next_page option given redirects to specified resource" self.login() response = self.client.get('/logout/next_page/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) self.confirm_logged_out() def test_logout_with_redirect_argument(self): "Logout with query string redirects to specified resource" self.login() response = self.client.get('/logout/?next=/login/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/')) self.confirm_logged_out() def test_logout_with_custom_redirect_argument(self): "Logout with custom query string redirects to specified resource" self.login() response = self.client.get('/logout/custom_query/?follow=/somewhere/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) self.confirm_logged_out() def test_security_check(self, password='password'): logout_url = reverse('django.contrib.auth.views.logout') # Those URLs should not pass the security check for bad_url in ('http://example.com', 'https://example.com', 'ftp://exampel.com', '//example.com'): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': urlquote(bad_url), } self.login() response = self.client.get(nasty_url) self.assertEqual(response.status_code, 302) self.assertFalse(bad_url in response['Location'], "%s should be blocked" % bad_url) self.confirm_logged_out() # These URLs *should* still pass the security check for good_url in ('/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://exampel.com', 'view/?param=//example.com', 'https:///', '//testserver/', '/url%20with%20spaces/'): # see ticket #12534 safe_url 
= '%(url)s?%(next)s=%(good_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'good_url': urlquote(good_url), } self.login() response = self.client.get(safe_url) self.assertEqual(response.status_code, 302) self.assertTrue(good_url in response['Location'], "%s should be allowed" % good_url) self.confirm_logged_out() @skipIfCustomUser class ChangelistTests(AuthViewsTestCase): urls = 'django.contrib.auth.tests.urls_admin' # #20078 - users shouldn't be allowed to guess password hashes via # repeated password__startswith queries. def test_changelist_disallows_password_lookups(self): # Make me a superuser before loging in. User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True) self.login() # A lookup that tries to filter on password isn't OK with self.assertRaises(SuspiciousOperation): response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5788_0
crossvul-python_data_bad_2104_9
404: Not Found
./CrossVul/dataset_final_sorted/CWE-79/py/bad_2104_9
crossvul-python_data_good_5790_0
from __future__ import with_statement import os import re import urllib from django.conf import settings from django.contrib.sites.models import Site, RequestSite from django.contrib.auth.models import User from django.core import mail from django.core.exceptions import SuspiciousOperation from django.core.urlresolvers import reverse, NoReverseMatch from django.http import QueryDict from django.utils.encoding import force_unicode from django.utils.html import escape from django.test import TestCase from django.test.utils import override_settings from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm, SetPasswordForm, PasswordResetForm) class AuthViewsTestCase(TestCase): """ Helper base class for all the follow test cases. """ fixtures = ['authtestdata.json'] urls = 'django.contrib.auth.tests.urls' def setUp(self): self.old_LANGUAGES = settings.LANGUAGES self.old_LANGUAGE_CODE = settings.LANGUAGE_CODE settings.LANGUAGES = (('en', 'English'),) settings.LANGUAGE_CODE = 'en' self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS settings.TEMPLATE_DIRS = ( os.path.join(os.path.dirname(__file__), 'templates'), ) def tearDown(self): settings.LANGUAGES = self.old_LANGUAGES settings.LANGUAGE_CODE = self.old_LANGUAGE_CODE settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS def login(self, password='password'): response = self.client.post('/login/', { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL)) self.assertTrue(SESSION_KEY in self.client.session) def assertContainsEscaped(self, response, text, **kwargs): return self.assertContains(response, escape(force_unicode(text)), **kwargs) AuthViewsTestCase = override_settings(USE_TZ=False)(AuthViewsTestCase) class AuthViewNamedURLTests(AuthViewsTestCase): urls = 'django.contrib.auth.urls' def test_named_urls(self): "Named URLs should be 
reversible" expected_named_urls = [ ('login', [], {}), ('logout', [], {}), ('password_change', [], {}), ('password_change_done', [], {}), ('password_reset', [], {}), ('password_reset_done', [], {}), ('password_reset_confirm', [], { 'uidb36': 'aaaaaaa', 'token': '1111-aaaaa', }), ('password_reset_complete', [], {}), ] for name, args, kwargs in expected_named_urls: try: reverse(name, args=args, kwargs=kwargs) except NoReverseMatch: self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name) class PasswordResetTest(AuthViewsTestCase): def test_email_not_found(self): "Error is raised if the provided email address isn't currently registered" response = self.client.get('/password_reset/') self.assertEqual(response.status_code, 200) response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'}) self.assertContainsEscaped(response, PasswordResetForm.error_messages['unknown']) self.assertEqual(len(mail.outbox), 0) def test_email_found(self): "Email is sent if a valid email address is provided for password reset" response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertTrue("http://" in mail.outbox[0].body) self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email) def test_email_found_custom_from(self): "Email is sent if a valid email address is provided for password reset when a custom from_email is provided." response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertEqual("staffmember@example.com", mail.outbox[0].from_email) @override_settings(ALLOWED_HOSTS=['adminsite.com']) def test_admin_reset(self): "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override." 
response = self.client.post('/admin_password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='adminsite.com' ) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertTrue("http://adminsite.com" in mail.outbox[0].body) self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email) # Skip any 500 handler action (like sending more mail...) @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host(self): "Poisoned HTTP_HOST headers can't be used for reset emails" # This attack is based on the way browsers handle URLs. The colon # should be used to separate the port, but if the URL contains an @, # the colon is interpreted as part of a username for login purposes, # making 'evil.com' the request domain. Since HTTP_HOST is used to # produce a meaningful reset URL, we need to be certain that the # HTTP_HOST header isn't poisoned. This is done as a check when get_host() # is invoked, but we check here as a practical consequence. with self.assertRaises(SuspiciousOperation): self.client.post('/password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='www.example:dr.frankenstein@evil.tld' ) self.assertEqual(len(mail.outbox), 0) # Skip any 500 handler action (like sending more mail...) 
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host_admin_site(self): "Poisoned HTTP_HOST headers can't be used for reset emails on admin views" with self.assertRaises(SuspiciousOperation): self.client.post('/admin_password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='www.example:dr.frankenstein@evil.tld' ) self.assertEqual(len(mail.outbox), 0) def _test_confirm_start(self): # Start by creating the email response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) return self._read_signup_email(mail.outbox[0]) def _read_signup_email(self, email): urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body) self.assertTrue(urlmatch is not None, "No URL found in sent email") return urlmatch.group(), urlmatch.groups()[0] def test_confirm_valid(self): url, path = self._test_confirm_start() response = self.client.get(path) # redirect to a 'complete' page: self.assertEqual(response.status_code, 200) self.assertTrue("Please enter your new password" in response.content) def test_confirm_invalid(self): url, path = self._test_confirm_start() # Let's munge the token in the path, but keep the same length, # in case the URLconf will reject a different length. 
path = path[:-5] + ("0" * 4) + path[-1] response = self.client.get(path) self.assertEqual(response.status_code, 200) self.assertTrue("The password reset link was invalid" in response.content) def test_confirm_invalid_user(self): # Ensure that we get a 200 response for a non-existant user, not a 404 response = self.client.get('/reset/123456-1-1/') self.assertEqual(response.status_code, 200) self.assertTrue("The password reset link was invalid" in response.content) def test_confirm_overflow_user(self): # Ensure that we get a 200 response for a base36 user id that overflows int response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/') self.assertEqual(response.status_code, 200) self.assertTrue("The password reset link was invalid" in response.content) def test_confirm_invalid_post(self): # Same as test_confirm_invalid, but trying # to do a POST instead. url, path = self._test_confirm_start() path = path[:-5] + ("0" * 4) + path[-1] self.client.post(path, { 'new_password1': 'anewpassword', 'new_password2': ' anewpassword', }) # Check the password has not been changed u = User.objects.get(email='staffmember@example.com') self.assertTrue(not u.check_password("anewpassword")) def test_confirm_complete(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'}) # It redirects us to a 'complete' page: self.assertEqual(response.status_code, 302) # Check the password has been changed u = User.objects.get(email='staffmember@example.com') self.assertTrue(u.check_password("anewpassword")) # Check we can't use the link again response = self.client.get(path) self.assertEqual(response.status_code, 200) self.assertTrue("The password reset link was invalid" in response.content) def test_confirm_different_passwords(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'x'}) self.assertEqual(response.status_code, 200) 
self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch']) class ChangePasswordTest(AuthViewsTestCase): def fail_login(self, password='password'): response = self.client.post('/login/', { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 200) self.assertContainsEscaped(response, AuthenticationForm.error_messages['invalid_login']) def logout(self): response = self.client.get('/logout/') def test_password_change_fails_with_invalid_old_password(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'donuts', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertEqual(response.status_code, 200) self.assertContainsEscaped(response, PasswordChangeForm.error_messages['password_incorrect']) def test_password_change_fails_with_mismatched_passwords(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'donuts', }) self.assertEqual(response.status_code, 200) self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch']) def test_password_change_succeeds(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/password_change/done/')) self.fail_login() self.login(password='password1') def test_password_change_done_succeeds(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/password_change/done/')) def test_password_change_done_fails(self): with self.settings(LOGIN_URL='/login/'): response = self.client.get('/password_change/done/') 
self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/?next=/password_change/done/')) class LoginTest(AuthViewsTestCase): def test_current_site_in_context_after_login(self): response = self.client.get(reverse('django.contrib.auth.views.login')) self.assertEqual(response.status_code, 200) if Site._meta.installed: site = Site.objects.get_current() self.assertEqual(response.context['site'], site) self.assertEqual(response.context['site_name'], site.name) else: self.assertIsInstance(response.context['site'], RequestSite) self.assertTrue(isinstance(response.context['form'], AuthenticationForm), 'Login form is not an AuthenticationForm') def test_security_check(self, password='password'): login_url = reverse('django.contrib.auth.views.login') # Those URLs should not pass the security check for bad_url in ('http://example.com', 'https://example.com', 'ftp://exampel.com', '//example.com', 'javascript:alert("XSS")'): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': urllib.quote(bad_url), } response = self.client.post(nasty_url, { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertFalse(bad_url in response['Location'], "%s should be blocked" % bad_url) # These URLs *should* still pass the security check for good_url in ('/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://exampel.com', 'view/?param=//example.com', 'https:///', 'HTTPS:///', '//testserver/', '/url%20with%20spaces/'): # see ticket #12534 safe_url = '%(url)s?%(next)s=%(good_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'good_url': urllib.quote(good_url), } response = self.client.post(safe_url, { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertTrue(good_url in response['Location'], "%s should be allowed" % good_url) class 
LoginURLSettings(AuthViewsTestCase): def setUp(self): super(LoginURLSettings, self).setUp() self.old_LOGIN_URL = settings.LOGIN_URL def tearDown(self): super(LoginURLSettings, self).tearDown() settings.LOGIN_URL = self.old_LOGIN_URL def get_login_required_url(self, login_url): settings.LOGIN_URL = login_url response = self.client.get('/login_required/') self.assertEqual(response.status_code, 302) return response['Location'] def test_standard_login_url(self): login_url = '/login/' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = '/login_required/' self.assertEqual(login_required_url, 'http://testserver%s?%s' % (login_url, querystring.urlencode('/'))) def test_remote_login_url(self): login_url = 'http://remote.example.com/login' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) def test_https_login_url(self): login_url = 'https:///login/' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) def test_login_url_with_querystring(self): login_url = '/login/?pretty=1' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('pretty=1', mutable=True) querystring['next'] = '/login_required/' self.assertEqual(login_required_url, 'http://testserver/login/?%s' % querystring.urlencode('/')) def test_remote_login_url_with_next_querystring(self): login_url = 'http://remote.example.com/login/' login_required_url = self.get_login_required_url('%s?next=/default/' % login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' 
self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) class LogoutTest(AuthViewsTestCase): def confirm_logged_out(self): self.assertTrue(SESSION_KEY not in self.client.session) def test_logout_default(self): "Logout without next_page option renders the default template" self.login() response = self.client.get('/logout/') self.assertEqual(200, response.status_code) self.assertTrue('Logged out' in response.content) self.confirm_logged_out() def test_14377(self): # Bug 14377 self.login() response = self.client.get('/logout/') self.assertTrue('site' in response.context) def test_logout_with_overridden_redirect_url(self): # Bug 11223 self.login() response = self.client.get('/logout/next_page/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) response = self.client.get('/logout/next_page/?next=/login/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/')) self.confirm_logged_out() def test_logout_with_next_page_specified(self): "Logout with next_page option given redirects to specified resource" self.login() response = self.client.get('/logout/next_page/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) self.confirm_logged_out() def test_logout_with_redirect_argument(self): "Logout with query string redirects to specified resource" self.login() response = self.client.get('/logout/?next=/login/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/')) self.confirm_logged_out() def test_logout_with_custom_redirect_argument(self): "Logout with custom query string redirects to specified resource" self.login() response = self.client.get('/logout/custom_query/?follow=/somewhere/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) self.confirm_logged_out() def 
test_security_check(self, password='password'): logout_url = reverse('django.contrib.auth.views.logout') # Those URLs should not pass the security check for bad_url in ('http://example.com', 'https://example.com', 'ftp://exampel.com', '//example.com', 'javascript:alert("XSS")'): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': urllib.quote(bad_url), } self.login() response = self.client.get(nasty_url) self.assertEqual(response.status_code, 302) self.assertFalse(bad_url in response['Location'], "%s should be blocked" % bad_url) self.confirm_logged_out() # These URLs *should* still pass the security check for good_url in ('/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://exampel.com', 'view/?param=//example.com', 'https:///', 'HTTPS:///', '//testserver/', '/url%20with%20spaces/'): # see ticket #12534 safe_url = '%(url)s?%(next)s=%(good_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'good_url': urllib.quote(good_url), } self.login() response = self.client.get(safe_url) self.assertEqual(response.status_code, 302) self.assertTrue(good_url in response['Location'], "%s should be allowed" % good_url) self.confirm_logged_out()
./CrossVul/dataset_final_sorted/CWE-79/py/good_5790_0
crossvul-python_data_good_2104_9
# encoding:utf-8 """ :synopsis: views "read-only" for main textual content By main textual content is meant - text of Questions, Answers and Comments. The "read-only" requirement here is not 100% strict, as for example "question" view does allow adding new comments via Ajax form post. """ import datetime import logging import urllib import operator from django.shortcuts import get_object_or_404 from django.shortcuts import render from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseNotAllowed from django.core.paginator import Paginator, EmptyPage, InvalidPage from django.template.loader import get_template from django.template import RequestContext from django.utils import simplejson from django.utils.html import escape from django.utils.translation import ugettext as _ from django.utils.translation import ungettext from django.utils import translation from django.views.decorators import csrf from django.core.urlresolvers import reverse from django.core import exceptions as django_exceptions from django.contrib.humanize.templatetags import humanize from django.http import QueryDict from django.conf import settings as django_settings import askbot from askbot import exceptions from askbot.utils.diff import textDiff as htmldiff from askbot.forms import AnswerForm, ShowQuestionForm from askbot import conf from askbot import models from askbot import schedules from askbot.models.tag import Tag from askbot import const from askbot.utils import functions from askbot.utils.html import sanitize_html from askbot.utils.decorators import anonymous_forbidden, ajax_only, get_only from askbot.utils.loading import load_module from askbot.search.state_manager import SearchState, DummySearchState from askbot.templatetags import extra_tags from askbot.conf import settings as askbot_settings from askbot.views import context # used in index page #todo: - take these out of const or settings from askbot.models import Post, Vote INDEX_PAGE_SIZE = 30 
INDEX_AWARD_SIZE = 15 INDEX_TAGS_SIZE = 25 # used in tags list DEFAULT_PAGE_SIZE = 60 # used in questions # used in answers #refactor? - we have these #views that generate a listing of questions in one way or another: #index, unanswered, questions, search, tag #should we dry them up? #related topics - information drill-down, search refinement def index(request):#generates front page - shows listing of questions sorted in various ways """index view mapped to the root url of the Q&A site """ return HttpResponseRedirect(reverse('questions')) def questions(request, **kwargs): """ List of Questions, Tagged questions, and Unanswered questions. matching search query or user selection """ #before = datetime.datetime.now() if request.method != 'GET': return HttpResponseNotAllowed(['GET']) search_state = SearchState( user_logged_in=request.user.is_authenticated(), **kwargs ) page_size = int(askbot_settings.DEFAULT_QUESTIONS_PAGE_SIZE) qs, meta_data = models.Thread.objects.run_advanced_search( request_user=request.user, search_state=search_state ) if meta_data['non_existing_tags']: search_state = search_state.remove_tags(meta_data['non_existing_tags']) paginator = Paginator(qs, page_size) if paginator.num_pages < search_state.page: search_state.page = 1 page = paginator.page(search_state.page) page.object_list = list(page.object_list) # evaluate the queryset # INFO: Because for the time being we need question posts and thread authors # down the pipeline, we have to precache them in thread objects models.Thread.objects.precache_view_data_hack(threads=page.object_list) related_tags = Tag.objects.get_related_to_search( threads=page.object_list, ignored_tag_names=meta_data.get('ignored_tag_names',[]) ) tag_list_type = askbot_settings.TAG_LIST_FORMAT if tag_list_type == 'cloud': #force cloud to sort by name related_tags = sorted(related_tags, key = operator.attrgetter('name')) contributors = list( models.Thread.objects.get_thread_contributors( thread_list=page.object_list 
).only('id', 'username', 'gravatar') ) paginator_context = { 'is_paginated' : (paginator.count > page_size), 'pages': paginator.num_pages, 'page': search_state.page, 'has_previous': page.has_previous(), 'has_next': page.has_next(), 'previous': page.previous_page_number(), 'next': page.next_page_number(), 'base_url' : search_state.query_string(), 'page_size' : page_size, } # We need to pass the rss feed url based # on the search state to the template. # We use QueryDict to get a querystring # from dicts and arrays. Much cleaner # than parsing and string formating. rss_query_dict = QueryDict("").copy() if search_state.query: # We have search string in session - pass it to # the QueryDict rss_query_dict.update({"q": search_state.query}) if search_state.tags: # We have tags in session - pass it to the # QueryDict but as a list - we want tags+ rss_query_dict.setlist("tags", search_state.tags) context_feed_url = '/%sfeeds/rss/?%s' % ( django_settings.ASKBOT_URL, rss_query_dict.urlencode() ) # Format the url with the QueryDict reset_method_count = len(filter(None, [search_state.query, search_state.tags, meta_data.get('author_name', None)])) if request.is_ajax(): q_count = paginator.count question_counter = ungettext('%(q_num)s question', '%(q_num)s questions', q_count) question_counter = question_counter % {'q_num': humanize.intcomma(q_count),} if q_count > page_size: paginator_tpl = get_template('main_page/paginator.html') paginator_html = paginator_tpl.render( RequestContext( request, { 'context': functions.setup_paginator(paginator_context), 'questions_count': q_count, 'page_size' : page_size, 'search_state': search_state, } ) ) else: paginator_html = '' questions_tpl = get_template('main_page/questions_loop.html') questions_html = questions_tpl.render( RequestContext( request, { 'threads': page, 'search_state': search_state, 'reset_method_count': reset_method_count, 'request': request } ) ) ajax_data = { 'query_data': { 'tags': search_state.tags, 'sort_order': 
search_state.sort, 'ask_query_string': search_state.ask_query_string(), }, 'paginator': paginator_html, 'question_counter': question_counter, 'faces': [],#[extra_tags.gravatar(contributor, 48) for contributor in contributors], 'feed_url': context_feed_url, 'query_string': search_state.query_string(), 'page_size' : page_size, 'questions': questions_html.replace('\n',''), 'non_existing_tags': meta_data['non_existing_tags'] } ajax_data['related_tags'] = [{ 'name': escape(tag.name), 'used_count': humanize.intcomma(tag.local_used_count) } for tag in related_tags] return HttpResponse(simplejson.dumps(ajax_data), mimetype = 'application/json') else: # non-AJAX branch template_data = { 'active_tab': 'questions', 'author_name' : meta_data.get('author_name',None), 'contributors' : contributors, 'context' : paginator_context, 'is_unanswered' : False,#remove this from template 'interesting_tag_names': meta_data.get('interesting_tag_names', None), 'ignored_tag_names': meta_data.get('ignored_tag_names', None), 'subscribed_tag_names': meta_data.get('subscribed_tag_names', None), 'language_code': translation.get_language(), 'name_of_anonymous_user' : models.get_name_of_anonymous_user(), 'page_class': 'main-page', 'page_size': page_size, 'query': search_state.query, 'threads' : page, 'questions_count' : paginator.count, 'reset_method_count': reset_method_count, 'scope': search_state.scope, 'show_sort_by_relevance': conf.should_show_sort_by_relevance(), 'search_tags' : search_state.tags, 'sort': search_state.sort, 'tab_id' : search_state.sort, 'tags' : related_tags, 'tag_list_type' : tag_list_type, 'font_size' : extra_tags.get_tag_font_size(related_tags), 'display_tag_filter_strategy_choices': conf.get_tag_display_filter_strategy_choices(), 'email_tag_filter_strategy_choices': conf.get_tag_email_filter_strategy_choices(), 'update_avatar_data': schedules.should_update_avatar_data(request), 'query_string': search_state.query_string(), 'search_state': search_state, 'feed_url': 
context_feed_url, } return render(request, 'main_page.html', template_data) def tags(request):#view showing a listing of available tags - plain list #1) Get parameters. This normally belongs to form cleaning. post_data = request.GET sortby = post_data.get('sort', 'used') try: page = int(post_data.get('page', '1')) except ValueError: page = 1 if sortby == 'name': order_by = 'name' else: order_by = '-used_count' query = post_data.get('query', '').strip() tag_list_type = askbot_settings.TAG_LIST_FORMAT #2) Get query set for the tags. query_params = {'deleted': False} if query != '': query_params['name__icontains'] = query tags_qs = Tag.objects.filter(**query_params).exclude(used_count=0) tags_qs = tags_qs.order_by(order_by) #3) Start populating the template context. data = { 'active_tab': 'tags', 'page_class': 'tags-page', 'tag_list_type' : tag_list_type, 'stag' : query, 'tab_id' : sortby, 'keywords' : query, 'search_state': SearchState(*[None for x in range(7)]) } if tag_list_type == 'list': #plain listing is paginated objects_list = Paginator(tags_qs, DEFAULT_PAGE_SIZE) try: tags = objects_list.page(page) except (EmptyPage, InvalidPage): tags = objects_list.page(objects_list.num_pages) paginator_data = { 'is_paginated' : (objects_list.num_pages > 1), 'pages': objects_list.num_pages, 'page': page, 'has_previous': tags.has_previous(), 'has_next': tags.has_next(), 'previous': tags.previous_page_number(), 'next': tags.next_page_number(), 'base_url' : reverse('tags') + '?sort=%s&amp;' % sortby } paginator_context = functions.setup_paginator(paginator_data) data['paginator_context'] = paginator_context else: #tags for the tag cloud are given without pagination tags = tags_qs font_size = extra_tags.get_tag_font_size(tags) data['font_size'] = font_size data['tags'] = tags if request.is_ajax(): template = get_template('tags/content.html') template_context = RequestContext(request, data) json_data = {'success': True, 'html': template.render(template_context)} json_string = 
simplejson.dumps(json_data) return HttpResponse(json_string, mimetype='application/json') else: return render(request, 'tags.html', data) @csrf.csrf_protect def question(request, id):#refactor - long subroutine. display question body, answers and comments """view that displays body of the question and all answers to it """ #process url parameters #todo: fix inheritance of sort method from questions #before = datetime.datetime.now() form = ShowQuestionForm(request.GET) form.full_clean()#always valid show_answer = form.cleaned_data['show_answer'] show_comment = form.cleaned_data['show_comment'] show_page = form.cleaned_data['show_page'] answer_sort_method = form.cleaned_data['answer_sort_method'] #load question and maybe refuse showing deleted question #if the question does not exist - try mapping to old questions #and and if it is not found again - then give up try: question_post = models.Post.objects.filter( post_type = 'question', id = id ).select_related('thread')[0] except IndexError: # Handle URL mapping - from old Q/A/C/ URLs to the new one try: question_post = models.Post.objects.filter( post_type='question', old_question_id = id ).select_related('thread')[0] except IndexError: raise Http404 if show_answer: try: old_answer = models.Post.objects.get_answers().get(old_answer_id=show_answer) return HttpResponseRedirect(old_answer.get_absolute_url()) except models.Post.DoesNotExist: pass elif show_comment: try: old_comment = models.Post.objects.get_comments().get(old_comment_id=show_comment) return HttpResponseRedirect(old_comment.get_absolute_url()) except models.Post.DoesNotExist: pass try: question_post.assert_is_visible_to(request.user) except exceptions.QuestionHidden, error: request.user.message_set.create(message = unicode(error)) return HttpResponseRedirect(reverse('index')) #redirect if slug in the url is wrong if request.path.split('/')[-2] != question_post.slug: logging.debug('no slug match!') question_url = '?'.join(( question_post.get_absolute_url(), 
urllib.urlencode(request.GET) )) return HttpResponseRedirect(question_url) #resolve comment and answer permalinks #they go first because in theory both can be moved to another question #this block "returns" show_post and assigns actual comment and answer #to show_comment and show_answer variables #in the case if the permalinked items or their parents are gone - redirect #redirect also happens if id of the object's origin post != requested id show_post = None #used for permalinks if show_comment: #if url calls for display of a specific comment, #check that comment exists, that it belongs to #the current question #if it is an answer comment and the answer is hidden - #redirect to the default view of the question #if the question is hidden - redirect to the main page #in addition - if url points to a comment and the comment #is for the answer - we need the answer object try: show_comment = models.Post.objects.get_comments().get(id=show_comment) except models.Post.DoesNotExist: error_message = _( 'Sorry, the comment you are looking for has been ' 'deleted and is no longer accessible' ) request.user.message_set.create(message = error_message) return HttpResponseRedirect(question_post.thread.get_absolute_url()) if str(show_comment.thread._question_post().id) != str(id): return HttpResponseRedirect(show_comment.get_absolute_url()) show_post = show_comment.parent try: show_comment.assert_is_visible_to(request.user) except exceptions.AnswerHidden, error: request.user.message_set.create(message = unicode(error)) #use reverse function here because question is not yet loaded return HttpResponseRedirect(reverse('question', kwargs = {'id': id})) except exceptions.QuestionHidden, error: request.user.message_set.create(message = unicode(error)) return HttpResponseRedirect(reverse('index')) elif show_answer: #if the url calls to view a particular answer to #question - we must check whether the question exists #whether answer is actually corresponding to the current question #and 
that the visitor is allowed to see it show_post = get_object_or_404(models.Post, post_type='answer', id=show_answer) if str(show_post.thread._question_post().id) != str(id): return HttpResponseRedirect(show_post.get_absolute_url()) try: show_post.assert_is_visible_to(request.user) except django_exceptions.PermissionDenied, error: request.user.message_set.create(message = unicode(error)) return HttpResponseRedirect(reverse('question', kwargs = {'id': id})) thread = question_post.thread if getattr(django_settings, 'ASKBOT_MULTILINGUAL', False): if thread.language_code != translation.get_language(): return HttpResponseRedirect(thread.get_absolute_url()) logging.debug('answer_sort_method=' + unicode(answer_sort_method)) #load answers and post id's->athor_id mapping #posts are pre-stuffed with the correctly ordered comments updated_question_post, answers, post_to_author, published_answer_ids = thread.get_cached_post_data( sort_method = answer_sort_method, user = request.user ) question_post.set_cached_comments( updated_question_post.get_cached_comments() ) #Post.objects.precache_comments(for_posts=[question_post] + answers, visitor=request.user) user_votes = {} user_post_id_list = list() #todo: cache this query set, but again takes only 3ms! 
if request.user.is_authenticated(): user_votes = Vote.objects.filter( user=request.user, voted_post__id__in = post_to_author.keys() ).values_list('voted_post_id', 'vote') user_votes = dict(user_votes) #we can avoid making this query by iterating through #already loaded posts user_post_id_list = [ id for id in post_to_author if post_to_author[id] == request.user.id ] #resolve page number and comment number for permalinks show_comment_position = None if show_comment: show_page = show_comment.get_page_number(answer_posts=answers) show_comment_position = show_comment.get_order_number() elif show_answer: show_page = show_post.get_page_number(answer_posts=answers) objects_list = Paginator(answers, const.ANSWERS_PAGE_SIZE) if show_page > objects_list.num_pages: return HttpResponseRedirect(question_post.get_absolute_url()) page_objects = objects_list.page(show_page) #count visits #import ipdb; ipdb.set_trace() if functions.not_a_robot_request(request): #todo: split this out into a subroutine #todo: merge view counts per user and per session #1) view count per session update_view_count = False if 'question_view_times' not in request.session: request.session['question_view_times'] = {} last_seen = request.session['question_view_times'].get(question_post.id, None) if thread.last_activity_by_id != request.user.id: if last_seen: if last_seen < thread.last_activity_at: update_view_count = True else: update_view_count = True request.session['question_view_times'][question_post.id] = \ datetime.datetime.now() #2) run the slower jobs in a celery task from askbot import tasks tasks.record_question_visit.delay( question_post = question_post, user_id = request.user.id, update_view_count = update_view_count ) paginator_data = { 'is_paginated' : (objects_list.count > const.ANSWERS_PAGE_SIZE), 'pages': objects_list.num_pages, 'page': show_page, 'has_previous': page_objects.has_previous(), 'has_next': page_objects.has_next(), 'previous': page_objects.previous_page_number(), 'next': 
page_objects.next_page_number(), 'base_url' : request.path + '?sort=%s&amp;' % answer_sort_method, } paginator_context = functions.setup_paginator(paginator_data) #todo: maybe consolidate all activity in the thread #for the user into just one query? favorited = thread.has_favorite_by_user(request.user) is_cacheable = True if show_page != 1: is_cacheable = False elif show_comment_position > askbot_settings.MAX_COMMENTS_TO_SHOW: is_cacheable = False initial = { 'wiki': question_post.wiki and askbot_settings.WIKI_ON, 'email_notify': thread.is_followed_by(request.user) } #maybe load draft if request.user.is_authenticated(): #todo: refactor into methor on thread drafts = models.DraftAnswer.objects.filter( author=request.user, thread=thread ) if drafts.count() > 0: initial['text'] = drafts[0].text answer_form = AnswerForm(initial, user=request.user) user_can_post_comment = ( request.user.is_authenticated() and request.user.can_post_comment() ) user_already_gave_answer = False previous_answer = None if request.user.is_authenticated(): if askbot_settings.LIMIT_ONE_ANSWER_PER_USER: for answer in answers: if answer.author == request.user: user_already_gave_answer = True previous_answer = answer break data = { 'is_cacheable': False,#is_cacheable, #temporary, until invalidation fix 'long_time': const.LONG_TIME,#"forever" caching 'page_class': 'question-page', 'active_tab': 'questions', 'question' : question_post, 'thread': thread, 'thread_is_moderated': thread.is_moderated(), 'user_is_thread_moderator': thread.has_moderator(request.user), 'published_answer_ids': published_answer_ids, 'answer' : answer_form, 'answers' : page_objects.object_list, 'answer_count': thread.get_answer_count(request.user), 'category_tree_data': askbot_settings.CATEGORY_TREE, 'user_votes': user_votes, 'user_post_id_list': user_post_id_list, 'user_can_post_comment': user_can_post_comment,#in general 'user_already_gave_answer': user_already_gave_answer, 'oldest_answer_id': 
thread.get_oldest_answer_id(request.user), 'previous_answer': previous_answer, 'tab_id' : answer_sort_method, 'favorited' : favorited, 'similar_threads' : thread.get_similar_threads(), 'language_code': translation.get_language(), 'paginator_context' : paginator_context, 'show_post': show_post, 'show_comment': show_comment, 'show_comment_position': show_comment_position, } #shared with ... if askbot_settings.GROUPS_ENABLED: data['sharing_info'] = thread.get_sharing_info() data.update(context.get_for_tag_editor()) extra_context = getattr( django_settings, 'ASKBOT_QUESTION_PAGE_EXTRA_CONTEXT', None ) if extra_context: extra_context_getter = load_module(extra_context) extra_data = extra_context_getter(request, data) data.update(extra_data) return render(request, 'question.html', data) def revisions(request, id, post_type = None): assert post_type in ('question', 'answer') post = get_object_or_404(models.Post, post_type=post_type, id=id) revisions = list(models.PostRevision.objects.filter(post=post)) revisions.reverse() for i, revision in enumerate(revisions): if i == 0: revision.diff = sanitize_html(revisions[i].html) revision.summary = _('initial version') else: revision.diff = htmldiff( sanitize_html(revisions[i-1].html), sanitize_html(revision.html) ) data = { 'page_class':'revisions-page', 'active_tab':'questions', 'post': post, 'revisions': revisions, } return render(request, 'revisions.html', data) @csrf.csrf_exempt @ajax_only @anonymous_forbidden @get_only def get_comment(request): """returns text of a comment by id via ajax response requires request method get and request must be ajax """ id = int(request.GET['id']) comment = models.Post.objects.get(post_type='comment', id=id) request.user.assert_can_edit_comment(comment) return {'text': comment.text}
./CrossVul/dataset_final_sorted/CWE-79/py/good_2104_9
crossvul-python_data_bad_2105_2
404: Not Found
./CrossVul/dataset_final_sorted/CWE-79/py/bad_2105_2
crossvul-python_data_good_453_0
"""A cleanup tool for HTML. Removes unwanted tags and content. See the `Cleaner` class for details. """ import re import copy try: from urlparse import urlsplit from urllib import unquote_plus except ImportError: # Python 3 from urllib.parse import urlsplit, unquote_plus from lxml import etree from lxml.html import defs from lxml.html import fromstring, XHTML_NAMESPACE from lxml.html import xhtml_to_html, _transform_result try: unichr except NameError: # Python 3 unichr = chr try: unicode except NameError: # Python 3 unicode = str try: bytes except NameError: # Python < 2.6 bytes = str try: basestring except NameError: basestring = (str, bytes) __all__ = ['clean_html', 'clean', 'Cleaner', 'autolink', 'autolink_html', 'word_break', 'word_break_html'] # Look at http://code.sixapart.com/trac/livejournal/browser/trunk/cgi-bin/cleanhtml.pl # Particularly the CSS cleaning; most of the tag cleaning is integrated now # I have multiple kinds of schemes searched; but should schemes be # whitelisted instead? # max height? # remove images? Also in CSS? background attribute? # Some way to whitelist object, iframe, etc (e.g., if you want to # allow *just* embedded YouTube movies) # Log what was deleted and why? # style="behavior: ..." might be bad in IE? # Should we have something for just <meta http-equiv>? That's the worst of the # metas. # UTF-7 detections? Example: # <HEAD><META HTTP-EQUIV="CONTENT-TYPE" CONTENT="text/html; charset=UTF-7"> </HEAD>+ADw-SCRIPT+AD4-alert('XSS');+ADw-/SCRIPT+AD4- # you don't always have to have the charset set, if the page has no charset # and there's UTF7-like code in it. # Look at these tests: http://htmlpurifier.org/live/smoketests/xssAttacks.php # This is an IE-specific construct you can have in a stylesheet to # run some Javascript: _css_javascript_re = re.compile( r'expression\s*\(.*?\)', re.S|re.I) # Do I have to worry about @\nimport? 
_css_import_re = re.compile( r'@\s*import', re.I) # All kinds of schemes besides just javascript: that can cause # execution: _is_image_dataurl = re.compile( r'^data:image/.+;base64', re.I).search _is_possibly_malicious_scheme = re.compile( r'(?:javascript|jscript|livescript|vbscript|data|about|mocha):', re.I).search def _is_javascript_scheme(s): if _is_image_dataurl(s): return None return _is_possibly_malicious_scheme(s) _substitute_whitespace = re.compile(r'[\s\x00-\x08\x0B\x0C\x0E-\x19]+').sub # FIXME: should data: be blocked? # FIXME: check against: http://msdn2.microsoft.com/en-us/library/ms537512.aspx _conditional_comment_re = re.compile( r'\[if[\s\n\r]+.*?][\s\n\r]*>', re.I|re.S) _find_styled_elements = etree.XPath( "descendant-or-self::*[@style]") _find_external_links = etree.XPath( ("descendant-or-self::a [normalize-space(@href) and substring(normalize-space(@href),1,1) != '#'] |" "descendant-or-self::x:a[normalize-space(@href) and substring(normalize-space(@href),1,1) != '#']"), namespaces={'x':XHTML_NAMESPACE}) class Cleaner(object): """ Instances cleans the document of each of the possible offending elements. The cleaning is controlled by attributes; you can override attributes in a subclass, or set them in the constructor. ``scripts``: Removes any ``<script>`` tags. ``javascript``: Removes any Javascript, like an ``onclick`` attribute. Also removes stylesheets as they could contain Javascript. ``comments``: Removes any comments. ``style``: Removes any style tags. ``inline_style`` Removes any style attributes. Defaults to the value of the ``style`` option. ``links``: Removes any ``<link>`` tags ``meta``: Removes any ``<meta>`` tags ``page_structure``: Structural parts of a page: ``<head>``, ``<html>``, ``<title>``. ``processing_instructions``: Removes any processing instructions. 
``embedded``: Removes any embedded objects (flash, iframes) ``frames``: Removes any frame-related tags ``forms``: Removes any form tags ``annoying_tags``: Tags that aren't *wrong*, but are annoying. ``<blink>`` and ``<marquee>`` ``remove_tags``: A list of tags to remove. Only the tags will be removed, their content will get pulled up into the parent tag. ``kill_tags``: A list of tags to kill. Killing also removes the tag's content, i.e. the whole subtree, not just the tag itself. ``allow_tags``: A list of tags to include (default include all). ``remove_unknown_tags``: Remove any tags that aren't standard parts of HTML. ``safe_attrs_only``: If true, only include 'safe' attributes (specifically the list from the feedparser HTML sanitisation web site). ``safe_attrs``: A set of attribute names to override the default list of attributes considered 'safe' (when safe_attrs_only=True). ``add_nofollow``: If true, then any <a> tags will have ``rel="nofollow"`` added to them. ``host_whitelist``: A list or set of hosts that you can use for embedded content (for content like ``<object>``, ``<link rel="stylesheet">``, etc). You can also implement/override the method ``allow_embedded_url(el, url)`` or ``allow_element(el)`` to implement more complex rules for what can be embedded. Anything that passes this test will be shown, regardless of the value of (for instance) ``embedded``. Note that this parameter might not work as intended if you do not make the links absolute before doing the cleaning. Note that you may also need to set ``whitelist_tags``. ``whitelist_tags``: A set of tags that can be included with ``host_whitelist``. The default is ``iframe`` and ``embed``; you may wish to include other tags like ``script``, or you may want to implement ``allow_embedded_url`` for more control. Set to None to include all tags. This modifies the document *in place*. 
""" scripts = True javascript = True comments = True style = False inline_style = None links = True meta = True page_structure = True processing_instructions = True embedded = True frames = True forms = True annoying_tags = True remove_tags = None allow_tags = None kill_tags = None remove_unknown_tags = True safe_attrs_only = True safe_attrs = defs.safe_attrs add_nofollow = False host_whitelist = () whitelist_tags = set(['iframe', 'embed']) def __init__(self, **kw): for name, value in kw.items(): if not hasattr(self, name): raise TypeError( "Unknown parameter: %s=%r" % (name, value)) setattr(self, name, value) if self.inline_style is None and 'inline_style' not in kw: self.inline_style = self.style # Used to lookup the primary URL for a given tag that is up for # removal: _tag_link_attrs = dict( script='src', link='href', # From: http://java.sun.com/j2se/1.4.2/docs/guide/misc/applet.html # From what I can tell, both attributes can contain a link: applet=['code', 'object'], iframe='src', embed='src', layer='src', # FIXME: there doesn't really seem like a general way to figure out what # links an <object> tag uses; links often go in <param> tags with values # that we don't really know. You'd have to have knowledge about specific # kinds of plugins (probably keyed off classid), and match against those. ##object=?, # FIXME: not looking at the action currently, because it is more complex # than than -- if you keep the form, you should keep the form controls. ##form='action', a='href', ) def __call__(self, doc): """ Cleans the document. """ if hasattr(doc, 'getroot'): # ElementTree instance, instead of an element doc = doc.getroot() # convert XHTML to HTML xhtml_to_html(doc) # Normalize a case that IE treats <image> like <img>, and that # can confuse either this step or later steps. 
for el in doc.iter('image'): el.tag = 'img' if not self.comments: # Of course, if we were going to kill comments anyway, we don't # need to worry about this self.kill_conditional_comments(doc) kill_tags = set(self.kill_tags or ()) remove_tags = set(self.remove_tags or ()) allow_tags = set(self.allow_tags or ()) if self.scripts: kill_tags.add('script') if self.safe_attrs_only: safe_attrs = set(self.safe_attrs) for el in doc.iter(etree.Element): attrib = el.attrib for aname in attrib.keys(): if aname not in safe_attrs: del attrib[aname] if self.javascript: if not (self.safe_attrs_only and self.safe_attrs == defs.safe_attrs): # safe_attrs handles events attributes itself for el in doc.iter(etree.Element): attrib = el.attrib for aname in attrib.keys(): if aname.startswith('on'): del attrib[aname] doc.rewrite_links(self._remove_javascript_link, resolve_base_href=False) # If we're deleting style then we don't have to remove JS links # from styles, otherwise... if not self.inline_style: for el in _find_styled_elements(doc): old = el.get('style') new = _css_javascript_re.sub('', old) new = _css_import_re.sub('', new) if self._has_sneaky_javascript(new): # Something tricky is going on... del el.attrib['style'] elif new != old: el.set('style', new) if not self.style: for el in list(doc.iter('style')): if el.get('type', '').lower().strip() == 'text/javascript': el.drop_tree() continue old = el.text or '' new = _css_javascript_re.sub('', old) # The imported CSS can do anything; we just can't allow: new = _css_import_re.sub('', old) if self._has_sneaky_javascript(new): # Something tricky is going on... el.text = '/* deleted */' elif new != old: el.text = new if self.comments or self.processing_instructions: # FIXME: why either? I feel like there's some obscure reason # because you can put PIs in comments...? 
But I've already # forgotten it kill_tags.add(etree.Comment) if self.processing_instructions: kill_tags.add(etree.ProcessingInstruction) if self.style: kill_tags.add('style') if self.inline_style: etree.strip_attributes(doc, 'style') if self.links: kill_tags.add('link') elif self.style or self.javascript: # We must get rid of included stylesheets if Javascript is not # allowed, as you can put Javascript in them for el in list(doc.iter('link')): if 'stylesheet' in el.get('rel', '').lower(): # Note this kills alternate stylesheets as well if not self.allow_element(el): el.drop_tree() if self.meta: kill_tags.add('meta') if self.page_structure: remove_tags.update(('head', 'html', 'title')) if self.embedded: # FIXME: is <layer> really embedded? # We should get rid of any <param> tags not inside <applet>; # These are not really valid anyway. for el in list(doc.iter('param')): found_parent = False parent = el.getparent() while parent is not None and parent.tag not in ('applet', 'object'): parent = parent.getparent() if parent is None: el.drop_tree() kill_tags.update(('applet',)) # The alternate contents that are in an iframe are a good fallback: remove_tags.update(('iframe', 'embed', 'layer', 'object', 'param')) if self.frames: # FIXME: ideally we should look at the frame links, but # generally frames don't mix properly with an HTML # fragment anyway. kill_tags.update(defs.frame_tags) if self.forms: remove_tags.add('form') kill_tags.update(('button', 'input', 'select', 'textarea')) if self.annoying_tags: remove_tags.update(('blink', 'marquee')) _remove = [] _kill = [] for el in doc.iter(): if el.tag in kill_tags: if self.allow_element(el): continue _kill.append(el) elif el.tag in remove_tags: if self.allow_element(el): continue _remove.append(el) if _remove and _remove[0] == doc: # We have to drop the parent-most tag, which we can't # do. 
Instead we'll rewrite it: el = _remove.pop(0) el.tag = 'div' el.attrib.clear() elif _kill and _kill[0] == doc: # We have to drop the parent-most element, which we can't # do. Instead we'll clear it: el = _kill.pop(0) if el.tag != 'html': el.tag = 'div' el.clear() _kill.reverse() # start with innermost tags for el in _kill: el.drop_tree() for el in _remove: el.drop_tag() if self.remove_unknown_tags: if allow_tags: raise ValueError( "It does not make sense to pass in both allow_tags and remove_unknown_tags") allow_tags = set(defs.tags) if allow_tags: bad = [] for el in doc.iter(): if el.tag not in allow_tags: bad.append(el) if bad: if bad[0] is doc: el = bad.pop(0) el.tag = 'div' el.attrib.clear() for el in bad: el.drop_tag() if self.add_nofollow: for el in _find_external_links(doc): if not self.allow_follow(el): rel = el.get('rel') if rel: if ('nofollow' in rel and ' nofollow ' in (' %s ' % rel)): continue rel = '%s nofollow' % rel else: rel = 'nofollow' el.set('rel', rel) def allow_follow(self, anchor): """ Override to suppress rel="nofollow" on some anchors. """ return False def allow_element(self, el): if el.tag not in self._tag_link_attrs: return False attr = self._tag_link_attrs[el.tag] if isinstance(attr, (list, tuple)): for one_attr in attr: url = el.get(one_attr) if not url: return False if not self.allow_embedded_url(el, url): return False return True else: url = el.get(attr) if not url: return False return self.allow_embedded_url(el, url) def allow_embedded_url(self, el, url): if (self.whitelist_tags is not None and el.tag not in self.whitelist_tags): return False scheme, netloc, path, query, fragment = urlsplit(url) netloc = netloc.lower().split(':', 1)[0] if scheme not in ('http', 'https'): return False if netloc in self.host_whitelist: return True return False def kill_conditional_comments(self, doc): """ IE conditional comments basically embed HTML that the parser doesn't normally see. 
We can't allow anything like that, so we'll kill any comments that could be conditional. """ bad = [] self._kill_elements( doc, lambda el: _conditional_comment_re.search(el.text), etree.Comment) def _kill_elements(self, doc, condition, iterate=None): bad = [] for el in doc.iter(iterate): if condition(el): bad.append(el) for el in bad: el.drop_tree() def _remove_javascript_link(self, link): # links like "j a v a s c r i p t:" might be interpreted in IE new = _substitute_whitespace('', unquote_plus(link)) if _is_javascript_scheme(new): # FIXME: should this be None to delete? return '' return link _substitute_comments = re.compile(r'/\*.*?\*/', re.S).sub def _has_sneaky_javascript(self, style): """ Depending on the browser, stuff like ``e x p r e s s i o n(...)`` can get interpreted, or ``expre/* stuff */ssion(...)``. This checks for attempt to do stuff like this. Typically the response will be to kill the entire style; if you have just a bit of Javascript in the style another rule will catch that and remove only the Javascript from the style; this catches more sneaky attempts. 
""" style = self._substitute_comments('', style) style = style.replace('\\', '') style = _substitute_whitespace('', style) style = style.lower() if 'javascript:' in style: return True if 'expression(' in style: return True return False def clean_html(self, html): result_type = type(html) if isinstance(html, basestring): doc = fromstring(html) else: doc = copy.deepcopy(html) self(doc) return _transform_result(result_type, doc) clean = Cleaner() clean_html = clean.clean_html ############################################################ ## Autolinking ############################################################ _link_regexes = [ re.compile(r'(?P<body>https?://(?P<host>[a-z0-9._-]+)(?:/[/\-_.,a-z0-9%&?;=~]*)?(?:\([/\-_.,a-z0-9%&?;=~]*\))?)', re.I), # This is conservative, but autolinking can be a bit conservative: re.compile(r'mailto:(?P<body>[a-z0-9._-]+@(?P<host>[a-z0-9_.-]+[a-z]))', re.I), ] _avoid_elements = ['textarea', 'pre', 'code', 'head', 'select', 'a'] _avoid_hosts = [ re.compile(r'^localhost', re.I), re.compile(r'\bexample\.(?:com|org|net)$', re.I), re.compile(r'^127\.0\.0\.1$'), ] _avoid_classes = ['nolink'] def autolink(el, link_regexes=_link_regexes, avoid_elements=_avoid_elements, avoid_hosts=_avoid_hosts, avoid_classes=_avoid_classes): """ Turn any URLs into links. It will search for links identified by the given regular expressions (by default mailto and http(s) links). It won't link text in an element in avoid_elements, or an element with a class in avoid_classes. It won't link to anything with a host that matches one of the regular expressions in avoid_hosts (default localhost and 127.0.0.1). If you pass in an element, the element's tail will not be substituted, only the contents of the element. 
""" if el.tag in avoid_elements: return class_name = el.get('class') if class_name: class_name = class_name.split() for match_class in avoid_classes: if match_class in class_name: return for child in list(el): autolink(child, link_regexes=link_regexes, avoid_elements=avoid_elements, avoid_hosts=avoid_hosts, avoid_classes=avoid_classes) if child.tail: text, tail_children = _link_text( child.tail, link_regexes, avoid_hosts, factory=el.makeelement) if tail_children: child.tail = text index = el.index(child) el[index+1:index+1] = tail_children if el.text: text, pre_children = _link_text( el.text, link_regexes, avoid_hosts, factory=el.makeelement) if pre_children: el.text = text el[:0] = pre_children def _link_text(text, link_regexes, avoid_hosts, factory): leading_text = '' links = [] last_pos = 0 while 1: best_match, best_pos = None, None for regex in link_regexes: regex_pos = last_pos while 1: match = regex.search(text, pos=regex_pos) if match is None: break host = match.group('host') for host_regex in avoid_hosts: if host_regex.search(host): regex_pos = match.end() break else: break if match is None: continue if best_pos is None or match.start() < best_pos: best_match = match best_pos = match.start() if best_match is None: # No more matches if links: assert not links[-1].tail links[-1].tail = text else: assert not leading_text leading_text = text break link = best_match.group(0) end = best_match.end() if link.endswith('.') or link.endswith(','): # These punctuation marks shouldn't end a link end -= 1 link = link[:-1] prev_text = text[:best_match.start()] if links: assert not links[-1].tail links[-1].tail = prev_text else: assert not leading_text leading_text = prev_text anchor = factory('a') anchor.set('href', link) body = best_match.group('body') if not body: body = link if body.endswith('.') or body.endswith(','): body = body[:-1] anchor.text = body links.append(anchor) text = text[end:] return leading_text, links def autolink_html(html, *args, **kw): result_type 
= type(html) if isinstance(html, basestring): doc = fromstring(html) else: doc = copy.deepcopy(html) autolink(doc, *args, **kw) return _transform_result(result_type, doc) autolink_html.__doc__ = autolink.__doc__ ############################################################ ## Word wrapping ############################################################ _avoid_word_break_elements = ['pre', 'textarea', 'code'] _avoid_word_break_classes = ['nobreak'] def word_break(el, max_width=40, avoid_elements=_avoid_word_break_elements, avoid_classes=_avoid_word_break_classes, break_character=unichr(0x200b)): """ Breaks any long words found in the body of the text (not attributes). Doesn't effect any of the tags in avoid_elements, by default ``<textarea>`` and ``<pre>`` Breaks words by inserting &#8203;, which is a unicode character for Zero Width Space character. This generally takes up no space in rendering, but does copy as a space, and in monospace contexts usually takes up space. See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion """ # Character suggestion of &#8203 comes from: # http://www.cs.tut.fi/~jkorpela/html/nobr.html if el.tag in _avoid_word_break_elements: return class_name = el.get('class') if class_name: dont_break = False class_name = class_name.split() for avoid in avoid_classes: if avoid in class_name: dont_break = True break if dont_break: return if el.text: el.text = _break_text(el.text, max_width, break_character) for child in el: word_break(child, max_width=max_width, avoid_elements=avoid_elements, avoid_classes=avoid_classes, break_character=break_character) if child.tail: child.tail = _break_text(child.tail, max_width, break_character) def word_break_html(html, *args, **kw): result_type = type(html) doc = fromstring(html) word_break(doc, *args, **kw) return _transform_result(result_type, doc) def _break_text(text, max_width, break_character): words = text.split() for word in words: if len(word) > max_width: replacement = _insert_break(word, 
max_width, break_character) text = text.replace(word, replacement) return text _break_prefer_re = re.compile(r'[^a-z]', re.I) def _insert_break(word, width, break_character): orig_word = word result = '' while len(word) > width: start = word[:width] breaks = list(_break_prefer_re.finditer(start)) if breaks: last_break = breaks[-1] # Only walk back up to 10 characters to find a nice break: if last_break.end() > width-10: # FIXME: should the break character be at the end of the # chunk, or the beginning of the next chunk? start = word[:last_break.end()] result += start + break_character word = word[len(start):] result += word return result
./CrossVul/dataset_final_sorted/CWE-79/py/good_453_0
crossvul-python_data_good_1644_9
"""Tornado handlers for the sessions web service.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import json from tornado import web from ...base.handlers import APIHandler, json_errors from IPython.utils.jsonutil import date_default from IPython.html.utils import url_path_join, url_escape from IPython.kernel.kernelspec import NoSuchKernel class SessionRootHandler(APIHandler): @web.authenticated @json_errors def get(self): # Return a list of running sessions sm = self.session_manager sessions = sm.list_sessions() self.finish(json.dumps(sessions, default=date_default)) @web.authenticated @json_errors def post(self): # Creates a new session #(unless a session already exists for the named nb) sm = self.session_manager cm = self.contents_manager km = self.kernel_manager model = self.get_json_body() if model is None: raise web.HTTPError(400, "No JSON data provided") try: path = model['notebook']['path'] except KeyError: raise web.HTTPError(400, "Missing field in JSON data: notebook.path") try: kernel_name = model['kernel']['name'] except KeyError: self.log.debug("No kernel name specified, using default kernel") kernel_name = None # Check to see if session exists if sm.session_exists(path=path): model = sm.get_session(path=path) else: try: model = sm.create_session(path=path, kernel_name=kernel_name) except NoSuchKernel: msg = ("The '%s' kernel is not available. Please pick another " "suitable kernel instead, or install that kernel." 
% kernel_name) status_msg = '%s not found' % kernel_name self.log.warn('Kernel not found: %s' % kernel_name) self.set_status(501) self.finish(json.dumps(dict(message=msg, short_message=status_msg))) return location = url_path_join(self.base_url, 'api', 'sessions', model['id']) self.set_header('Location', url_escape(location)) self.set_status(201) self.finish(json.dumps(model, default=date_default)) class SessionHandler(APIHandler): SUPPORTED_METHODS = ('GET', 'PATCH', 'DELETE') @web.authenticated @json_errors def get(self, session_id): # Returns the JSON model for a single session sm = self.session_manager model = sm.get_session(session_id=session_id) self.finish(json.dumps(model, default=date_default)) @web.authenticated @json_errors def patch(self, session_id): # Currently, this handler is strictly for renaming notebooks sm = self.session_manager model = self.get_json_body() if model is None: raise web.HTTPError(400, "No JSON data provided") changes = {} if 'notebook' in model: notebook = model['notebook'] if 'path' in notebook: changes['path'] = notebook['path'] sm.update_session(session_id, **changes) model = sm.get_session(session_id=session_id) self.finish(json.dumps(model, default=date_default)) @web.authenticated @json_errors def delete(self, session_id): # Deletes the session with given session_id sm = self.session_manager try: sm.delete_session(session_id) except KeyError: # the kernel was deleted but the session wasn't! raise web.HTTPError(410, "Kernel deleted before session") self.set_status(204) self.finish() #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- _session_id_regex = r"(?P<session_id>\w+-\w+-\w+-\w+-\w+)" default_handlers = [ (r"/api/sessions/%s" % _session_id_regex, SessionHandler), (r"/api/sessions", SessionRootHandler) ]
./CrossVul/dataset_final_sorted/CWE-79/py/good_1644_9
crossvul-python_data_bad_447_0
"""Tornado handlers for nbconvert.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import io import os import zipfile from tornado import web, escape from tornado.log import app_log from ..base.handlers import ( IPythonHandler, FilesRedirectHandler, path_regex, ) from nbformat import from_dict from ipython_genutils.py3compat import cast_bytes from ipython_genutils import text def find_resource_files(output_files_dir): files = [] for dirpath, dirnames, filenames in os.walk(output_files_dir): files.extend([os.path.join(dirpath, f) for f in filenames]) return files def respond_zip(handler, name, output, resources): """Zip up the output and resource files and respond with the zip file. Returns True if it has served a zip file, False if there are no resource files, in which case we serve the plain output file. """ # Check if we have resource files we need to zip output_files = resources.get('outputs', None) if not output_files: return False # Headers zip_filename = os.path.splitext(name)[0] + '.zip' handler.set_attachment_header(zip_filename) handler.set_header('Content-Type', 'application/zip') handler.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0') # Prepare the zip file buffer = io.BytesIO() zipf = zipfile.ZipFile(buffer, mode='w', compression=zipfile.ZIP_DEFLATED) output_filename = os.path.splitext(name)[0] + resources['output_extension'] zipf.writestr(output_filename, cast_bytes(output, 'utf-8')) for filename, data in output_files.items(): zipf.writestr(os.path.basename(filename), data) zipf.close() handler.finish(buffer.getvalue()) return True def get_exporter(format, **kwargs): """get an exporter, raising appropriate errors""" # if this fails, will raise 500 try: from nbconvert.exporters.base import get_exporter except ImportError as e: raise web.HTTPError(500, "Could not import nbconvert: %s" % e) try: Exporter = get_exporter(format) except KeyError: # should this be 400? 
raise web.HTTPError(404, u"No exporter for format: %s" % format) try: return Exporter(**kwargs) except Exception as e: app_log.exception("Could not construct Exporter: %s", Exporter) raise web.HTTPError(500, "Could not construct Exporter: %s" % e) class NbconvertFileHandler(IPythonHandler): SUPPORTED_METHODS = ('GET',) @web.authenticated def get(self, format, path): exporter = get_exporter(format, config=self.config, log=self.log) path = path.strip('/') # If the notebook relates to a real file (default contents manager), # give its path to nbconvert. if hasattr(self.contents_manager, '_get_os_path'): os_path = self.contents_manager._get_os_path(path) ext_resources_dir, basename = os.path.split(os_path) else: ext_resources_dir = None model = self.contents_manager.get(path=path) name = model['name'] if model['type'] != 'notebook': # not a notebook, redirect to files return FilesRedirectHandler.redirect_to_files(self, path) nb = model['content'] self.set_header('Last-Modified', model['last_modified']) # create resources dictionary mod_date = model['last_modified'].strftime(text.date_format) nb_title = os.path.splitext(name)[0] resource_dict = { "metadata": { "name": nb_title, "modified_date": mod_date }, "config_dir": self.application.settings['config_dir'] } if ext_resources_dir: resource_dict['metadata']['path'] = ext_resources_dir try: output, resources = exporter.from_notebook_node( nb, resources=resource_dict ) except Exception as e: self.log.exception("nbconvert failed: %s", e) raise web.HTTPError(500, "nbconvert failed: %s" % e) if respond_zip(self, name, output, resources): return # Force download if requested if self.get_argument('download', 'false').lower() == 'true': filename = os.path.splitext(name)[0] + resources['output_extension'] self.set_attachment_header(filename) # MIME type if exporter.output_mimetype: self.set_header('Content-Type', '%s; charset=utf-8' % exporter.output_mimetype) self.set_header('Cache-Control', 'no-store, no-cache, 
must-revalidate, max-age=0') self.finish(output) class NbconvertPostHandler(IPythonHandler): SUPPORTED_METHODS = ('POST',) @web.authenticated def post(self, format): exporter = get_exporter(format, config=self.config) model = self.get_json_body() name = model.get('name', 'notebook.ipynb') nbnode = from_dict(model['content']) try: output, resources = exporter.from_notebook_node(nbnode, resources={ "metadata": {"name": name[:name.rfind('.')],}, "config_dir": self.application.settings['config_dir'], }) except Exception as e: raise web.HTTPError(500, "nbconvert failed: %s" % e) if respond_zip(self, name, output, resources): return # MIME type if exporter.output_mimetype: self.set_header('Content-Type', '%s; charset=utf-8' % exporter.output_mimetype) self.finish(output) #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- _format_regex = r"(?P<format>\w+)" default_handlers = [ (r"/nbconvert/%s" % _format_regex, NbconvertPostHandler), (r"/nbconvert/%s%s" % (_format_regex, path_regex), NbconvertFileHandler), ]
./CrossVul/dataset_final_sorted/CWE-79/py/bad_447_0
crossvul-python_data_good_1091_0
from typing import Optional, Tuple from django.utils.translation import ugettext as _ from django.conf import settings from django.core.files import File from django.http import HttpRequest from django.db.models import Sum from jinja2 import Markup as mark_safe import unicodedata from zerver.lib.avatar_hash import user_avatar_path from zerver.lib.exceptions import JsonableError, ErrorCode from boto.s3.bucket import Bucket from boto.s3.key import Key from boto.s3.connection import S3Connection from mimetypes import guess_type, guess_extension from zerver.models import get_user_profile_by_id from zerver.models import Attachment from zerver.models import Realm, RealmEmoji, UserProfile, Message import urllib import base64 import os import re from PIL import Image, ImageOps, ExifTags from PIL.Image import DecompressionBombError from PIL.GifImagePlugin import GifImageFile import io import random import logging DEFAULT_AVATAR_SIZE = 100 MEDIUM_AVATAR_SIZE = 500 DEFAULT_EMOJI_SIZE = 64 # These sizes were selected based on looking at the maximum common # sizes in a library of animated custom emoji, balanced against the # network cost of very large emoji images. MAX_EMOJI_GIF_SIZE = 128 MAX_EMOJI_GIF_FILE_SIZE_BYTES = 128 * 1024 * 1024 # 128 kb INLINE_MIME_TYPES = [ "application/pdf", "image/gif", "image/jpeg", "image/png", "image/webp", # To avoid cross-site scripting attacks, DO NOT add types such # as application/xhtml+xml, application/x-shockwave-flash, # image/svg+xml, text/html, or text/xml. ] # Performance Note: # # For writing files to S3, the file could either be stored in RAM # (if it is less than 2.5MiB or so) or an actual temporary file on disk. # # Because we set FILE_UPLOAD_MAX_MEMORY_SIZE to 0, only the latter case # should occur in practice. # # This is great, because passing the pseudofile object that Django gives # you to boto would be a pain. # To come up with a s3 key we randomly generate a "directory". 
The # "file name" is the original filename provided by the user run # through a sanitization function. class RealmUploadQuotaError(JsonableError): code = ErrorCode.REALM_UPLOAD_QUOTA attachment_url_re = re.compile(r'[/\-]user[\-_]uploads[/\.-].*?(?=[ )]|\Z)') def attachment_url_to_path_id(attachment_url: str) -> str: path_id_raw = re.sub(r'[/\-]user[\-_]uploads[/\.-]', '', attachment_url) # Remove any extra '.' after file extension. These are probably added by the user return re.sub('[.]+$', '', path_id_raw, re.M) def sanitize_name(value: str) -> str: """ Sanitizes a value to be safe to store in a Linux filesystem, in S3, and in a URL. So unicode is allowed, but not special characters other than ".", "-", and "_". This implementation is based on django.utils.text.slugify; it is modified by: * adding '.' and '_' to the list of allowed characters. * preserving the case of the value. """ value = unicodedata.normalize('NFKC', value) value = re.sub(r'[^\w\s._-]', '', value, flags=re.U).strip() return mark_safe(re.sub(r'[-\s]+', '-', value, flags=re.U)) def random_name(bytes: int=60) -> str: return base64.urlsafe_b64encode(os.urandom(bytes)).decode('utf-8') class BadImageError(JsonableError): code = ErrorCode.BAD_IMAGE name_to_tag_num = dict((name, num) for num, name in ExifTags.TAGS.items()) # https://stackoverflow.com/a/6218425 def exif_rotate(image: Image) -> Image: if not hasattr(image, '_getexif'): return image exif_data = image._getexif() if exif_data is None: return image exif_dict = dict(exif_data.items()) orientation = exif_dict.get(name_to_tag_num['Orientation']) if orientation == 3: return image.rotate(180, expand=True) elif orientation == 6: return image.rotate(270, expand=True) elif orientation == 8: return image.rotate(90, expand=True) return image def resize_avatar(image_data: bytes, size: int=DEFAULT_AVATAR_SIZE) -> bytes: try: im = Image.open(io.BytesIO(image_data)) im = exif_rotate(im) im = ImageOps.fit(im, (size, size), Image.ANTIALIAS) except IOError: 
raise BadImageError(_("Could not decode image; did you upload an image file?")) except DecompressionBombError: raise BadImageError(_("Image size exceeds limit.")) out = io.BytesIO() if im.mode == 'CMYK': im = im.convert('RGB') im.save(out, format='png') return out.getvalue() def resize_logo(image_data: bytes) -> bytes: try: im = Image.open(io.BytesIO(image_data)) im = exif_rotate(im) im.thumbnail((8*DEFAULT_AVATAR_SIZE, DEFAULT_AVATAR_SIZE), Image.ANTIALIAS) except IOError: raise BadImageError(_("Could not decode image; did you upload an image file?")) except DecompressionBombError: raise BadImageError(_("Image size exceeds limit.")) out = io.BytesIO() if im.mode == 'CMYK': im = im.convert('RGB') im.save(out, format='png') return out.getvalue() def resize_gif(im: GifImageFile, size: int=DEFAULT_EMOJI_SIZE) -> bytes: frames = [] duration_info = [] # If 'loop' info is not set then loop for infinite number of times. loop = im.info.get("loop", 0) for frame_num in range(0, im.n_frames): im.seek(frame_num) new_frame = Image.new("RGBA", im.size) new_frame.paste(im, (0, 0), im.convert("RGBA")) new_frame = ImageOps.fit(new_frame, (size, size), Image.ANTIALIAS) frames.append(new_frame) duration_info.append(im.info['duration']) out = io.BytesIO() frames[0].save(out, save_all=True, optimize=True, format="GIF", append_images=frames[1:], duration=duration_info, loop=loop) return out.getvalue() def resize_emoji(image_data: bytes, size: int=DEFAULT_EMOJI_SIZE) -> bytes: try: im = Image.open(io.BytesIO(image_data)) image_format = im.format if image_format == "GIF": # There are a number of bugs in Pillow.GifImagePlugin which cause # results in resized gifs being broken. To work around this we # only resize under certain conditions to minimize the chance of # creating ugly gifs. 
should_resize = any(( im.size[0] != im.size[1], # not square im.size[0] > MAX_EMOJI_GIF_SIZE, # dimensions too large len(image_data) > MAX_EMOJI_GIF_FILE_SIZE_BYTES, # filesize too large )) return resize_gif(im, size) if should_resize else image_data else: im = exif_rotate(im) im = ImageOps.fit(im, (size, size), Image.ANTIALIAS) out = io.BytesIO() im.save(out, format=image_format) return out.getvalue() except IOError: raise BadImageError(_("Could not decode image; did you upload an image file?")) except DecompressionBombError: raise BadImageError(_("Image size exceeds limit.")) ### Common class ZulipUploadBackend: def upload_message_file(self, uploaded_file_name: str, uploaded_file_size: int, content_type: Optional[str], file_data: bytes, user_profile: UserProfile, target_realm: Optional[Realm]=None) -> str: raise NotImplementedError() def upload_avatar_image(self, user_file: File, acting_user_profile: UserProfile, target_user_profile: UserProfile) -> None: raise NotImplementedError() def delete_avatar_image(self, user: UserProfile) -> None: raise NotImplementedError() def delete_message_image(self, path_id: str) -> bool: raise NotImplementedError() def get_avatar_url(self, hash_key: str, medium: bool=False) -> str: raise NotImplementedError() def copy_avatar(self, source_profile: UserProfile, target_profile: UserProfile) -> None: raise NotImplementedError() def ensure_medium_avatar_image(self, user_profile: UserProfile) -> None: raise NotImplementedError() def ensure_basic_avatar_image(self, user_profile: UserProfile) -> None: raise NotImplementedError() def upload_realm_icon_image(self, icon_file: File, user_profile: UserProfile) -> None: raise NotImplementedError() def get_realm_icon_url(self, realm_id: int, version: int) -> str: raise NotImplementedError() def upload_realm_logo_image(self, logo_file: File, user_profile: UserProfile, night: bool) -> None: raise NotImplementedError() def get_realm_logo_url(self, realm_id: int, version: int, night: bool) -> str: 
raise NotImplementedError() def upload_emoji_image(self, emoji_file: File, emoji_file_name: str, user_profile: UserProfile) -> None: raise NotImplementedError() def get_emoji_url(self, emoji_file_name: str, realm_id: int) -> str: raise NotImplementedError() ### S3 def get_bucket(conn: S3Connection, bucket_name: str) -> Bucket: # Calling get_bucket() with validate=True can apparently lead # to expensive S3 bills: # http://www.appneta.com/blog/s3-list-get-bucket-default/ # The benefits of validation aren't completely clear to us, and # we want to save on our bills, so we set the validate flag to False. # (We think setting validate to True would cause us to fail faster # in situations where buckets don't exist, but that shouldn't be # an issue for us.) bucket = conn.get_bucket(bucket_name, validate=False) return bucket def upload_image_to_s3( bucket_name: str, file_name: str, content_type: Optional[str], user_profile: UserProfile, contents: bytes) -> None: conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) bucket = get_bucket(conn, bucket_name) key = Key(bucket) key.key = file_name key.set_metadata("user_profile_id", str(user_profile.id)) key.set_metadata("realm_id", str(user_profile.realm_id)) headers = {} if content_type is not None: headers["Content-Type"] = content_type if content_type not in INLINE_MIME_TYPES: headers["Content-Disposition"] = "attachment" key.set_contents_from_string(contents, headers=headers) # type: ignore # https://github.com/python/typeshed/issues/1552 def currently_used_upload_space(realm: Realm) -> int: used_space = Attachment.objects.filter(realm=realm).aggregate(Sum('size'))['size__sum'] if used_space is None: return 0 return used_space def check_upload_within_quota(realm: Realm, uploaded_file_size: int) -> None: upload_quota = realm.upload_quota_bytes() if upload_quota is None: return used_space = currently_used_upload_space(realm) if (used_space + uploaded_file_size) > upload_quota: raise RealmUploadQuotaError(_("Upload would 
exceed your organization's upload quota.")) def get_file_info(request: HttpRequest, user_file: File) -> Tuple[str, int, Optional[str]]: uploaded_file_name = user_file.name assert isinstance(uploaded_file_name, str) content_type = request.GET.get('mimetype') if content_type is None: guessed_type = guess_type(uploaded_file_name)[0] if guessed_type is not None: content_type = guessed_type else: extension = guess_extension(content_type) if extension is not None: uploaded_file_name = uploaded_file_name + extension uploaded_file_name = urllib.parse.unquote(uploaded_file_name) uploaded_file_size = user_file.size return uploaded_file_name, uploaded_file_size, content_type def get_signed_upload_url(path: str) -> str: conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) return conn.generate_url(15, 'GET', bucket=settings.S3_AUTH_UPLOADS_BUCKET, key=path) def get_realm_for_filename(path: str) -> Optional[int]: conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) key = get_bucket(conn, settings.S3_AUTH_UPLOADS_BUCKET).get_key(path) if key is None: # This happens if the key does not exist. return None return get_user_profile_by_id(key.metadata["user_profile_id"]).realm_id class S3UploadBackend(ZulipUploadBackend): def delete_file_from_s3(self, path_id: str, bucket_name: str) -> bool: conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) bucket = get_bucket(conn, bucket_name) # check if file exists key = bucket.get_key(path_id) if key is not None: bucket.delete_key(key) return True file_name = path_id.split("/")[-1] logging.warning("%s does not exist. Its entry in the database will be removed." 
% (file_name,)) return False def upload_message_file(self, uploaded_file_name: str, uploaded_file_size: int, content_type: Optional[str], file_data: bytes, user_profile: UserProfile, target_realm: Optional[Realm]=None) -> str: bucket_name = settings.S3_AUTH_UPLOADS_BUCKET if target_realm is None: target_realm = user_profile.realm s3_file_name = "/".join([ str(target_realm.id), random_name(18), sanitize_name(uploaded_file_name) ]) url = "/user_uploads/%s" % (s3_file_name,) upload_image_to_s3( bucket_name, s3_file_name, content_type, user_profile, file_data ) create_attachment(uploaded_file_name, s3_file_name, user_profile, uploaded_file_size) return url def delete_message_image(self, path_id: str) -> bool: return self.delete_file_from_s3(path_id, settings.S3_AUTH_UPLOADS_BUCKET) def write_avatar_images(self, s3_file_name: str, target_user_profile: UserProfile, image_data: bytes, content_type: Optional[str]) -> None: bucket_name = settings.S3_AVATAR_BUCKET upload_image_to_s3( bucket_name, s3_file_name + ".original", content_type, target_user_profile, image_data, ) # custom 500px wide version resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE) upload_image_to_s3( bucket_name, s3_file_name + "-medium.png", "image/png", target_user_profile, resized_medium ) resized_data = resize_avatar(image_data) upload_image_to_s3( bucket_name, s3_file_name, 'image/png', target_user_profile, resized_data, ) # See avatar_url in avatar.py for URL. (That code also handles the case # that users use gravatar.) 
# (methods of S3UploadBackend, continued)
    def upload_avatar_image(self, user_file: File,
                            acting_user_profile: UserProfile,
                            target_user_profile: UserProfile) -> None:
        """Read an uploaded avatar file and store all its S3 renditions."""
        content_type = guess_type(user_file.name)[0]
        s3_file_name = user_avatar_path(target_user_profile)

        image_data = user_file.read()
        self.write_avatar_images(s3_file_name, target_user_profile, image_data, content_type)

    def delete_avatar_image(self, user: UserProfile) -> None:
        """Delete every stored rendition (original, medium, standard) of the avatar."""
        path_id = user_avatar_path(user)
        bucket_name = settings.S3_AVATAR_BUCKET

        self.delete_file_from_s3(path_id + ".original", bucket_name)
        self.delete_file_from_s3(path_id + "-medium.png", bucket_name)
        self.delete_file_from_s3(path_id, bucket_name)

    def get_avatar_key(self, file_name: str) -> Key:
        """Fetch the boto Key object for ``file_name`` in the avatar bucket."""
        conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
        bucket_name = settings.S3_AVATAR_BUCKET
        bucket = get_bucket(conn, bucket_name)

        key = bucket.get_key(file_name)
        return key

    def copy_avatar(self, source_profile: UserProfile, target_profile: UserProfile) -> None:
        """Copy one user's avatar to another by re-uploading its original image."""
        s3_source_file_name = user_avatar_path(source_profile)
        s3_target_file_name = user_avatar_path(target_profile)

        key = self.get_avatar_key(s3_source_file_name + ".original")
        image_data = key.get_contents_as_string()  # type: ignore # https://github.com/python/typeshed/issues/1552
        content_type = key.content_type

        self.write_avatar_images(s3_target_file_name, target_profile, image_data, content_type)  # type: ignore # image_data is `bytes`, boto subs are wrong

    def get_avatar_url(self, hash_key: str, medium: bool=False) -> str:
        """Return the public S3 URL for an avatar hash (medium rendition if asked)."""
        bucket = settings.S3_AVATAR_BUCKET
        medium_suffix = "-medium.png" if medium else ""
        # ?x=x allows templates to append additional parameters with &s
        return "https://%s.s3.amazonaws.com/%s%s?x=x" % (bucket, hash_key, medium_suffix)

    def upload_realm_icon_image(self, icon_file: File, user_profile: UserProfile) -> None:
        """Store a realm icon (original + resized PNG) under <realm_id>/realm/icon."""
        content_type = guess_type(icon_file.name)[0]
        bucket_name = settings.S3_AVATAR_BUCKET
        s3_file_name = os.path.join(str(user_profile.realm.id), 'realm', 'icon')

        image_data = icon_file.read()
        upload_image_to_s3(
            bucket_name,
            s3_file_name + ".original",
            content_type,
            user_profile,
            image_data,
        )

        resized_data = resize_avatar(image_data)
        upload_image_to_s3(
            bucket_name,
            s3_file_name + ".png",
            'image/png',
            user_profile,
            resized_data,
        )
        # See avatar_url in avatar.py for URL.  (That code also handles the case
        # that users use gravatar.)

    def get_realm_icon_url(self, realm_id: int, version: int) -> str:
        """Return the public S3 URL of a realm's icon, cache-busted by version."""
        bucket = settings.S3_AVATAR_BUCKET
        # ?x=x allows templates to append additional parameters with &s
        return "https://%s.s3.amazonaws.com/%s/realm/icon.png?version=%s" % (bucket, realm_id, version)

    def upload_realm_logo_image(self, logo_file: File, user_profile: UserProfile,
                                night: bool) -> None:
        """Store a realm logo; ``night`` selects the dark-theme variant's key."""
        content_type = guess_type(logo_file.name)[0]
        bucket_name = settings.S3_AVATAR_BUCKET
        if night:
            basename = 'night_logo'
        else:
            basename = 'logo'
        s3_file_name = os.path.join(str(user_profile.realm.id), 'realm', basename)

        image_data = logo_file.read()
        upload_image_to_s3(
            bucket_name,
            s3_file_name + ".original",
            content_type,
            user_profile,
            image_data,
        )

        resized_data = resize_logo(image_data)
        upload_image_to_s3(
            bucket_name,
            s3_file_name + ".png",
            'image/png',
            user_profile,
            resized_data,
        )
        # See avatar_url in avatar.py for URL.  (That code also handles the case
        # that users use gravatar.)
def get_realm_logo_url(self, realm_id: int, version: int, night: bool) -> str: bucket = settings.S3_AVATAR_BUCKET # ?x=x allows templates to append additional parameters with &s if not night: file_name = 'logo.png' else: file_name = 'night_logo.png' return "https://%s.s3.amazonaws.com/%s/realm/%s?version=%s" % (bucket, realm_id, file_name, version) def ensure_medium_avatar_image(self, user_profile: UserProfile) -> None: file_path = user_avatar_path(user_profile) s3_file_name = file_path bucket_name = settings.S3_AVATAR_BUCKET conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) bucket = get_bucket(conn, bucket_name) key = bucket.get_key(file_path + ".original") image_data = key.get_contents_as_string() resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE) # type: ignore # image_data is `bytes`, boto subs are wrong upload_image_to_s3( bucket_name, s3_file_name + "-medium.png", "image/png", user_profile, resized_medium ) def ensure_basic_avatar_image(self, user_profile: UserProfile) -> None: # nocoverage # TODO: Refactor this to share code with ensure_medium_avatar_image file_path = user_avatar_path(user_profile) # Also TODO: Migrate to user_avatar_path(user_profile) + ".png". 
s3_file_name = file_path bucket_name = settings.S3_AVATAR_BUCKET conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) bucket = get_bucket(conn, bucket_name) key = bucket.get_key(file_path + ".original") image_data = key.get_contents_as_string() resized_avatar = resize_avatar(image_data) # type: ignore # image_data is `bytes`, boto subs are wrong upload_image_to_s3( bucket_name, s3_file_name, "image/png", user_profile, resized_avatar ) def upload_emoji_image(self, emoji_file: File, emoji_file_name: str, user_profile: UserProfile) -> None: content_type = guess_type(emoji_file.name)[0] bucket_name = settings.S3_AVATAR_BUCKET emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format( realm_id=user_profile.realm_id, emoji_file_name=emoji_file_name ) image_data = emoji_file.read() resized_image_data = resize_emoji(image_data) upload_image_to_s3( bucket_name, ".".join((emoji_path, "original")), content_type, user_profile, image_data, ) upload_image_to_s3( bucket_name, emoji_path, content_type, user_profile, resized_image_data, ) def get_emoji_url(self, emoji_file_name: str, realm_id: int) -> str: bucket = settings.S3_AVATAR_BUCKET emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(realm_id=realm_id, emoji_file_name=emoji_file_name) return "https://%s.s3.amazonaws.com/%s" % (bucket, emoji_path) ### Local def write_local_file(type: str, path: str, file_data: bytes) -> None: file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, type, path) os.makedirs(os.path.dirname(file_path), exist_ok=True) with open(file_path, 'wb') as f: f.write(file_data) def read_local_file(type: str, path: str) -> bytes: file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, type, path) with open(file_path, 'rb') as f: return f.read() def delete_local_file(type: str, path: str) -> bool: file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, type, path) if os.path.isfile(file_path): # This removes the file but the empty folders still remain. 
os.remove(file_path) return True file_name = path.split("/")[-1] logging.warning("%s does not exist. Its entry in the database will be removed." % (file_name,)) return False def get_local_file_path(path_id: str) -> Optional[str]: local_path = os.path.join(settings.LOCAL_UPLOADS_DIR, 'files', path_id) if os.path.isfile(local_path): return local_path else: return None class LocalUploadBackend(ZulipUploadBackend): def upload_message_file(self, uploaded_file_name: str, uploaded_file_size: int, content_type: Optional[str], file_data: bytes, user_profile: UserProfile, target_realm: Optional[Realm]=None) -> str: # Split into 256 subdirectories to prevent directories from getting too big path = "/".join([ str(user_profile.realm_id), format(random.randint(0, 255), 'x'), random_name(18), sanitize_name(uploaded_file_name) ]) write_local_file('files', path, file_data) create_attachment(uploaded_file_name, path, user_profile, uploaded_file_size) return '/user_uploads/' + path def delete_message_image(self, path_id: str) -> bool: return delete_local_file('files', path_id) def write_avatar_images(self, file_path: str, image_data: bytes) -> None: write_local_file('avatars', file_path + '.original', image_data) resized_data = resize_avatar(image_data) write_local_file('avatars', file_path + '.png', resized_data) resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE) write_local_file('avatars', file_path + '-medium.png', resized_medium) def upload_avatar_image(self, user_file: File, acting_user_profile: UserProfile, target_user_profile: UserProfile) -> None: file_path = user_avatar_path(target_user_profile) image_data = user_file.read() self.write_avatar_images(file_path, image_data) def delete_avatar_image(self, user: UserProfile) -> None: path_id = user_avatar_path(user) delete_local_file("avatars", path_id + ".original") delete_local_file("avatars", path_id + ".png") delete_local_file("avatars", path_id + "-medium.png") def get_avatar_url(self, hash_key: str, medium: 
bool=False) -> str: # ?x=x allows templates to append additional parameters with &s medium_suffix = "-medium" if medium else "" return "/user_avatars/%s%s.png?x=x" % (hash_key, medium_suffix) def copy_avatar(self, source_profile: UserProfile, target_profile: UserProfile) -> None: source_file_path = user_avatar_path(source_profile) target_file_path = user_avatar_path(target_profile) image_data = read_local_file('avatars', source_file_path + '.original') self.write_avatar_images(target_file_path, image_data) def upload_realm_icon_image(self, icon_file: File, user_profile: UserProfile) -> None: upload_path = os.path.join('avatars', str(user_profile.realm.id), 'realm') image_data = icon_file.read() write_local_file( upload_path, 'icon.original', image_data) resized_data = resize_avatar(image_data) write_local_file(upload_path, 'icon.png', resized_data) def get_realm_icon_url(self, realm_id: int, version: int) -> str: # ?x=x allows templates to append additional parameters with &s return "/user_avatars/%s/realm/icon.png?version=%s" % (realm_id, version) def upload_realm_logo_image(self, logo_file: File, user_profile: UserProfile, night: bool) -> None: upload_path = os.path.join('avatars', str(user_profile.realm.id), 'realm') if night: original_file = 'night_logo.original' resized_file = 'night_logo.png' else: original_file = 'logo.original' resized_file = 'logo.png' image_data = logo_file.read() write_local_file( upload_path, original_file, image_data) resized_data = resize_logo(image_data) write_local_file(upload_path, resized_file, resized_data) def get_realm_logo_url(self, realm_id: int, version: int, night: bool) -> str: # ?x=x allows templates to append additional parameters with &s if night: file_name = 'night_logo.png' else: file_name = 'logo.png' return "/user_avatars/%s/realm/%s?version=%s" % (realm_id, file_name, version) def ensure_medium_avatar_image(self, user_profile: UserProfile) -> None: file_path = user_avatar_path(user_profile) output_path = 
os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + "-medium.png") if os.path.isfile(output_path): return image_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + ".original") image_data = open(image_path, "rb").read() resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE) write_local_file('avatars', file_path + '-medium.png', resized_medium) def ensure_basic_avatar_image(self, user_profile: UserProfile) -> None: # nocoverage # TODO: Refactor this to share code with ensure_medium_avatar_image file_path = user_avatar_path(user_profile) output_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + ".png") if os.path.isfile(output_path): return image_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + ".original") image_data = open(image_path, "rb").read() resized_avatar = resize_avatar(image_data) write_local_file('avatars', file_path + '.png', resized_avatar) def upload_emoji_image(self, emoji_file: File, emoji_file_name: str, user_profile: UserProfile) -> None: emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format( realm_id= user_profile.realm_id, emoji_file_name=emoji_file_name ) image_data = emoji_file.read() resized_image_data = resize_emoji(image_data) write_local_file( 'avatars', ".".join((emoji_path, "original")), image_data) write_local_file( 'avatars', emoji_path, resized_image_data) def get_emoji_url(self, emoji_file_name: str, realm_id: int) -> str: return os.path.join( "/user_avatars", RealmEmoji.PATH_ID_TEMPLATE.format(realm_id=realm_id, emoji_file_name=emoji_file_name)) # Common and wrappers if settings.LOCAL_UPLOADS_DIR is not None: upload_backend = LocalUploadBackend() # type: ZulipUploadBackend else: upload_backend = S3UploadBackend() # nocoverage def delete_message_image(path_id: str) -> bool: return upload_backend.delete_message_image(path_id) def upload_avatar_image(user_file: File, acting_user_profile: UserProfile, target_user_profile: UserProfile) -> None: 
upload_backend.upload_avatar_image(user_file, acting_user_profile, target_user_profile) def delete_avatar_image(user_profile: UserProfile) -> None: upload_backend.delete_avatar_image(user_profile) def copy_avatar(source_profile: UserProfile, target_profile: UserProfile) -> None: upload_backend.copy_avatar(source_profile, target_profile) def upload_icon_image(user_file: File, user_profile: UserProfile) -> None: upload_backend.upload_realm_icon_image(user_file, user_profile) def upload_logo_image(user_file: File, user_profile: UserProfile, night: bool) -> None: upload_backend.upload_realm_logo_image(user_file, user_profile, night) def upload_emoji_image(emoji_file: File, emoji_file_name: str, user_profile: UserProfile) -> None: upload_backend.upload_emoji_image(emoji_file, emoji_file_name, user_profile) def upload_message_file(uploaded_file_name: str, uploaded_file_size: int, content_type: Optional[str], file_data: bytes, user_profile: UserProfile, target_realm: Optional[Realm]=None) -> str: return upload_backend.upload_message_file(uploaded_file_name, uploaded_file_size, content_type, file_data, user_profile, target_realm=target_realm) def claim_attachment(user_profile: UserProfile, path_id: str, message: Message, is_message_realm_public: bool) -> Attachment: attachment = Attachment.objects.get(path_id=path_id) attachment.messages.add(message) attachment.is_realm_public = attachment.is_realm_public or is_message_realm_public attachment.save() return attachment def create_attachment(file_name: str, path_id: str, user_profile: UserProfile, file_size: int) -> bool: attachment = Attachment.objects.create(file_name=file_name, path_id=path_id, owner=user_profile, realm=user_profile.realm, size=file_size) from zerver.lib.actions import notify_attachment_update notify_attachment_update(user_profile, 'add', attachment.to_dict()) return True def upload_message_image_from_request(request: HttpRequest, user_file: File, user_profile: UserProfile) -> str: uploaded_file_name, 
uploaded_file_size, content_type = get_file_info(request, user_file) return upload_message_file(uploaded_file_name, uploaded_file_size, content_type, user_file.read(), user_profile)
# ---- end of dataset sample: ./CrossVul/dataset_final_sorted/CWE-79/py/good_1091_0 ----
# ---- begin dataset sample: crossvul-python_data_good_4097_3 (Wagtail form-page test helpers) ----
# -*- coding: utf-8 -*-
from wagtail.core.models import Page
from wagtail.tests.testapp.models import (
    FormField, FormFieldWithCustomSubmission, FormPage, FormPageWithCustomSubmission,
    FormPageWithRedirect, RedirectFormField)

# Page-level keyword defaults shared by every factory below.
_PAGE_DEFAULTS = (
    ('title', "Contact us"),
    ('slug', "contact-us"),
    ('to_address', "to@email.com"),
    ('from_address', "from@email.com"),
    ('subject', "The subject"),
)


def _apply_page_defaults(kwargs):
    """Fill in the shared page kwargs without overriding caller-supplied values."""
    for key, value in _PAGE_DEFAULTS:
        kwargs.setdefault(key, value)


def _home_page():
    """Return the site home page that every form page is created under."""
    return Page.objects.get(url_path='/home/')


def _create_fields(field_model, page, specs):
    """Create one ``field_model`` row per spec dict, numbering sort_order from 1."""
    for sort_order, spec in enumerate(specs, start=1):
        field_model.objects.create(page=page, sort_order=sort_order, **spec)


def make_form_page(**kwargs):
    """Create a FormPage with an email, a message and a checkboxes field."""
    _apply_page_defaults(kwargs)
    home = _home_page()
    page = home.add_child(instance=FormPage(**kwargs))
    _create_fields(FormField, page, (
        {'label': "Your email", 'field_type': 'email', 'required': True},
        {'label': "Your message", 'field_type': 'multiline', 'required': True,
         'help_text': "<em>please</em> be polite"},
        {'label': "Your choices", 'field_type': 'checkboxes', 'required': False,
         'choices': 'foo,bar,baz'},
    ))
    return page


def make_form_page_with_custom_submission(**kwargs):
    """Create a FormPageWithCustomSubmission and its three standard fields."""
    kwargs.setdefault('intro', "<p>Boring intro text</p>")
    kwargs.setdefault('thank_you_text', "<p>Thank you for your patience!</p>")
    _apply_page_defaults(kwargs)
    home = _home_page()
    page = home.add_child(instance=FormPageWithCustomSubmission(**kwargs))
    _create_fields(FormFieldWithCustomSubmission, page, (
        {'label': "Your email", 'field_type': 'email', 'required': True},
        {'label': "Your message", 'field_type': 'multiline', 'required': True},
        {'label': "Your choices", 'field_type': 'checkboxes', 'required': False,
         'choices': 'foo,bar,baz'},
    ))
    return page


def make_form_page_with_redirect(**kwargs):
    """Create a FormPageWithRedirect whose thank-you redirect is the home page."""
    _apply_page_defaults(kwargs)
    home = _home_page()
    kwargs.setdefault('thank_you_redirect_page', home)
    page = home.add_child(instance=FormPageWithRedirect(**kwargs))
    _create_fields(RedirectFormField, page, (
        {'label': "Your email", 'field_type': 'email', 'required': True},
        {'label': "Your message", 'field_type': 'multiline', 'required': True},
        {'label': "Your choices", 'field_type': 'checkboxes', 'required': False,
         'choices': 'foo,bar,baz'},
    ))
    return page


def make_types_test_form_page(**kwargs):
    """Create a FormPage carrying one field of every supported type."""
    _apply_page_defaults(kwargs)
    home = _home_page()
    page = home.add_child(instance=FormPage(**kwargs))
    _create_fields(FormField, page, (
        {'label': "Single line text", 'field_type': 'singleline', 'required': False},
        {'label': "Multiline", 'field_type': 'multiline', 'required': False},
        {'label': "Email", 'field_type': 'email', 'required': False},
        {'label': "Number", 'field_type': 'number', 'required': False},
        {'label': "URL", 'field_type': 'url', 'required': False},
        {'label': "Checkbox", 'field_type': 'checkbox', 'required': False},
        {'label': "Checkboxes", 'field_type': 'checkboxes', 'required': False,
         'choices': 'foo,bar,baz'},
        {'label': "Drop down", 'field_type': 'dropdown', 'required': False,
         'choices': 'spam,ham,eggs'},
        {'label': "Multiple select", 'field_type': 'multiselect', 'required': False,
         'choices': 'qux,quux,quuz,corge'},
        {'label': "Radio buttons", 'field_type': 'radio', 'required': False,
         'choices': 'wibble,wobble,wubble'},
        {'label': "Date", 'field_type': 'date', 'required': False},
        {'label': "Datetime", 'field_type': 'datetime', 'required': False},
    ))
    return page
# ---- end of dataset sample: ./CrossVul/dataset_final_sorted/CWE-79/py/good_4097_3 ----
# ---- begin dataset sample: crossvul-python_data_good_5191_3 (Django admin_views test ModelAdmins) ----
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import os
import tempfile

from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
# Register core models we need in our tests
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.core.servers.basehttp import FileWrapper
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.safestring import mark_safe
from django.utils.six import StringIO

from .models import (
    Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
    AdminOrderedModelMethod, Album, Answer, Article, BarAccount, Book,
    Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice, City,
    Collector, Color, Color2, ComplexSortedPerson, CoverLetter, CustomArticle,
    CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
    EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
    ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
    FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
    GenRelReference, Grommet, ImplicitlyGeneratedPK, Ingredient,
    InlineReference, InlineReferer, Inquisition, Language, Link,
    MainPrepopulated, ModelWithStringPrimaryKey, NotReferenced, OldSubscriber,
    OtherStory, Paper, Parent, ParentWithDependentChildren, ParentWithUUIDPK,
    Person, Persona, Picture, Pizza, Plot, PlotDetails, PlotProxy,
    PluggableSearchPerson, Podcast, Post, PrePopulatedPost,
    PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo, Question, Recipe,
    Recommendation, Recommender, ReferencedByGenRel, ReferencedByInline,
    ReferencedByParent, RelatedPrepopulated, RelatedWithUUIDPKModel, Report,
    Reservation, Restaurant, RowLevelChangePermissionModel, Section,
    ShortMessage, Simple, Sketch, State, Story, StumpJoke, Subscriber,
    SuperVillain, Telegram, Thing, Topping, UnchangeableObject,
    UndeletableObject, UnorderedObject, UserMessenger, Villain, Vodcast,
    Whatsit, Widget, Worker, WorkHour,
)


def callable_year(dt_value):
    # list_display callable: extract the year, tolerating non-date values.
    try:
        return dt_value.year
    except AttributeError:
        return None
callable_year.admin_order_field = 'date'


class ArticleInline(admin.TabularInline):
    # Inline editor for Articles attached to a Section; exercises fieldsets
    # and prepopulated_fields on inlines.
    model = Article
    fk_name = 'section'
    prepopulated_fields = {
        'title': ('content',)
    }
    fieldsets = (
        ('Some fields', {
            'classes': ('collapse',),
            'fields': ('title', 'content')
        }),
        ('Some other fields', {
            'classes': ('wide',),
            'fields': ('date', 'section')
        })
    )


class ChapterInline(admin.TabularInline):
    model = Chapter


class ChapterXtra1Admin(admin.ModelAdmin):
    # Exercises list_filter across deep relation traversals.
    list_filter = ('chap',
                   'chap__title',
                   'chap__book',
                   'chap__book__name',
                   'chap__book__promo',
                   'chap__book__promo__name',)


class ArticleAdmin(admin.ModelAdmin):
    # Exercises mixed list_display entries (fields, callables, admin methods),
    # list_editable, and email-sending save/delete hooks.
    list_display = ('content', 'date', callable_year, 'model_year',
                    'modeladmin_year', 'model_year_reversed', 'section')
    list_editable = ('section',)
    list_filter = ('date', 'section')
    view_on_site = False
    fieldsets = (
        ('Some fields', {
            'classes': ('collapse',),
            'fields': ('title', 'content')
        }),
        ('Some other fields', {
            'classes': ('wide',),
            'fields': ('date', 'section', 'sub_section')
        })
    )

    def changelist_view(self, request):
        "Test that extra_context works"
        return super(ArticleAdmin, self).changelist_view(
            request, extra_context={
                'extra_var': 'Hello!'
            }
        )

    def modeladmin_year(self, obj):
        # Admin-defined list_display column, sortable by the model's date.
        return obj.date.year
    modeladmin_year.admin_order_field = 'date'
    modeladmin_year.short_description = None

    def delete_model(self, request, obj):
        # Sends a notification email as an observable side effect for tests.
        EmailMessage(
            'Greetings from a deleted object',
            'I hereby inform you that some user deleted me',
            'from@example.com',
            ['to@example.com']
        ).send()
        return super(ArticleAdmin, self).delete_model(request, obj)

    def save_model(self, request, obj, form, change=True):
        # Sends a notification email as an observable side effect for tests.
        EmailMessage(
            'Greetings from a created object',
            'I hereby inform you that some user created me',
            'from@example.com',
            ['to@example.com']
        ).send()
        return super(ArticleAdmin, self).save_model(request, obj, form, change)


class ArticleAdmin2(admin.ModelAdmin):
    # Hides the whole app module from the admin index.
    def has_module_permission(self, request):
        return False


class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
    def has_change_permission(self, request, obj=None):
        """ Only allow changing objects with even id number """
        return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)


class CustomArticleAdmin(admin.ModelAdmin):
    """
    Tests various hooks for using custom templates and contexts.
    """
    change_list_template = 'custom_admin/change_list.html'
    change_form_template = 'custom_admin/change_form.html'
    add_form_template = 'custom_admin/add_form.html'
    object_history_template = 'custom_admin/object_history.html'
    delete_confirmation_template = 'custom_admin/delete_confirmation.html'
    delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'

    def changelist_view(self, request):
        "Test that extra_context works"
        return super(CustomArticleAdmin, self).changelist_view(
            request, extra_context={
                'extra_var': 'Hello!'
            }
        )


class ThingAdmin(admin.ModelAdmin):
    list_filter = ('color__warm', 'color__value', 'pub_date',)


class InquisitionAdmin(admin.ModelAdmin):
    list_display = ('leader', 'country', 'expected', 'sketch')

    def sketch(self, obj):
        # A method with the same name as a reverse accessor.
        return 'list-display-sketch'


class SketchAdmin(admin.ModelAdmin):
    raw_id_fields = ('inquisition', 'defendant0', 'defendant1')


class FabricAdmin(admin.ModelAdmin):
    list_display = ('surface',)
    list_filter = ('surface',)


class BasePersonModelFormSet(BaseModelFormSet):
    # Custom changelist formset used by PersonAdmin to test formset-level clean().
    def clean(self):
        for person_dict in self.cleaned_data:
            person = person_dict.get('id')
            alive = person_dict.get('alive')
            if person and alive and person.name == "Grace Hopper":
                raise forms.ValidationError("Grace is not a Zombie")


class PersonAdmin(admin.ModelAdmin):
    list_display = ('name', 'gender', 'alive')
    list_editable = ('gender', 'alive')
    list_filter = ('gender',)
    search_fields = ('^name',)
    save_as = True

    def get_changelist_formset(self, request, **kwargs):
        # Inject the validating formset defined above.
        return super(PersonAdmin, self).get_changelist_formset(
            request, formset=BasePersonModelFormSet, **kwargs)

    def get_queryset(self, request):
        # Order by a field that isn't in list display, to be able to test
        # whether ordering is preserved.
        return super(PersonAdmin, self).get_queryset(request).order_by('age')


class FooAccountAdmin(admin.StackedInline):
    model = FooAccount
    extra = 1


class BarAccountAdmin(admin.StackedInline):
    model = BarAccount
    extra = 1


class PersonaAdmin(admin.ModelAdmin):
    inlines = (
        FooAccountAdmin,
        BarAccountAdmin
    )


class SubscriberAdmin(admin.ModelAdmin):
    actions = ['mail_admin']

    def mail_admin(self, request, selected):
        # Admin action defined as a ModelAdmin method.
        EmailMessage(
            'Greetings from a ModelAdmin action',
            'This is the test email from an admin action',
            'from@example.com',
            ['to@example.com']
        ).send()


def external_mail(modeladmin, request, selected):
    # Admin action defined as a standalone function.
    EmailMessage(
        'Greetings from a function action',
        'This is the test email from a function action',
        'from@example.com',
        ['to@example.com']
    ).send()
external_mail.short_description = 'External mail (Another awesome action)'


def redirect_to(modeladmin, request, selected):
    # Admin action that responds with a redirect instead of rendering.
    from django.http import HttpResponseRedirect
    return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
    # Admin action that returns a streaming file download.
    buf = StringIO('This is the content of the file')
    return StreamingHttpResponse(FileWrapper(buf))
download.short_description = 'Download subscription'


def no_perm(modeladmin, request, selected):
    # Admin action that always responds 403.
    return HttpResponse(content='No permission to perform this action',
                        status=403)
no_perm.short_description = 'No permission to run'


class ExternalSubscriberAdmin(admin.ModelAdmin):
    actions = [redirect_to, external_mail, download, no_perm]


class PodcastAdmin(admin.ModelAdmin):
    list_display = ('name', 'release_date')
    list_editable = ('release_date',)
    date_hierarchy = 'release_date'
    ordering = ('name',)


class VodcastAdmin(admin.ModelAdmin):
    list_display = ('name', 'released')
    list_editable = ('released',)
    ordering = ('name',)


class ChildInline(admin.StackedInline):
    model = Child


class ParentAdmin(admin.ModelAdmin):
    model = Parent
    inlines = [ChildInline]
    list_editable = ('name',)

    def save_related(self, request, form, formsets, change):
        # Propagate the parent's last name to children missing one.
        super(ParentAdmin, self).save_related(request, form, formsets, change)
        first_name, last_name = form.instance.name.split()
        for child in form.instance.child_set.all():
            if len(child.name.split()) < 2:
                child.name = child.name + ' ' + last_name
                child.save()


class EmptyModelAdmin(admin.ModelAdmin):
    def get_queryset(self, request):
        # Hide pk=1 so the changelist can be exercised against missing rows.
        return super(EmptyModelAdmin, self).get_queryset(request).filter(pk__gt=1)


class OldSubscriberAdmin(admin.ModelAdmin):
    actions = None


# Throwaway on-disk storage used by file-upload tests.
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')


class PictureInline(admin.TabularInline):
    model = Picture
    extra = 1


class GalleryAdmin(admin.ModelAdmin):
    inlines = [PictureInline]


class PictureAdmin(admin.ModelAdmin):
    pass


class LanguageAdmin(admin.ModelAdmin):
    list_display = ['iso', 'shortlist', 'english_name', 'name']
    list_editable = ['shortlist']


class RecommendationAdmin(admin.ModelAdmin):
    show_full_result_count = False
    # "=" prefix means exact-match search across the related translations.
    search_fields = ('=titletranslation__text',
                     '=recommender__titletranslation__text',)


class WidgetInline(admin.StackedInline):
    model = Widget


class DooHickeyInline(admin.StackedInline):
    model = DooHickey


class GrommetInline(admin.StackedInline):
    model = Grommet


class WhatsitInline(admin.StackedInline):
    model = Whatsit


class FancyDoodadInline(admin.StackedInline):
    model = FancyDoodad


class CategoryAdmin(admin.ModelAdmin):
    list_display = ('id', 'collector', 'order')
    list_editable = ('order',)


class CategoryInline(admin.StackedInline):
    model = Category


class CollectorAdmin(admin.ModelAdmin):
    # Many inlines at once, to exercise multi-formset pages.
    inlines = [
        WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
        FancyDoodadInline, CategoryInline
    ]


class LinkInline(admin.TabularInline):
    model = Link
    extra = 1

    readonly_fields = ("posted", "multiline", "readonly_link_content")

    def multiline(self, instance):
        # Readonly callable whose value contains newlines.
        return "InlineMultiline\ntest\nstring"


class SubPostInline(admin.TabularInline):
    model = PrePopulatedSubPost

    prepopulated_fields = {
        'subslug': ('subtitle',)
    }

    def get_readonly_fields(self, request, obj=None):
        # Published posts get a frozen slug.
        if obj and obj.published:
            return ('subslug',)
        return self.readonly_fields

    def get_prepopulated_fields(self, request, obj=None):
        # Prepopulation only applies while the post is unpublished.
        if obj and obj.published:
            return {}
        return self.prepopulated_fields


class PrePopulatedPostAdmin(admin.ModelAdmin):
    list_display = ['title', 'slug']
    prepopulated_fields = {
        'slug': ('title',)
    }

    inlines = [SubPostInline]

    def get_readonly_fields(self, request, obj=None):
        # Published posts get a frozen slug.
        if obj and obj.published:
            return ('slug',)
        return self.readonly_fields

    def get_prepopulated_fields(self, request, obj=None):
        # Prepopulation only applies while the post is unpublished.
        if obj and obj.published:
            return {}
        return self.prepopulated_fields


class PostAdmin(admin.ModelAdmin):
    # Exercises the full range of readonly_fields entries: model fields,
    # admin methods, a lambda, and model attributes.
    list_display = ['title', 'public']
    readonly_fields = (
        'posted', 'awesomeness_level', 'coolness', 'value',
        'multiline', 'multiline_html', lambda obj: "foo",
        'readonly_content',
    )

    inlines = [
        LinkInline
    ]

    def coolness(self, instance):
        if instance.pk:
            return "%d amount of cool." % instance.pk
        else:
            return "Unknown coolness."

    def value(self, instance):
        return 1000

    def multiline(self, instance):
        return "Multiline\ntest\nstring"

    def multiline_html(self, instance):
        # Marked safe so the <br> tags render as HTML in the readonly field.
        return mark_safe("Multiline<br>\nhtml<br>\ncontent")
    multiline_html.allow_tags = True

    value.short_description = 'Value in $US'


class FieldOverridePostForm(forms.ModelForm):
    model = FieldOverridePost

    class Meta:
        help_texts = {
            'posted': 'Overridden help text for the date',
        }
        labels = {
            'public': 'Overridden public label',
        }


class FieldOverridePostAdmin(PostAdmin):
    form = FieldOverridePostForm


class CustomChangeList(ChangeList):
    def get_queryset(self, request):
        return self.root_queryset.filter(pk=9999)  # Does not exist


class GadgetAdmin(admin.ModelAdmin):
    def get_changelist(self, request, **kwargs):
        # Swap in the always-empty changelist defined above.
        return CustomChangeList


class ToppingAdmin(admin.ModelAdmin):
    readonly_fields = ('pizzas',)


class PizzaAdmin(admin.ModelAdmin):
    readonly_fields = ('toppings',)


class WorkHourAdmin(admin.ModelAdmin):
    list_display = ('datum', 'employee')
    list_filter = ('employee',)


class FoodDeliveryAdmin(admin.ModelAdmin):
    list_display = ('reference', 'driver', 'restaurant')
    list_editable = ('driver', 'restaurant')


class CoverLetterAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom get_queryset() method that uses defer(), to test
    verbose_name display in messages shown after adding/editing CoverLetter
    instances. Note that the CoverLetter model defines a __unicode__ method.
    For testing fix for ticket #14529.
    """

    def get_queryset(self, request):
        return super(CoverLetterAdmin, self).get_queryset(request).defer('date_written')


class PaperAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom get_queryset() method that uses only(), to test
    verbose_name display in messages shown after adding/editing Paper
    instances.
    For testing fix for ticket #14529.
    """

    def get_queryset(self, request):
        return super(PaperAdmin, self).get_queryset(request).only('title')


class ShortMessageAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom get_queryset() method that uses defer(), to test
    verbose_name display in messages shown after adding/editing ShortMessage
    instances.
    For testing fix for ticket #14529.
    """

    def get_queryset(self, request):
        return super(ShortMessageAdmin, self).get_queryset(request).defer('timestamp')


class TelegramAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom get_queryset() method that uses only(), to test
    verbose_name display in messages shown after adding/editing Telegram
    instances. Note that the Telegram model defines a __unicode__ method.
    For testing fix for ticket #14529.
    """

    def get_queryset(self, request):
        return super(TelegramAdmin, self).get_queryset(request).only('title')


class StoryForm(forms.ModelForm):
    class Meta:
        widgets = {'title': forms.HiddenInput}


class StoryAdmin(admin.ModelAdmin):
    list_display = ('id', 'title', 'content')
    list_display_links = ('title',)  # 'id' not in list_display_links
    list_editable = ('content', )
    form = StoryForm
    ordering = ["-pk"]


class OtherStoryAdmin(admin.ModelAdmin):
    list_display = ('id', 'title', 'content')
    list_display_links = ('title', 'id')  # 'id' in list_display_links
    list_editable = ('content', )
    ordering = ["-pk"]


class ComplexSortedPersonAdmin(admin.ModelAdmin):
    list_display = ('name', 'age', 'is_employee', 'colored_name')
    ordering = ('name',)

    def colored_name(self, obj):
        # NOTE(review): obj.name is interpolated unescaped into HTML with
        # allow_tags set — looks XSS-prone if names are user-controlled;
        # confirm whether format_html/escaping is intended here.
        return '<span style="color: #%s;">%s</span>' % ('ff00ff', obj.name)
    colored_name.allow_tags = True
    colored_name.admin_order_field = 'name'


class PluggableSearchPersonAdmin(admin.ModelAdmin):
    list_display = ('name', 'age')
    search_fields = ('name',)

    def get_search_results(self, request, queryset, search_term):
        # Extend the default search: numeric terms also match on age.
        queryset, use_distinct = super(PluggableSearchPersonAdmin, self).get_search_results(
            request, queryset, search_term)
        try:
            search_term_as_int = int(search_term)
            queryset |=
self.model.objects.filter(age=search_term_as_int) except: pass return queryset, use_distinct class AlbumAdmin(admin.ModelAdmin): list_filter = ['title'] class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin): prepopulated_fields = { 'slug': ('title',) } class AdminOrderedFieldAdmin(admin.ModelAdmin): ordering = ('order',) list_display = ('stuff', 'order') class AdminOrderedModelMethodAdmin(admin.ModelAdmin): ordering = ('order',) list_display = ('stuff', 'some_order') class AdminOrderedAdminMethodAdmin(admin.ModelAdmin): def some_admin_order(self, obj): return obj.order some_admin_order.admin_order_field = 'order' ordering = ('order',) list_display = ('stuff', 'some_admin_order') def admin_ordered_callable(obj): return obj.order admin_ordered_callable.admin_order_field = 'order' class AdminOrderedCallableAdmin(admin.ModelAdmin): ordering = ('order',) list_display = ('stuff', admin_ordered_callable) class ReportAdmin(admin.ModelAdmin): def extra(self, request): return HttpResponse() def get_urls(self): # Corner case: Don't call parent implementation return [ url(r'^extra/$', self.extra, name='cable_extra'), ] class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter): template = 'custom_filter_template.html' class CustomTemplateFilterColorAdmin(admin.ModelAdmin): list_filter = (('warm', CustomTemplateBooleanFieldListFilter),) # For Selenium Prepopulated tests ------------------------------------- class RelatedPrepopulatedInline1(admin.StackedInline): fieldsets = ( (None, { 'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),) }), ) model = RelatedPrepopulated extra = 1 prepopulated_fields = {'slug1': ['name', 'pubdate'], 'slug2': ['status', 'name']} class RelatedPrepopulatedInline2(admin.TabularInline): model = RelatedPrepopulated extra = 1 prepopulated_fields = {'slug1': ['name', 'pubdate'], 'slug2': ['status', 'name']} class MainPrepopulatedAdmin(admin.ModelAdmin): inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2] fieldsets = ( 
(None, { 'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),) }), ) prepopulated_fields = {'slug1': ['name', 'pubdate'], 'slug2': ['status', 'name']} class UnorderedObjectAdmin(admin.ModelAdmin): list_display = ['name'] list_editable = ['name'] list_per_page = 2 class UndeletableObjectAdmin(admin.ModelAdmin): def change_view(self, *args, **kwargs): kwargs['extra_context'] = {'show_delete': False} return super(UndeletableObjectAdmin, self).change_view(*args, **kwargs) class UnchangeableObjectAdmin(admin.ModelAdmin): def get_urls(self): # Disable change_view, but leave other urls untouched urlpatterns = super(UnchangeableObjectAdmin, self).get_urls() return [p for p in urlpatterns if not p.name.endswith("_change")] def callable_on_unknown(obj): return obj.unknown class AttributeErrorRaisingAdmin(admin.ModelAdmin): list_display = [callable_on_unknown, ] class CustomManagerAdmin(admin.ModelAdmin): def get_queryset(self, request): return FilteredManager.objects class MessageTestingAdmin(admin.ModelAdmin): actions = ["message_debug", "message_info", "message_success", "message_warning", "message_error", "message_extra_tags"] def message_debug(self, request, selected): self.message_user(request, "Test debug", level="debug") def message_info(self, request, selected): self.message_user(request, "Test info", level="info") def message_success(self, request, selected): self.message_user(request, "Test success", level="success") def message_warning(self, request, selected): self.message_user(request, "Test warning", level="warning") def message_error(self, request, selected): self.message_user(request, "Test error", level="error") def message_extra_tags(self, request, selected): self.message_user(request, "Test tags", extra_tags="extra_tag") class ChoiceList(admin.ModelAdmin): list_display = ['choice'] readonly_fields = ['choice'] fields = ['choice'] class DependentChildAdminForm(forms.ModelForm): """ Issue #20522 Form to test child dependency on parent object's 
validation """ def clean(self): parent = self.cleaned_data.get('parent') if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'): raise ValidationError("Children must share a family name with their parents " + "in this contrived test case") return super(DependentChildAdminForm, self).clean() class DependentChildInline(admin.TabularInline): model = DependentChild form = DependentChildAdminForm class ParentWithDependentChildrenAdmin(admin.ModelAdmin): inlines = [DependentChildInline] # Tests for ticket 11277 ---------------------------------- class FormWithoutHiddenField(forms.ModelForm): first = forms.CharField() second = forms.CharField() class FormWithoutVisibleField(forms.ModelForm): first = forms.CharField(widget=forms.HiddenInput) second = forms.CharField(widget=forms.HiddenInput) class FormWithVisibleAndHiddenField(forms.ModelForm): first = forms.CharField(widget=forms.HiddenInput) second = forms.CharField() class EmptyModelVisibleAdmin(admin.ModelAdmin): form = FormWithoutHiddenField fieldsets = ( (None, { 'fields': (('first', 'second'),), }), ) class EmptyModelHiddenAdmin(admin.ModelAdmin): form = FormWithoutVisibleField fieldsets = EmptyModelVisibleAdmin.fieldsets class EmptyModelMixinAdmin(admin.ModelAdmin): form = FormWithVisibleAndHiddenField fieldsets = EmptyModelVisibleAdmin.fieldsets class CityInlineAdmin(admin.TabularInline): model = City view_on_site = False class StateAdmin(admin.ModelAdmin): inlines = [CityInlineAdmin] class RestaurantInlineAdmin(admin.TabularInline): model = Restaurant view_on_site = True class CityAdmin(admin.ModelAdmin): inlines = [RestaurantInlineAdmin] view_on_site = True class WorkerAdmin(admin.ModelAdmin): def view_on_site(self, obj): return '/worker/%s/%s/' % (obj.surname, obj.name) class WorkerInlineAdmin(admin.TabularInline): model = Worker def view_on_site(self, obj): return '/worker_inline/%s/%s/' % (obj.surname, obj.name) class RestaurantAdmin(admin.ModelAdmin): inlines = 
[WorkerInlineAdmin] view_on_site = False def get_changeform_initial_data(self, request): return {'name': 'overridden_value'} class FunkyTagAdmin(admin.ModelAdmin): list_display = ('name', 'content_object') class InlineReferenceInline(admin.TabularInline): model = InlineReference class InlineRefererAdmin(admin.ModelAdmin): inlines = [InlineReferenceInline] class PlotReadonlyAdmin(admin.ModelAdmin): readonly_fields = ('plotdetails',) class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin): fields = ['name'] def add_view(self, request, *args, **kwargs): request.is_add_view = True return super(GetFormsetsArgumentCheckingAdmin, self).add_view(request, *args, **kwargs) def change_view(self, request, *args, **kwargs): request.is_add_view = False return super(GetFormsetsArgumentCheckingAdmin, self).change_view(request, *args, **kwargs) def get_formsets_with_inlines(self, request, obj=None): if request.is_add_view and obj is not None: raise Exception("'obj' passed to get_formsets_with_inlines wasn't None during add_view") if not request.is_add_view and obj is None: raise Exception("'obj' passed to get_formsets_with_inlines was None during change_view") return super(GetFormsetsArgumentCheckingAdmin, self).get_formsets_with_inlines(request, obj) site = admin.AdminSite(name="admin") site.site_url = '/my-site-url/' site.register(Article, ArticleAdmin) site.register(CustomArticle, CustomArticleAdmin) site.register(Section, save_as=True, inlines=[ArticleInline], readonly_fields=['name_property']) site.register(ModelWithStringPrimaryKey) site.register(Color) site.register(Thing, ThingAdmin) site.register(Actor) site.register(Inquisition, InquisitionAdmin) site.register(Sketch, SketchAdmin) site.register(Person, PersonAdmin) site.register(Persona, PersonaAdmin) site.register(Subscriber, SubscriberAdmin) site.register(ExternalSubscriber, ExternalSubscriberAdmin) site.register(OldSubscriber, OldSubscriberAdmin) site.register(Podcast, PodcastAdmin) site.register(Vodcast, 
VodcastAdmin) site.register(Parent, ParentAdmin) site.register(EmptyModel, EmptyModelAdmin) site.register(Fabric, FabricAdmin) site.register(Gallery, GalleryAdmin) site.register(Picture, PictureAdmin) site.register(Language, LanguageAdmin) site.register(Recommendation, RecommendationAdmin) site.register(Recommender) site.register(Collector, CollectorAdmin) site.register(Category, CategoryAdmin) site.register(Post, PostAdmin) site.register(FieldOverridePost, FieldOverridePostAdmin) site.register(Gadget, GadgetAdmin) site.register(Villain) site.register(SuperVillain) site.register(Plot) site.register(PlotDetails) site.register(PlotProxy, PlotReadonlyAdmin) site.register(CyclicOne) site.register(CyclicTwo) site.register(WorkHour, WorkHourAdmin) site.register(Reservation) site.register(FoodDelivery, FoodDeliveryAdmin) site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin) site.register(Paper, PaperAdmin) site.register(CoverLetter, CoverLetterAdmin) site.register(ShortMessage, ShortMessageAdmin) site.register(Telegram, TelegramAdmin) site.register(Story, StoryAdmin) site.register(OtherStory, OtherStoryAdmin) site.register(Report, ReportAdmin) site.register(MainPrepopulated, MainPrepopulatedAdmin) site.register(UnorderedObject, UnorderedObjectAdmin) site.register(UndeletableObject, UndeletableObjectAdmin) site.register(UnchangeableObject, UnchangeableObjectAdmin) site.register(State, StateAdmin) site.register(City, CityAdmin) site.register(Restaurant, RestaurantAdmin) site.register(Worker, WorkerAdmin) site.register(FunkyTag, FunkyTagAdmin) site.register(ReferencedByParent) site.register(ChildOfReferer) site.register(ReferencedByInline) site.register(InlineReferer, InlineRefererAdmin) site.register(ReferencedByGenRel) site.register(GenRelReference) # We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2. 
# That way we cover all four cases: # related ForeignKey object registered in admin # related ForeignKey object not registered in admin # related OneToOne object registered in admin # related OneToOne object not registered in admin # when deleting Book so as exercise all four troublesome (w.r.t escaping # and calling force_text to avoid problems on Python 2.3) paths through # contrib.admin.utils's get_deleted_objects function. site.register(Book, inlines=[ChapterInline]) site.register(Promo) site.register(ChapterXtra1, ChapterXtra1Admin) site.register(Pizza, PizzaAdmin) site.register(Topping, ToppingAdmin) site.register(Album, AlbumAdmin) site.register(Question) site.register(Answer) site.register(PrePopulatedPost, PrePopulatedPostAdmin) site.register(ComplexSortedPerson, ComplexSortedPersonAdmin) site.register(FilteredManager, CustomManagerAdmin) site.register(PluggableSearchPerson, PluggableSearchPersonAdmin) site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin) site.register(AdminOrderedField, AdminOrderedFieldAdmin) site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin) site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin) site.register(AdminOrderedCallable, AdminOrderedCallableAdmin) site.register(Color2, CustomTemplateFilterColorAdmin) site.register(Simple, AttributeErrorRaisingAdmin) site.register(UserMessenger, MessageTestingAdmin) site.register(Choice, ChoiceList) site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin) site.register(EmptyModelHidden, EmptyModelHiddenAdmin) site.register(EmptyModelVisible, EmptyModelVisibleAdmin) site.register(EmptyModelMixin, EmptyModelMixinAdmin) site.register(StumpJoke) site.register(Recipe) site.register(Ingredient) site.register(NotReferenced) site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin) site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin) site.register(User, UserAdmin) site.register(Group, GroupAdmin) # Used 
to test URL namespaces site2 = admin.AdminSite(name="namespaced_admin") site2.register(User, UserAdmin) site2.register(Group, GroupAdmin) site2.register(ParentWithUUIDPK) site2.register( RelatedWithUUIDPKModel, list_display=['pk', 'parent'], list_editable=['parent'], raw_id_fields=['parent'], ) site7 = admin.AdminSite(name="admin7") site7.register(Article, ArticleAdmin2)
./CrossVul/dataset_final_sorted/CWE-79/py/good_5191_3
crossvul-python_data_bad_2103_9
404: Not Found
./CrossVul/dataset_final_sorted/CWE-79/py/bad_2103_9
crossvul-python_data_good_3890_0
import difflib from bs4 import BeautifulSoup from django.utils.encoding import force_str from django.utils.html import escape, format_html, format_html_join from django.utils.safestring import mark_safe from django.utils.text import capfirst from django.utils.translation import ugettext_lazy as _ from wagtail.core import blocks def text_from_html(val): # Return the unescaped text content of an HTML string return BeautifulSoup(force_str(val), 'html5lib').getText() class FieldComparison: is_field = True is_child_relation = False def __init__(self, field, obj_a, obj_b): self.field = field self.val_a = field.value_from_object(obj_a) self.val_b = field.value_from_object(obj_b) def field_label(self): """ Returns a label for this field to be displayed to the user """ verbose_name = getattr(self.field, 'verbose_name', None) if verbose_name is None: # Relations don't have a verbose_name verbose_name = self.field.name.replace('_', ' ') return capfirst(verbose_name) def htmldiff(self): if self.val_a != self.val_b: return TextDiff([('deletion', self.val_a), ('addition', self.val_b)]).to_html() else: return escape(self.val_a) def has_changed(self): """ Returns True if the field has changed """ return self.val_a != self.val_b class TextFieldComparison(FieldComparison): def htmldiff(self): return diff_text(self.val_a, self.val_b).to_html() class RichTextFieldComparison(TextFieldComparison): def htmldiff(self): return diff_text( text_from_html(self.val_a), text_from_html(self.val_b) ).to_html() def get_comparison_class_for_block(block): if hasattr(block, 'get_comparison_class'): return block.get_comparison_class() elif isinstance(block, (blocks.CharBlock, blocks.TextBlock)): return CharBlockComparison elif isinstance(block, blocks.RawHTMLBlock): # Compare raw HTML blocks as if they were plain text, so that tags are shown explicitly return CharBlockComparison elif isinstance(block, blocks.RichTextBlock): return RichTextBlockComparison elif isinstance(block, blocks.StructBlock): 
return StructBlockComparison else: # As all stream field blocks have a HTML representation, fall back to diffing that. return RichTextBlockComparison class BlockComparison: def __init__(self, block, exists_a, exists_b, val_a, val_b): self.block = block self.exists_a = exists_a self.exists_b = exists_b self.val_a = val_a self.val_b = val_b def is_new(self): return self.exists_b and not self.exists_a def is_deleted(self): return self.exists_a and not self.exists_b def has_changed(self): return self.val_a != self.val_b def htmlvalue(self, val): """ Return an HTML representation of this block that is safe to be included in comparison views """ return escape(text_from_html(self.block.render_basic(val))) def htmldiff(self): html_val_a = self.block.render_basic(self.val_a) html_val_b = self.block.render_basic(self.val_b) return diff_text( text_from_html(html_val_a), text_from_html(html_val_b) ).to_html() class CharBlockComparison(BlockComparison): def htmldiff(self): return diff_text( force_str(self.val_a), force_str(self.val_b) ).to_html() def htmlvalue(self, val): return escape(val) class RichTextBlockComparison(BlockComparison): pass class StructBlockComparison(BlockComparison): def htmlvalue(self, val): htmlvalues = [] for name, block in self.block.child_blocks.items(): label = self.block.child_blocks[name].label comparison_class = get_comparison_class_for_block(block) htmlvalues.append((label, comparison_class(block, True, True, val[name], val[name]).htmlvalue(val[name]))) return format_html('<dl>\n{}\n</dl>', format_html_join( '\n', ' <dt>{}</dt>\n <dd>{}</dd>', htmlvalues)) def htmldiff(self): htmldiffs = [] for name, block in self.block.child_blocks.items(): label = self.block.child_blocks[name].label comparison_class = get_comparison_class_for_block(block) htmldiffs.append((label, comparison_class(block, self.exists_a, self.exists_b, self.val_a[name], self.val_b[name]).htmldiff())) return format_html('<dl>\n{}\n</dl>', format_html_join( '\n', ' <dt>{}</dt>\n 
<dd>{}</dd>', htmldiffs)) class StreamBlockComparison(BlockComparison): def get_block_comparisons(self): a_blocks = list(self.val_a) or [] b_blocks = list(self.val_b) or [] a_blocks_by_id = {block.id: block for block in a_blocks} b_blocks_by_id = {block.id: block for block in b_blocks} deleted_ids = a_blocks_by_id.keys() - b_blocks_by_id.keys() comparisons = [] for block in b_blocks: comparison_class = get_comparison_class_for_block(block.block) if block.id in a_blocks_by_id: # Changed/existing block comparisons.append(comparison_class(block.block, True, True, a_blocks_by_id[block.id].value, block.value)) else: # New block comparisons.append(comparison_class(block.block, False, True, None, block.value)) # Insert deleted blocks at the index where they used to be deleted_block_indices = [(block, i) for i, block in enumerate(a_blocks) if block.id in deleted_ids] for block, index in deleted_block_indices: comparison_class = get_comparison_class_for_block(block.block) comparison_to_insert = comparison_class(block.block, True, False, block.value, None) # Insert the block back in where it was before it was deleted. # Note: we need to account for new blocks when finding the position. 
current_index = 0 block_inserted = False for i, comparison in enumerate(comparisons): if comparison.is_new(): continue if current_index == index: comparisons.insert(i, comparison_to_insert) block_inserted = True break current_index += 1 # Deleted block was from the end if not block_inserted: comparisons.append(comparison_to_insert) return comparisons def htmldiff(self): comparisons_html = [] for comparison in self.get_block_comparisons(): classes = ['comparison__child-object'] if comparison.is_new(): classes.append('addition') block_rendered = comparison.htmlvalue(comparison.val_b) elif comparison.is_deleted(): classes.append('deletion') block_rendered = comparison.htmlvalue(comparison.val_a) elif comparison.has_changed(): block_rendered = comparison.htmldiff() else: block_rendered = comparison.htmlvalue(comparison.val_a) classes = ' '.join(classes) comparisons_html.append('<div class="{0}">{1}</div>'.format(classes, block_rendered)) return mark_safe('\n'.join(comparisons_html)) class StreamFieldComparison(FieldComparison): def has_block_ids(self, val): if not val: return True return bool(val[0].id) def htmldiff(self): # Our method for diffing streamfields relies on the blocks in both revisions having UUIDs. # But as UUIDs were added in Wagtail 1.11 we can't compare revisions that were created before # that Wagtail version. 
if self.has_block_ids(self.val_a) and self.has_block_ids(self.val_b): return StreamBlockComparison(self.field.stream_block, True, True, self.val_a, self.val_b).htmldiff() else: # Fall back to diffing the HTML representation return diff_text( text_from_html(self.val_a), text_from_html(self.val_b) ).to_html() class ChoiceFieldComparison(FieldComparison): def htmldiff(self): val_a = force_str(dict(self.field.flatchoices).get(self.val_a, self.val_a), strings_only=True) val_b = force_str(dict(self.field.flatchoices).get(self.val_b, self.val_b), strings_only=True) if self.val_a != self.val_b: diffs = [] if val_a: diffs += [('deletion', val_a)] if val_b: diffs += [('addition', val_b)] return TextDiff(diffs).to_html() else: return escape(val_a) class M2MFieldComparison(FieldComparison): def get_items(self): return list(self.val_a), list(self.val_b) def get_item_display(self, item): return str(item) def htmldiff(self): # Get tags items_a, items_b = self.get_items() # Calculate changes sm = difflib.SequenceMatcher(0, items_a, items_b) changes = [] for op, i1, i2, j1, j2 in sm.get_opcodes(): if op == 'replace': for item in items_a[i1:i2]: changes.append(('deletion', self.get_item_display(item))) for item in items_b[j1:j2]: changes.append(('addition', self.get_item_display(item))) elif op == 'delete': for item in items_a[i1:i2]: changes.append(('deletion', self.get_item_display(item))) elif op == 'insert': for item in items_b[j1:j2]: changes.append(('addition', self.get_item_display(item))) elif op == 'equal': for item in items_a[i1:i2]: changes.append(('equal', self.get_item_display(item))) # Convert changelist to HTML return TextDiff(changes, separator=", ").to_html() def has_changed(self): items_a, items_b = self.get_items() return items_a != items_b class TagsFieldComparison(M2MFieldComparison): def get_item_display(self, tag): return tag.slug class ForeignObjectComparison(FieldComparison): def get_objects(self): model = self.field.related_model obj_a = 
model.objects.filter(pk=self.val_a).first() obj_b = model.objects.filter(pk=self.val_b).first() return obj_a, obj_b def htmldiff(self): obj_a, obj_b = self.get_objects() if obj_a != obj_b: if obj_a and obj_b: # Changed return TextDiff([('deletion', force_str(obj_a)), ('addition', force_str(obj_b))]).to_html() elif obj_b: # Added return TextDiff([('addition', force_str(obj_b))]).to_html() elif obj_a: # Removed return TextDiff([('deletion', force_str(obj_a))]).to_html() else: if obj_a: return escape(force_str(obj_a)) else: return mark_safe(_("None")) class ChildRelationComparison: is_field = False is_child_relation = True def __init__(self, field, field_comparisons, obj_a, obj_b): self.field = field self.field_comparisons = field_comparisons self.val_a = getattr(obj_a, field.related_name) self.val_b = getattr(obj_b, field.related_name) def field_label(self): """ Returns a label for this field to be displayed to the user """ verbose_name = getattr(self.field, 'verbose_name', None) if verbose_name is None: # Relations don't have a verbose_name verbose_name = self.field.name.replace('_', ' ') return capfirst(verbose_name) def get_mapping(self, objs_a, objs_b): """ This bit of code attempts to match the objects in the A revision with their counterpart in the B revision. A match is firstly attempted by PK (where a matching ID indicates they're the same). We compare remaining the objects by their field data; the objects with the fewest fields changed are matched until there are no more possible matches left. This returns 4 values: - map_forwards => a mapping of object indexes from the B version to the A version - map_backwards => a mapping of object indexes from the A version to the B version - added => a list of indices for objects that didn't exist in the B version - deleted => a list of indices for objects that didn't exist in the A version Note the indices are 0-based array indices indicating the location of the object in either the objs_a or objs_b arrays. 
For example: objs_a => A, B, C, D objs_b => B, C, D, E Will return the following: map_forwards = { 1: 0, # B (objs_a: objs_b) 2: 1, # C (objs_a: objs_b) 3: 2, # D (objs_a: objs_b) } map_backwards = { 0: 1, # B (objs_b: objs_a) 1: 2, # C (objs_b: objs_a) 2: 3, # D (objs_b: objs_a) } added = [4] # D in objs_b deleted = [0] # A in objs_a """ map_forwards = {} map_backwards = {} added = [] deleted = [] # Match child objects on PK (ID) for a_idx, a_child in enumerate(objs_a): for b_idx, b_child in enumerate(objs_b): if b_idx in map_backwards: continue if a_child.pk is not None and b_child.pk is not None and a_child.pk == b_child.pk: map_forwards[a_idx] = b_idx map_backwards[b_idx] = a_idx # Now try to match them by data matches = [] for a_idx, a_child in enumerate(objs_a): if a_idx not in map_forwards: for b_idx, b_child in enumerate(objs_b): if b_idx not in map_backwards: # If they both have a PK (ID) that is different, they can't be the same child object if a_child.pk and b_child.pk and a_child.pk != b_child.pk: continue comparison = self.get_child_comparison(objs_a[a_idx], objs_b[b_idx]) num_differences = comparison.get_num_differences() matches.append((a_idx, b_idx, num_differences)) # Objects with the least differences will be matched first. So only the best possible matches are made matches.sort(key=lambda match: match[2]) for a_idx, b_idx, num_differences in matches: # Make sure both objects were not matched previously if a_idx in map_forwards or b_idx in map_backwards: continue # Match! 
map_forwards[a_idx] = b_idx map_backwards[b_idx] = a_idx # Mark unmapped objects as added/deleted for a_idx, a_child in enumerate(objs_a): if a_idx not in map_forwards: deleted.append(a_idx) for b_idx, b_child in enumerate(objs_b): if b_idx not in map_backwards: added.append(b_idx) return map_forwards, map_backwards, added, deleted def get_child_comparison(self, obj_a, obj_b): return ChildObjectComparison(self.field.related_model, self.field_comparisons, obj_a, obj_b) def get_child_comparisons(self): """ Returns a list of ChildObjectComparison objects. Representing all child objects that existed in either version. They are returned in the order they appear in the B version with deletions appended at the end. All child objects are returned, regardless of whether they were actually changed. """ objs_a = list(self.val_a.all()) objs_b = list(self.val_b.all()) map_forwards, map_backwards, added, deleted = self.get_mapping(objs_a, objs_b) objs_a = dict(enumerate(objs_a)) objs_b = dict(enumerate(objs_b)) comparisons = [] for b_idx, b_child in objs_b.items(): if b_idx in added: comparisons.append(self.get_child_comparison(None, b_child)) else: comparisons.append(self.get_child_comparison(objs_a[map_backwards[b_idx]], b_child)) for a_idx, a_child in objs_a.items(): if a_idx in deleted: comparisons.append(self.get_child_comparison(a_child, None)) return comparisons def has_changed(self): """ Returns true if any changes were made to any of the child objects. This includes adding, deleting and reordering. 
""" objs_a = list(self.val_a.all()) objs_b = list(self.val_b.all()) map_forwards, map_backwards, added, deleted = self.get_mapping(objs_a, objs_b) if added or deleted: return True for a_idx, b_idx in map_forwards.items(): comparison = self.get_child_comparison(objs_a[a_idx], objs_b[b_idx]) if comparison.has_changed(): return True return False class ChildObjectComparison: def __init__(self, model, field_comparisons, obj_a, obj_b): self.model = model self.field_comparisons = field_comparisons self.obj_a = obj_a self.obj_b = obj_b def is_addition(self): """ Returns True if this child object was created since obj_a """ return self.obj_b and not self.obj_a def is_deletion(self): """ Returns True if this child object was deleted in obj_b """ return self.obj_a and not self.obj_b def get_position_change(self): """ Returns the change in position as an integer. Positive if the object was moved down, negative if it moved up. For example: '3' indicates the object moved down three spaces. '-1' indicates the object moved up one space. """ if not self.is_addition() and not self.is_deletion(): sort_a = getattr(self.obj_a, 'sort_order', 0) or 0 sort_b = getattr(self.obj_b, 'sort_order', 0) or 0 return sort_b - sort_a def get_field_comparisons(self): """ Returns a list of comparisons for all the fields in this object. Fields that haven't changed are included as well. """ comparisons = [] if self.is_addition() or self.is_deletion(): # Display the fields without diff as one of the versions are missing obj = self.obj_a or self.obj_b for field_comparison in self.field_comparisons: comparisons.append(field_comparison(obj, obj)) else: for field_comparison in self.field_comparisons: comparisons.append(field_comparison(self.obj_a, self.obj_b)) return comparisons def has_changed(self): for comparison in self.get_field_comparisons(): if comparison.has_changed(): return True return False def get_num_differences(self): """ Returns the number of fields that differ between the two objects. 
""" num_differences = 0 for comparison in self.get_field_comparisons(): if comparison.has_changed(): num_differences += 1 return num_differences class TextDiff: def __init__(self, changes, separator=""): self.changes = changes self.separator = separator def to_html(self, tag='span', addition_class='addition', deletion_class='deletion'): html = [] for change_type, value in self.changes: if change_type == 'equal': html.append(escape(value)) elif change_type == 'addition': html.append('<{tag} class="{classname}">{value}</{tag}>'.format( tag=tag, classname=addition_class, value=escape(value) )) elif change_type == 'deletion': html.append('<{tag} class="{classname}">{value}</{tag}>'.format( tag=tag, classname=deletion_class, value=escape(value) )) return mark_safe(self.separator.join(html)) def diff_text(a, b): """ Performs a diffing algorithm on two pieces of text. Returns a string of HTML containing the content of both texts with <span> tags inserted indicating where the differences are. """ def tokenise(text): """ Tokenises a string by spliting it into individual characters and grouping the alphanumeric ones together. This means that punctuation, whitespace, CJK characters, etc become separate tokens and words/numbers are merged together to form bigger tokens. This makes the output of the diff easier to read as words are not broken up. 
""" tokens = [] current_token = "" for c in text or "": if c.isalnum(): current_token += c else: if current_token: tokens.append(current_token) current_token = "" tokens.append(c) if current_token: tokens.append(current_token) return tokens a_tok = tokenise(a) b_tok = tokenise(b) sm = difflib.SequenceMatcher(lambda t: len(t) <= 4, a_tok, b_tok) changes = [] for op, i1, i2, j1, j2 in sm.get_opcodes(): if op == 'replace': for token in a_tok[i1:i2]: changes.append(('deletion', token)) for token in b_tok[j1:j2]: changes.append(('addition', token)) elif op == 'delete': for token in a_tok[i1:i2]: changes.append(('deletion', token)) elif op == 'insert': for token in b_tok[j1:j2]: changes.append(('addition', token)) elif op == 'equal': for token in a_tok[i1:i2]: changes.append(('equal', token)) # Merge ajacent changes which have the same type. This just cleans up the HTML a bit merged_changes = [] current_value = [] current_change_type = None for change_type, value in changes: if change_type != current_change_type: if current_change_type is not None: merged_changes.append((current_change_type, ''.join(current_value))) current_value = [] current_change_type = change_type current_value.append(value) if current_value: merged_changes.append((current_change_type, ''.join(current_value))) return TextDiff(merged_changes)
./CrossVul/dataset_final_sorted/CWE-79/py/good_3890_0
crossvul-python_data_good_3530_2
from django import template
from django.conf import settings
from django.db.models import Q
from django.template import NodeList, TemplateSyntaxError
from django.template.loader import render_to_string
from django.utils import simplejson
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _

from djblets.util.decorators import basictag, blocktag
from djblets.util.misc import get_object_or_none
from djblets.util.templatetags.djblets_utils import humanize_list

from reviewboard.accounts.models import Profile
from reviewboard.diffviewer.models import DiffSet
from reviewboard.reviews.models import Comment, Group, ReviewRequest, \
                                       ScreenshotComment


register = template.Library()


@register.tag
@blocktag
def forcomment(context, nodelist, filediff, review=None):
    """
    Loops over a list of comments beloning to a filediff.

    This will populate a special ``comment`` variable for use in the content.
    This is of the type :model:`reviews.Comment`.
    """
    new_nodelist = NodeList()
    context.push()

    if not review:
        comments = filediff.comments.all()
    else:
        comments = filediff.comments.filter(review=review)

    for comment in comments:
        context['comment'] = comment

        for node in nodelist:
            new_nodelist.append(node.render(context))

    context.pop()
    return new_nodelist.render(context)


@register.tag
@blocktag
def ifneatnumber(context, nodelist, rid):
    """
    Returns whether or not the specified number is a "neat" number.
    This is a number with a special property, such as being a
    palindrome or having trailing zeroes.

    If the number is a neat number, the contained content is rendered,
    and two variables, ``milestone`` and ``palindrome`` are defined.
    """
    if rid is None or rid < 1000:
        return ""

    ridstr = str(rid)
    interesting = False

    context.push()
    context['milestone'] = False
    context['palindrome'] = False

    if rid >= 1000:
        trailing = ridstr[1:]
        if trailing == "0" * len(trailing):
            context['milestone'] = True
            interesting = True

    if not interesting:
        if ridstr == ''.join(reversed(ridstr)):
            context['palindrome'] = True
            interesting = True

    if not interesting:
        context.pop()
        return ""

    s = nodelist.render(context)
    context.pop()

    return s


@register.tag
@basictag(takes_context=True)
def commentcounts(context, filediff, interfilediff=None):
    """
    Returns a JSON array of current comments for a filediff, sorted by
    line number.

    Each entry in the array has a dictionary containing the following keys:

      =========== ==================================================
      Key         Description
      =========== ==================================================
      comment_id  The ID of the comment
      text        The text of the comment
      line        The first line number
      num_lines   The number of lines this comment spans
      user        A dictionary containing "username" and "name" keys
                  for the user
      url         The URL to the comment
      localdraft  True if this is the current user's draft comment
      =========== ==================================================
    """
    comment_dict = {}
    user = context.get('user', None)

    if interfilediff:
        query = Comment.objects.filter(filediff=filediff,
                                       interfilediff=interfilediff)
    else:
        query = Comment.objects.filter(filediff=filediff,
                                       interfilediff__isnull=True)

    for comment in query:
        review = get_object_or_none(comment.review)

        # Only expose comments from published reviews, or the user's own
        # unpublished drafts.
        if review and (review.public or review.user == user):
            key = (comment.first_line, comment.num_lines)

            comment_dict.setdefault(key, []).append({
                'comment_id': comment.id,
                # Escape the text so it can be safely embedded in the page.
                'text': escape(comment.text),
                'line': comment.first_line,
                'num_lines': comment.num_lines,
                'user': {
                    'username': review.user.username,
                    'name': review.user.get_full_name() or
                            review.user.username,
                },
                'url': comment.get_review_url(),
                'localdraft': review.user == user and not review.public,
            })

    comments_array = []

    for key, value in comment_dict.iteritems():
        comments_array.append({
            'linenum': key[0],
            'num_lines': key[1],
            'comments': value,
        })

    # NOTE: The original comparator had a misplaced parenthesis
    # (cmp(x['linenum'], y['linenum'] or cmp(...))), which meant the
    # num_lines tie-breaker was never applied. Sort by line number first,
    # then by the number of lines spanned.
    comments_array.sort(cmp=lambda x, y: cmp(x['linenum'], y['linenum']) or
                                         cmp(x['num_lines'], y['num_lines']))

    return simplejson.dumps(comments_array)


@register.tag
@basictag(takes_context=True)
def screenshotcommentcounts(context, screenshot):
    """
    Returns a JSON array of current comments for a screenshot.

    Each entry in the array has a dictionary containing the following keys:

      =========== ==================================================
      Key         Description
      =========== ==================================================
      text        The text of the comment
      localdraft  True if this is the current user's draft comment
      x           The X location of the comment's region
      y           The Y location of the comment's region
      w           The width of the comment's region
      h           The height of the comment's region
      =========== ==================================================
    """
    comments = {}
    user = context.get('user', None)

    for comment in screenshot.comments.all():
        review = get_object_or_none(comment.review)

        if review and (review.public or review.user == user):
            position = '%dx%d+%d+%d' % (comment.w, comment.h,
                                        comment.x, comment.y)

            comments.setdefault(position, []).append({
                'id': comment.id,
                'text': escape(comment.text),
                'user': {
                    'username': review.user.username,
                    'name': review.user.get_full_name() or
                            review.user.username,
                },
                'url': comment.get_review_url(),
                'localdraft': review.user == user and not review.public,
                'x': comment.x,
                'y': comment.y,
                'w': comment.w,
                'h': comment.h,
            })

    return simplejson.dumps(comments)


@register.tag
@basictag(takes_context=True)
def reply_list(context, review, comment, context_type, context_id):
    """
    Renders a list of comments of a specified type.

    This is a complex, confusing function accepts lots of inputs in order
    to display replies to a type of object. In each case, the replies will
    be rendered using the template :template:`reviews/review_reply.html`.

    If ``context_type`` is ``"comment"`` or ``"screenshot_comment"``,
    the generated list of replies are to ``comment``.

    If ``context_type`` is ``"body_top"`` or ```"body_bottom"``,
    the generated list of replies are to ``review``. Depending on the
    ``context_type``, these will either be replies to the top of the
    review body or to the bottom.

    The ``context_id`` parameter has to do with the internal IDs used by
    the JavaScript code for storing and categorizing the comments.
    """
    def generate_reply_html(reply, timestamp, text):
        # Render a single reply entry to HTML.
        return render_to_string('reviews/review_reply.html', {
            'context_id': context_id,
            'id': reply.id,
            'review': review,
            'timestamp': timestamp,
            'text': text,
            'reply_user': reply.user,
            'draft': not reply.public
        })

    def process_body_replies(queryset, attrname, user):
        if user.is_anonymous():
            queryset = queryset.filter(public=True)
        else:
            queryset = queryset.filter(Q(public=True) | Q(user=user))

        s = ""

        # NOTE: The original looped over ``reply_comment`` but referenced an
        # undefined ``reply`` inside the loop, which would have raised a
        # NameError had this helper ever been invoked.
        for reply_comment in queryset:
            s += generate_reply_html(reply_comment, reply_comment.timestamp,
                                     getattr(reply_comment, attrname))

        return s

    user = context.get('user', None)
    if user.is_anonymous():
        user = None

    s = ""

    if context_type == "comment" or context_type == "screenshot_comment":
        for reply_comment in comment.public_replies(user):
            s += generate_reply_html(reply_comment.review.get(),
                                     reply_comment.timestamp,
                                     reply_comment.text)
    elif context_type == "body_top" or context_type == "body_bottom":
        q = Q(public=True)

        if user:
            q = q | Q(user=user)

        replies = getattr(review, "%s_replies" % context_type).filter(q)

        for reply in replies:
            s += generate_reply_html(reply, reply.timestamp,
                                     getattr(reply, context_type))
    else:
        raise TemplateSyntaxError("Invalid context type passed")

    return s


@register.inclusion_tag('reviews/review_reply_section.html',
                        takes_context=True)
def reply_section(context, review, comment, context_type, context_id):
    """
    Renders a template for displaying a reply.

    This takes the same parameters as :tag:`reply_list`. The template
    rendered by this function, :template:`reviews/review_reply_section.html`,
    is responsible for invoking :tag:`reply_list` and as such passes these
    variables through. It does not make use of them itself.
    """
    if comment != "":
        # Screenshot comments get an 's' prefix on the context ID so the
        # JavaScript can distinguish them from diff comments.
        if type(comment) is ScreenshotComment:
            context_id += 's'

        context_id += str(comment.id)

    return {
        'review': review,
        'comment': comment,
        'context_type': context_type,
        'context_id': context_id,
        'user': context.get('user', None)
    }


@register.inclusion_tag('reviews/dashboard_entry.html', takes_context=True)
def dashboard_entry(context, level, text, view, group=None):
    """
    Renders an entry in the dashboard sidebar.

    This includes the name of the entry and the list of review requests
    associated with it. The entry is rendered by the template
    :template:`reviews/dashboard_entry.html`.
    """
    user = context.get('user', None)
    datagrid = context.get('datagrid', None)
    starred = False
    show_count = True
    count = 0

    if view == 'to-group':
        count = datagrid.counts['groups'].get(group.name, 0)
    elif view == 'watched-groups':
        starred = True
        show_count = False
    elif view in datagrid.counts:
        count = datagrid.counts[view]

        if view == 'starred':
            starred = True
    else:
        raise template.TemplateSyntaxError(
            "Invalid view type '%s' passed to 'dashboard_entry' tag." % view)

    return {
        'MEDIA_URL': settings.MEDIA_URL,
        'MEDIA_SERIAL': settings.MEDIA_SERIAL,
        'level': level,
        'text': text,
        'view': view,
        'group': group,
        'count': count,
        'show_count': show_count,
        'user': user,
        'starred': starred,
        'selected': context.get('view', None) == view and
                    (not group or context.get('group', None) == group.name),
    }


@register.simple_tag
def reviewer_list(review_request):
    """
    Returns a humanized list of target reviewers in a review request.
    """
    return humanize_list([group.display_name or group.name
                          for group in review_request.target_groups.all()] +
                         [user.get_full_name() or user.username
                          for user in review_request.target_people.all()])


@register.filter
def bug_url(bug_id, review_request):
    """
    Returns the URL based on a bug number on the specified review request.

    If the repository the review request belongs to doesn't have an
    associated bug tracker, this returns None.
    """
    # Imported locally because the module does not import logging at the
    # top level; without this the error path below raised a NameError.
    import logging

    if (review_request.repository and
        review_request.repository.bug_tracker and
        '%s' in review_request.repository.bug_tracker):
        try:
            return review_request.repository.bug_tracker % bug_id
        except TypeError:
            logging.error("Error creating bug URL. The bug tracker URL '%s' "
                          "is likely invalid." %
                          review_request.repository.bug_tracker)
    return None


@register.filter
def diffsets_with_comments(review, current_pair):
    """
    Returns a list of diffsets in the review that contain draft comments.
    """
    if not review:
        return

    diffsets = DiffSet.objects.filter(files__comments__review=review)
    diffsets = diffsets.filter(files__comments__interfilediff__isnull=True)
    diffsets = diffsets.distinct()

    for diffset in diffsets:
        yield {
            'diffset': diffset,
            'is_current': current_pair[0] == diffset and
                          current_pair[1] == None,
        }


@register.filter
def interdiffs_with_comments(review, current_pair):
    """
    Returns a list of interdiffs in the review that contain draft comments.
    """
    if not review:
        return

    diffsets = DiffSet.objects.filter(files__comments__review=review)
    diffsets = diffsets.filter(files__comments__interfilediff__isnull=False)
    diffsets = diffsets.distinct()

    for diffset in diffsets:
        interdiffs = DiffSet.objects.filter(
            files__interdiff_comments__filediff__diffset=diffset).distinct()

        for interdiff in interdiffs:
            yield {
                'diffset': diffset,
                'interdiff': interdiff,
                'is_current': current_pair[0] == diffset and
                              current_pair[1] == interdiff,
            }


@register.filter
def has_comments_in_diffsets_excluding(review, diffset_pair):
    """
    Returns whether or not the specified review has any comments that
    aren't in the specified diffset or interdiff.
    """
    if not review:
        return False

    current_diffset, interdiff = diffset_pair

    # See if there are any diffsets with comments on them in this review.
    q = DiffSet.objects.filter(files__comments__review=review)
    q = q.filter(files__comments__interfilediff__isnull=True).distinct()

    if not interdiff:
        # The user is browsing a standard diffset, so filter it out.
        q = q.exclude(pk=current_diffset.id)

    if q.count() > 0:
        return True

    # See if there are any interdiffs with comments on them in this review.
    q = DiffSet.objects.filter(files__comments__review=review)
    q = q.filter(files__comments__interfilediff__isnull=False)

    if interdiff:
        # The user is browsing an interdiff, so filter it out.
        q = q.exclude(pk=current_diffset.id,
                      files__comments__interfilediff__diffset=interdiff)

    return q.count() > 0


@register.tag
@basictag(takes_context=True)
def star(context, obj):
    """
    Renders the code for displaying a star used for starring items.

    The rendered code should handle click events so that the user can
    toggle the star. The star is rendered by the template
    :template:`reviews/star.html`.

    The passed object must be either a :model:`reviews.ReviewRequest` or
    a :model:`reviews.Group`.
    """
    return render_star(context.get('user', None), obj)


def render_star(user, obj):
    """
    Does the actual work of rendering the star.

    The star tag is a wrapper around this.
    """
    if user.is_anonymous():
        return ""

    profile = None

    if not hasattr(obj, 'starred'):
        try:
            profile = user.get_profile()
        except Profile.DoesNotExist:
            return ""

    if isinstance(obj, ReviewRequest):
        obj_info = {
            'type': 'reviewrequests',
            'id': obj.id
        }

        if hasattr(obj, 'starred'):
            starred = obj.starred
        else:
            starred = \
                profile.starred_review_requests.filter(pk=obj.id).count() > 0
    elif isinstance(obj, Group):
        obj_info = {
            'type': 'groups',
            'id': obj.name
        }

        if hasattr(obj, 'starred'):
            starred = obj.starred
        else:
            starred = \
                profile.starred_groups.filter(pk=obj.id).count() > 0
    else:
        raise template.TemplateSyntaxError(
            "star tag received an incompatible object type (%s)" % type(obj))

    if starred:
        image_alt = _("Starred")
    else:
        image_alt = _("Click to star")

    return render_to_string('reviews/star.html', {
        'object': obj_info,
        'starred': int(starred),
        'alt': image_alt,
        'user': user,
        'MEDIA_URL': settings.MEDIA_URL,
    })
./CrossVul/dataset_final_sorted/CWE-79/py/good_3530_2
crossvul-python_data_good_2196_0
import cgi
import socketio
import traceback

from ajenti.http import HttpHandler
from ajenti.api import BasePlugin, plugin, persistent, rootcontext
from ajenti.api.http import HttpPlugin, SocketPlugin
from ajenti.plugins import manager
from ajenti.profiler import *


class SocketIORouteHandler (HttpHandler):
    """Routes Socket.IO traffic to the registered SocketPlugin namespaces."""

    def __init__(self):
        # Map each SocketPlugin's name to its class; socketio_manage() uses
        # this mapping to dispatch incoming namespace connections.
        self.namespaces = {}
        for cls in SocketPlugin.get_classes():
            self.namespaces[cls.name] = cls

    def handle(self, context):
        # Hand the raw WSGI environ over to the socketio library.
        return str(socketio.socketio_manage(context.env, self.namespaces, context))


class InvalidRouteHandler (HttpHandler):
    """Fallback handler: responds 404 for URLs no plugin claimed."""

    def handle(self, context):
        context.respond_not_found()
        return 'Invalid URL'


@plugin
@persistent
@rootcontext
class CentralDispatcher (BasePlugin, HttpHandler):
    """Top-level HTTP dispatcher that fans requests out to HttpPlugins."""

    def __init__(self):
        self.invalid = InvalidRouteHandler()
        self.io = SocketIORouteHandler()

    @profiled(lambda a, k: 'HTTP %s' % a[1].path)
    def handle(self, context):
        """
        Dispatch the request to every HttpPlugin
        """
        # Prefer the per-session app context when one exists; fall back to
        # the global plugin-manager context otherwise.
        if hasattr(context.session, 'appcontext'):
            self.context = context.session.appcontext
        else:
            self.context = manager.context

        # Socket.IO traffic bypasses the plugin chain entirely.
        if context.path.startswith('/ajenti:socket'):
            return context.fallthrough(self.io)

        # Lazily cache the list of HTTP handler plugins on the context.
        if not hasattr(self.context, 'http_handlers'):
            self.context.http_handlers = HttpPlugin.get_all()

        # First plugin to return a non-None response wins; any exception is
        # converted into an HTML error page rather than propagating.
        for instance in self.context.http_handlers:
            try:
                output = instance.handle(context)
            except Exception, e:
                return [self.respond_error(context, e)]
            if output is not None:
                return output

        # Nothing claimed the URL: respond 404 via the fallback handler.
        return context.fallthrough(self.invalid)

    def respond_error(self, context, exception):
        # Render a 500 page containing the (HTML-escaped) traceback.
        context.respond_server_error()
        stack = traceback.format_exc()
        return """
        <html>
            <body>
                <style>
                    body {
                        font-family: sans-serif;
                        color: #888;
                        text-align: center;
                    }

                    body pre {
                        width: 600px;
                        text-align: left;
                        margin: auto;
                        font-family: monospace;
                    }
                </style>
                <img src="/ajenti:static/main/error.jpeg" />
                <br/>
                <p>
                    Server error
                </p>
                <pre>
%s
                </pre>
            </body>
        </html>
        """ % cgi.escape(stack)
./CrossVul/dataset_final_sorted/CWE-79/py/good_2196_0
crossvul-python_data_good_4097_1
from collections import OrderedDict

import django.forms
from django.conf import settings
from django.utils.html import conditional_escape
from django.utils.translation import gettext_lazy as _

from wagtail.admin.forms import WagtailAdminPageForm
from wagtail.contrib.forms.utils import get_field_clean_name


class BaseForm(django.forms.Form):
    """Base form for rendered form pages.

    Accepts the extra ``user`` and ``page`` keyword arguments supplied by
    the form-page view and removes the default ``:`` label suffix.
    """

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('label_suffix', '')

        self.user = kwargs.pop('user', None)
        self.page = kwargs.pop('page', None)

        super().__init__(*args, **kwargs)


class FormBuilder:
    """Builds a Django Form class from a list of form-field definitions.

    Each field definition provides ``field_type``, ``label``, ``help_text``,
    ``required``, ``default_value`` and (for choice fields) ``choices``.
    """

    def __init__(self, fields):
        self.fields = fields

    def create_singleline_field(self, field, options):
        # TODO: This is a default value - it may need to be changed
        options['max_length'] = 255
        return django.forms.CharField(**options)

    def create_multiline_field(self, field, options):
        return django.forms.CharField(widget=django.forms.Textarea, **options)

    def create_date_field(self, field, options):
        return django.forms.DateField(**options)

    def create_datetime_field(self, field, options):
        return django.forms.DateTimeField(**options)

    def create_email_field(self, field, options):
        return django.forms.EmailField(**options)

    def create_url_field(self, field, options):
        return django.forms.URLField(**options)

    def create_number_field(self, field, options):
        return django.forms.DecimalField(**options)

    def create_dropdown_field(self, field, options):
        # Choices are stored as a single comma-separated string.
        options['choices'] = [
            (choice.strip(), choice.strip())
            for choice in field.choices.split(',')
        ]
        return django.forms.ChoiceField(**options)

    def create_multiselect_field(self, field, options):
        options['choices'] = [
            (choice.strip(), choice.strip())
            for choice in field.choices.split(',')
        ]
        return django.forms.MultipleChoiceField(**options)

    def create_radio_field(self, field, options):
        options['choices'] = [
            (choice.strip(), choice.strip())
            for choice in field.choices.split(',')
        ]
        return django.forms.ChoiceField(widget=django.forms.RadioSelect,
                                        **options)

    def create_checkboxes_field(self, field, options):
        options['choices'] = [(x.strip(), x.strip())
                              for x in field.choices.split(',')]
        # Pre-tick the boxes named in the (comma-separated) default value.
        options['initial'] = [x.strip()
                              for x in field.default_value.split(',')]
        return django.forms.MultipleChoiceField(
            widget=django.forms.CheckboxSelectMultiple, **options
        )

    def create_checkbox_field(self, field, options):
        return django.forms.BooleanField(**options)

    def create_hidden_field(self, field, options):
        return django.forms.CharField(widget=django.forms.HiddenInput,
                                      **options)

    def get_create_field_function(self, type):
        """
        Takes string of field type and returns a Django Form Field Instance.
        Assumes form field creation functions are in the format:
        'create_fieldtype_field'
        """
        create_field_function = getattr(self, 'create_%s_field' % type, None)
        if create_field_function:
            return create_field_function
        else:
            import inspect
            method_list = [
                f[0] for f in
                inspect.getmembers(self.__class__, inspect.isfunction)
                if f[0].startswith('create_') and f[0].endswith('_field')
            ]
            raise AttributeError(
                "Could not find function matching format \
                create_<fieldname>_field for type: " + type,
                "Must be one of: " + ", ".join(method_list)
            )

    @property
    def formfields(self):
        """Return an OrderedDict mapping clean field names to form fields."""
        formfields = OrderedDict()
        for field in self.fields:
            options = self.get_field_options(field)
            create_field = self.get_create_field_function(field.field_type)
            formfields[field.clean_name] = create_field(field, options)
        return formfields

    def get_field_options(self, field):
        """Return the common kwargs passed to every form-field constructor."""
        options = {}
        options['label'] = field.label
        # help_text is editor-supplied; escape it unless the site has
        # explicitly opted in to raw HTML (XSS protection).
        if getattr(settings, 'WAGTAILFORMS_HELP_TEXT_ALLOW_HTML', False):
            options['help_text'] = field.help_text
        else:
            options['help_text'] = conditional_escape(field.help_text)
        options['required'] = field.required
        options['initial'] = field.default_value
        return options

    def get_form_class(self):
        """Build and return a BaseForm subclass with this builder's fields."""
        return type(str('WagtailForm'), (BaseForm,), self.formfields)


class SelectDateForm(django.forms.Form):
    """Date-range filter form used on the form-submission listing view."""

    date_from = django.forms.DateTimeField(
        required=False,
        widget=django.forms.DateInput(attrs={'placeholder': _('Date from')})
    )
    date_to = django.forms.DateTimeField(
        required=False,
        widget=django.forms.DateInput(attrs={'placeholder': _('Date to')})
    )


class WagtailAdminFormPageForm(WagtailAdminPageForm):
    def clean(self):
        """Reject form-field labels whose clean names collide.

        Two different labels can normalise to the same clean name, which
        would make one field shadow the other on submission.
        """
        super().clean()

        # Check for dupe form field labels - fixes #585
        if 'form_fields' in self.formsets:
            _forms = self.formsets['form_fields'].forms

            for f in _forms:
                f.is_valid()

            for i, form in enumerate(_forms):
                if 'label' in form.changed_data:
                    label = form.cleaned_data.get('label')
                    if label is None:
                        # Label failed its own validation; nothing to
                        # compare against.
                        continue

                    clean_name = get_field_clean_name(label)

                    for idx, ff in enumerate(_forms):
                        # Exclude self
                        if idx == i:
                            continue

                        other_label = ff.cleaned_data.get('label')
                        if other_label is None:
                            continue

                        if clean_name == get_field_clean_name(other_label):
                            # Interpolate AFTER translation so translators
                            # see the %(label)s placeholder (the original
                            # interpolated first, breaking i18n lookup).
                            form.add_error(
                                'label',
                                django.forms.ValidationError(
                                    _('There is another field with the label '
                                      '%(label)s, please change one of them.')
                                    % {'label': label}
                                )
                            )
crossvul-python_data_bad_3147_0
# -*- coding: iso-8859-1 -*- """ MoinMoin - feed some FCKeditor dialogues @copyright: 2005-2006 Bastian Blank, Florian Festi, Thomas Waldmann @license: GNU GPL, see COPYING for details. """ from MoinMoin import config, wikiutil from MoinMoin.action.AttachFile import _get_files from MoinMoin.Page import Page import re ############################################################################## ### Macro dialog ############################################################################## def macro_dialog(request): help = get_macro_help(request) request.write( '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head> <title>Insert Macro</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta content="noindex,nofollow" name="robots"> <script src="%s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script> <script language="javascript"> var oEditor = window.parent.InnerDialogLoaded() ; var FCKLang = oEditor.FCKLang ; var FCKMacros = oEditor.FCKMacros ; window.onload = function () { // First of all, translate the dialog box texts oEditor.FCKLanguageManager.TranslatePage( document ) ; OnChange( "BR" ); // Show the "Ok" button. window.parent.SetOkButton( true ) ; } function Ok() { if ( document.getElementById('txtName').value.length == 0 ) { alert( FCKLang.MacroErrNoName ) ; return false ; } FCKMacros.Add( txtName.value ) ; return true ; } function OnChange( sMacro ) { // sMacro = GetE("txtName").value; oHelp = GetE("help"); for (var i=0; i<oHelp.childNodes.length; i++) { var oDiv = oHelp.childNodes[i]; if (oDiv.nodeType==1) { // oDiv.style.display = (GetAttribute(oDiv, "id", "")==sMacro) ? 
'' : 'none'; if (GetAttribute(oDiv, "id", "") == sMacro) { oDiv.style.display = '' ; // alert("enabled div id " + sMacro) ; } else { oDiv.style.display = 'none' ; } } } } </script> </head> <body scroll="no" style="OVERFLOW: hidden"> <table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0"> <tr> <td> <table cellSpacing="0" cellPadding="0" align="center" border="0"> <tr> <td valign="top"> <span fckLang="MacroDlgName">Macro Name</span><br> <select id="txtName" size="10" onchange="OnChange(this.value);"> ''' % request.cfg.url_prefix_static) macros = [] for macro in macro_list(request): if macro == "BR": selected = ' selected="selected"' else: selected = '' if macro in help: macros.append('<option value="%s"%s>%s</option>' % (help[macro].group('prototype'), selected, macro)) else: macros.append('<option value="%s"%s>%s</option>' % (macro, selected, macro)) request.write('\n'.join(macros)) request.write(''' </select> </td> <td id="help">''') helptexts = [] for macro in macro_list(request): if macro in help: match = help[macro] prototype = match.group('prototype') helptext = match.group('help') else: prototype = macro helptext = "" helptexts.append( '''<div id="%s" style="DISPLAY: none"> <b>&lt;&lt;%s&gt;&gt;</b> <br/> <textarea style="color:#000000" cols="37" rows="10" disabled="disabled">%s</textarea> </div>''' % (prototype, prototype, helptext)) request.write(''.join(helptexts)) request.write(''' </td> </tr> </table> </td> </tr> </table> </body> </html> ''') def macro_list(request): from MoinMoin import macro macros = macro.getNames(request.cfg) macros.sort() return macros def get_macro_help(request): """ Read help texts from SystemPage('HelpOnMacros')""" helppage = wikiutil.getLocalizedPage(request, "HelpOnMacros") content = helppage.get_raw_body() macro_re = re.compile( r"\|\|(<.*?>)?\{\{\{" + r"<<(?P<prototype>(?P<macro>\w*).*)>>" + r"\}\}\}\s*\|\|" + r"[^|]*\|\|[^|]*\|\|<[^>]*>" + 
r"\s*(?P<help>.*?)\s*\|\|\s*(?P<example>.*?)\s*(<<[^>]*>>)*\s*\|\|$", re.U|re.M) help = {} for match in macro_re.finditer(content): help[match.group('macro')] = match return help ############################################################################## ### Link dialog ############################################################################## def page_list(request): from MoinMoin import search name = request.values.get("pagename", "") if name: searchresult = search.searchPages(request, 't:"%s"' % name) pages = [p.page_name for p in searchresult.hits] else: pages = [name] request.write( '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head> <title>Insert Page Link</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta content="noindex,nofollow" name="robots"> </head> <body scroll="no" style="OVERFLOW: hidden"> <table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0"> <tr> <td> <table cellSpacing="0" cellPadding="0" align="center" border="0"> <tr> <td> <span fckLang="PageDlgName">Page name</span><br> <select id="txtName" size="1"> %s </select> </td> </tr> </table> </td> </tr> </table> </body> </html> ''' % "".join(["<option>%s</option>\n" % wikiutil.escape(p) for p in pages])) def link_dialog(request): # list of wiki pages name = request.values.get("pagename", "") if name: from MoinMoin import search # XXX error handling! 
searchresult = search.searchPages(request, 't:"%s"' % name) pages = [p.page_name for p in searchresult.hits] pages.sort() pages[0:0] = [name] page_list = ''' <tr> <td colspan=2> <select id="sctPagename" size="1" onchange="OnChangePagename(this.value);"> %s </select> <td> </tr> ''' % "\n".join(['<option value="%s">%s</option>' % (wikiutil.escape(page), wikiutil.escape(page)) for page in pages]) else: page_list = "" # list of interwiki names interwiki_list = wikiutil.load_wikimap(request) interwiki = interwiki_list.keys() interwiki.sort() iwpreferred = request.cfg.interwiki_preferred[:] if not iwpreferred or iwpreferred and iwpreferred[-1] is not None: resultlist = iwpreferred for iw in interwiki: if not iw in iwpreferred: resultlist.append(iw) else: resultlist = iwpreferred[:-1] interwiki = "\n".join( ['<option value="%s">%s</option>' % (wikiutil.escape(key), wikiutil.escape(key)) for key in resultlist]) # wiki url url_prefix_static = request.cfg.url_prefix_static scriptname = request.script_root + '/' action = scriptname basepage = wikiutil.escape(request.page.page_name) request.write(u''' <!-- * FCKeditor - The text editor for internet * Copyright (C) 2003-2004 Frederico Caldeira Knabben * * Licensed under the terms of the GNU Lesser General Public License: * http://www.opensource.org/licenses/lgpl-license.php * * For further information visit: * http://www.fckeditor.net/ * * File Name: fck_link.html * Link dialog window. 
* * Version: 2.0 FC (Preview) * Modified: 2005-02-18 23:55:22 * * File Authors: * Frederico Caldeira Knabben (fredck@fckeditor.net) --> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <meta http-equiv="Content-Type" content="text/html;charset=utf-8"> <meta name="robots" content="index,nofollow"> <html> <head> <title>Link Properties</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <meta name="robots" content="noindex,nofollow" /> <script src="%(url_prefix_static)s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script> <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinlink/fck_link.js" type="text/javascript"></script> <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinurllib.js" type="text/javascript"></script> </head> <body scroll="no" style="OVERFLOW: hidden"> <div id="divInfo" style="DISPLAY: none"> <span fckLang="DlgLnkType">Link Type</span><br /> <select id="cmbLinkType" onchange="SetLinkType(this.value);"> <option value="wiki" selected="selected">WikiPage</option> <option value="interwiki">Interwiki</option> <option value="url" fckLang="DlgLnkTypeURL">URL</option> </select> <br /> <br /> <div id="divLinkTypeWiki"> <table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0"> <tr> <td> <form action=%(action)s method="GET"> <input type="hidden" name="action" value="fckdialog"> <input type="hidden" name="dialog" value="link"> <input type="hidden" id="basepage" name="basepage" value="%(basepage)s"> <table cellSpacing="0" cellPadding="0" align="center" border="0"> <tr> <td> <span fckLang="PageDlgName">Page Name</span><br> <input id="txtPagename" name="pagename" size="30" value="%(name)s"> </td> <td valign="bottom"> <input id=btnSearchpage type="submit" value="Search"> </td> </tr> %(page_list)s </table> </form> </td> </tr> </table> </div> <div id="divLinkTypeInterwiki"> <table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" 
border="0"> <tr> <td> <table cellSpacing="0" cellPadding="0" align="center" border="0"> <tr> <td> <span fckLang="WikiDlgName">Wiki:PageName</span><br> <select id="sctInterwiki" size="1"> %(interwiki)s </select>: <input id="txtInterwikipagename"></input> </td> </tr> </table> </td> </tr> </table> </div> <div id="divLinkTypeUrl"> <table cellspacing="0" cellpadding="0" width="100%%" border="0"> <tr> <td nowrap="nowrap"> <span fckLang="DlgLnkProto">Protocol</span><br /> <select id="cmbLinkProtocol"> <option value="http://" selected="selected">http://</option> <option value="https://">https://</option> <option value="ftp://">ftp://</option> <option value="file://">file://</option> <option value="news://">news://</option> <option value="mailto:">mailto:</option> <option value="" fckLang="DlgLnkProtoOther">&lt;other&gt;</option> </select> </td> <td nowrap="nowrap">&nbsp;</td> <td nowrap="nowrap" width="100%%"> <span fckLang="DlgLnkURL">URL</span><br /> <input id="txtUrl" style="WIDTH: 100%%" type="text" onkeyup="OnUrlChange();" onchange="OnUrlChange();" /> </td> </tr> </table> <br /> </div> </div> </body> </html> ''' % locals()) def attachment_dialog(request): """ Attachment dialog for GUI editor. """ """ Features: This dialog can... """ """ - list attachments in a drop down list """ """ - list attachments also for a different page than the current one """ """ - create new attachment """ _ = request.getText url_prefix_static = request.cfg.url_prefix_static # wiki url action = request.script_root + "/" # The following code lines implement the feature "list attachments for a different page". # Meaning of the variables: # - requestedPagename : Name of the page where attachments shall be listed from. # - attachmentsPagename : Name of the page where the attachments where retrieved from. # - destinationPagename : Name of the page where attachment will be placed on. 
requestedPagename = wikiutil.escape(request.values.get("requestedPagename", ""), quote=True) destinationPagename = wikiutil.escape(request.values.get("destinationPagename", request.page.page_name), quote=True) attachmentsPagename = requestedPagename or wikiutil.escape(request.page.page_name) attachments = _get_files(request, attachmentsPagename) attachments.sort() attachmentList = ''' <select id="sctAttachments" size="10" style="width:100%%;visibility:hidden;" onchange="OnAttachmentListChange();"> %s </select> ''' % "\n".join(['<option value="%s">%s</option>' % (wikiutil.escape(attachment, quote=True), wikiutil.escape(attachment, quote=True)) for attachment in attachments]) # Translation of dialog texts. langAttachmentLocation = _("Attachment location") langPagename = _("Page name") langAttachmentname = _("Attachment name") langListAttachmentsButton = _("Refresh attachment list") langAttachmentList = _("List of attachments") if len(attachmentsPagename) > 50: shortenedPagename = "%s ... %s" % (attachmentsPagename[0:25], attachmentsPagename[-25:]) else: shortenedPagename = attachmentsPagename langAvailableAttachments = "%s: %s" % (_("Available attachments for page"), shortenedPagename) request.write(''' <!-- * FCKeditor - The text editor for internet * Copyright (C) 2003-2004 Frederico Caldeira Knabben * * Licensed under the terms of the GNU Lesser General Public License: * http://www.opensource.org/licenses/lgpl-license.php * * For further information visit: * http://www.fckeditor.net/ * * File Name: fck_attachment.html * Attachment dialog window. 
* * Version: 2.0 FC (Preview) * Modified: 2005-02-18 23:55:22 * * File Authors: * Frederico Caldeira Knabben (fredck@fckeditor.net) --> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <meta http-equiv="Content-Type" content="text/html;charset=utf-8"> <meta name="robots" content="index,nofollow"> <html> <head> <title>Attachment Properties</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <meta name="robots" content="noindex,nofollow" /> <script src="%(url_prefix_static)s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script> <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinattachment/fck_attachment.js" type="text/javascript"></script> <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinurllib.js" type="text/javascript"></script> </head> <body scroll="no" style="OVERFLOW: hidden"> <form id="DlgAttachmentForm" name="DlgAttachmentForm" action=%(action)s method="GET"> <input type="hidden" name="action" value="fckdialog"> <input type="hidden" name="dialog" value="attachment"> <input type="hidden" id="requestedPagename" name="requestedPagename" value="%(requestedPagename)s"> <input type="hidden" id="attachmentsPagename" name="attachmentsPagename" value="%(attachmentsPagename)s"> <input type="hidden" id="destinationPagename" name="destinationPagename" value="%(destinationPagename)s"> <div id="divInfo" style="valign=top;"> <div id="divLinkTypeAttachment"> <fieldset> <legend>%(langAttachmentLocation)s</legend> <table cellSpacing="0" cellPadding="0" width="100%%" border="0"> <tr> <td valign="bottom" style="width:90%%" style="padding-bottom:10px"> <span>%(langPagename)s</span><br> </td> </tr> <tr> <td valign="bottom" style="width:100%%" style="padding-bottom:10px;padding-right:10px;"> <input id="txtPagename" type="text" onkeyup="OnPagenameChange();" onchange="OnPagenameChange();" style="width:98%%"> </td> </tr> <tr> <td valign="bottom" style="width:90%%" 
style="padding-bottom:10px;"> <span>%(langAttachmentname)s</span><br> </td> </tr> <tr valign="bottom"> <td valign="bottom" style="width:100%%" style="padding-bottom:10px;padding-right:10px;"> <input id="txtAttachmentname" type="text" onkeyup="OnAttachmentnameChange();" onchange="OnPagenameChange();" style="width:98%%"><br> </td> </tr> </table> </fieldset> <fieldset> <legend>%(langAvailableAttachments)s</legend> <table cellSpacing="0" cellPadding="0" width="100%%" border="0"> <tr> <td valign="bottom" style="width:100%%" style="padding-bottom:10px"> <input id="btnListAttachments" type="submit" value="%(langListAttachmentsButton)s"> </td> </tr> <tr> <td valign="top" style="padding-top:10px"> <label for="sctAttachments">%(langAttachmentList)s</label><br> %(attachmentList)s </td> </tr> </table> </fieldset> </div> </div> </form> </body> </html> ''' % locals()) ############################################################################## ### Image dialog ############################################################################## def image_dialog(request): url_prefix_static = request.cfg.url_prefix_static request.write(''' <!-- * FCKeditor - The text editor for internet * Copyright (C) 2003-2004 Frederico Caldeira Knabben * * Licensed under the terms of the GNU Lesser General Public License: * http://www.opensource.org/licenses/lgpl-license.php * * For further information visit: * http://www.fckeditor.net/ * * File Authors: * Frederico Caldeira Knabben (fredck@fckeditor.net) * Florian Festi --> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head> <title>Link Properties</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <meta name="robots" content="noindex,nofollow" /> <script src="%(url_prefix_static)s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script> <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinimage/fck_image.js" type="text/javascript"></script> <script 
src="%(url_prefix_static)s/applets/moinFCKplugins/moinurllib.js" type="text/javascript"></script> </head> <body scroll="no" style="OVERFLOW: hidden"> <table cellspacing="0" cellpadding="0" width="100%%" border="0"> <tr> <td nowrap="nowrap"> <span fckLang="DlgLnkProto">Protocol</span><br /> <select id="cmbLinkProtocol" onchange="OnProtocolChange();"> <option value="attachment:" selected="selected">attachment:</option> <option value="http://">http://</option> <option value="https://">https://</option> <!-- crashes often: <option value="drawing:">drawing:</option> --> <option value="" fckLang="DlgLnkProtoOther">&lt;other&gt;</option> </select> </td> <td nowrap="nowrap">&nbsp;</td> <td nowrap="nowrap" width="100%%"> <span fckLang="DlgLnkURL">URL or File Name (attachment:)</span><br /> <input id="txtUrl" style="WIDTH: 100%%" type="text" onkeyup="OnUrlChange();" onchange="OnUrlChange();" /> </td> </tr> <tr> <td colspan=2> <div id="divChkLink"> <input id="chkLink" type="checkbox"> Link to </div> </td> </table> </body> </html> ''' % locals()) ############################################################################# ### Main ############################################################################# def execute(pagename, request): dialog = request.values.get("dialog", "") if dialog == "macro": macro_dialog(request) elif dialog == "macrolist": macro_list(request) elif dialog == "pagelist": page_list(request) elif dialog == "link": link_dialog(request) elif dialog == "attachment": attachment_dialog(request) elif dialog == 'image': image_dialog(request) else: from MoinMoin.Page import Page request.theme.add_msg("Dialog unknown!", "error") Page(request, pagename).send_page()
./CrossVul/dataset_final_sorted/CWE-79/py/bad_3147_0
crossvul-python_data_bad_1091_0
from typing import Dict, Optional, Tuple from django.utils.translation import ugettext as _ from django.conf import settings from django.core.files import File from django.http import HttpRequest from django.db.models import Sum from jinja2 import Markup as mark_safe import unicodedata from zerver.lib.avatar_hash import user_avatar_path from zerver.lib.exceptions import JsonableError, ErrorCode from boto.s3.bucket import Bucket from boto.s3.key import Key from boto.s3.connection import S3Connection from mimetypes import guess_type, guess_extension from zerver.models import get_user_profile_by_id from zerver.models import Attachment from zerver.models import Realm, RealmEmoji, UserProfile, Message import urllib import base64 import os import re from PIL import Image, ImageOps, ExifTags from PIL.Image import DecompressionBombError from PIL.GifImagePlugin import GifImageFile import io import random import logging DEFAULT_AVATAR_SIZE = 100 MEDIUM_AVATAR_SIZE = 500 DEFAULT_EMOJI_SIZE = 64 # These sizes were selected based on looking at the maximum common # sizes in a library of animated custom emoji, balanced against the # network cost of very large emoji images. MAX_EMOJI_GIF_SIZE = 128 MAX_EMOJI_GIF_FILE_SIZE_BYTES = 128 * 1024 * 1024 # 128 kb # Performance Note: # # For writing files to S3, the file could either be stored in RAM # (if it is less than 2.5MiB or so) or an actual temporary file on disk. # # Because we set FILE_UPLOAD_MAX_MEMORY_SIZE to 0, only the latter case # should occur in practice. # # This is great, because passing the pseudofile object that Django gives # you to boto would be a pain. # To come up with a s3 key we randomly generate a "directory". The # "file name" is the original filename provided by the user run # through a sanitization function. 
class RealmUploadQuotaError(JsonableError): code = ErrorCode.REALM_UPLOAD_QUOTA attachment_url_re = re.compile(r'[/\-]user[\-_]uploads[/\.-].*?(?=[ )]|\Z)') def attachment_url_to_path_id(attachment_url: str) -> str: path_id_raw = re.sub(r'[/\-]user[\-_]uploads[/\.-]', '', attachment_url) # Remove any extra '.' after file extension. These are probably added by the user return re.sub('[.]+$', '', path_id_raw, re.M) def sanitize_name(value: str) -> str: """ Sanitizes a value to be safe to store in a Linux filesystem, in S3, and in a URL. So unicode is allowed, but not special characters other than ".", "-", and "_". This implementation is based on django.utils.text.slugify; it is modified by: * adding '.' and '_' to the list of allowed characters. * preserving the case of the value. """ value = unicodedata.normalize('NFKC', value) value = re.sub(r'[^\w\s._-]', '', value, flags=re.U).strip() return mark_safe(re.sub(r'[-\s]+', '-', value, flags=re.U)) def random_name(bytes: int=60) -> str: return base64.urlsafe_b64encode(os.urandom(bytes)).decode('utf-8') class BadImageError(JsonableError): code = ErrorCode.BAD_IMAGE name_to_tag_num = dict((name, num) for num, name in ExifTags.TAGS.items()) # https://stackoverflow.com/a/6218425 def exif_rotate(image: Image) -> Image: if not hasattr(image, '_getexif'): return image exif_data = image._getexif() if exif_data is None: return image exif_dict = dict(exif_data.items()) orientation = exif_dict.get(name_to_tag_num['Orientation']) if orientation == 3: return image.rotate(180, expand=True) elif orientation == 6: return image.rotate(270, expand=True) elif orientation == 8: return image.rotate(90, expand=True) return image def resize_avatar(image_data: bytes, size: int=DEFAULT_AVATAR_SIZE) -> bytes: try: im = Image.open(io.BytesIO(image_data)) im = exif_rotate(im) im = ImageOps.fit(im, (size, size), Image.ANTIALIAS) except IOError: raise BadImageError(_("Could not decode image; did you upload an image file?")) except 
DecompressionBombError: raise BadImageError(_("Image size exceeds limit.")) out = io.BytesIO() if im.mode == 'CMYK': im = im.convert('RGB') im.save(out, format='png') return out.getvalue() def resize_logo(image_data: bytes) -> bytes: try: im = Image.open(io.BytesIO(image_data)) im = exif_rotate(im) im.thumbnail((8*DEFAULT_AVATAR_SIZE, DEFAULT_AVATAR_SIZE), Image.ANTIALIAS) except IOError: raise BadImageError(_("Could not decode image; did you upload an image file?")) except DecompressionBombError: raise BadImageError(_("Image size exceeds limit.")) out = io.BytesIO() if im.mode == 'CMYK': im = im.convert('RGB') im.save(out, format='png') return out.getvalue() def resize_gif(im: GifImageFile, size: int=DEFAULT_EMOJI_SIZE) -> bytes: frames = [] duration_info = [] # If 'loop' info is not set then loop for infinite number of times. loop = im.info.get("loop", 0) for frame_num in range(0, im.n_frames): im.seek(frame_num) new_frame = Image.new("RGBA", im.size) new_frame.paste(im, (0, 0), im.convert("RGBA")) new_frame = ImageOps.fit(new_frame, (size, size), Image.ANTIALIAS) frames.append(new_frame) duration_info.append(im.info['duration']) out = io.BytesIO() frames[0].save(out, save_all=True, optimize=True, format="GIF", append_images=frames[1:], duration=duration_info, loop=loop) return out.getvalue() def resize_emoji(image_data: bytes, size: int=DEFAULT_EMOJI_SIZE) -> bytes: try: im = Image.open(io.BytesIO(image_data)) image_format = im.format if image_format == "GIF": # There are a number of bugs in Pillow.GifImagePlugin which cause # results in resized gifs being broken. To work around this we # only resize under certain conditions to minimize the chance of # creating ugly gifs. 
should_resize = any(( im.size[0] != im.size[1], # not square im.size[0] > MAX_EMOJI_GIF_SIZE, # dimensions too large len(image_data) > MAX_EMOJI_GIF_FILE_SIZE_BYTES, # filesize too large )) return resize_gif(im, size) if should_resize else image_data else: im = exif_rotate(im) im = ImageOps.fit(im, (size, size), Image.ANTIALIAS) out = io.BytesIO() im.save(out, format=image_format) return out.getvalue() except IOError: raise BadImageError(_("Could not decode image; did you upload an image file?")) except DecompressionBombError: raise BadImageError(_("Image size exceeds limit.")) ### Common class ZulipUploadBackend: def upload_message_file(self, uploaded_file_name: str, uploaded_file_size: int, content_type: Optional[str], file_data: bytes, user_profile: UserProfile, target_realm: Optional[Realm]=None) -> str: raise NotImplementedError() def upload_avatar_image(self, user_file: File, acting_user_profile: UserProfile, target_user_profile: UserProfile) -> None: raise NotImplementedError() def delete_avatar_image(self, user: UserProfile) -> None: raise NotImplementedError() def delete_message_image(self, path_id: str) -> bool: raise NotImplementedError() def get_avatar_url(self, hash_key: str, medium: bool=False) -> str: raise NotImplementedError() def copy_avatar(self, source_profile: UserProfile, target_profile: UserProfile) -> None: raise NotImplementedError() def ensure_medium_avatar_image(self, user_profile: UserProfile) -> None: raise NotImplementedError() def ensure_basic_avatar_image(self, user_profile: UserProfile) -> None: raise NotImplementedError() def upload_realm_icon_image(self, icon_file: File, user_profile: UserProfile) -> None: raise NotImplementedError() def get_realm_icon_url(self, realm_id: int, version: int) -> str: raise NotImplementedError() def upload_realm_logo_image(self, logo_file: File, user_profile: UserProfile, night: bool) -> None: raise NotImplementedError() def get_realm_logo_url(self, realm_id: int, version: int, night: bool) -> str: 
raise NotImplementedError() def upload_emoji_image(self, emoji_file: File, emoji_file_name: str, user_profile: UserProfile) -> None: raise NotImplementedError() def get_emoji_url(self, emoji_file_name: str, realm_id: int) -> str: raise NotImplementedError() ### S3 def get_bucket(conn: S3Connection, bucket_name: str) -> Bucket: # Calling get_bucket() with validate=True can apparently lead # to expensive S3 bills: # http://www.appneta.com/blog/s3-list-get-bucket-default/ # The benefits of validation aren't completely clear to us, and # we want to save on our bills, so we set the validate flag to False. # (We think setting validate to True would cause us to fail faster # in situations where buckets don't exist, but that shouldn't be # an issue for us.) bucket = conn.get_bucket(bucket_name, validate=False) return bucket def upload_image_to_s3( bucket_name: str, file_name: str, content_type: Optional[str], user_profile: UserProfile, contents: bytes) -> None: conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) bucket = get_bucket(conn, bucket_name) key = Key(bucket) key.key = file_name key.set_metadata("user_profile_id", str(user_profile.id)) key.set_metadata("realm_id", str(user_profile.realm_id)) if content_type is not None: headers = {'Content-Type': content_type} # type: Optional[Dict[str, str]] else: headers = None key.set_contents_from_string(contents, headers=headers) # type: ignore # https://github.com/python/typeshed/issues/1552 def currently_used_upload_space(realm: Realm) -> int: used_space = Attachment.objects.filter(realm=realm).aggregate(Sum('size'))['size__sum'] if used_space is None: return 0 return used_space def check_upload_within_quota(realm: Realm, uploaded_file_size: int) -> None: upload_quota = realm.upload_quota_bytes() if upload_quota is None: return used_space = currently_used_upload_space(realm) if (used_space + uploaded_file_size) > upload_quota: raise RealmUploadQuotaError(_("Upload would exceed your organization's upload quota.")) 
def get_file_info(request: HttpRequest, user_file: File) -> Tuple[str, int, Optional[str]]: uploaded_file_name = user_file.name assert isinstance(uploaded_file_name, str) content_type = request.GET.get('mimetype') if content_type is None: guessed_type = guess_type(uploaded_file_name)[0] if guessed_type is not None: content_type = guessed_type else: extension = guess_extension(content_type) if extension is not None: uploaded_file_name = uploaded_file_name + extension uploaded_file_name = urllib.parse.unquote(uploaded_file_name) uploaded_file_size = user_file.size return uploaded_file_name, uploaded_file_size, content_type def get_signed_upload_url(path: str) -> str: conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) return conn.generate_url(15, 'GET', bucket=settings.S3_AUTH_UPLOADS_BUCKET, key=path) def get_realm_for_filename(path: str) -> Optional[int]: conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) key = get_bucket(conn, settings.S3_AUTH_UPLOADS_BUCKET).get_key(path) if key is None: # This happens if the key does not exist. return None return get_user_profile_by_id(key.metadata["user_profile_id"]).realm_id class S3UploadBackend(ZulipUploadBackend): def delete_file_from_s3(self, path_id: str, bucket_name: str) -> bool: conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) bucket = get_bucket(conn, bucket_name) # check if file exists key = bucket.get_key(path_id) if key is not None: bucket.delete_key(key) return True file_name = path_id.split("/")[-1] logging.warning("%s does not exist. Its entry in the database will be removed." 
% (file_name,)) return False def upload_message_file(self, uploaded_file_name: str, uploaded_file_size: int, content_type: Optional[str], file_data: bytes, user_profile: UserProfile, target_realm: Optional[Realm]=None) -> str: bucket_name = settings.S3_AUTH_UPLOADS_BUCKET if target_realm is None: target_realm = user_profile.realm s3_file_name = "/".join([ str(target_realm.id), random_name(18), sanitize_name(uploaded_file_name) ]) url = "/user_uploads/%s" % (s3_file_name,) upload_image_to_s3( bucket_name, s3_file_name, content_type, user_profile, file_data ) create_attachment(uploaded_file_name, s3_file_name, user_profile, uploaded_file_size) return url def delete_message_image(self, path_id: str) -> bool: return self.delete_file_from_s3(path_id, settings.S3_AUTH_UPLOADS_BUCKET) def write_avatar_images(self, s3_file_name: str, target_user_profile: UserProfile, image_data: bytes, content_type: Optional[str]) -> None: bucket_name = settings.S3_AVATAR_BUCKET upload_image_to_s3( bucket_name, s3_file_name + ".original", content_type, target_user_profile, image_data, ) # custom 500px wide version resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE) upload_image_to_s3( bucket_name, s3_file_name + "-medium.png", "image/png", target_user_profile, resized_medium ) resized_data = resize_avatar(image_data) upload_image_to_s3( bucket_name, s3_file_name, 'image/png', target_user_profile, resized_data, ) # See avatar_url in avatar.py for URL. (That code also handles the case # that users use gravatar.) 
def upload_avatar_image(self, user_file: File, acting_user_profile: UserProfile, target_user_profile: UserProfile) -> None: content_type = guess_type(user_file.name)[0] s3_file_name = user_avatar_path(target_user_profile) image_data = user_file.read() self.write_avatar_images(s3_file_name, target_user_profile, image_data, content_type) def delete_avatar_image(self, user: UserProfile) -> None: path_id = user_avatar_path(user) bucket_name = settings.S3_AVATAR_BUCKET self.delete_file_from_s3(path_id + ".original", bucket_name) self.delete_file_from_s3(path_id + "-medium.png", bucket_name) self.delete_file_from_s3(path_id, bucket_name) def get_avatar_key(self, file_name: str) -> Key: conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) bucket_name = settings.S3_AVATAR_BUCKET bucket = get_bucket(conn, bucket_name) key = bucket.get_key(file_name) return key def copy_avatar(self, source_profile: UserProfile, target_profile: UserProfile) -> None: s3_source_file_name = user_avatar_path(source_profile) s3_target_file_name = user_avatar_path(target_profile) key = self.get_avatar_key(s3_source_file_name + ".original") image_data = key.get_contents_as_string() # type: ignore # https://github.com/python/typeshed/issues/1552 content_type = key.content_type self.write_avatar_images(s3_target_file_name, target_profile, image_data, content_type) # type: ignore # image_data is `bytes`, boto subs are wrong def get_avatar_url(self, hash_key: str, medium: bool=False) -> str: bucket = settings.S3_AVATAR_BUCKET medium_suffix = "-medium.png" if medium else "" # ?x=x allows templates to append additional parameters with &s return "https://%s.s3.amazonaws.com/%s%s?x=x" % (bucket, hash_key, medium_suffix) def upload_realm_icon_image(self, icon_file: File, user_profile: UserProfile) -> None: content_type = guess_type(icon_file.name)[0] bucket_name = settings.S3_AVATAR_BUCKET s3_file_name = os.path.join(str(user_profile.realm.id), 'realm', 'icon') image_data = icon_file.read() 
upload_image_to_s3( bucket_name, s3_file_name + ".original", content_type, user_profile, image_data, ) resized_data = resize_avatar(image_data) upload_image_to_s3( bucket_name, s3_file_name + ".png", 'image/png', user_profile, resized_data, ) # See avatar_url in avatar.py for URL. (That code also handles the case # that users use gravatar.) def get_realm_icon_url(self, realm_id: int, version: int) -> str: bucket = settings.S3_AVATAR_BUCKET # ?x=x allows templates to append additional parameters with &s return "https://%s.s3.amazonaws.com/%s/realm/icon.png?version=%s" % (bucket, realm_id, version) def upload_realm_logo_image(self, logo_file: File, user_profile: UserProfile, night: bool) -> None: content_type = guess_type(logo_file.name)[0] bucket_name = settings.S3_AVATAR_BUCKET if night: basename = 'night_logo' else: basename = 'logo' s3_file_name = os.path.join(str(user_profile.realm.id), 'realm', basename) image_data = logo_file.read() upload_image_to_s3( bucket_name, s3_file_name + ".original", content_type, user_profile, image_data, ) resized_data = resize_logo(image_data) upload_image_to_s3( bucket_name, s3_file_name + ".png", 'image/png', user_profile, resized_data, ) # See avatar_url in avatar.py for URL. (That code also handles the case # that users use gravatar.) 
def get_realm_logo_url(self, realm_id: int, version: int, night: bool) -> str: bucket = settings.S3_AVATAR_BUCKET # ?x=x allows templates to append additional parameters with &s if not night: file_name = 'logo.png' else: file_name = 'night_logo.png' return "https://%s.s3.amazonaws.com/%s/realm/%s?version=%s" % (bucket, realm_id, file_name, version) def ensure_medium_avatar_image(self, user_profile: UserProfile) -> None: file_path = user_avatar_path(user_profile) s3_file_name = file_path bucket_name = settings.S3_AVATAR_BUCKET conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) bucket = get_bucket(conn, bucket_name) key = bucket.get_key(file_path + ".original") image_data = key.get_contents_as_string() resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE) # type: ignore # image_data is `bytes`, boto subs are wrong upload_image_to_s3( bucket_name, s3_file_name + "-medium.png", "image/png", user_profile, resized_medium ) def ensure_basic_avatar_image(self, user_profile: UserProfile) -> None: # nocoverage # TODO: Refactor this to share code with ensure_medium_avatar_image file_path = user_avatar_path(user_profile) # Also TODO: Migrate to user_avatar_path(user_profile) + ".png". 
s3_file_name = file_path bucket_name = settings.S3_AVATAR_BUCKET conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY) bucket = get_bucket(conn, bucket_name) key = bucket.get_key(file_path + ".original") image_data = key.get_contents_as_string() resized_avatar = resize_avatar(image_data) # type: ignore # image_data is `bytes`, boto subs are wrong upload_image_to_s3( bucket_name, s3_file_name, "image/png", user_profile, resized_avatar ) def upload_emoji_image(self, emoji_file: File, emoji_file_name: str, user_profile: UserProfile) -> None: content_type = guess_type(emoji_file.name)[0] bucket_name = settings.S3_AVATAR_BUCKET emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format( realm_id=user_profile.realm_id, emoji_file_name=emoji_file_name ) image_data = emoji_file.read() resized_image_data = resize_emoji(image_data) upload_image_to_s3( bucket_name, ".".join((emoji_path, "original")), content_type, user_profile, image_data, ) upload_image_to_s3( bucket_name, emoji_path, content_type, user_profile, resized_image_data, ) def get_emoji_url(self, emoji_file_name: str, realm_id: int) -> str: bucket = settings.S3_AVATAR_BUCKET emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(realm_id=realm_id, emoji_file_name=emoji_file_name) return "https://%s.s3.amazonaws.com/%s" % (bucket, emoji_path) ### Local def write_local_file(type: str, path: str, file_data: bytes) -> None: file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, type, path) os.makedirs(os.path.dirname(file_path), exist_ok=True) with open(file_path, 'wb') as f: f.write(file_data) def read_local_file(type: str, path: str) -> bytes: file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, type, path) with open(file_path, 'rb') as f: return f.read() def delete_local_file(type: str, path: str) -> bool: file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, type, path) if os.path.isfile(file_path): # This removes the file but the empty folders still remain. 
os.remove(file_path) return True file_name = path.split("/")[-1] logging.warning("%s does not exist. Its entry in the database will be removed." % (file_name,)) return False def get_local_file_path(path_id: str) -> Optional[str]: local_path = os.path.join(settings.LOCAL_UPLOADS_DIR, 'files', path_id) if os.path.isfile(local_path): return local_path else: return None class LocalUploadBackend(ZulipUploadBackend): def upload_message_file(self, uploaded_file_name: str, uploaded_file_size: int, content_type: Optional[str], file_data: bytes, user_profile: UserProfile, target_realm: Optional[Realm]=None) -> str: # Split into 256 subdirectories to prevent directories from getting too big path = "/".join([ str(user_profile.realm_id), format(random.randint(0, 255), 'x'), random_name(18), sanitize_name(uploaded_file_name) ]) write_local_file('files', path, file_data) create_attachment(uploaded_file_name, path, user_profile, uploaded_file_size) return '/user_uploads/' + path def delete_message_image(self, path_id: str) -> bool: return delete_local_file('files', path_id) def write_avatar_images(self, file_path: str, image_data: bytes) -> None: write_local_file('avatars', file_path + '.original', image_data) resized_data = resize_avatar(image_data) write_local_file('avatars', file_path + '.png', resized_data) resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE) write_local_file('avatars', file_path + '-medium.png', resized_medium) def upload_avatar_image(self, user_file: File, acting_user_profile: UserProfile, target_user_profile: UserProfile) -> None: file_path = user_avatar_path(target_user_profile) image_data = user_file.read() self.write_avatar_images(file_path, image_data) def delete_avatar_image(self, user: UserProfile) -> None: path_id = user_avatar_path(user) delete_local_file("avatars", path_id + ".original") delete_local_file("avatars", path_id + ".png") delete_local_file("avatars", path_id + "-medium.png") def get_avatar_url(self, hash_key: str, medium: 
bool=False) -> str: # ?x=x allows templates to append additional parameters with &s medium_suffix = "-medium" if medium else "" return "/user_avatars/%s%s.png?x=x" % (hash_key, medium_suffix) def copy_avatar(self, source_profile: UserProfile, target_profile: UserProfile) -> None: source_file_path = user_avatar_path(source_profile) target_file_path = user_avatar_path(target_profile) image_data = read_local_file('avatars', source_file_path + '.original') self.write_avatar_images(target_file_path, image_data) def upload_realm_icon_image(self, icon_file: File, user_profile: UserProfile) -> None: upload_path = os.path.join('avatars', str(user_profile.realm.id), 'realm') image_data = icon_file.read() write_local_file( upload_path, 'icon.original', image_data) resized_data = resize_avatar(image_data) write_local_file(upload_path, 'icon.png', resized_data) def get_realm_icon_url(self, realm_id: int, version: int) -> str: # ?x=x allows templates to append additional parameters with &s return "/user_avatars/%s/realm/icon.png?version=%s" % (realm_id, version) def upload_realm_logo_image(self, logo_file: File, user_profile: UserProfile, night: bool) -> None: upload_path = os.path.join('avatars', str(user_profile.realm.id), 'realm') if night: original_file = 'night_logo.original' resized_file = 'night_logo.png' else: original_file = 'logo.original' resized_file = 'logo.png' image_data = logo_file.read() write_local_file( upload_path, original_file, image_data) resized_data = resize_logo(image_data) write_local_file(upload_path, resized_file, resized_data) def get_realm_logo_url(self, realm_id: int, version: int, night: bool) -> str: # ?x=x allows templates to append additional parameters with &s if night: file_name = 'night_logo.png' else: file_name = 'logo.png' return "/user_avatars/%s/realm/%s?version=%s" % (realm_id, file_name, version) def ensure_medium_avatar_image(self, user_profile: UserProfile) -> None: file_path = user_avatar_path(user_profile) output_path = 
os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + "-medium.png") if os.path.isfile(output_path): return image_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + ".original") image_data = open(image_path, "rb").read() resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE) write_local_file('avatars', file_path + '-medium.png', resized_medium) def ensure_basic_avatar_image(self, user_profile: UserProfile) -> None: # nocoverage # TODO: Refactor this to share code with ensure_medium_avatar_image file_path = user_avatar_path(user_profile) output_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + ".png") if os.path.isfile(output_path): return image_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + ".original") image_data = open(image_path, "rb").read() resized_avatar = resize_avatar(image_data) write_local_file('avatars', file_path + '.png', resized_avatar) def upload_emoji_image(self, emoji_file: File, emoji_file_name: str, user_profile: UserProfile) -> None: emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format( realm_id= user_profile.realm_id, emoji_file_name=emoji_file_name ) image_data = emoji_file.read() resized_image_data = resize_emoji(image_data) write_local_file( 'avatars', ".".join((emoji_path, "original")), image_data) write_local_file( 'avatars', emoji_path, resized_image_data) def get_emoji_url(self, emoji_file_name: str, realm_id: int) -> str: return os.path.join( "/user_avatars", RealmEmoji.PATH_ID_TEMPLATE.format(realm_id=realm_id, emoji_file_name=emoji_file_name)) # Common and wrappers if settings.LOCAL_UPLOADS_DIR is not None: upload_backend = LocalUploadBackend() # type: ZulipUploadBackend else: upload_backend = S3UploadBackend() # nocoverage def delete_message_image(path_id: str) -> bool: return upload_backend.delete_message_image(path_id) def upload_avatar_image(user_file: File, acting_user_profile: UserProfile, target_user_profile: UserProfile) -> None: 
upload_backend.upload_avatar_image(user_file, acting_user_profile, target_user_profile) def delete_avatar_image(user_profile: UserProfile) -> None: upload_backend.delete_avatar_image(user_profile) def copy_avatar(source_profile: UserProfile, target_profile: UserProfile) -> None: upload_backend.copy_avatar(source_profile, target_profile) def upload_icon_image(user_file: File, user_profile: UserProfile) -> None: upload_backend.upload_realm_icon_image(user_file, user_profile) def upload_logo_image(user_file: File, user_profile: UserProfile, night: bool) -> None: upload_backend.upload_realm_logo_image(user_file, user_profile, night) def upload_emoji_image(emoji_file: File, emoji_file_name: str, user_profile: UserProfile) -> None: upload_backend.upload_emoji_image(emoji_file, emoji_file_name, user_profile) def upload_message_file(uploaded_file_name: str, uploaded_file_size: int, content_type: Optional[str], file_data: bytes, user_profile: UserProfile, target_realm: Optional[Realm]=None) -> str: return upload_backend.upload_message_file(uploaded_file_name, uploaded_file_size, content_type, file_data, user_profile, target_realm=target_realm) def claim_attachment(user_profile: UserProfile, path_id: str, message: Message, is_message_realm_public: bool) -> Attachment: attachment = Attachment.objects.get(path_id=path_id) attachment.messages.add(message) attachment.is_realm_public = attachment.is_realm_public or is_message_realm_public attachment.save() return attachment def create_attachment(file_name: str, path_id: str, user_profile: UserProfile, file_size: int) -> bool: attachment = Attachment.objects.create(file_name=file_name, path_id=path_id, owner=user_profile, realm=user_profile.realm, size=file_size) from zerver.lib.actions import notify_attachment_update notify_attachment_update(user_profile, 'add', attachment.to_dict()) return True def upload_message_image_from_request(request: HttpRequest, user_file: File, user_profile: UserProfile) -> str: uploaded_file_name, 
uploaded_file_size, content_type = get_file_info(request, user_file) return upload_message_file(uploaded_file_name, uploaded_file_size, content_type, user_file.read(), user_profile)
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1091_0
crossvul-python_data_bad_1728_0
# coding: utf-8 """A tornado based Jupyter notebook server.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import absolute_import, print_function import base64 import datetime import errno import importlib import io import json import logging import os import random import re import select import signal import socket import ssl import sys import threading import webbrowser from jinja2 import Environment, FileSystemLoader # Install the pyzmq ioloop. This has to be done before anything else from # tornado is imported. from zmq.eventloop import ioloop ioloop.install() # check for tornado 3.1.0 msg = "The Jupyter Notebook requires tornado >= 4.0" try: import tornado except ImportError: raise ImportError(msg) try: version_info = tornado.version_info except AttributeError: raise ImportError(msg + ", but you have < 1.1.0") if version_info < (4,0): raise ImportError(msg + ", but you have %s" % tornado.version) from tornado import httpserver from tornado import web from tornado.log import LogFormatter, app_log, access_log, gen_log from notebook import ( DEFAULT_STATIC_FILES_PATH, DEFAULT_TEMPLATE_PATH_LIST, __version__, ) from .base.handlers import Template404 from .log import log_request from .services.kernels.kernelmanager import MappingKernelManager from .services.config import ConfigManager from .services.contents.manager import ContentsManager from .services.contents.filemanager import FileContentsManager from .services.sessions.sessionmanager import SessionManager from .auth.login import LoginHandler from .auth.logout import LogoutHandler from .base.handlers import FileFindHandler, IPythonHandler from traitlets.config import Config from traitlets.config.application import catch_config_error, boolean_flag from jupyter_core.application import ( JupyterApp, base_flags, base_aliases, ) from jupyter_client import KernelManager from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel, 
NATIVE_KERNEL_NAME from jupyter_client.session import Session from nbformat.sign import NotebookNotary from traitlets import ( Dict, Unicode, Integer, List, Bool, Bytes, Instance, TraitError, Type, ) from ipython_genutils import py3compat from IPython.paths import get_ipython_dir from jupyter_core.paths import jupyter_runtime_dir, jupyter_path from notebook._sysinfo import get_sys_info from .utils import url_path_join, check_pid #----------------------------------------------------------------------------- # Module globals #----------------------------------------------------------------------------- _examples = """ jupyter notebook # start the notebook jupyter notebook --certfile=mycert.pem # use SSL/TLS certificate """ #----------------------------------------------------------------------------- # Helper functions #----------------------------------------------------------------------------- def random_ports(port, n): """Generate a list of n random ports near the given port. The first 5 ports will be sequential, and the remaining n-5 will be randomly selected in the range [port-2*n, port+2*n]. """ for i in range(min(5, n)): yield port + i for i in range(n-5): yield max(1, port + random.randint(-2*n, 2*n)) def load_handlers(name): """Load the (URL pattern, handler) tuples for each component.""" name = 'notebook.' + name mod = __import__(name, fromlist=['default_handlers']) return mod.default_handlers class DeprecationHandler(IPythonHandler): def get(self, url_path): self.set_header("Content-Type", 'text/javascript') self.finish(""" console.warn('`/static/widgets/js` is deprecated. 
Use `/nbextensions/widgets/widgets/js` instead.'); define(['%s'], function(x) { return x; }); """ % url_path_join('nbextensions', 'widgets', 'widgets', url_path.rstrip('.js'))) self.log.warn('Deprecated widget Javascript path /static/widgets/js/*.js was used') #----------------------------------------------------------------------------- # The Tornado web application #----------------------------------------------------------------------------- class NotebookWebApplication(web.Application): def __init__(self, ipython_app, kernel_manager, contents_manager, session_manager, kernel_spec_manager, config_manager, log, base_url, default_url, settings_overrides, jinja_env_options): settings = self.init_settings( ipython_app, kernel_manager, contents_manager, session_manager, kernel_spec_manager, config_manager, log, base_url, default_url, settings_overrides, jinja_env_options) handlers = self.init_handlers(settings) super(NotebookWebApplication, self).__init__(handlers, **settings) def init_settings(self, ipython_app, kernel_manager, contents_manager, session_manager, kernel_spec_manager, config_manager, log, base_url, default_url, settings_overrides, jinja_env_options=None): _template_path = settings_overrides.get( "template_path", ipython_app.template_file_path, ) if isinstance(_template_path, py3compat.string_types): _template_path = (_template_path,) template_path = [os.path.expanduser(path) for path in _template_path] jenv_opt = jinja_env_options if jinja_env_options else {} env = Environment(loader=FileSystemLoader(template_path), **jenv_opt) sys_info = get_sys_info() if sys_info['commit_source'] == 'repository': # don't cache (rely on 304) when working from master version_hash = '' else: # reset the cache on server restart version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S") settings = dict( # basics log_function=log_request, base_url=base_url, default_url=default_url, template_path=template_path, static_path=ipython_app.static_file_path, 
static_custom_path=ipython_app.static_custom_path, static_handler_class = FileFindHandler, static_url_prefix = url_path_join(base_url,'/static/'), static_handler_args = { # don't cache custom.js 'no_cache_paths': [url_path_join(base_url, 'static', 'custom')], }, version_hash=version_hash, ignore_minified_js=ipython_app.ignore_minified_js, # authentication cookie_secret=ipython_app.cookie_secret, login_url=url_path_join(base_url,'/login'), login_handler_class=ipython_app.login_handler_class, logout_handler_class=ipython_app.logout_handler_class, password=ipython_app.password, # managers kernel_manager=kernel_manager, contents_manager=contents_manager, session_manager=session_manager, kernel_spec_manager=kernel_spec_manager, config_manager=config_manager, # IPython stuff jinja_template_vars=ipython_app.jinja_template_vars, nbextensions_path=ipython_app.nbextensions_path, websocket_url=ipython_app.websocket_url, mathjax_url=ipython_app.mathjax_url, config=ipython_app.config, config_dir=ipython_app.config_dir, jinja2_env=env, terminals_available=False, # Set later if terminals are available ) # allow custom overrides for the tornado web app. settings.update(settings_overrides) return settings def init_handlers(self, settings): """Load the (URL pattern, handler) tuples for each component.""" # Order matters. The first handler to match the URL will handle the request. 
handlers = [] handlers.append((r'/deprecatedwidgets/(.*)', DeprecationHandler)) handlers.extend(load_handlers('tree.handlers')) handlers.extend([(r"/login", settings['login_handler_class'])]) handlers.extend([(r"/logout", settings['logout_handler_class'])]) handlers.extend(load_handlers('files.handlers')) handlers.extend(load_handlers('notebook.handlers')) handlers.extend(load_handlers('nbconvert.handlers')) handlers.extend(load_handlers('kernelspecs.handlers')) handlers.extend(load_handlers('edit.handlers')) handlers.extend(load_handlers('services.api.handlers')) handlers.extend(load_handlers('services.config.handlers')) handlers.extend(load_handlers('services.kernels.handlers')) handlers.extend(load_handlers('services.contents.handlers')) handlers.extend(load_handlers('services.sessions.handlers')) handlers.extend(load_handlers('services.nbconvert.handlers')) handlers.extend(load_handlers('services.kernelspecs.handlers')) handlers.extend(load_handlers('services.security.handlers')) # BEGIN HARDCODED WIDGETS HACK try: import ipywidgets handlers.append( (r"/nbextensions/widgets/(.*)", FileFindHandler, { 'path': ipywidgets.find_static_assets(), 'no_cache_paths': ['/'], # don't cache anything in nbextensions }), ) except: app_log.warn('ipywidgets package not installed. 
Widgets are unavailable.') # END HARDCODED WIDGETS HACK handlers.append( (r"/nbextensions/(.*)", FileFindHandler, { 'path': settings['nbextensions_path'], 'no_cache_paths': ['/'], # don't cache anything in nbextensions }), ) handlers.append( (r"/custom/(.*)", FileFindHandler, { 'path': settings['static_custom_path'], 'no_cache_paths': ['/'], # don't cache anything in custom }) ) # register base handlers last handlers.extend(load_handlers('base.handlers')) # set the URL that will be redirected from `/` handlers.append( (r'/?', web.RedirectHandler, { 'url' : settings['default_url'], 'permanent': False, # want 302, not 301 }) ) # prepend base_url onto the patterns that we match new_handlers = [] for handler in handlers: pattern = url_path_join(settings['base_url'], handler[0]) new_handler = tuple([pattern] + list(handler[1:])) new_handlers.append(new_handler) # add 404 on the end, which will catch everything that falls through new_handlers.append((r'(.*)', Template404)) return new_handlers class NbserverListApp(JupyterApp): version = __version__ description="List currently running notebook servers in this profile." flags = dict( json=({'NbserverListApp': {'json': True}}, "Produce machine-readable JSON output."), ) json = Bool(False, config=True, help="If True, each line of output will be a JSON object with the " "details from the server info file.") def start(self): if not self.json: print("Currently running servers:") for serverinfo in list_running_servers(self.runtime_dir): if self.json: print(json.dumps(serverinfo)) else: print(serverinfo['url'], "::", serverinfo['notebook_dir']) #----------------------------------------------------------------------------- # Aliases and Flags #----------------------------------------------------------------------------- flags = dict(base_flags) flags['no-browser']=( {'NotebookApp' : {'open_browser' : False}}, "Don't open the notebook in a browser after startup." 
) flags['pylab']=( {'NotebookApp' : {'pylab' : 'warn'}}, "DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib." ) flags['no-mathjax']=( {'NotebookApp' : {'enable_mathjax' : False}}, """Disable MathJax MathJax is the javascript library Jupyter uses to render math/LaTeX. It is very large, so you may want to disable it if you have a slow internet connection, or for offline use of the notebook. When disabled, equations etc. will appear as their untransformed TeX source. """ ) # Add notebook manager flags flags.update(boolean_flag('script', 'FileContentsManager.save_script', 'DEPRECATED, IGNORED', 'DEPRECATED, IGNORED')) aliases = dict(base_aliases) aliases.update({ 'ip': 'NotebookApp.ip', 'port': 'NotebookApp.port', 'port-retries': 'NotebookApp.port_retries', 'transport': 'KernelManager.transport', 'keyfile': 'NotebookApp.keyfile', 'certfile': 'NotebookApp.certfile', 'notebook-dir': 'NotebookApp.notebook_dir', 'browser': 'NotebookApp.browser', 'pylab': 'NotebookApp.pylab', }) #----------------------------------------------------------------------------- # NotebookApp #----------------------------------------------------------------------------- class NotebookApp(JupyterApp): name = 'jupyter-notebook' version = __version__ description = """ The Jupyter HTML Notebook. This launches a Tornado based HTML Notebook Server that serves up an HTML5/Javascript Notebook client. 
""" examples = _examples aliases = aliases flags = flags classes = [ KernelManager, Session, MappingKernelManager, ContentsManager, FileContentsManager, NotebookNotary, KernelSpecManager, ] flags = Dict(flags) aliases = Dict(aliases) subcommands = dict( list=(NbserverListApp, NbserverListApp.description.splitlines()[0]), ) _log_formatter_cls = LogFormatter def _log_level_default(self): return logging.INFO def _log_datefmt_default(self): """Exclude date from default date format""" return "%H:%M:%S" def _log_format_default(self): """override default log format to include time""" return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s" # create requested profiles by default, if they don't exist: auto_create = Bool(True) ignore_minified_js = Bool(False, config=True, help='Use minified JS file or not, mainly use during dev to avoid JS recompilation', ) # file to be opened in the notebook server file_to_run = Unicode('', config=True) # Network related information allow_origin = Unicode('', config=True, help="""Set the Access-Control-Allow-Origin header Use '*' to allow any origin to access your server. Takes precedence over allow_origin_pat. """ ) allow_origin_pat = Unicode('', config=True, help="""Use a regular expression for the Access-Control-Allow-Origin header Requests from an origin matching the expression will get replies with: Access-Control-Allow-Origin: origin where `origin` is the origin of the request. Ignored if allow_origin is set. """ ) allow_credentials = Bool(False, config=True, help="Set the Access-Control-Allow-Credentials: true header" ) default_url = Unicode('/tree', config=True, help="The default URL to redirect to from `/`" ) ip = Unicode('localhost', config=True, help="The IP address the notebook server will listen on." ) def _ip_default(self): """Return localhost if available, 127.0.0.1 otherwise. On some (horribly broken) systems, localhost cannot be bound. 
""" s = socket.socket() try: s.bind(('localhost', 0)) except socket.error as e: self.log.warn("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s", e) return '127.0.0.1' else: s.close() return 'localhost' def _ip_changed(self, name, old, new): if new == u'*': self.ip = u'' port = Integer(8888, config=True, help="The port the notebook server will listen on." ) port_retries = Integer(50, config=True, help="The number of additional ports to try if the specified port is not available." ) certfile = Unicode(u'', config=True, help="""The full path to an SSL/TLS certificate file.""" ) keyfile = Unicode(u'', config=True, help="""The full path to a private key file for usage with SSL/TLS.""" ) cookie_secret_file = Unicode(config=True, help="""The file where the cookie secret is stored.""" ) def _cookie_secret_file_default(self): return os.path.join(self.runtime_dir, 'notebook_cookie_secret') cookie_secret = Bytes(b'', config=True, help="""The random bytes used to secure cookies. By default this is a new random number every time you start the Notebook. Set it to a value in a config file to enable logins to persist across server sessions. Note: Cookie secrets should be kept private, do not share config files with cookie_secret stored in plaintext (you can read the value from a file). 
""" ) def _cookie_secret_default(self): if os.path.exists(self.cookie_secret_file): with io.open(self.cookie_secret_file, 'rb') as f: return f.read() else: secret = base64.encodestring(os.urandom(1024)) self._write_cookie_secret_file(secret) return secret def _write_cookie_secret_file(self, secret): """write my secret to my secret_file""" self.log.info("Writing notebook server cookie secret to %s", self.cookie_secret_file) with io.open(self.cookie_secret_file, 'wb') as f: f.write(secret) try: os.chmod(self.cookie_secret_file, 0o600) except OSError: self.log.warn( "Could not set permissions on %s", self.cookie_secret_file ) password = Unicode(u'', config=True, help="""Hashed password to use for web authentication. To generate, type in a python/IPython shell: from notebook.auth import passwd; passwd() The string should be of the form type:salt:hashed-password. """ ) open_browser = Bool(True, config=True, help="""Whether to open in a browser after starting. The specific browser used is platform dependent and determined by the python standard library `webbrowser` module, unless it is overridden using the --browser (NotebookApp.browser) configuration option. """) browser = Unicode(u'', config=True, help="""Specify what command to use to invoke a web browser when opening the notebook. If not specified, the default browser will be determined by the `webbrowser` standard library module, which allows setting of the BROWSER environment variable to override it. """) webapp_settings = Dict(config=True, help="DEPRECATED, use tornado_settings" ) def _webapp_settings_changed(self, name, old, new): self.log.warn("\n webapp_settings is deprecated, use tornado_settings.\n") self.tornado_settings = new tornado_settings = Dict(config=True, help="Supply overrides for the tornado.web.Application that the " "Jupyter notebook uses.") ssl_options = Dict(config=True, help="""Supply SSL options for the tornado HTTPServer. 
See the tornado docs for details.""") jinja_environment_options = Dict(config=True, help="Supply extra arguments that will be passed to Jinja environment.") jinja_template_vars = Dict( config=True, help="Extra variables to supply to jinja templates when rendering.", ) enable_mathjax = Bool(True, config=True, help="""Whether to enable MathJax for typesetting math/TeX MathJax is the javascript library Jupyter uses to render math/LaTeX. It is very large, so you may want to disable it if you have a slow internet connection, or for offline use of the notebook. When disabled, equations etc. will appear as their untransformed TeX source. """ ) def _enable_mathjax_changed(self, name, old, new): """set mathjax url to empty if mathjax is disabled""" if not new: self.mathjax_url = u'' base_url = Unicode('/', config=True, help='''The base URL for the notebook server. Leading and trailing slashes can be omitted, and will automatically be added. ''') def _base_url_changed(self, name, old, new): if not new.startswith('/'): self.base_url = '/'+new elif not new.endswith('/'): self.base_url = new+'/' base_project_url = Unicode('/', config=True, help="""DEPRECATED use base_url""") def _base_project_url_changed(self, name, old, new): self.log.warn("base_project_url is deprecated, use base_url") self.base_url = new extra_static_paths = List(Unicode(), config=True, help="""Extra paths to search for serving static files. 
This allows adding javascript/css to be available from the notebook server machine, or overriding individual files in the IPython""" ) @property def static_file_path(self): """return extra paths + the default location""" return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH] static_custom_path = List(Unicode(), help="""Path to search for custom.js, css""" ) def _static_custom_path_default(self): return [ os.path.join(d, 'custom') for d in ( self.config_dir, # FIXME: serve IPython profile while we don't have `jupyter migrate` os.path.join(get_ipython_dir(), 'profile_default', 'static'), DEFAULT_STATIC_FILES_PATH) ] extra_template_paths = List(Unicode(), config=True, help="""Extra paths to search for serving jinja templates. Can be used to override templates from notebook.templates.""" ) @property def template_file_path(self): """return extra paths + the default locations""" return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST extra_nbextensions_path = List(Unicode(), config=True, help="""extra paths to look for Javascript notebook extensions""" ) @property def nbextensions_path(self): """The path to look for Javascript notebook extensions""" path = self.extra_nbextensions_path + jupyter_path('nbextensions') # FIXME: remove IPython nbextensions path once migration is setup path.append(os.path.join(get_ipython_dir(), 'nbextensions')) return path websocket_url = Unicode("", config=True, help="""The base URL for websockets, if it differs from the HTTP server (hint: it almost certainly doesn't). 
Should be in the form of an HTTP origin: ws[s]://hostname[:port] """ ) mathjax_url = Unicode("", config=True, help="""The url for MathJax.js.""" ) def _mathjax_url_default(self): if not self.enable_mathjax: return u'' static_url_prefix = self.tornado_settings.get("static_url_prefix", url_path_join(self.base_url, "static") ) return url_path_join(static_url_prefix, 'components', 'MathJax', 'MathJax.js') def _mathjax_url_changed(self, name, old, new): if new and not self.enable_mathjax: # enable_mathjax=False overrides mathjax_url self.mathjax_url = u'' else: self.log.info("Using MathJax: %s", new) contents_manager_class = Type( default_value=FileContentsManager, klass=ContentsManager, config=True, help='The notebook manager class to use.' ) kernel_manager_class = Type( default_value=MappingKernelManager, config=True, help='The kernel manager class to use.' ) session_manager_class = Type( default_value=SessionManager, config=True, help='The session manager class to use.' ) config_manager_class = Type( default_value=ConfigManager, config = True, help='The config manager class to use' ) kernel_spec_manager = Instance(KernelSpecManager, allow_none=True) kernel_spec_manager_class = Type( default_value=KernelSpecManager, config=True, help=""" The kernel spec manager class to use. Should be a subclass of `jupyter_client.kernelspec.KernelSpecManager`. The Api of KernelSpecManager is provisional and might change without warning between this version of Jupyter and the next stable one. """ ) login_handler_class = Type( default_value=LoginHandler, klass=web.RequestHandler, config=True, help='The login handler class to use.', ) logout_handler_class = Type( default_value=LogoutHandler, klass=web.RequestHandler, config=True, help='The logout handler class to use.', ) trust_xheaders = Bool(False, config=True, help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers" "sent by the upstream reverse proxy. 
Necessary if the proxy handles SSL") ) info_file = Unicode() def _info_file_default(self): info_file = "nbserver-%s.json" % os.getpid() return os.path.join(self.runtime_dir, info_file) pylab = Unicode('disabled', config=True, help=""" DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib. """ ) def _pylab_changed(self, name, old, new): """when --pylab is specified, display a warning and exit""" if new != 'warn': backend = ' %s' % new else: backend = '' self.log.error("Support for specifying --pylab on the command line has been removed.") self.log.error( "Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.".format(backend) ) self.exit(1) notebook_dir = Unicode(config=True, help="The directory to use for notebooks and kernels." ) def _notebook_dir_default(self): if self.file_to_run: return os.path.dirname(os.path.abspath(self.file_to_run)) else: return py3compat.getcwd() def _notebook_dir_changed(self, name, old, new): """Do a bit of validation of the notebook dir.""" if not os.path.isabs(new): # If we receive a non-absolute path, make it absolute. self.notebook_dir = os.path.abspath(new) return if not os.path.isdir(new): raise TraitError("No such notebook dir: %r" % new) # setting App.notebook_dir implies setting notebook and kernel dirs as well self.config.FileContentsManager.root_dir = new self.config.MappingKernelManager.root_dir = new server_extensions = List(Unicode(), config=True, help=("Python modules to load as notebook server extensions. 
" "This is an experimental API, and may change in future releases.") ) reraise_server_extension_failures = Bool( False, config=True, help="Reraise exceptions encountered loading server extensions?", ) def parse_command_line(self, argv=None): super(NotebookApp, self).parse_command_line(argv) if self.extra_args: arg0 = self.extra_args[0] f = os.path.abspath(arg0) self.argv.remove(arg0) if not os.path.exists(f): self.log.critical("No such file or directory: %s", f) self.exit(1) # Use config here, to ensure that it takes higher priority than # anything that comes from the profile. c = Config() if os.path.isdir(f): c.NotebookApp.notebook_dir = f elif os.path.isfile(f): c.NotebookApp.file_to_run = f self.update_config(c) def init_configurables(self): self.kernel_spec_manager = self.kernel_spec_manager_class( parent=self, ) self.kernel_manager = self.kernel_manager_class( parent=self, log=self.log, connection_dir=self.runtime_dir, kernel_spec_manager=self.kernel_spec_manager, ) self.contents_manager = self.contents_manager_class( parent=self, log=self.log, ) self.session_manager = self.session_manager_class( parent=self, log=self.log, kernel_manager=self.kernel_manager, contents_manager=self.contents_manager, ) self.config_manager = self.config_manager_class( parent=self, log=self.log, config_dir=os.path.join(self.config_dir, 'nbconfig'), ) def init_logging(self): # This prevents double log messages because tornado use a root logger that # self.log is a child of. The logging module dipatches log messages to a log # and all of its ancenstors until propagate is set to False. self.log.propagate = False for log in app_log, access_log, gen_log: # consistent log output name (NotebookApp instead of tornado.access, etc.) 
log.name = self.log.name # hook up tornado 3's loggers to our app handlers logger = logging.getLogger('tornado') logger.propagate = True logger.parent = self.log logger.setLevel(self.log.level) def init_webapp(self): """initialize tornado webapp and httpserver""" self.tornado_settings['allow_origin'] = self.allow_origin if self.allow_origin_pat: self.tornado_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat) self.tornado_settings['allow_credentials'] = self.allow_credentials # ensure default_url starts with base_url if not self.default_url.startswith(self.base_url): self.default_url = url_path_join(self.base_url, self.default_url) self.web_app = NotebookWebApplication( self, self.kernel_manager, self.contents_manager, self.session_manager, self.kernel_spec_manager, self.config_manager, self.log, self.base_url, self.default_url, self.tornado_settings, self.jinja_environment_options ) ssl_options = self.ssl_options if self.certfile: ssl_options['certfile'] = self.certfile if self.keyfile: ssl_options['keyfile'] = self.keyfile if not ssl_options: # None indicates no SSL config ssl_options = None else: # Disable SSLv3, since its use is discouraged. ssl_options['ssl_version']=ssl.PROTOCOL_TLSv1 self.login_handler_class.validate_security(self, ssl_options=ssl_options) self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options, xheaders=self.trust_xheaders) success = None for port in random_ports(self.port, self.port_retries+1): try: self.http_server.listen(port, self.ip) except socket.error as e: if e.errno == errno.EADDRINUSE: self.log.info('The port %i is already in use, trying another random port.' 
% port) continue elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)): self.log.warn("Permission to listen on port %i denied" % port) continue else: raise else: self.port = port success = True break if not success: self.log.critical('ERROR: the notebook server could not be started because ' 'no available port could be found.') self.exit(1) @property def display_url(self): ip = self.ip if self.ip else '[all ip addresses on your system]' return self._url(ip) @property def connection_url(self): ip = self.ip if self.ip else 'localhost' return self._url(ip) def _url(self, ip): proto = 'https' if self.certfile else 'http' return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url) def init_terminals(self): try: from .terminal import initialize initialize(self.web_app, self.notebook_dir, self.connection_url) self.web_app.settings['terminals_available'] = True except ImportError as e: log = self.log.debug if sys.platform == 'win32' else self.log.warn log("Terminals not available (error was %s)", e) def init_signal(self): if not sys.platform.startswith('win') and sys.stdin.isatty(): signal.signal(signal.SIGINT, self._handle_sigint) signal.signal(signal.SIGTERM, self._signal_stop) if hasattr(signal, 'SIGUSR1'): # Windows doesn't support SIGUSR1 signal.signal(signal.SIGUSR1, self._signal_info) if hasattr(signal, 'SIGINFO'): # only on BSD-based systems signal.signal(signal.SIGINFO, self._signal_info) def _handle_sigint(self, sig, frame): """SIGINT handler spawns confirmation dialog""" # register more forceful signal handler for ^C^C case signal.signal(signal.SIGINT, self._signal_stop) # request confirmation dialog in bg thread, to avoid # blocking the App thread = threading.Thread(target=self._confirm_exit) thread.daemon = True thread.start() def _restore_sigint_handler(self): """callback for restoring original SIGINT handler""" signal.signal(signal.SIGINT, self._handle_sigint) def _confirm_exit(self): """confirm shutdown on ^C A second ^C, or answering 
'y' within 5s will cause shutdown, otherwise original SIGINT handler will be restored. This doesn't work on Windows. """ info = self.log.info info('interrupted') print(self.notebook_info()) sys.stdout.write("Shutdown this notebook server (y/[n])? ") sys.stdout.flush() r,w,x = select.select([sys.stdin], [], [], 5) if r: line = sys.stdin.readline() if line.lower().startswith('y') and 'n' not in line.lower(): self.log.critical("Shutdown confirmed") ioloop.IOLoop.current().stop() return else: print("No answer for 5s:", end=' ') print("resuming operation...") # no answer, or answer is no: # set it back to original SIGINT handler # use IOLoop.add_callback because signal.signal must be called # from main thread ioloop.IOLoop.current().add_callback(self._restore_sigint_handler) def _signal_stop(self, sig, frame): self.log.critical("received signal %s, stopping", sig) ioloop.IOLoop.current().stop() def _signal_info(self, sig, frame): print(self.notebook_info()) def init_components(self): """Check the components submodule, and warn if it's unclean""" # TODO: this should still check, but now we use bower, not git submodule pass def init_server_extensions(self): """Load any extensions specified by config. Import the module, then call the load_jupyter_server_extension function, if one exists. The extension API is experimental, and may change in future releases. 
""" for modulename in self.server_extensions: try: mod = importlib.import_module(modulename) func = getattr(mod, 'load_jupyter_server_extension', None) if func is not None: func(self) except Exception: if self.reraise_server_extension_failures: raise self.log.warn("Error loading server extension %s", modulename, exc_info=True) @catch_config_error def initialize(self, argv=None): super(NotebookApp, self).initialize(argv) self.init_logging() if self._dispatching: return self.init_configurables() self.init_components() self.init_webapp() self.init_terminals() self.init_signal() self.init_server_extensions() def cleanup_kernels(self): """Shutdown all kernels. The kernels will shutdown themselves when this process no longer exists, but explicit shutdown allows the KernelManagers to cleanup the connection files. """ self.log.info('Shutting down kernels') self.kernel_manager.shutdown_all() def notebook_info(self): "Return the current working directory and the server url information" info = self.contents_manager.info_string() + "\n" info += "%d active kernels \n" % len(self.kernel_manager._kernels) return info + "The Jupyter Notebook is running at: %s" % self.display_url def server_info(self): """Return a JSONable dict of information about this server.""" return {'url': self.connection_url, 'hostname': self.ip if self.ip else 'localhost', 'port': self.port, 'secure': bool(self.certfile), 'base_url': self.base_url, 'notebook_dir': os.path.abspath(self.notebook_dir), 'pid': os.getpid() } def write_server_info_file(self): """Write the result of server_info() to the JSON file info_file.""" with open(self.info_file, 'w') as f: json.dump(self.server_info(), f, indent=2) def remove_server_info_file(self): """Remove the nbserver-<pid>.json file created for this server. Ignores the error raised when the file has already been removed. 
""" try: os.unlink(self.info_file) except OSError as e: if e.errno != errno.ENOENT: raise def start(self): """ Start the Notebook server app, after initialization This method takes no arguments so all configuration and initialization must be done prior to calling this method.""" super(NotebookApp, self).start() info = self.log.info for line in self.notebook_info().split("\n"): info(line) info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).") self.write_server_info_file() if self.open_browser or self.file_to_run: try: browser = webbrowser.get(self.browser or None) except webbrowser.Error as e: self.log.warn('No web browser found: %s.' % e) browser = None if self.file_to_run: if not os.path.exists(self.file_to_run): self.log.critical("%s does not exist" % self.file_to_run) self.exit(1) relpath = os.path.relpath(self.file_to_run, self.notebook_dir) uri = url_path_join('notebooks', *relpath.split(os.sep)) else: uri = 'tree' if browser: b = lambda : browser.open(url_path_join(self.connection_url, uri), new=2) threading.Thread(target=b).start() self.io_loop = ioloop.IOLoop.current() if sys.platform.startswith('win'): # add no-op to wake every 5s # to handle signals that may be ignored by the inner loop pc = ioloop.PeriodicCallback(lambda : None, 5000) pc.start() try: self.io_loop.start() except KeyboardInterrupt: info("Interrupted...") finally: self.cleanup_kernels() self.remove_server_info_file() def stop(self): def _stop(): self.http_server.stop() self.io_loop.stop() self.io_loop.add_callback(_stop) def list_running_servers(runtime_dir=None): """Iterate over the server info files of running notebook servers. Given a profile name, find nbserver-* files in the security directory of that profile, and yield dicts of their information, each one pertaining to a currently running notebook server instance. 
""" if runtime_dir is None: runtime_dir = jupyter_runtime_dir() # The runtime dir might not exist if not os.path.isdir(runtime_dir): return for file in os.listdir(runtime_dir): if file.startswith('nbserver-'): with io.open(os.path.join(runtime_dir, file), encoding='utf-8') as f: info = json.load(f) # Simple check whether that process is really still running # Also remove leftover files from IPython 2.x without a pid field if ('pid' in info) and check_pid(info['pid']): yield info else: # If the process has died, try to delete its info file try: os.unlink(file) except OSError: pass # TODO: This should warn or log or something #----------------------------------------------------------------------------- # Main entry point #----------------------------------------------------------------------------- main = launch_new_instance = NotebookApp.launch_instance
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1728_0
crossvul-python_data_good_2105_1
import re
import random
import datetime
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.utils.html import escape


def get_from_dict_or_object(source, key):
    """Return ``source[key]``, falling back to ``getattr(source, key)``.

    Lets callers treat dict-like and object-like sources uniformly.
    """
    try:
        return source[key]
    except Exception:  # narrowed from a bare except so ^C/SystemExit pass through
        return getattr(source, key)


def enumerate_string_list(strings):
    """for a list or a tuple ('one', 'two',) return
    a list formatted as ['1) one', '2) two',]
    """
    numbered_strings = enumerate(strings, start=1)
    return ['%d) %s' % item for item in numbered_strings]


def pad_string(text):
    """Inserts one space between words,
    including one space before the first word
    and after the last word.
    String without words is collapsed to ''
    """
    words = text.strip().split()
    if words:
        return ' ' + ' '.join(words) + ' '
    else:
        return ''


def split_list(text):
    """Takes text, representing a loosely formatted
    list (comma, semicolon, empty space separated
    words) and returns a list() of words.
    """
    text = text.replace(',', ' ').replace(';', ' ')
    return text.strip().split()


def is_iterable(thing):
    """Return True for iterables; strings count via the basestring fallback.

    NOTE: this module targets Python 2 (``basestring``, ``ugettext``).
    """
    if hasattr(thing, '__iter__'):
        return True
    else:
        return isinstance(thing, basestring)


# BUGFIX: flags must be supplied at compile time. ``re.Pattern.match`` has no
# flags parameter -- its second positional argument is ``pos`` (the start
# index), so the previous call BOT_REGEX.match(user_agent, re.IGNORECASE)
# silently started matching at index 2 and was case-SENSITIVE.
BOT_REGEX = re.compile(
    r'bot|http|\.com|crawl|spider|python|curl|yandex',
    re.IGNORECASE
)
BROWSER_REGEX = re.compile(
    r'^(Mozilla.*(Gecko|KHTML|MSIE|Presto|Trident)|Opera).*$'
)
MOBILE_REGEX = re.compile(
    r'(BlackBerry|HTC|LG|MOT|Nokia|NOKIAN|PLAYSTATION|PSP|SAMSUNG|SonyEricsson)'
)


def strip_plus(text):
    """returns text with redundant spaces replaced with just one,
    and stripped leading and the trailing spaces"""
    return re.sub(r'\s+', ' ', text).strip()


def not_a_robot_request(request):
    """Heuristically decide whether ``request`` came from a human browser.

    Returns False for requests without Accept-Language or User-Agent headers
    and for bot-looking agents; True for known mobile/desktop browsers.
    """
    if 'HTTP_ACCEPT_LANGUAGE' not in request.META:
        return False

    user_agent = request.META.get('HTTP_USER_AGENT', None)
    if user_agent is None:
        return False

    # NOTE(review): ``match`` only tests the *start* of the user agent string;
    # ``search`` may have been intended (bot markers rarely lead the UA) --
    # confirm before changing, as it would classify more clients as bots.
    if BOT_REGEX.match(user_agent):
        return False

    if MOBILE_REGEX.match(user_agent):
        return True

    if BROWSER_REGEX.search(user_agent):
        return True

    return False


def diff_date(date, use_on_prefix=False):
    """Render ``date`` as a human-friendly age relative to now.

    :param date: a naive datetime in local time.
    :param use_on_prefix: for dates older than two days, prefix with "on".
    """
    now = datetime.datetime.now()  # datetime(*time.localtime()[0:6])#???
    diff = now - date
    days = diff.days
    # ``seconds`` excludes whole days; hours/minutes are only used below when
    # the difference is under two days, so that is fine.
    hours = int(diff.seconds / 3600)
    minutes = int(diff.seconds / 60)

    if days > 2:
        if date.year == now.year:
            date_token = date.strftime("%b %d")
        else:
            date_token = date.strftime("%b %d '%y")
        if use_on_prefix:
            return _('on %(date)s') % {'date': date_token}
        else:
            return date_token
    elif days == 2:
        return _('2 days ago')
    elif days == 1:
        return _('yesterday')
    elif minutes >= 60:
        return ungettext(
            '%(hr)d hour ago',
            '%(hr)d hours ago',
            hours
        ) % {'hr': hours}
    else:
        return ungettext(
            '%(min)d min ago',
            '%(min)d mins ago',
            minutes
        ) % {'min': minutes}

#todo: this function may need to be removed to simplify the paginator functionality
LEADING_PAGE_RANGE_DISPLAYED = TRAILING_PAGE_RANGE_DISPLAYED = 5
LEADING_PAGE_RANGE = TRAILING_PAGE_RANGE = 4
NUM_PAGES_OUTSIDE_RANGE = 1
ADJACENT_PAGES = 2

def setup_paginator(context):
    """
    custom paginator tag
    Inspired from http://blog.localkinegrinds.com/2007/09/06/digg-style-pagination-in-django/
    """
    if (context["is_paginated"]):
        # Initialize variables
        in_leading_range = in_trailing_range = False
        pages_outside_leading_range = pages_outside_trailing_range = range(0)

        if (context["pages"] <= LEADING_PAGE_RANGE_DISPLAYED):
            # Few enough pages to show them all.
            in_leading_range = in_trailing_range = True
            page_numbers = [n for n in range(1, context["pages"] + 1)
                            if n > 0 and n <= context["pages"]]
        elif (context["current_page_number"] <= LEADING_PAGE_RANGE):
            # Near the beginning: show the leading window plus the last page(s).
            in_leading_range = True
            page_numbers = [n for n in range(1, LEADING_PAGE_RANGE_DISPLAYED + 1)
                            if n > 0 and n <= context["pages"]]
            pages_outside_leading_range = [n + context["pages"]
                                           for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
        elif (context["current_page_number"] > context["pages"] - TRAILING_PAGE_RANGE):
            # Near the end: show the trailing window plus the first page(s).
            in_trailing_range = True
            page_numbers = [n for n in range(context["pages"] - TRAILING_PAGE_RANGE_DISPLAYED + 1,
                                             context["pages"] + 1)
                            if n > 0 and n <= context["pages"]]
            pages_outside_trailing_range = [n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]
        else:
            # Middle: show a window around the current page plus both ends.
            page_numbers = [n for n in range(context["current_page_number"] - ADJACENT_PAGES,
                                             context["current_page_number"] + ADJACENT_PAGES + 1)
                            if n > 0 and n <= context["pages"]]
            pages_outside_leading_range = [n + context["pages"]
                                           for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
            pages_outside_trailing_range = [n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]

        page_object = context['page_object']
        #patch for change in django 1.5
        if page_object.has_previous():
            previous_page_number = page_object.previous_page_number()
        else:
            previous_page_number = None
        if page_object.has_next():
            next_page_number = page_object.next_page_number()
        else:
            next_page_number = None

        return {
            # base_url is HTML-escaped so query-string content cannot inject
            # markup into the rendered pagination links.
            "base_url": escape(context["base_url"]),
            "is_paginated": context["is_paginated"],
            "previous": previous_page_number,
            "has_previous": page_object.has_previous(),
            "next": next_page_number,
            "has_next": page_object.has_next(),
            "page": context["current_page_number"],
            "pages": context["pages"],
            "page_numbers": page_numbers,
            "in_leading_range": in_leading_range,
            "in_trailing_range": in_trailing_range,
            "pages_outside_leading_range": pages_outside_leading_range,
            "pages_outside_trailing_range": pages_outside_trailing_range,
        }


def get_admin():
    """Returns an admin users, usefull for raising flags"""
    try:
        from django.contrib.auth.models import User
        return User.objects.filter(is_superuser=True)[0]
    except Exception:  # narrowed from a bare except
        raise Exception('there is no admin users')


def generate_random_key(length=16):
    """return random string, length is number of characters"""
    # NOTE(review): ``random`` is not a CSPRNG; if these keys guard anything
    # security sensitive, os.urandom would be safer -- confirm usage.
    random.seed()
    assert(isinstance(length, int))
    format_string = '%0' + str(2 * length) + 'x'
    return format_string % random.getrandbits(length * 8)
./CrossVul/dataset_final_sorted/CWE-79/py/good_2105_1
crossvul-python_data_good_1644_1
"""Tornado handlers for cluster web service.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import json from tornado import web from ...base.handlers import APIHandler #----------------------------------------------------------------------------- # Cluster handlers #----------------------------------------------------------------------------- class MainClusterHandler(APIHandler): @web.authenticated def get(self): self.finish(json.dumps(self.cluster_manager.list_profiles())) class ClusterProfileHandler(APIHandler): @web.authenticated def get(self, profile): self.finish(json.dumps(self.cluster_manager.profile_info(profile))) class ClusterActionHandler(APIHandler): @web.authenticated def post(self, profile, action): cm = self.cluster_manager if action == 'start': n = self.get_argument('n', default=None) if not n: data = cm.start_cluster(profile) else: data = cm.start_cluster(profile, int(n)) if action == 'stop': data = cm.stop_cluster(profile) self.finish(json.dumps(data)) #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- _cluster_action_regex = r"(?P<action>start|stop)" _profile_regex = r"(?P<profile>[^\/]+)" # there is almost no text that is invalid default_handlers = [ (r"/clusters", MainClusterHandler), (r"/clusters/%s/%s" % (_profile_regex, _cluster_action_regex), ClusterActionHandler), (r"/clusters/%s" % _profile_regex, ClusterProfileHandler), ]
./CrossVul/dataset_final_sorted/CWE-79/py/good_1644_1
crossvul-python_data_good_4389_0
from typing import Any, List

import bleach

from .rest_api import ValidationError

# Tag whitelist for content edited by regular users.  Anything outside these
# lists is escaped (not stripped) by bleach.clean() in base_validate_html().
allowed_tags_strict = [
    "a", "img",  # links and images
    "br", "p", "span", "blockquote",  # text layout
    "strike", "del", "ins", "strong", "u", "em", "sup", "sub", "pre",  # text formatting
    "h1", "h2", "h3", "h4", "h5", "h6",  # headings
    "ol", "ul", "li",  # lists
    "table", "caption", "thead", "tbody", "th", "tr", "td",  # tables
    "div",
]
# Extra tags only available through the permissive validator (admin content).
allowed_tags_permissive = allowed_tags_strict + [
    "video",
]
# Attribute whitelist shared by both validators; applies to every allowed tag.
allowed_attributes = [
    "align", "alt", "autoplay", "background", "bgcolor", "border", "class",
    "colspan", "controls", "dir", "height", "hidden", "href", "hreflang",
    "id", "lang", "loop", "muted", "poster", "preload", "rel", "rowspan",
    "scope", "sizes", "src", "srcset", "start", "style", "target", "title",
    "width",
]
# CSS property whitelist for style="..." attributes.
allowed_styles = [
    "color", "background-color", "height", "width", "text-align",
    "vertical-align", "float", "text-decoration", "margin", "padding",
    "line-height", "max-width", "min-width", "max-height", "min-height",
    "overflow", "word-break", "word-wrap",
]


def validate_html_strict(html: str) -> str:
    """
    This method takes a string and escapes all non-whitelisted html entries.
    Every field of a model that is loaded trusted in the DOM should be
    validated.
    During copy and paste from Word maybe some tabs are spread over the
    html. Remove them.
    """
    return base_validate_html(html, allowed_tags_strict)


def validate_html_permissive(html: str) -> str:
    """
    See validate_html_strict, but allows some more tags, like iframes and
    videos.
    Do not use on validation for normal users, only for admins!
    """
    # NOTE(review): despite the docstring, the permissive list currently only
    # adds <video>; iframes are NOT whitelisted -- confirm intended scope.
    return base_validate_html(html, allowed_tags_permissive)


def base_validate_html(html: str, allowed_tags: List[str]) -> str:
    """
    For internal use only.
    """
    # Strip tabs first (Word copy/paste artifact), then let bleach escape
    # everything outside the given tag/attribute/style whitelists.
    html = html.replace("\t", "")
    return bleach.clean(
        html, tags=allowed_tags, attributes=allowed_attributes, styles=allowed_styles
    )


def validate_json(json: Any, max_depth: int) -> Any:
    """
    Traverses through the JSON structure (dicts and lists) and runs
    validate_html_strict on every found string. Give max-depth to protect
    against stack-overflows. This should be the maximum nested depth of the
    object expected.
    """
    # max_depth counts down on every recursion; hitting 0 rejects the payload.
    if max_depth == 0:
        raise ValidationError({"detail": "The JSON is too nested."})

    if isinstance(json, dict):
        return {key: validate_json(value, max_depth - 1) for key, value in json.items()}
    if isinstance(json, list):
        return [validate_json(item, max_depth - 1) for item in json]
    if isinstance(json, str):
        return validate_html_strict(json)
    # Numbers, booleans and None are passed through unchanged.
    return json
./CrossVul/dataset_final_sorted/CWE-79/py/good_4389_0
crossvul-python_data_good_4208_1
# -*- coding: iso-8859-1 -*- """ MoinMoin - Multiple configuration handler and Configuration defaults class @copyright: 2000-2004 Juergen Hermann <jh@web.de>, 2005-2008 MoinMoin:ThomasWaldmann. 2008 MoinMoin:JohannesBerg @license: GNU GPL, see COPYING for details. """ import hashlib import re import os import sys import time from MoinMoin import log logging = log.getLogger(__name__) from MoinMoin import config, error, util, wikiutil, web from MoinMoin import datastruct from MoinMoin.auth import MoinAuth import MoinMoin.auth as authmodule import MoinMoin.events as events from MoinMoin.events import PageChangedEvent, PageRenamedEvent from MoinMoin.events import PageDeletedEvent, PageCopiedEvent from MoinMoin.events import PageRevertedEvent, FileAttachedEvent import MoinMoin.web.session from MoinMoin.packages import packLine from MoinMoin.security import AccessControlList _url_re_cache = None _farmconfig_mtime = None _config_cache = {} def _importConfigModule(name): """ Import and return configuration module and its modification time Handle all errors except ImportError, because missing file is not always an error. @param name: module name @rtype: tuple @return: module, modification time """ try: module = __import__(name, globals(), {}) mtime = os.path.getmtime(module.__file__) except ImportError: raise except IndentationError, err: logging.exception('Your source code / config file is not correctly indented!') msg = """IndentationError: %(err)s The configuration files are Python modules. Therefore, whitespace is important. Make sure that you use only spaces, no tabs are allowed here! You have to use four spaces at the beginning of the line mostly. 
""" % { 'err': err, } raise error.ConfigurationError(msg) except Exception, err: logging.exception('An exception happened.') msg = '%s: %s' % (err.__class__.__name__, str(err)) raise error.ConfigurationError(msg) return module, mtime def _url_re_list(): """ Return url matching regular expression Import wikis list from farmconfig on the first call and compile the regexes. Later just return the cached regex list. @rtype: list of tuples of (name, compiled re object) @return: url to wiki config name matching list """ global _url_re_cache, _farmconfig_mtime if _url_re_cache is None: try: farmconfig, _farmconfig_mtime = _importConfigModule('farmconfig') except ImportError, err: if 'farmconfig' in str(err): # we failed importing farmconfig logging.debug("could not import farmconfig, mapping all URLs to wikiconfig") _farmconfig_mtime = 0 _url_re_cache = [('wikiconfig', re.compile(r'.')), ] # matches everything else: # maybe there was a failing import statement inside farmconfig raise else: logging.info("using farm config: %s" % os.path.abspath(farmconfig.__file__)) try: cache = [] for name, regex in farmconfig.wikis: cache.append((name, re.compile(regex))) _url_re_cache = cache except AttributeError: logging.error("required 'wikis' list missing in farmconfig") msg = """ Missing required 'wikis' list in 'farmconfig.py'. If you run a single wiki you do not need farmconfig.py. Delete it and use wikiconfig.py. """ raise error.ConfigurationError(msg) return _url_re_cache def _makeConfig(name): """ Create and return a config instance Timestamp config with either module mtime or farmconfig mtime. This mtime can be used later to invalidate older caches. 
@param name: module name @rtype: DefaultConfig sub class instance @return: new configuration instance """ global _farmconfig_mtime try: module, mtime = _importConfigModule(name) configClass = getattr(module, 'Config') cfg = configClass(name) cfg.cfg_mtime = max(mtime, _farmconfig_mtime) logging.info("using wiki config: %s" % os.path.abspath(module.__file__)) except ImportError, err: logging.exception('Could not import.') msg = """ImportError: %(err)s Check that the file is in the same directory as the server script. If it is not, you must add the path of the directory where the file is located to the python path in the server script. See the comments at the top of the server script. Check that the configuration file name is either "wikiconfig.py" or the module name specified in the wikis list in farmconfig.py. Note that the module name does not include the ".py" suffix. """ % { 'err': err, } raise error.ConfigurationError(msg) except AttributeError, err: logging.exception('An exception occurred.') msg = """AttributeError: %(err)s Could not find required "Config" class in "%(name)s.py". This might happen if you are trying to use a pre 1.3 configuration file, or made a syntax or spelling error. Another reason for this could be a name clash. It is not possible to have config names like e.g. stats.py - because that collides with MoinMoin/stats/ - have a look into your MoinMoin code directory what other names are NOT possible. Please check your configuration file. As an example for correct syntax, use the wikiconfig.py file from the distribution. 
""" % { 'name': name, 'err': err, } raise error.ConfigurationError(msg) return cfg def _getConfigName(url): """ Return config name for url or raise """ for name, regex in _url_re_list(): match = regex.match(url) if match: return name raise error.NoConfigMatchedError def getConfig(url): """ Return cached config instance for url or create new one If called by many threads in the same time multiple config instances might be created. The first created item will be returned, using dict.setdefault. @param url: the url from request, possibly matching specific wiki @rtype: DefaultConfig subclass instance @return: config object for specific wiki """ cfgName = _getConfigName(url) try: cfg = _config_cache[cfgName] except KeyError: cfg = _makeConfig(cfgName) cfg = _config_cache.setdefault(cfgName, cfg) return cfg # This is a way to mark some text for the gettext tools so that they don't # get orphaned. See http://www.python.org/doc/current/lib/node278.html. def _(text): return text class CacheClass: """ just a container for stuff we cache """ pass class ConfigFunctionality(object): """ Configuration base class with config class behaviour. This class contains the functionality for the DefaultConfig class for the benefit of the WikiConfig macro. """ # attributes of this class that should not be shown # in the WikiConfig() macro. cfg_mtime = None siteid = None cache = None mail_enabled = None jabber_enabled = None auth_can_logout = None auth_have_login = None auth_login_inputs = None _site_plugin_lists = None _iwid = None _iwid_full = None xapian_searchers = None moinmoin_dir = None # will be lazily loaded by interwiki code when needed (?) 
shared_intermap_files = None def __init__(self, siteid): """ Init Config instance """ self.siteid = siteid self.cache = CacheClass() from MoinMoin.Page import ItemCache self.cache.meta = ItemCache('meta') self.cache.pagelists = ItemCache('pagelists') if self.config_check_enabled: self._config_check() # define directories self.moinmoin_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) data_dir = os.path.normpath(self.data_dir) self.data_dir = data_dir for dirname in ('user', 'cache', 'plugin'): name = dirname + '_dir' if not getattr(self, name, None): setattr(self, name, os.path.abspath(os.path.join(data_dir, dirname))) # directories below cache_dir (using __dirname__ to avoid conflicts) for dirname in ('session', ): name = dirname + '_dir' if not getattr(self, name, None): setattr(self, name, os.path.abspath(os.path.join(self.cache_dir, '__%s__' % dirname))) # Try to decode certain names which allow unicode self._decode() # After that, pre-compile some regexes self.cache.page_category_regex = re.compile(self.page_category_regex, re.UNICODE) self.cache.page_dict_regex = re.compile(self.page_dict_regex, re.UNICODE) self.cache.page_group_regex = re.compile(self.page_group_regex, re.UNICODE) self.cache.page_template_regex = re.compile(self.page_template_regex, re.UNICODE) # the ..._regexact versions only match if nothing is left (exact match) self.cache.page_category_regexact = re.compile(u'^%s$' % self.page_category_regex, re.UNICODE) self.cache.page_dict_regexact = re.compile(u'^%s$' % self.page_dict_regex, re.UNICODE) self.cache.page_group_regexact = re.compile(u'^%s$' % self.page_group_regex, re.UNICODE) self.cache.page_template_regexact = re.compile(u'^%s$' % self.page_template_regex, re.UNICODE) self.cache.ua_spiders = self.ua_spiders and re.compile(self.ua_spiders, re.IGNORECASE) self._check_directories() if not isinstance(self.superuser, list): msg = """The superuser setting in your wiki configuration is not a list (e.g. 
['Sample User', 'AnotherUser']). Please change it in your wiki configuration and try again.""" raise error.ConfigurationError(msg) if not isinstance(self.actions_superuser, list): msg = """The actions_superuser setting in your wiki configuration is not a list (e.g. ['newaccount', 'some_other_action']). Please change it in your wiki configuration and try again.""" raise error.ConfigurationError(msg) # moin < 1.9 used cookie_lifetime = <float> (but converted it to int) for logged-in users and # anonymous_session_lifetime = <float> or None for anon users # moin >= 1.9 uses cookie_lifetime = (<float>, <float>) - first is anon, second is logged-in if not (isinstance(self.cookie_lifetime, tuple) and len(self.cookie_lifetime) == 2): logging.error("wiki configuration has an invalid setting: " + "cookie_lifetime = %r" % (self.cookie_lifetime, )) try: anon_lifetime = self.anonymous_session_lifetime logging.warning("wiki configuration has an unsupported setting: " + "anonymous_session_lifetime = %r - " % anon_lifetime + "please remove it.") if anon_lifetime is None: anon_lifetime = 0 anon_lifetime = float(anon_lifetime) except: # if anything goes wrong, use default value anon_lifetime = 0 try: logged_in_lifetime = int(self.cookie_lifetime) except: # if anything goes wrong, use default value logged_in_lifetime = 12 self.cookie_lifetime = (anon_lifetime, logged_in_lifetime) logging.warning("using cookie_lifetime = %r - " % (self.cookie_lifetime, ) + "please fix your wiki configuration.") self._loadPluginModule() # Preparse user dicts self._fillDicts() # Normalize values self.language_default = self.language_default.lower() # Use site name as default name-logo if self.logo_string is None: self.logo_string = self.sitename # Check for needed modules # FIXME: maybe we should do this check later, just before a # chart is needed, maybe in the chart module, instead doing it # for each request. But this require a large refactoring of # current code. 
if self.chart_options: try: import gdchart except ImportError: self.chart_options = None # 'setuid' special auth method auth method can log out self.auth_can_logout = ['setuid'] self.auth_login_inputs = [] found_names = [] for auth in self.auth: if not auth.name: raise error.ConfigurationError("Auth methods must have a name.") if auth.name in found_names: raise error.ConfigurationError("Auth method names must be unique.") found_names.append(auth.name) if auth.logout_possible and auth.name: self.auth_can_logout.append(auth.name) for input in auth.login_inputs: if not input in self.auth_login_inputs: self.auth_login_inputs.append(input) self.auth_have_login = len(self.auth_login_inputs) > 0 self.auth_methods = found_names # internal dict for plugin `modules' lists self._site_plugin_lists = {} # we replace any string placeholders with config values # e.g u'%(page_front_page)s' % self self.navi_bar = [elem % self for elem in self.navi_bar] # check if python-xapian is installed if self.xapian_search: try: import xapian except ImportError, err: self.xapian_search = False logging.error("xapian_search was auto-disabled because python-xapian is not installed [%s]." 
% str(err)) # list to cache xapian searcher objects self.xapian_searchers = [] # check if mail is possible and set flag: self.mail_enabled = (self.mail_smarthost is not None or self.mail_sendmail is not None) and self.mail_from self.mail_enabled = self.mail_enabled and True or False # check if jabber bot is available and set flag: self.jabber_enabled = self.notification_bot_uri is not None # if we are to use the jabber bot, instantiate a server object for future use if self.jabber_enabled: from xmlrpclib import Server self.notification_server = Server(self.notification_bot_uri, ) # Cache variables for the properties below self._iwid = self._iwid_full = self._meta_dict = None self.cache.acl_rights_before = AccessControlList(self, [self.acl_rights_before]) self.cache.acl_rights_default = AccessControlList(self, [self.acl_rights_default]) self.cache.acl_rights_after = AccessControlList(self, [self.acl_rights_after]) action_prefix = self.url_prefix_action if action_prefix is not None and action_prefix.endswith('/'): # make sure there is no trailing '/' self.url_prefix_action = action_prefix[:-1] if self.url_prefix_local is None: self.url_prefix_local = self.url_prefix_static if self.url_prefix_fckeditor is None: self.url_prefix_fckeditor = self.url_prefix_local + '/applets/FCKeditor' if self.secrets is None: # admin did not setup a real secret, so make up something self.secrets = self.calc_secrets() secret_key_names = ['action/cache', 'wikiutil/tickets', 'xmlrpc/ProcessMail', 'xmlrpc/RemoteScript', ] if self.jabber_enabled: secret_key_names.append('jabberbot') if self.textchas: secret_key_names.append('security/textcha') secret_min_length = 10 if isinstance(self.secrets, str): if len(self.secrets) < secret_min_length: raise error.ConfigurationError("The secrets = '...' wiki config setting is a way too short string (minimum length is %d chars)!" 
% ( secret_min_length)) # for lazy people: set all required secrets to same value secrets = {} for key in secret_key_names: secrets[key] = self.secrets self.secrets = secrets # we check if we have all secrets we need and that they have minimum length for secret_key_name in secret_key_names: try: secret = self.secrets[secret_key_name] if len(secret) < secret_min_length: raise ValueError except (KeyError, ValueError): raise error.ConfigurationError("You must set a (at least %d chars long) secret string for secrets['%s']!" % ( secret_min_length, secret_key_name)) if self.password_scheme not in config.password_schemes_configurable: raise error.ConfigurationError("not supported: password_scheme = %r" % self.password_scheme) if self.passlib_support: try: from passlib.context import CryptContext except ImportError, err: raise error.ConfigurationError("Wiki is configured to use passlib, but importing passlib failed [%s]!" % str(err)) try: self.cache.pwd_context = CryptContext(**self.passlib_crypt_context) except (ValueError, KeyError, TypeError, UserWarning), err: # ValueError: wrong configuration values # KeyError: unsupported hash (seen with passlib 1.3) # TypeError: configuration value has wrong type raise error.ConfigurationError("passlib_crypt_context configuration is invalid [%s]." 
% str(err)) elif self.password_scheme == '{PASSLIB}': raise error.ConfigurationError("passlib_support is switched off, thus you can't use password_scheme = '{PASSLIB}'.") def calc_secrets(self): """ make up some 'secret' using some config values """ varnames = ['data_dir', 'data_underlay_dir', 'language_default', 'mail_smarthost', 'mail_from', 'page_front_page', 'theme_default', 'sitename', 'logo_string', 'interwikiname', 'user_homewiki', 'acl_rights_before', ] secret = '' for varname in varnames: var = getattr(self, varname, None) if isinstance(var, (str, unicode)): secret += repr(var) return secret _meta_dict = None def load_meta_dict(self): """ The meta_dict contains meta data about the wiki instance. """ if self._meta_dict is None: self._meta_dict = wikiutil.MetaDict(os.path.join(self.data_dir, 'meta'), self.cache_dir) return self._meta_dict meta_dict = property(load_meta_dict) # lazily load iwid(_full) def make_iwid_property(attr): def getter(self): if getattr(self, attr, None) is None: self.load_IWID() return getattr(self, attr) return property(getter) iwid = make_iwid_property("_iwid") iwid_full = make_iwid_property("_iwid_full") # lazily create a list of event handlers _event_handlers = None def make_event_handlers_prop(): def getter(self): if self._event_handlers is None: self._event_handlers = events.get_handlers(self) return self._event_handlers def setter(self, new_handlers): self._event_handlers = new_handlers return property(getter, setter) event_handlers = make_event_handlers_prop() def load_IWID(self): """ Loads the InterWikiID of this instance. It is used to identify the instance globally. The IWID is available as cfg.iwid The full IWID containing the interwiki name is available as cfg.iwid_full This method is called by the property. 
""" try: iwid = self.meta_dict['IWID'] except KeyError: iwid = util.random_string(16).encode("hex") + "-" + str(int(time.time())) self.meta_dict['IWID'] = iwid self.meta_dict.sync() self._iwid = iwid if self.interwikiname is not None: self._iwid_full = packLine([iwid, self.interwikiname]) else: self._iwid_full = packLine([iwid]) def _config_check(self): """ Check namespace and warn about unknown names Warn about names which are not used by DefaultConfig, except modules, classes, _private or __magic__ names. This check is disabled by default, when enabled, it will show an error message with unknown names. """ unknown = ['"%s"' % name for name in dir(self) if not name.startswith('_') and name not in DefaultConfig.__dict__ and not isinstance(getattr(self, name), (type(sys), type(DefaultConfig)))] if unknown: msg = """ Unknown configuration options: %s. For more information, visit HelpOnConfiguration. Please check your configuration for typos before requesting support or reporting a bug. """ % ', '.join(unknown) raise error.ConfigurationError(msg) def _decode(self): """ Try to decode certain names, ignore unicode values Try to decode str using utf-8. If the decode fail, raise FatalError. Certain config variables should contain unicode values, and should be defined with u'text' syntax. Python decode these if the file have a 'coding' line. This will allow utf-8 users to use simple strings using, without using u'string'. Other users will have to use u'string' for these names, because we don't know what is the charset of the config files. """ charset = 'utf-8' message = u""" "%(name)s" configuration variable is a string, but should be unicode. Use %(name)s = u"value" syntax for unicode variables. Also check your "-*- coding -*-" line at the top of your configuration file. It should match the actual charset of the configuration file. 
""" decode_names = ( 'sitename', 'interwikiname', 'user_homewiki', 'logo_string', 'navi_bar', 'page_front_page', 'page_category_regex', 'page_dict_regex', 'page_group_regex', 'page_template_regex', 'page_license_page', 'page_local_spelling_words', 'acl_rights_default', 'acl_rights_before', 'acl_rights_after', 'mail_from', 'quicklinks_default', 'subscribed_pages_default', ) for name in decode_names: attr = getattr(self, name, None) if attr: # Try to decode strings if isinstance(attr, str): try: setattr(self, name, unicode(attr, charset)) except UnicodeError: raise error.ConfigurationError(message % {'name': name}) # Look into lists and try to decode strings inside them elif isinstance(attr, list): for i in xrange(len(attr)): item = attr[i] if isinstance(item, str): try: attr[i] = unicode(item, charset) except UnicodeError: raise error.ConfigurationError(message % {'name': name}) def _check_directories(self): """ Make sure directories are accessible Both data and underlay should exists and allow read, write and execute. """ mode = os.F_OK | os.R_OK | os.W_OK | os.X_OK for attr in ('data_dir', 'data_underlay_dir'): path = getattr(self, attr) # allow an empty underlay path or None if attr == 'data_underlay_dir' and not path: continue path_pages = os.path.join(path, "pages") if not (os.path.isdir(path_pages) and os.access(path_pages, mode)): msg = """ %(attr)s "%(path)s" does not exist, or has incorrect ownership or permissions. Make sure the directory and the subdirectory "pages" are owned by the web server and are readable, writable and executable by the web server user and group. It is recommended to use absolute paths and not relative paths. Check also the spelling of the directory name. """ % {'attr': attr, 'path': path, } raise error.ConfigurationError(msg) def _loadPluginModule(self): """ import all plugin modules To be able to import plugin from arbitrary path, we have to load the base package once using imp.load_module. 
Later, we can use standard __import__ call to load plugins in this package. Since each configured plugin path has unique plugins, we load the plugin packages as "moin_plugin_<sha1(path)>.plugin". """ import imp plugin_dirs = [self.plugin_dir] + self.plugin_dirs self._plugin_modules = [] try: # Lock other threads while we check and import imp.acquire_lock() try: for pdir in plugin_dirs: csum = 'p_%s' % hashlib.new('sha1', pdir).hexdigest() modname = '%s.%s' % (self.siteid, csum) # If the module is not loaded, try to load it if not modname in sys.modules: # Find module on disk and try to load - slow! abspath = os.path.abspath(pdir) parent_dir, pname = os.path.split(abspath) fp, path, info = imp.find_module(pname, [parent_dir]) try: # Load the module and set in sys.modules module = imp.load_module(modname, fp, path, info) setattr(sys.modules[self.siteid], 'csum', module) finally: # Make sure fp is closed properly if fp: fp.close() if modname not in self._plugin_modules: self._plugin_modules.append(modname) finally: imp.release_lock() except ImportError, err: msg = """ Could not import plugin package "%(path)s" because of ImportError: %(err)s. Make sure your data directory path is correct, check permissions, and that the data/plugin directory has an __init__.py file. """ % { 'path': pdir, 'err': str(err), } raise error.ConfigurationError(msg) def _fillDicts(self): """ fill config dicts Fills in missing dict keys of derived user config by copying them from this base class. """ # user checkbox defaults for key, value in DefaultConfig.user_checkbox_defaults.items(): if key not in self.user_checkbox_defaults: self.user_checkbox_defaults[key] = value def __getitem__(self, item): """ Make it possible to access a config object like a dict """ return getattr(self, item) class DefaultConfig(ConfigFunctionality): """ Configuration base class with default config values (added below) """ # Do not add anything into this class. 
Functionality must # be added above to avoid having the methods show up in # the WikiConfig macro. Settings must be added below to # the options dictionary. _default_backlink_method = lambda cfg, req: 'backlink' if req.user.valid else 'pagelink' def _default_password_checker(cfg, request, username, password, min_length=6, min_different=4): """ Check if a password is secure enough. We use a built-in check to get rid of the worst passwords. We do NOT use cracklib / python-crack here any more because it is not thread-safe (we experienced segmentation faults when using it). If you don't want to check passwords, use password_checker = None. @return: None if there is no problem with the password, some unicode object with an error msg, if the password is problematic. """ _ = request.getText # in any case, do a very simple built-in check to avoid the worst passwords if len(password) < min_length: return _("Password is too short.") if len(set(password)) < min_different: return _("Password has not enough different characters.") username_lower = username.lower() password_lower = password.lower() if username in password or password in username or \ username_lower in password_lower or password_lower in username_lower: return _("Password is too easy (password contains name or name contains password).") keyboards = (ur"`1234567890-=qwertyuiop[]\asdfghjkl;'zxcvbnm,./", # US kbd ur"^1234567890ߴqwertzuiop�+asdfghjkl��#yxcvbnm,.-", # german kbd ) # add more keyboards! for kbd in keyboards: rev_kbd = kbd[::-1] if password in kbd or password in rev_kbd or \ password_lower in kbd or password_lower in rev_kbd: return _("Password is too easy (keyboard sequence).") return None class DefaultExpression(object): def __init__(self, exprstr): self.text = exprstr self.value = eval(exprstr) # # Options that are not prefixed automatically with their # group name, see below (at the options dict) for more # information on the layout of this structure. 
#
# Structure: {'optname': (heading, help_text, ((name, default, help), ...)), ...}
# Unlike the 'options' dict below, option names here are NOT prefixed
# with their group name.
options_no_group_name = {
    # =========================================================================
    'attachment_extension': ("Mapping of attachment extensions to actions", None,
        (
            ('extensions_mapping',
             {'.tdraw': {'modify': 'twikidraw'},
              '.adraw': {'modify': 'anywikidraw'},
             }, "file extension -> do -> action"),
        )),
    # ==========================================================================
    'datastruct': ('Datastruct settings', None, (
        ('dicts', lambda cfg, request: datastruct.WikiDicts(request),
         "function f(cfg, request) that returns a backend which is used to access dicts definitions."),
        ('groups', lambda cfg, request: datastruct.WikiGroups(request),
         "function f(cfg, request) that returns a backend which is used to access groups definitions."),
    )),
    # ==========================================================================
    'session': ('Session settings', "Session-related settings, see HelpOnSessions.", (
        ('session_service', DefaultExpression('web.session.FileSessionService()'),
         "The session service."),
        ('cookie_name', None,
         'The variable part of the session cookie name. (None = determine from URL, siteidmagic = use siteid, any other string = use that)'),
        ('cookie_secure', None,
         'Use secure cookie. (None = auto-enable secure cookie for https, True = ever use secure cookie, False = never use secure cookie).'),
        ('cookie_httponly', False,
         'Use a httponly cookie that can only be used by the server, not by clientside scripts.'),
        ('cookie_domain', None,
         'Domain used in the session cookie. (None = do not specify domain).'),
        ('cookie_path', None,
         'Path used in the session cookie (None = auto-detect). Please only set if you know exactly what you are doing.'),
        ('cookie_lifetime', (0, 12),
         'Session lifetime [h] of (anonymous, logged-in) users (see HelpOnSessions for details).'),
    )),
    # ==========================================================================
    'auth': ('Authentication / Authorization / Security settings', None, (
        ('superuser', [],
         "List of trusted user names with wiki system administration super powers (not to be confused with ACL admin rights!). Used for e.g. software installation, language installation via SystemPagesSetup and more. See also HelpOnSuperUser."),
        ('auth', DefaultExpression('[MoinAuth()]'),
         "list of auth objects, to be called in this order (see HelpOnAuthentication)"),
        ('auth_methods_trusted', ['http', 'given', 'xmlrpc_applytoken'], # Note: 'http' auth method is currently just a redirect to 'given'
         'authentication methods for which users should be included in the special "Trusted" ACL group.'),
        ('secrets', None,
         """Either a long shared secret string used for multiple purposes or a dict {"purpose": "longsecretstring", ...} for setting up different shared secrets for different purposes. If you don't setup own secret(s), a secret string will be auto-generated from other config settings."""),
        ('DesktopEdition', False,
         "if True, give all local users special powers - ''only use this for a local desktop wiki!''"),
        ('SecurityPolicy', None,
         "Class object hook for implementing security restrictions or relaxations"),
        ('actions_superuser',
         ['newaccount', # spam bots create tons of user accounts, so better allow it only for superuser
         ],
         "Restrict actions to superuser only (list of strings)"),
        ('actions_excluded',
         ['xmlrpc', # we do not want wiki admins unknowingly offering xmlrpc service
          'MyPages', # only works when used with a non-default SecurityPolicy (e.g. autoadmin)
          'CopyPage', # has questionable behaviour regarding subpages a user can't read, but can copy
         ],
         "Exclude unwanted actions (list of strings)"),
        ('allow_xslt', False,
         "if True, enables XSLT processing via 4Suite (Note that this is DANGEROUS. It enables anyone who can edit the wiki to get '''read/write access to your filesystem as the moin process uid/gid''' and to insert '''arbitrary HTML''' into your wiki pages, which is why this setting defaults to `False` (XSLT disabled). Do not set it to other values, except if you know what you do and if you have very trusted editors only)."),
        ('password_checker', DefaultExpression('_default_password_checker'),
         'checks whether a password is acceptable (default check is length >= 6, at least 4 different chars, no keyboard sequence, not username used somehow (you can switch this off by using `None`)'),
        ('password_scheme', '{PASSLIB}',
         'Either "{PASSLIB}" (default) to use passlib for creating and upgrading password hashes (see also passlib_crypt_context for passlib configuration), '
         'or "{SSHA}" (or any other of the builtin password schemes) to not use passlib (not recommended).'),
        ('passlib_support', True,
         'If True (default), import passlib and support password hashes offered by it.'),
        ('passlib_crypt_context', dict(
            # schemes we want to support (or deprecated schemes for which we still have
            # hashes in our storage).
            # note: bcrypt: we did not include it as it needs additional code (that is not pure python
            # and thus either needs compiling or installing platform-specific binaries) and
            # also there was some bcrypt issue in passlib < 1.5.3.
            # pbkdf2_sha512: not included as it needs at least passlib 1.4.0
            # sha512_crypt: supported since passlib 1.3.0 (first public release)
            schemes=["sha512_crypt", ],
            # default scheme for creating new pw hashes (if not given, passlib uses first from schemes)
            #default="sha512_crypt",
            # deprecated schemes get auto-upgraded to the default scheme at login
            # time or when setting a password (including doing a moin account pwreset).
            # for passlib >= 1.6, giving ["auto"] means that all schemes except the default are deprecated:
            #deprecated=["auto"],
            # to support also older passlib versions, rather give a explicit list:
            #deprecated=[],
            # vary rounds parameter randomly when creating new hashes...
            #all__vary_rounds=0.1,
            ),
         "passlib CryptContext arguments, see passlib docs"),
        ('recovery_token_lifetime', 12,
         'how long the password recovery token is valid [h]'),
    )),
    # ==========================================================================
    'spam_leech_dos': ('Anti-Spam/Leech/DOS',
    'These settings help limiting ressource usage and avoiding abuse.',
    (
        ('hosts_deny', [],
         "List of denied IPs; if an IP ends with a dot, it denies a whole subnet (class A, B or C)"),
        ('surge_action_limits',
         {# allow max. <count> <action> requests per <dt> secs
          # action: (count, dt)
          'all': (30, 30), # all requests (except cache/AttachFile action) count for this limit
          'default': (30, 60), # default limit for actions without a specific limit
          'show': (30, 60),
          'recall': (10, 120),
          'raw': (20, 40), # some people use this for css
          'diff': (30, 60),
          'fullsearch': (10, 120),
          'edit': (30, 300), # can be lowered after making preview different from edit
          'rss_rc': (1, 60),
          # The following actions are often used for images - to avoid pages with lots of images
          # (like photo galleries) triggering surge protection, we assign rather high limits:
          'AttachFile': (300, 30),
          'cache': (600, 30), # cache action is very cheap/efficient
          # special stuff to prevent someone trying lots of usernames / passwords to log in.
          # we keep this commented / disabled so that this feature does not get activated by default
          # (if somebody does not override surge_action_limits with own values):
          #'auth-ip': (10, 3600), # same remote ip (any name)
          #'auth-name': (10, 3600), # same name (any remote ip)
         },
         "Surge protection tries to deny clients causing too much load/traffic, see HelpOnConfiguration/SurgeProtection."),
        ('surge_lockout_time', 3600,
         "time [s] someone gets locked out when ignoring the warnings"),
        ('textchas', None,
         "Spam protection setup using site-specific questions/answers, see HelpOnSpam."),
        ('textchas_disabled_group', None,
         "Name of a group of trusted users who do not get asked !TextCha questions."),
        ('textchas_expiry_time', 600,
         "Time [s] for a !TextCha to expire."),
        ('antispam_master_url', "http://master.moinmo.in/?action=xmlrpc2",
         "where antispam security policy fetches spam pattern updates (if it is enabled)"),
        # a regex of HTTP_USER_AGENTS that should be excluded from logging
        # and receive a FORBIDDEN for anything except viewing a page
        # list must not contain 'java' because of twikidraw wanting to save drawing uses this useragent
        ('ua_spiders',
         ('archiver|bingbot|cfetch|charlotte|crawler|gigabot|googlebot|heritrix|holmes|htdig|httrack|httpunit|'
          'intelix|jeeves|larbin|leech|libwww-perl|linkbot|linkmap|linkwalk|litefinder|mercator|'
          'microsoft.url.control|mirror| mj12bot|msnbot|msrbot|neomo|nutbot|omniexplorer|puf|robot|scooter|seekbot|'
          'sherlock|slurp|sitecheck|snoopy|spider|teleport|twiceler|voilabot|voyager|webreaper|wget|yeti'),
         "A regex of HTTP_USER_AGENTs that should be excluded from logging and are not allowed to use actions."),
        ('unzip_single_file_size', 2.0 * 1000 ** 2,
         "max. size of a single file in the archive which will be extracted [bytes]"),
        ('unzip_attachments_space', 200.0 * 1000 ** 2,
         "max. total amount of bytes can be used to unzip files [bytes]"),
        ('unzip_attachments_count', 101,
         "max. number of files which are extracted from the zip file"),
    )),
    # ==========================================================================
    'style': ('Style / Theme / UI related',
    'These settings control how the wiki user interface will look like.',
    (
        ('sitename', u'Untitled Wiki',
         "Short description of your wiki site, displayed below the logo on each page, and used in RSS documents as the channel title [Unicode]"),
        ('interwikiname', None,
         "unique and stable InterWiki name (prefix, moniker) of the site [Unicode], or None"),
        ('logo_string', None,
         "The wiki logo top of page, HTML is allowed (`<img>` is possible as well) [Unicode]"),
        ('html_pagetitle', None,
         "Allows you to set a specific HTML page title (if None, it defaults to the value of `sitename`)"),
        ('navi_bar', [u'RecentChanges', u'FindPage', u'HelpContents', ],
         'Most important page names. Users can add more names in their quick links in user preferences. To link to URL, use `u"[[url|link title]]"`, to use a shortened name for long page name, use `u"[[LongLongPageName|title]]"`. [list of Unicode strings]'),
        ('theme_default', 'modernized',
         "the name of the theme that is used by default (see HelpOnThemes)"),
        ('theme_force', False,
         "if True, do not allow to change the theme"),
        ('stylesheets', [],
         "List of tuples (media, csshref) to insert after theme css, before user css, see HelpOnThemes."),
        ('supplementation_page', False,
         "if True, show a link to the supplementation page in the theme"),
        ('supplementation_page_name', u'Discussion',
         "default name of the supplementation (sub)page [unicode]"),
        ('supplementation_page_template', u'DiscussionTemplate',
         "default template used for creation of the supplementation page [unicode]"),
        ('interwiki_preferred', [],
         "In dialogues, show those wikis at the top of the list."),
        ('sistersites', [],
         "list of tuples `('WikiName', 'sisterpagelist_fetch_url')`"),
        ('trail_size', 5,
         "Number of pages in the trail of visited pages"),
        ('page_footer1', '',
         "Custom HTML markup sent ''before'' the system footer."),
        ('page_footer2', '',
         "Custom HTML markup sent ''after'' the system footer."),
        ('page_header1', '',
         "Custom HTML markup sent ''before'' the system header / title area but after the body tag."),
        ('page_header2', '',
         "Custom HTML markup sent ''after'' the system header / title area (and body tag)."),
        ('changed_time_fmt', '%H:%M',
         "Time format used on Recent``Changes for page edits within the last 24 hours"),
        ('date_fmt', '%Y-%m-%d',
         "System date format, used mostly in Recent``Changes"),
        ('datetime_fmt', '%Y-%m-%d %H:%M:%S',
         'Default format for dates and times (when the user has no preferences or chose the "default" date format)'),
        ('chart_options', None,
         "If you have gdchart, use something like chart_options = {'width': 720, 'height': 540}"),
        ('edit_bar', ['Edit', 'Comments', 'Discussion', 'Info', 'Subscribe', 'Quicklink', 'Attachments', 'ActionsMenu'],
         'list of edit bar entries'),
        ('history_count', (100, 200, 5, 10, 25, 50),
         "Number of revisions shown for info/history action (default_count_shown, max_count_shown, [other values shown as page size choices]). At least first two values (default and maximum) should be provided. If additional values are provided, user will be able to change number of items per page in the UI."),
        ('history_paging', True,
         "Enable paging functionality for info action's history display."),
        ('show_hosts', True,
         "if True, show host names and IPs. Set to False to hide them."),
        ('show_interwiki', False,
         "if True, let the theme display your interwiki name"),
        ('show_names', True,
         "if True, show user names in the revision history and on Recent``Changes. Set to False to hide them."),
        ('show_section_numbers', False,
         'show section numbers in headings by default'),
        ('show_timings', False, "show some timing values at bottom of a page"),
        ('show_version', False, "show moin's version at the bottom of a page"),
        ('show_rename_redirect', False, "if True, offer creation of redirect pages when renaming wiki pages"),
        ('backlink_method', DefaultExpression('_default_backlink_method'),
         "function determining how the (last part of the) pagename should be rendered in the title area"),
        ('packagepages_actions_excluded',
         ['setthemename', # related to questionable theme stuff, see below
          'copythemefile', # maybe does not work, e.g. if no fs write permissions or real theme file path is unknown to moin
          'installplugin', # code installation, potentially dangerous
          'renamepage', # dangerous with hierarchical acls
          'deletepage', # dangerous with hierarchical acls
          'delattachment', # dangerous, no revisioning
         ],
         'list with excluded package actions (e.g. because they are dangerous / questionable)'),
        ('page_credits',
         [
          '<a href="http://moinmo.in/" title="This site uses the MoinMoin Wiki software.">MoinMoin Powered</a>',
          '<a href="http://moinmo.in/Python" title="MoinMoin is written in Python.">Python Powered</a>',
          '<a href="http://moinmo.in/GPL" title="MoinMoin is GPL licensed.">GPL licensed</a>',
          '<a href="http://validator.w3.org/check?uri=referer" title="Click here to validate this page.">Valid HTML 4.01</a>',
         ],
         'list with html fragments with logos or strings for crediting.'),
        # These icons will show in this order in the iconbar, unless they
        # are not relevant, e.g email icon when the wiki is not configured
        # for email.
        ('page_iconbar', ["up", "edit", "view", "diff", "info", "subscribe", "raw", "print", ],
         'list of icons to show in iconbar, valid values are only those in page_icons_table. Available only in classic theme.'),
        # Standard buttons in the iconbar
        ('page_icons_table',
         {
          # key pagekey, querystr dict, title, icon-key
          'diff': ('page', {'action': 'diff'}, _("Diffs"), "diff"),
          'info': ('page', {'action': 'info'}, _("Info"), "info"),
          'edit': ('page', {'action': 'edit'}, _("Edit"), "edit"),
          'unsubscribe': ('page', {'action': 'unsubscribe'}, _("UnSubscribe"), "unsubscribe"),
          'subscribe': ('page', {'action': 'subscribe'}, _("Subscribe"), "subscribe"),
          'raw': ('page', {'action': 'raw'}, _("Raw"), "raw"),
          'xml': ('page', {'action': 'show', 'mimetype': 'text/xml'}, _("XML"), "xml"),
          'print': ('page', {'action': 'print'}, _("Print"), "print"),
          'view': ('page', {}, _("View"), "view"),
          'up': ('page_parent_page', {}, _("Up"), "up"),
         },
         "dict of {'iconname': (url, title, icon-img-key), ...}. Available only in classic theme."),
        ('show_highlight_msg', False, "Show message that page has highlighted text "
                                      "and provide link to non-highlighted "
                                      "version."),
    )),
    # ==========================================================================
    'editor': ('Editor related', None, (
        ('editor_default', 'text', "Editor to use by default, 'text' or 'gui'"),
        ('editor_force', True, "if True, force using the default editor"),
        ('editor_ui', 'theonepreferred', "Editor choice shown on the user interface, 'freechoice' or 'theonepreferred'"),
        ('page_license_enabled', False, 'if True, show a license hint in page editor.'),
        ('page_license_page', u'WikiLicense', 'Page linked from the license hint. [Unicode]'),
        ('edit_locking', 'warn 10', "Editor locking policy: `None`, `'warn <timeout in minutes>'`, or `'lock <timeout in minutes>'`"),
        ('edit_ticketing', True, None),
        ('edit_rows', 20, "Default height of the edit box"),
        ('comment_required', False, "if True, only allow saving if a comment is filled in"),
    )),
    # ==========================================================================
    'paths': ('Paths', None, (
        ('data_dir', './data/', "Path to the data directory containing your (locally made) wiki pages."),
        ('data_underlay_dir', './underlay/', "Path to the underlay directory containing distribution system and help pages."),
        ('cache_dir', None, "Directory for caching, by default computed from `data_dir`/cache."),
        ('session_dir', None, "Directory for session storage, by default computed to be `cache_dir`/__session__."),
        ('user_dir', None, "Directory for user storage, by default computed to be `data_dir`/user."),
        ('plugin_dir', None, "Plugin directory, by default computed to be `data_dir`/plugin."),
        ('plugin_dirs', [], "Additional plugin directories."),
        ('docbook_html_dir', r"/usr/share/xml/docbook/stylesheet/nwalsh/html/",
         'Path to the directory with the Docbook to HTML XSLT files (optional, used by the docbook parser). The default value is correct for Debian Etch.'),
        ('shared_intermap', None,
         "Path to a file containing global InterWiki definitions (or a list of such filenames)"),
    )),
    # ==========================================================================
    'urls': ('URLs', None, (
        # includes the moin version number, so we can have a unlimited cache lifetime
        # for the static stuff. if stuff changes on version upgrade, url will change
        # immediately and we have no problem with stale caches.
        ('url_prefix_static', config.url_prefix_static,
         "used as the base URL for icons, css, etc. - includes the moin version number and changes on every release. This replaces the deprecated and sometimes confusing `url_prefix = '/wiki'` setting."),
        ('url_prefix_local', None,
         "used as the base URL for some Javascript - set this to a URL on same server as the wiki if your url_prefix_static points to a different server."),
        ('url_prefix_fckeditor', None,
         "used as the base URL for FCKeditor - similar to url_prefix_local, but just for FCKeditor."),
        ('url_prefix_action', None,
         "Use 'action' to enable action URL generation to be compatible with robots.txt. It will generate .../action/info/PageName?action=info then. Recommended for internet wikis."),
        ('notification_bot_uri', None, "URI of the Jabber notification bot."),
        ('url_mappings', {},
         "lookup table to remap URL prefixes (dict of {{{'prefix': 'replacement'}}}); especially useful in intranets, when whole trees of externally hosted documents move around"),
    )),
    # ==========================================================================
    'pages': ('Special page names', None, (
        ('page_front_page', u'LanguageSetup',
         "Name of the front page. We don't expect you to keep the default. Just read LanguageSetup in case you're wondering... [Unicode]"),
        # the following regexes should match the complete name when used in free text
        # the group 'all' shall match all, while the group 'key' shall match the key only
        # e.g. CategoryFoo -> group 'all' == CategoryFoo, group 'key' == Foo
        # moin's code will add ^ / $ at beginning / end when needed
        ('page_category_regex', ur'(?P<all>Category(?P<key>(?!Template)\S+))',
         'Pagenames exactly matching this regex are regarded as Wiki categories [Unicode]'),
        ('page_dict_regex', ur'(?P<all>(?P<key>\S+)Dict)',
         'Pagenames exactly matching this regex are regarded as pages containing variable dictionary definitions [Unicode]'),
        ('page_group_regex', ur'(?P<all>(?P<key>\S+)Group)',
         'Pagenames exactly matching this regex are regarded as pages containing group definitions [Unicode]'),
        ('page_template_regex', ur'(?P<all>(?P<key>\S+)Template)',
         'Pagenames exactly matching this regex are regarded as pages containing templates for new pages [Unicode]'),
        ('page_local_spelling_words', u'LocalSpellingWords',
         'Name of the page containing user-provided spellchecker words [Unicode]'),
    )),
    # ==========================================================================
    'user': ('User Preferences related', None, (
        ('quicklinks_default', [],
         'List of preset quicklinks for a newly created user accounts. Existing accounts are not affected by this option whereas changes in navi_bar do always affect existing accounts. Preset quicklinks can be removed by the user in the user preferences menu, navi_bar settings not.'),
        ('subscribed_pages_default', [],
         "List of pagenames used for presetting page subscriptions for newly created user accounts."),
        ('email_subscribed_events_default',
         [
            PageChangedEvent.__name__,
            PageRenamedEvent.__name__,
            PageDeletedEvent.__name__,
            PageCopiedEvent.__name__,
            PageRevertedEvent.__name__,
            FileAttachedEvent.__name__,
         ], None),
        ('jabber_subscribed_events_default', [], None),
        ('tz_offset', 0.0,
         "default time zone offset in hours from UTC"),
        ('userprefs_disabled', [],
         "Disable the listed user preferences plugins."),
    )),
    # ==========================================================================
    'various': ('Various', None, (
        ('bang_meta', True, 'if True, enable {{{!NoWikiName}}} markup'),
        ('caching_formats', ['text_html'], "output formats that are cached; set to [] to turn off caching (useful for development)"),
        ('config_check_enabled', False, "if True, check configuration for unknown settings."),
        ('default_markup', 'wiki', 'Default page parser / format (name of module in `MoinMoin.parser`)'),
        ('html_head', '', "Additional <HEAD> tags, see HelpOnThemes."),
        ('html_head_queries', '<meta name="robots" content="noindex,nofollow">\n',
         "Additional <HEAD> tags for requests with query strings, like actions."),
        ('html_head_posts', '<meta name="robots" content="noindex,nofollow">\n',
         "Additional <HEAD> tags for POST requests."),
        ('html_head_index', '<meta name="robots" content="index,follow">\n',
         "Additional <HEAD> tags for some few index pages."),
        ('html_head_normal', '<meta name="robots" content="index,nofollow">\n',
         "Additional <HEAD> tags for most normal pages."),
        ('language_default', 'en', "Default language for user interface and page content, see HelpOnLanguages."),
        ('language_ignore_browser', False, "if True, ignore user's browser language settings, see HelpOnLanguages."),
        ('log_remote_addr', True,
         "if True, log the remote IP address (and maybe hostname)."),
        ('log_reverse_dns_lookups', False,
         "if True, do a reverse DNS lookup on page SAVE."),
        ('log_timing', False,
         "if True, add timing infos to the log output to analyse load conditions"),
        ('log_events_format', 1,
         "0 = no events logging, 1 = standard format (like <= 1.9.7) [default], 2 = extended format"),
        # some dangerous mimetypes (we don't use "content-disposition: inline" for them when a user
        # downloads such attachments, because the browser might execute e.g. Javascript contained
        # in the HTML and steal your moin session cookie or do other nasty stuff)
        ('mimetypes_xss_protect',
         [
          'text/html',
          'image/svg+xml',
          'application/x-shockwave-flash',
          'application/xhtml+xml',
         ],
         '"content-disposition: inline" isn\'t used for them when a user downloads such attachments'),
        ('mimetypes_embed',
         [
          'application/x-dvi',
          'application/postscript',
          'application/pdf',
          'application/ogg',
          'application/vnd.visio',
          'image/x-ms-bmp',
          'image/svg+xml',
          'image/tiff',
          'image/x-photoshop',
          'audio/mpeg',
          'audio/midi',
          'audio/x-wav',
          'video/fli',
          'video/mpeg',
          'video/quicktime',
          'video/x-msvideo',
          'chemical/x-pdb',
          'x-world/x-vrml',
         ],
         'mimetypes that can be embedded by the [[HelpOnMacros/EmbedObject|EmbedObject macro]]'),
        ('refresh', None,
         "refresh = (minimum_delay_s, targets_allowed) enables use of `#refresh 5 PageName` processing instruction, targets_allowed must be either `'internal'` or `'external'`"),
        ('rss_cache', 60, "suggested caching time for Recent''''''Changes RSS, in second"),
        ('search_results_per_page', 25, "Number of hits shown per page in the search results"),
        ('siteid', 'default', None),
        ('xmlrpc_overwrite_user', True, "Overwrite authenticated user at start of xmlrpc code"),
    )),
}

#
# The 'options' dict carries default MoinMoin options. The dict is a
# Each group tuple consists of the following items: # group section heading, group help text, option list # # where each 'option list' is a tuple or list of option tuples # # each option tuple consists of # option name, default value, help text # # All the help texts will be displayed by the WikiConfigHelp() macro. # # Unlike the options_no_group_name dict, option names in this dict # are automatically prefixed with "group name '_'" (i.e. the name of # the group they are in and an underscore), e.g. the 'hierarchic' # below creates an option called "acl_hierarchic". # # If you need to add a complex default expression that results in an # object and should not be shown in the __repr__ form in WikiConfigHelp(), # you can use the DefaultExpression class, see 'auth' above for example. # # options = { 'acl': ('Access control lists', 'ACLs control who may do what, see HelpOnAccessControlLists.', ( ('hierarchic', False, 'True to use hierarchical ACLs'), ('rights_default', u"Trusted:read,write,delete,revert Known:read All:read", "ACL used if no ACL is specified on the page"), ('rights_before', u"", "ACL that is processed before the on-page/default ACL"), ('rights_after', u"", "ACL that is processed after the on-page/default ACL"), ('rights_valid', ['read', 'write', 'delete', 'revert', 'admin'], "Valid tokens for right sides of ACL entries."), )), 'xapian': ('Xapian search', "Configuration of the Xapian based indexed search, see HelpOnXapian.", ( ('search', False, "True to enable the fast, indexed search (based on the Xapian search library)"), ('index_dir', None, "Directory where the Xapian search index is stored (None = auto-configure wiki local storage)"), ('stemming', False, "True to enable Xapian word stemmer usage for indexing / searching."), ('index_history', False, "True to enable indexing of non-current page revisions."), )), 'user': ('Users / User settings', None, ( ('email_unique', True, "if True, check email addresses for uniqueness and don't accept duplicates."), 
('jid_unique', True, "if True, check Jabber IDs for uniqueness and don't accept duplicates."), ('homewiki', u'Self', "interwiki name of the wiki where the user home pages are located [Unicode] - useful if you have ''many'' users. You could even link to nonwiki \"user pages\" if the wiki username is in the target URL."), ('checkbox_fields', [ ('mailto_author', lambda _: _('Publish my email (not my wiki homepage) in author info')), ('edit_on_doubleclick', lambda _: _('Open editor on double click')), ('remember_last_visit', lambda _: _('After login, jump to last visited page')), ('show_comments', lambda _: _('Show comment sections')), ('show_nonexist_qm', lambda _: _('Show question mark for non-existing pagelinks')), ('show_page_trail', lambda _: _('Show page trail')), ('show_toolbar', lambda _: _('Show icon toolbar')), ('show_topbottom', lambda _: _('Show top/bottom links in headings')), ('show_fancy_diff', lambda _: _('Show fancy diffs')), ('wikiname_add_spaces', lambda _: _('Add spaces to displayed wiki names')), ('remember_me', lambda _: _('Remember login information')), ('disabled', lambda _: _('Disable this account forever')), # if an account is disabled, it may be used for looking up # id -> username for page info and recent changes, but it # is not usable for the user any more: ], "Describes user preferences, see HelpOnConfiguration/UserPreferences."), ('checkbox_defaults', { 'mailto_author': 0, 'edit_on_doubleclick': 1, 'remember_last_visit': 0, 'show_comments': 0, 'show_nonexist_qm': False, 'show_page_trail': 1, 'show_toolbar': 1, 'show_topbottom': 0, 'show_fancy_diff': 1, 'wikiname_add_spaces': 0, 'remember_me': 1, }, "Defaults for user preferences, see HelpOnConfiguration/UserPreferences."), ('checkbox_disable', [], "Disable user preferences, see HelpOnConfiguration/UserPreferences."), ('checkbox_remove', [], "Remove user preferences, see HelpOnConfiguration/UserPreferences."), ('form_fields', [ ('name', _('Name'), "text", "36", _("(Use 
FirstnameLastname)")), ('aliasname', _('Alias-Name'), "text", "36", ''), ('email', _('Email'), "text", "36", ''), ('jid', _('Jabber ID'), "text", "36", ''), ('css_url', _('User CSS URL'), "text", "40", _('(Leave it empty for disabling user CSS)')), ('edit_rows', _('Editor size'), "text", "3", ''), ], None), ('form_defaults', {# key: default - do NOT remove keys from here! 'name': '', 'aliasname': '', 'password': '', 'password2': '', 'email': '', 'jid': '', 'css_url': '', 'edit_rows': "20", }, None), ('form_disable', [], "list of field names used to disable user preferences form fields"), ('form_remove', [], "list of field names used to remove user preferences form fields"), ('transient_fields', ['id', 'valid', 'may', 'auth_username', 'password', 'password2', 'auth_method', 'auth_attribs', ], "User object attributes that are not persisted to permanent storage (internal use)."), )), 'openidrp': ('OpenID Relying Party', 'These settings control the built-in OpenID Relying Party (client).', ( ('allowed_op', [], "List of forced providers"), )), 'openid_server': ('OpenID Server', 'These settings control the built-in OpenID Identity Provider (server).', ( ('enabled', False, "True to enable the built-in OpenID server."), ('restricted_users_group', None, "If set to a group name, the group members are allowed to use the wiki as an OpenID provider. 
(None = allow for all users)"), ('enable_user', False, "If True, the OpenIDUser processing instruction is allowed."), )), 'mail': ('Mail settings', 'These settings control outgoing and incoming email from and to the wiki.', ( ('from', None, "Used as From: address for generated mail."), ('login', None, "'username userpass' for SMTP server authentication (None = don't use auth)."), ('smarthost', None, "Address of SMTP server to use for sending mail (None = don't use SMTP server)."), ('sendmail', None, "sendmail command to use for sending mail (None = don't use sendmail)"), ('import_subpage_template', u"$from-$date-$subject", "Create subpages using this template when importing mail."), ('import_pagename_search', ['subject', 'to', ], "Where to look for target pagename specification."), ('import_pagename_envelope', u"%s", "Use this to add some fixed prefix/postfix to the generated target pagename."), ('import_pagename_regex', r'\[\[([^\]]*)\]\]', "Regular expression used to search for target pagename specification."), ('import_wiki_addrs', [], "Target mail addresses to consider when importing mail"), ('notify_page_text', '%(intro)s%(difflink)s\n\n%(comment)s%(diff)s', "Template for putting together the pieces for the page changed/deleted/renamed notification mail text body"), ('notify_page_changed_subject', _('[%(sitename)s] %(trivial)sUpdate of "%(pagename)s" by %(username)s'), "Template for the page changed notification mail subject header"), ('notify_page_changed_intro', _("Dear Wiki user,\n\n" 'You have subscribed to a wiki page or wiki category on "%(sitename)s" for change notification.\n\n' 'The "%(pagename)s" page has been changed by %(editor)s:\n'), "Template for the page changed notification mail intro text"), ('notify_page_deleted_subject', _('[%(sitename)s] %(trivial)sUpdate of "%(pagename)s" by %(username)s'), "Template for the page deleted notification mail subject header"), ('notify_page_deleted_intro', _("Dear wiki user,\n\n" 'You have subscribed to a 
wiki page "%(sitename)s" for change notification.\n\n' 'The page "%(pagename)s" has been deleted by %(editor)s:\n\n'), "Template for the page deleted notification mail intro text"), ('notify_page_renamed_subject', _('[%(sitename)s] %(trivial)sUpdate of "%(pagename)s" by %(username)s'), "Template for the page renamed notification mail subject header"), ('notify_page_renamed_intro', _("Dear wiki user,\n\n" 'You have subscribed to a wiki page "%(sitename)s" for change notification.\n\n' 'The page "%(pagename)s" has been renamed from "%(oldname)s" by %(editor)s:\n'), "Template for the page renamed notification mail intro text"), ('notify_att_added_subject', _('[%(sitename)s] New attachment added to page %(pagename)s'), "Template for the attachment added notification mail subject header"), ('notify_att_added_intro', _("Dear Wiki user,\n\n" 'You have subscribed to a wiki page "%(page_name)s" for change notification. ' "An attachment has been added to that page by %(editor)s. " "Following detailed information is available:\n\n" "Attachment name: %(attach_name)s\n" "Attachment size: %(attach_size)s\n"), "Template for the attachment added notification mail intro text"), ('notify_att_removed_subject', _('[%(sitename)s] Removed attachment from page %(pagename)s'), "Template for the attachment removed notification mail subject header"), ('notify_att_removed_intro', _("Dear Wiki user,\n\n" 'You have subscribed to a wiki page "%(page_name)s" for change notification. ' "An attachment has been removed from that page by %(editor)s. " "Following detailed information is available:\n\n" "Attachment name: %(attach_name)s\n" "Attachment size: %(attach_size)s\n"), "Template for the attachment removed notification mail intro text"), ('notify_user_created_subject', _("[%(sitename)s] New user account created"), "Template for the user created notification mail subject header"), ('notify_user_created_intro', _('Dear Superuser, a new user has just been created on "%(sitename)s". 
Details follow:\n\n' ' User name: %(username)s\n' ' Email address: %(useremail)s'), "Template for the user created notification mail intro text"), )), 'backup': ('Backup settings', 'These settings control how the backup action works and who is allowed to use it.', ( ('compression', 'gz', 'What compression to use for the backup ("gz" or "bz2").'), ('users', [], 'List of trusted user names who are allowed to get a backup.'), ('include', [], 'List of pathes to backup.'), ('exclude', lambda self, filename: False, 'Function f(self, filename) that tells whether a file should be excluded from backup. By default, nothing is excluded.'), )), 'rss': ('RSS settings', 'These settings control RSS behaviour.', ( ('items_default', 15, "Default maximum items value for RSS feed. Can be " "changed via items URL query parameter of rss_rc " "action."), ('items_limit', 100, "Limit for item count got via RSS (i. e. user " "can't get more than items_limit items even via " "changing items URL query parameter)."), ('unique', 0, "If set to 1, for each page name only one RSS item would " "be shown. Can be changed via unique rss_rc action URL " "query parameter."), ('diffs', 0, "Add diffs in RSS item descriptions by default. Can be " "changed via diffs URL query parameter of rss_rc action."), ('ddiffs', 0, "If set to 1, links to diff view instead of page itself " "would be generated by default. Can be changed via ddiffs " "URL query parameter of rss_rc action."), ('lines_default', 20, "Default line count limit for diffs added as item " "descriptions for RSS items. Can be changed via " "lines URL query parameter of rss_rc action."), ('lines_limit', 100, "Limit for possible line count for diffs added as " "item descriptions in RSS."), ('show_attachment_entries', 0, "If set to 1, items, related to " "attachment management, would be added to " "RSS feed. Can be changed via show_att " "URL query parameter of rss_rc action."), ('page_filter_pattern', "", "Default page filter pattern for RSS feed. 
" "Empty pattern matches to any page. Pattern " "beginning with circumflex is interpreted as " "regular expression. Pattern ending with " "slash matches page and all its subpages. " "Otherwise pattern sets specific pagename. " "Can be changed via page URL query parameter " "of rss_rc action."), ('show_page_history_link', True, "Add link to page change history " "RSS feed in theme."), )), 'search_macro': ('Search macro settings', 'Settings related to behaviour of search macros (such as FullSearch, ' 'FullSearchCached, PageList)', ( ('parse_args', False, "Do search macro parameter parsing. In previous " "versions of MoinMoin, whole search macro " "parameter string had been interpreted as needle. " "Now, to provide ability to pass additional " "parameters, this behaviour should be changed."), ('highlight_titles', 1, "Perform title matches highlighting by default " "in search results generated by macro."), ('highlight_pages', 1, "Add highlight parameter to links in search " "results generated by search macros by default."), )), } def _add_options_to_defconfig(opts, addgroup=True): for groupname in opts: group_short, group_doc, group_opts = opts[groupname] for name, default, doc in group_opts: if addgroup: name = groupname + '_' + name if isinstance(default, DefaultExpression): default = default.value setattr(DefaultConfig, name, default) _add_options_to_defconfig(options) _add_options_to_defconfig(options_no_group_name, False) # remove the gettext pseudo function del _
./CrossVul/dataset_final_sorted/CWE-79/py/good_4208_1
crossvul-python_data_bad_1644_4
"""Tornado handlers for kernels.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import json import logging from tornado import gen, web from tornado.concurrent import Future from tornado.ioloop import IOLoop from IPython.utils.jsonutil import date_default from IPython.utils.py3compat import cast_unicode from IPython.html.utils import url_path_join, url_escape from ...base.handlers import IPythonHandler, json_errors from ...base.zmqhandlers import AuthenticatedZMQStreamHandler, deserialize_binary_message from IPython.core.release import kernel_protocol_version class MainKernelHandler(IPythonHandler): @web.authenticated @json_errors def get(self): km = self.kernel_manager self.finish(json.dumps(km.list_kernels())) @web.authenticated @json_errors def post(self): km = self.kernel_manager model = self.get_json_body() if model is None: model = { 'name': km.default_kernel_name } else: model.setdefault('name', km.default_kernel_name) kernel_id = km.start_kernel(kernel_name=model['name']) model = km.kernel_model(kernel_id) location = url_path_join(self.base_url, 'api', 'kernels', kernel_id) self.set_header('Location', url_escape(location)) self.set_status(201) self.finish(json.dumps(model)) class KernelHandler(IPythonHandler): SUPPORTED_METHODS = ('DELETE', 'GET') @web.authenticated @json_errors def get(self, kernel_id): km = self.kernel_manager km._check_kernel_id(kernel_id) model = km.kernel_model(kernel_id) self.finish(json.dumps(model)) @web.authenticated @json_errors def delete(self, kernel_id): km = self.kernel_manager km.shutdown_kernel(kernel_id) self.set_status(204) self.finish() class KernelActionHandler(IPythonHandler): @web.authenticated @json_errors def post(self, kernel_id, action): km = self.kernel_manager if action == 'interrupt': km.interrupt_kernel(kernel_id) self.set_status(204) if action == 'restart': km.restart_kernel(kernel_id) model = km.kernel_model(kernel_id) self.set_header('Location', 
'{0}api/kernels/{1}'.format(self.base_url, kernel_id)) self.write(json.dumps(model)) self.finish() class ZMQChannelsHandler(AuthenticatedZMQStreamHandler): @property def kernel_info_timeout(self): return self.settings.get('kernel_info_timeout', 10) def __repr__(self): return "%s(%s)" % (self.__class__.__name__, getattr(self, 'kernel_id', 'uninitialized')) def create_stream(self): km = self.kernel_manager identity = self.session.bsession for channel in ('shell', 'iopub', 'stdin'): meth = getattr(km, 'connect_' + channel) self.channels[channel] = stream = meth(self.kernel_id, identity=identity) stream.channel = channel km.add_restart_callback(self.kernel_id, self.on_kernel_restarted) km.add_restart_callback(self.kernel_id, self.on_restart_failed, 'dead') def request_kernel_info(self): """send a request for kernel_info""" km = self.kernel_manager kernel = km.get_kernel(self.kernel_id) try: # check for previous request future = kernel._kernel_info_future except AttributeError: self.log.debug("Requesting kernel info from %s", self.kernel_id) # Create a kernel_info channel to query the kernel protocol version. # This channel will be closed after the kernel_info reply is received. 
if self.kernel_info_channel is None: self.kernel_info_channel = km.connect_shell(self.kernel_id) self.kernel_info_channel.on_recv(self._handle_kernel_info_reply) self.session.send(self.kernel_info_channel, "kernel_info_request") # store the future on the kernel, so only one request is sent kernel._kernel_info_future = self._kernel_info_future else: if not future.done(): self.log.debug("Waiting for pending kernel_info request") future.add_done_callback(lambda f: self._finish_kernel_info(f.result())) return self._kernel_info_future def _handle_kernel_info_reply(self, msg): """process the kernel_info_reply enabling msg spec adaptation, if necessary """ idents,msg = self.session.feed_identities(msg) try: msg = self.session.deserialize(msg) except: self.log.error("Bad kernel_info reply", exc_info=True) self._kernel_info_future.set_result({}) return else: info = msg['content'] self.log.debug("Received kernel info: %s", info) if msg['msg_type'] != 'kernel_info_reply' or 'protocol_version' not in info: self.log.error("Kernel info request failed, assuming current %s", info) info = {} self._finish_kernel_info(info) # close the kernel_info channel, we don't need it anymore if self.kernel_info_channel: self.kernel_info_channel.close() self.kernel_info_channel = None def _finish_kernel_info(self, info): """Finish handling kernel_info reply Set up protocol adaptation, if needed, and signal that connection can continue. 
""" protocol_version = info.get('protocol_version', kernel_protocol_version) if protocol_version != kernel_protocol_version: self.session.adapt_version = int(protocol_version.split('.')[0]) self.log.info("Adapting to protocol v%s for kernel %s", protocol_version, self.kernel_id) if not self._kernel_info_future.done(): self._kernel_info_future.set_result(info) def initialize(self): super(ZMQChannelsHandler, self).initialize() self.zmq_stream = None self.channels = {} self.kernel_id = None self.kernel_info_channel = None self._kernel_info_future = Future() @gen.coroutine def pre_get(self): # authenticate first super(ZMQChannelsHandler, self).pre_get() # then request kernel info, waiting up to a certain time before giving up. # We don't want to wait forever, because browsers don't take it well when # servers never respond to websocket connection requests. kernel = self.kernel_manager.get_kernel(self.kernel_id) self.session.key = kernel.session.key future = self.request_kernel_info() def give_up(): """Don't wait forever for the kernel to reply""" if future.done(): return self.log.warn("Timeout waiting for kernel_info reply from %s", self.kernel_id) future.set_result({}) loop = IOLoop.current() loop.add_timeout(loop.time() + self.kernel_info_timeout, give_up) # actually wait for it yield future @gen.coroutine def get(self, kernel_id): self.kernel_id = cast_unicode(kernel_id, 'ascii') yield super(ZMQChannelsHandler, self).get(kernel_id=kernel_id) def open(self, kernel_id): super(ZMQChannelsHandler, self).open() try: self.create_stream() except web.HTTPError as e: self.log.error("Error opening stream: %s", e) # WebSockets don't response to traditional error codes so we # close the connection. 
for channel, stream in self.channels.items(): if not stream.closed(): stream.close() self.close() else: for channel, stream in self.channels.items(): stream.on_recv_stream(self._on_zmq_reply) def on_message(self, msg): if not self.channels: # already closed, ignore the message self.log.debug("Received message on closed websocket %r", msg) return if isinstance(msg, bytes): msg = deserialize_binary_message(msg) else: msg = json.loads(msg) channel = msg.pop('channel', None) if channel is None: self.log.warn("No channel specified, assuming shell: %s", msg) channel = 'shell' if channel not in self.channels: self.log.warn("No such channel: %r", channel) return stream = self.channels[channel] self.session.send(stream, msg) def on_close(self): km = self.kernel_manager if self.kernel_id in km: km.remove_restart_callback( self.kernel_id, self.on_kernel_restarted, ) km.remove_restart_callback( self.kernel_id, self.on_restart_failed, 'dead', ) # This method can be called twice, once by self.kernel_died and once # from the WebSocket close event. If the WebSocket connection is # closed before the ZMQ streams are setup, they could be None. 
for channel, stream in self.channels.items(): if stream is not None and not stream.closed(): stream.on_recv(None) # close the socket directly, don't wait for the stream socket = stream.socket stream.close() socket.close() self.channels = {} def _send_status_message(self, status): msg = self.session.msg("status", {'execution_state': status} ) msg['channel'] = 'iopub' self.write_message(json.dumps(msg, default=date_default)) def on_kernel_restarted(self): logging.warn("kernel %s restarted", self.kernel_id) self._send_status_message('restarting') def on_restart_failed(self): logging.error("kernel %s restarted failed!", self.kernel_id) self._send_status_message('dead') #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- _kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)" _kernel_action_regex = r"(?P<action>restart|interrupt)" default_handlers = [ (r"/api/kernels", MainKernelHandler), (r"/api/kernels/%s" % _kernel_id_regex, KernelHandler), (r"/api/kernels/%s/%s" % (_kernel_id_regex, _kernel_action_regex), KernelActionHandler), (r"/api/kernels/%s/channels" % _kernel_id_regex, ZMQChannelsHandler), ]
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1644_4
crossvul-python_data_bad_1644_6
"""Tornado handlers for kernel specifications.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import glob import json import os pjoin = os.path.join from tornado import web from ...base.handlers import IPythonHandler, json_errors from ...utils import url_path_join def kernelspec_model(handler, name): """Load a KernelSpec by name and return the REST API model""" ksm = handler.kernel_spec_manager spec = ksm.get_kernel_spec(name) d = {'name': name} d['spec'] = spec.to_dict() d['resources'] = resources = {} resource_dir = spec.resource_dir for resource in ['kernel.js', 'kernel.css']: if os.path.exists(pjoin(resource_dir, resource)): resources[resource] = url_path_join( handler.base_url, 'kernelspecs', name, resource ) for logo_file in glob.glob(pjoin(resource_dir, 'logo-*')): fname = os.path.basename(logo_file) no_ext, _ = os.path.splitext(fname) resources[no_ext] = url_path_join( handler.base_url, 'kernelspecs', name, fname ) return d class MainKernelSpecHandler(IPythonHandler): SUPPORTED_METHODS = ('GET',) @web.authenticated @json_errors def get(self): ksm = self.kernel_spec_manager km = self.kernel_manager model = {} model['default'] = km.default_kernel_name model['kernelspecs'] = specs = {} for kernel_name in ksm.find_kernel_specs(): try: d = kernelspec_model(self, kernel_name) except Exception: self.log.error("Failed to load kernel spec: '%s'", kernel_name, exc_info=True) continue specs[kernel_name] = d self.set_header("Content-Type", 'application/json') self.finish(json.dumps(model)) class KernelSpecHandler(IPythonHandler): SUPPORTED_METHODS = ('GET',) @web.authenticated @json_errors def get(self, kernel_name): try: model = kernelspec_model(self, kernel_name) except KeyError: raise web.HTTPError(404, u'Kernel spec %s not found' % kernel_name) self.set_header("Content-Type", 'application/json') self.finish(json.dumps(model)) # URL to handler mappings kernel_name_regex = r"(?P<kernel_name>\w+)" 
default_handlers = [ (r"/api/kernelspecs", MainKernelSpecHandler), (r"/api/kernelspecs/%s" % kernel_name_regex, KernelSpecHandler), ]
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1644_6
crossvul-python_data_bad_41_0
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Backend-independent qute://* code. Module attributes: pyeval_output: The output of the last :pyeval command. _HANDLERS: The handlers registered via decorators. """ import json import os import time import textwrap import mimetypes import urllib import collections import pkg_resources import sip from PyQt5.QtCore import QUrlQuery, QUrl import qutebrowser from qutebrowser.config import config, configdata, configexc, configdiff from qutebrowser.utils import (version, utils, jinja, log, message, docutils, objreg, urlutils) from qutebrowser.misc import objects pyeval_output = ":pyeval was never called" spawn_output = ":spawn was never called" _HANDLERS = {} class NoHandlerFound(Exception): """Raised when no handler was found for the given URL.""" pass class QuteSchemeOSError(Exception): """Called when there was an OSError inside a handler.""" pass class QuteSchemeError(Exception): """Exception to signal that a handler should return an ErrorReply. Attributes correspond to the arguments in networkreply.ErrorNetworkReply. Attributes: errorstring: Error string to print. error: Numerical error value. 
""" def __init__(self, errorstring, error): self.errorstring = errorstring self.error = error super().__init__(errorstring) class Redirect(Exception): """Exception to signal a redirect should happen. Attributes: url: The URL to redirect to, as a QUrl. """ def __init__(self, url): super().__init__(url.toDisplayString()) self.url = url class add_handler: # noqa: N801,N806 pylint: disable=invalid-name """Decorator to register a qute://* URL handler. Attributes: _name: The 'foo' part of qute://foo backend: Limit which backends the handler can run with. """ def __init__(self, name, backend=None): self._name = name self._backend = backend self._function = None def __call__(self, function): self._function = function _HANDLERS[self._name] = self.wrapper return function def wrapper(self, *args, **kwargs): """Call the underlying function.""" if self._backend is not None and objects.backend != self._backend: return self.wrong_backend_handler(*args, **kwargs) else: return self._function(*args, **kwargs) def wrong_backend_handler(self, url): """Show an error page about using the invalid backend.""" html = jinja.render('error.html', title="Error while opening qute://url", url=url.toDisplayString(), error='{} is not available with this ' 'backend'.format(url.toDisplayString())) return 'text/html', html def data_for_url(url): """Get the data to show for the given URL. Args: url: The QUrl to show. Return: A (mimetype, data) tuple. """ norm_url = url.adjusted(QUrl.NormalizePathSegments | QUrl.StripTrailingSlash) if norm_url != url: raise Redirect(norm_url) path = url.path() host = url.host() query = urlutils.query_string(url) # A url like "qute:foo" is split as "scheme:path", not "scheme:host". log.misc.debug("url: {}, path: {}, host {}".format( url.toDisplayString(), path, host)) if not path or not host: new_url = QUrl() new_url.setScheme('qute') # When path is absent, e.g. qute://help (with no trailing slash) if host: new_url.setHost(host) # When host is absent, e.g. 
qute:help else: new_url.setHost(path) new_url.setPath('/') if query: new_url.setQuery(query) if new_url.host(): # path was a valid host raise Redirect(new_url) try: handler = _HANDLERS[host] except KeyError: raise NoHandlerFound(url) try: mimetype, data = handler(url) except OSError as e: # FIXME:qtwebengine how to handle this? raise QuteSchemeOSError(e) except QuteSchemeError: raise assert mimetype is not None, url if mimetype == 'text/html' and isinstance(data, str): # We let handlers return HTML as text data = data.encode('utf-8', errors='xmlcharrefreplace') return mimetype, data @add_handler('bookmarks') def qute_bookmarks(_url): """Handler for qute://bookmarks. Display all quickmarks / bookmarks.""" bookmarks = sorted(objreg.get('bookmark-manager').marks.items(), key=lambda x: x[1]) # Sort by title quickmarks = sorted(objreg.get('quickmark-manager').marks.items(), key=lambda x: x[0]) # Sort by name html = jinja.render('bookmarks.html', title='Bookmarks', bookmarks=bookmarks, quickmarks=quickmarks) return 'text/html', html @add_handler('tabs') def qute_tabs(_url): """Handler for qute://tabs. Display information about all open tabs.""" tabs = collections.defaultdict(list) for win_id, window in objreg.window_registry.items(): if sip.isdeleted(window): continue tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) for tab in tabbed_browser.widgets(): if tab.url() not in [QUrl("qute://tabs/"), QUrl("qute://tabs")]: urlstr = tab.url().toDisplayString() tabs[str(win_id)].append((tab.title(), urlstr)) html = jinja.render('tabs.html', title='Tabs', tab_list_by_window=tabs) return 'text/html', html def history_data(start_time, offset=None): """Return history data. Arguments: start_time: select history starting from this timestamp. 
offset: number of items to skip """ # history atimes are stored as ints, ensure start_time is not a float start_time = int(start_time) hist = objreg.get('web-history') if offset is not None: entries = hist.entries_before(start_time, limit=1000, offset=offset) else: # end is 24hrs earlier than start end_time = start_time - 24*60*60 entries = hist.entries_between(end_time, start_time) return [{"url": e.url, "title": e.title or e.url, "time": e.atime} for e in entries] @add_handler('history') def qute_history(url): """Handler for qute://history. Display and serve history.""" if url.path() == '/data': try: offset = QUrlQuery(url).queryItemValue("offset") offset = int(offset) if offset else None except ValueError as e: raise QuteSchemeError("Query parameter offset is invalid", e) # Use start_time in query or current time. try: start_time = QUrlQuery(url).queryItemValue("start_time") start_time = float(start_time) if start_time else time.time() except ValueError as e: raise QuteSchemeError("Query parameter start_time is invalid", e) return 'text/html', json.dumps(history_data(start_time, offset)) else: return 'text/html', jinja.render( 'history.html', title='History', gap_interval=config.val.history_gap_interval ) @add_handler('javascript') def qute_javascript(url): """Handler for qute://javascript. Return content of file given as query parameter. 
""" path = url.path() if path: path = "javascript" + os.sep.join(path.split('/')) return 'text/html', utils.read_file(path, binary=False) else: raise QuteSchemeError("No file specified", ValueError()) @add_handler('pyeval') def qute_pyeval(_url): """Handler for qute://pyeval.""" html = jinja.render('pre.html', title='pyeval', content=pyeval_output) return 'text/html', html @add_handler('spawn-output') def qute_spawn_output(_url): """Handler for qute://spawn-output.""" html = jinja.render('pre.html', title='spawn output', content=spawn_output) return 'text/html', html @add_handler('version') @add_handler('verizon') def qute_version(_url): """Handler for qute://version.""" html = jinja.render('version.html', title='Version info', version=version.version(), copyright=qutebrowser.__copyright__) return 'text/html', html @add_handler('plainlog') def qute_plainlog(url): """Handler for qute://plainlog. An optional query parameter specifies the minimum log level to print. For example, qute://log?level=warning prints warnings and errors. Level can be one of: vdebug, debug, info, warning, error, critical. """ if log.ram_handler is None: text = "Log output was disabled." else: level = QUrlQuery(url).queryItemValue('level') if not level: level = 'vdebug' text = log.ram_handler.dump_log(html=False, level=level) html = jinja.render('pre.html', title='log', content=text) return 'text/html', html @add_handler('log') def qute_log(url): """Handler for qute://log. An optional query parameter specifies the minimum log level to print. For example, qute://log?level=warning prints warnings and errors. Level can be one of: vdebug, debug, info, warning, error, critical. 
""" if log.ram_handler is None: html_log = None else: level = QUrlQuery(url).queryItemValue('level') if not level: level = 'vdebug' html_log = log.ram_handler.dump_log(html=True, level=level) html = jinja.render('log.html', title='log', content=html_log) return 'text/html', html @add_handler('gpl') def qute_gpl(_url): """Handler for qute://gpl. Return HTML content as string.""" return 'text/html', utils.read_file('html/license.html') @add_handler('help') def qute_help(url): """Handler for qute://help.""" urlpath = url.path() if not urlpath or urlpath == '/': urlpath = 'index.html' else: urlpath = urlpath.lstrip('/') if not docutils.docs_up_to_date(urlpath): message.error("Your documentation is outdated! Please re-run " "scripts/asciidoc2html.py.") path = 'html/doc/{}'.format(urlpath) if not urlpath.endswith('.html'): try: bdata = utils.read_file(path, binary=True) except OSError as e: raise QuteSchemeOSError(e) mimetype, _encoding = mimetypes.guess_type(urlpath) assert mimetype is not None, url return mimetype, bdata try: data = utils.read_file(path) except OSError: # No .html around, let's see if we find the asciidoc asciidoc_path = path.replace('.html', '.asciidoc') if asciidoc_path.startswith('html/doc/'): asciidoc_path = asciidoc_path.replace('html/doc/', '../doc/help/') try: asciidoc = utils.read_file(asciidoc_path) except OSError: asciidoc = None if asciidoc is None: raise preamble = textwrap.dedent(""" There was an error loading the documentation! This most likely means the documentation was not generated properly. If you are running qutebrowser from the git repository, please (re)run scripts/asciidoc2html.py and reload this page. If you're running a released version this is a bug, please use :report to report it. Falling back to the plaintext version. 
--------------------------------------------------------------- """) return 'text/plain', (preamble + asciidoc).encode('utf-8') else: return 'text/html', data @add_handler('backend-warning') def qute_backend_warning(_url): """Handler for qute://backend-warning.""" html = jinja.render('backend-warning.html', distribution=version.distribution(), Distribution=version.Distribution, version=pkg_resources.parse_version, title="Legacy backend warning") return 'text/html', html def _qute_settings_set(url): """Handler for qute://settings/set.""" query = QUrlQuery(url) option = query.queryItemValue('option', QUrl.FullyDecoded) value = query.queryItemValue('value', QUrl.FullyDecoded) # https://github.com/qutebrowser/qutebrowser/issues/727 if option == 'content.javascript.enabled' and value == 'false': msg = ("Refusing to disable javascript via qute://settings " "as it needs javascript support.") message.error(msg) return 'text/html', b'error: ' + msg.encode('utf-8') try: config.instance.set_str(option, value, save_yaml=True) return 'text/html', b'ok' except configexc.Error as e: message.error(str(e)) return 'text/html', b'error: ' + str(e).encode('utf-8') @add_handler('settings') def qute_settings(url): """Handler for qute://settings. View/change qute configuration.""" if url.path() == '/set': return _qute_settings_set(url) html = jinja.render('settings.html', title='settings', configdata=configdata, confget=config.instance.get_str) return 'text/html', html @add_handler('bindings') def qute_bindings(_url): """Handler for qute://bindings. 
View keybindings.""" bindings = {} defaults = config.val.bindings.default modes = set(defaults.keys()).union(config.val.bindings.commands) modes.remove('normal') modes = ['normal'] + sorted(list(modes)) for mode in modes: bindings[mode] = config.key_instance.get_bindings_for(mode) html = jinja.render('bindings.html', title='Bindings', bindings=bindings) return 'text/html', html @add_handler('back') def qute_back(url): """Handler for qute://back. Simple page to free ram / lazy load a site, goes back on focusing the tab. """ html = jinja.render( 'back.html', title='Suspended: ' + urllib.parse.unquote(url.fragment())) return 'text/html', html @add_handler('configdiff') def qute_configdiff(url): """Handler for qute://configdiff.""" if url.path() == '/old': try: return 'text/html', configdiff.get_diff() except OSError as e: error = (b'Failed to read old config: ' + str(e.strerror).encode('utf-8')) return 'text/plain', error else: data = config.instance.dump_userconfig().encode('utf-8') return 'text/plain', data @add_handler('pastebin-version') def qute_pastebin_version(_url): """Handler that pastebins the version string.""" version.pastebin_version() return 'text/plain', b'Paste called.'
./CrossVul/dataset_final_sorted/CWE-79/py/bad_41_0
crossvul-python_data_bad_1644_9
"""Tornado handlers for the sessions web service.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import json from tornado import web from ...base.handlers import IPythonHandler, json_errors from IPython.utils.jsonutil import date_default from IPython.html.utils import url_path_join, url_escape from IPython.kernel.kernelspec import NoSuchKernel class SessionRootHandler(IPythonHandler): @web.authenticated @json_errors def get(self): # Return a list of running sessions sm = self.session_manager sessions = sm.list_sessions() self.finish(json.dumps(sessions, default=date_default)) @web.authenticated @json_errors def post(self): # Creates a new session #(unless a session already exists for the named nb) sm = self.session_manager cm = self.contents_manager km = self.kernel_manager model = self.get_json_body() if model is None: raise web.HTTPError(400, "No JSON data provided") try: path = model['notebook']['path'] except KeyError: raise web.HTTPError(400, "Missing field in JSON data: notebook.path") try: kernel_name = model['kernel']['name'] except KeyError: self.log.debug("No kernel name specified, using default kernel") kernel_name = None # Check to see if session exists if sm.session_exists(path=path): model = sm.get_session(path=path) else: try: model = sm.create_session(path=path, kernel_name=kernel_name) except NoSuchKernel: msg = ("The '%s' kernel is not available. Please pick another " "suitable kernel instead, or install that kernel." 
% kernel_name) status_msg = '%s not found' % kernel_name self.log.warn('Kernel not found: %s' % kernel_name) self.set_status(501) self.finish(json.dumps(dict(message=msg, short_message=status_msg))) return location = url_path_join(self.base_url, 'api', 'sessions', model['id']) self.set_header('Location', url_escape(location)) self.set_status(201) self.finish(json.dumps(model, default=date_default)) class SessionHandler(IPythonHandler): SUPPORTED_METHODS = ('GET', 'PATCH', 'DELETE') @web.authenticated @json_errors def get(self, session_id): # Returns the JSON model for a single session sm = self.session_manager model = sm.get_session(session_id=session_id) self.finish(json.dumps(model, default=date_default)) @web.authenticated @json_errors def patch(self, session_id): # Currently, this handler is strictly for renaming notebooks sm = self.session_manager model = self.get_json_body() if model is None: raise web.HTTPError(400, "No JSON data provided") changes = {} if 'notebook' in model: notebook = model['notebook'] if 'path' in notebook: changes['path'] = notebook['path'] sm.update_session(session_id, **changes) model = sm.get_session(session_id=session_id) self.finish(json.dumps(model, default=date_default)) @web.authenticated @json_errors def delete(self, session_id): # Deletes the session with given session_id sm = self.session_manager try: sm.delete_session(session_id) except KeyError: # the kernel was deleted but the session wasn't! raise web.HTTPError(410, "Kernel deleted before session") self.set_status(204) self.finish() #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- _session_id_regex = r"(?P<session_id>\w+-\w+-\w+-\w+-\w+)" default_handlers = [ (r"/api/sessions/%s" % _session_id_regex, SessionHandler), (r"/api/sessions", SessionRootHandler) ]
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1644_9
crossvul-python_data_good_1727_0
# coding: utf-8 """A tornado based IPython notebook server.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import base64 import datetime import errno import importlib import io import json import logging import os import random import re import select import signal import socket import ssl import sys import threading import webbrowser # check for pyzmq from IPython.utils.zmqrelated import check_for_zmq check_for_zmq('13', 'IPython.html') from jinja2 import Environment, FileSystemLoader # Install the pyzmq ioloop. This has to be done before anything else from # tornado is imported. from zmq.eventloop import ioloop ioloop.install() # check for tornado 3.1.0 msg = "The IPython Notebook requires tornado >= 4.0" try: import tornado except ImportError: raise ImportError(msg) try: version_info = tornado.version_info except AttributeError: raise ImportError(msg + ", but you have < 1.1.0") if version_info < (4,0): raise ImportError(msg + ", but you have %s" % tornado.version) from tornado import httpserver from tornado import web from tornado.log import LogFormatter, app_log, access_log, gen_log from IPython.html import ( DEFAULT_STATIC_FILES_PATH, DEFAULT_TEMPLATE_PATH_LIST, ) from .base.handlers import Template404 from .log import log_request from .services.kernels.kernelmanager import MappingKernelManager from .services.config import ConfigManager from .services.contents.manager import ContentsManager from .services.contents.filemanager import FileContentsManager from .services.clusters.clustermanager import ClusterManager from .services.sessions.sessionmanager import SessionManager from .auth.login import LoginHandler from .auth.logout import LogoutHandler from .base.handlers import IPythonHandler, FileFindHandler from IPython.config import Config from IPython.config.application import catch_config_error, boolean_flag from IPython.core.application import ( 
BaseIPythonApplication, base_flags, base_aliases, ) from IPython.core.profiledir import ProfileDir from IPython.kernel import KernelManager from IPython.kernel.kernelspec import KernelSpecManager from IPython.kernel.zmq.session import Session from IPython.nbformat.sign import NotebookNotary from IPython.utils.importstring import import_item from IPython.utils import submodule from IPython.utils.process import check_pid from IPython.utils.traitlets import ( Dict, Unicode, Integer, List, Bool, Bytes, Instance, TraitError, Type, ) from IPython.utils import py3compat from IPython.utils.path import filefind, get_ipython_dir from IPython.utils.sysinfo import get_sys_info from .nbextensions import SYSTEM_NBEXTENSIONS_DIRS from .utils import url_path_join #----------------------------------------------------------------------------- # Module globals #----------------------------------------------------------------------------- _examples = """ ipython notebook # start the notebook ipython notebook --profile=sympy # use the sympy profile ipython notebook --certfile=mycert.pem # use SSL/TLS certificate """ #----------------------------------------------------------------------------- # Helper functions #----------------------------------------------------------------------------- def random_ports(port, n): """Generate a list of n random ports near the given port. The first 5 ports will be sequential, and the remaining n-5 will be randomly selected in the range [port-2*n, port+2*n]. """ for i in range(min(5, n)): yield port + i for i in range(n-5): yield max(1, port + random.randint(-2*n, 2*n)) def load_handlers(name): """Load the (URL pattern, handler) tuples for each component.""" name = 'IPython.html.' 
+ name mod = __import__(name, fromlist=['default_handlers']) return mod.default_handlers #----------------------------------------------------------------------------- # The Tornado web application #----------------------------------------------------------------------------- class NotebookWebApplication(web.Application): def __init__(self, ipython_app, kernel_manager, contents_manager, cluster_manager, session_manager, kernel_spec_manager, config_manager, log, base_url, default_url, settings_overrides, jinja_env_options): settings = self.init_settings( ipython_app, kernel_manager, contents_manager, cluster_manager, session_manager, kernel_spec_manager, config_manager, log, base_url, default_url, settings_overrides, jinja_env_options) handlers = self.init_handlers(settings) super(NotebookWebApplication, self).__init__(handlers, **settings) def init_settings(self, ipython_app, kernel_manager, contents_manager, cluster_manager, session_manager, kernel_spec_manager, config_manager, log, base_url, default_url, settings_overrides, jinja_env_options=None): _template_path = settings_overrides.get( "template_path", ipython_app.template_file_path, ) if isinstance(_template_path, py3compat.string_types): _template_path = (_template_path,) template_path = [os.path.expanduser(path) for path in _template_path] jenv_opt = {"autoescape": True} jenv_opt.update(jinja_env_options if jinja_env_options else {}) env = Environment(loader=FileSystemLoader(template_path), **jenv_opt) sys_info = get_sys_info() if sys_info['commit_source'] == 'repository': # don't cache (rely on 304) when working from master version_hash = '' else: # reset the cache on server restart version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S") settings = dict( # basics log_function=log_request, base_url=base_url, default_url=default_url, template_path=template_path, static_path=ipython_app.static_file_path, static_handler_class = FileFindHandler, static_url_prefix = url_path_join(base_url,'/static/'), 
static_handler_args = { # don't cache custom.js 'no_cache_paths': [url_path_join(base_url, 'static', 'custom')], }, version_hash=version_hash, # authentication cookie_secret=ipython_app.cookie_secret, login_url=url_path_join(base_url,'/login'), login_handler_class=ipython_app.login_handler_class, logout_handler_class=ipython_app.logout_handler_class, password=ipython_app.password, # managers kernel_manager=kernel_manager, contents_manager=contents_manager, cluster_manager=cluster_manager, session_manager=session_manager, kernel_spec_manager=kernel_spec_manager, config_manager=config_manager, # IPython stuff jinja_template_vars=ipython_app.jinja_template_vars, nbextensions_path=ipython_app.nbextensions_path, websocket_url=ipython_app.websocket_url, mathjax_url=ipython_app.mathjax_url, config=ipython_app.config, jinja2_env=env, terminals_available=False, # Set later if terminals are available ) # allow custom overrides for the tornado web app. settings.update(settings_overrides) return settings def init_handlers(self, settings): """Load the (URL pattern, handler) tuples for each component.""" # Order matters. The first handler to match the URL will handle the request. 
handlers = [] handlers.extend(load_handlers('tree.handlers')) handlers.extend([(r"/login", settings['login_handler_class'])]) handlers.extend([(r"/logout", settings['logout_handler_class'])]) handlers.extend(load_handlers('files.handlers')) handlers.extend(load_handlers('notebook.handlers')) handlers.extend(load_handlers('nbconvert.handlers')) handlers.extend(load_handlers('kernelspecs.handlers')) handlers.extend(load_handlers('edit.handlers')) handlers.extend(load_handlers('services.config.handlers')) handlers.extend(load_handlers('services.kernels.handlers')) handlers.extend(load_handlers('services.contents.handlers')) handlers.extend(load_handlers('services.clusters.handlers')) handlers.extend(load_handlers('services.sessions.handlers')) handlers.extend(load_handlers('services.nbconvert.handlers')) handlers.extend(load_handlers('services.kernelspecs.handlers')) handlers.extend(load_handlers('services.security.handlers')) handlers.append( (r"/nbextensions/(.*)", FileFindHandler, { 'path': settings['nbextensions_path'], 'no_cache_paths': ['/'], # don't cache anything in nbextensions }), ) # register base handlers last handlers.extend(load_handlers('base.handlers')) # set the URL that will be redirected from `/` handlers.append( (r'/?', web.RedirectHandler, { 'url' : settings['default_url'], 'permanent': False, # want 302, not 301 }) ) # prepend base_url onto the patterns that we match new_handlers = [] for handler in handlers: pattern = url_path_join(settings['base_url'], handler[0]) new_handler = tuple([pattern] + list(handler[1:])) new_handlers.append(new_handler) # add 404 on the end, which will catch everything that falls through new_handlers.append((r'(.*)', Template404)) return new_handlers class NbserverListApp(BaseIPythonApplication): description="List currently running notebook servers in this profile." 
flags = dict( json=({'NbserverListApp': {'json': True}}, "Produce machine-readable JSON output."), ) json = Bool(False, config=True, help="If True, each line of output will be a JSON object with the " "details from the server info file.") def start(self): if not self.json: print("Currently running servers:") for serverinfo in list_running_servers(self.profile): if self.json: print(json.dumps(serverinfo)) else: print(serverinfo['url'], "::", serverinfo['notebook_dir']) #----------------------------------------------------------------------------- # Aliases and Flags #----------------------------------------------------------------------------- flags = dict(base_flags) flags['no-browser']=( {'NotebookApp' : {'open_browser' : False}}, "Don't open the notebook in a browser after startup." ) flags['pylab']=( {'NotebookApp' : {'pylab' : 'warn'}}, "DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib." ) flags['no-mathjax']=( {'NotebookApp' : {'enable_mathjax' : False}}, """Disable MathJax MathJax is the javascript library IPython uses to render math/LaTeX. It is very large, so you may want to disable it if you have a slow internet connection, or for offline use of the notebook. When disabled, equations etc. will appear as their untransformed TeX source. 
""" ) # Add notebook manager flags flags.update(boolean_flag('script', 'FileContentsManager.save_script', 'DEPRECATED, IGNORED', 'DEPRECATED, IGNORED')) aliases = dict(base_aliases) aliases.update({ 'ip': 'NotebookApp.ip', 'port': 'NotebookApp.port', 'port-retries': 'NotebookApp.port_retries', 'transport': 'KernelManager.transport', 'keyfile': 'NotebookApp.keyfile', 'certfile': 'NotebookApp.certfile', 'notebook-dir': 'NotebookApp.notebook_dir', 'browser': 'NotebookApp.browser', 'pylab': 'NotebookApp.pylab', }) #----------------------------------------------------------------------------- # NotebookApp #----------------------------------------------------------------------------- class NotebookApp(BaseIPythonApplication): name = 'ipython-notebook' description = """ The IPython HTML Notebook. This launches a Tornado based HTML Notebook Server that serves up an HTML5/Javascript Notebook client. """ examples = _examples aliases = aliases flags = flags classes = [ KernelManager, ProfileDir, Session, MappingKernelManager, ContentsManager, FileContentsManager, NotebookNotary, KernelSpecManager, ] flags = Dict(flags) aliases = Dict(aliases) subcommands = dict( list=(NbserverListApp, NbserverListApp.description.splitlines()[0]), ) ipython_kernel_argv = List(Unicode) _log_formatter_cls = LogFormatter def _log_level_default(self): return logging.INFO def _log_datefmt_default(self): """Exclude date from default date format""" return "%H:%M:%S" def _log_format_default(self): """override default log format to include time""" return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s" # create requested profiles by default, if they don't exist: auto_create = Bool(True) # file to be opened in the notebook server file_to_run = Unicode('', config=True) # Network related information allow_origin = Unicode('', config=True, help="""Set the Access-Control-Allow-Origin header Use '*' to allow any origin to access your server. 
Takes precedence over allow_origin_pat. """ ) allow_origin_pat = Unicode('', config=True, help="""Use a regular expression for the Access-Control-Allow-Origin header Requests from an origin matching the expression will get replies with: Access-Control-Allow-Origin: origin where `origin` is the origin of the request. Ignored if allow_origin is set. """ ) allow_credentials = Bool(False, config=True, help="Set the Access-Control-Allow-Credentials: true header" ) default_url = Unicode('/tree', config=True, help="The default URL to redirect to from `/`" ) ip = Unicode('localhost', config=True, help="The IP address the notebook server will listen on." ) def _ip_default(self): """Return localhost if available, 127.0.0.1 otherwise. On some (horribly broken) systems, localhost cannot be bound. """ s = socket.socket() try: s.bind(('localhost', 0)) except socket.error as e: self.log.warn("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s", e) return '127.0.0.1' else: s.close() return 'localhost' def _ip_changed(self, name, old, new): if new == u'*': self.ip = u'' port = Integer(8888, config=True, help="The port the notebook server will listen on." ) port_retries = Integer(50, config=True, help="The number of additional ports to try if the specified port is not available." ) certfile = Unicode(u'', config=True, help="""The full path to an SSL/TLS certificate file.""" ) keyfile = Unicode(u'', config=True, help="""The full path to a private key file for usage with SSL/TLS.""" ) cookie_secret_file = Unicode(config=True, help="""The file where the cookie secret is stored.""" ) def _cookie_secret_file_default(self): if self.profile_dir is None: return '' return os.path.join(self.profile_dir.security_dir, 'notebook_cookie_secret') cookie_secret = Bytes(b'', config=True, help="""The random bytes used to secure cookies. By default this is a new random number every time you start the Notebook. 
Set it to a value in a config file to enable logins to persist across server sessions. Note: Cookie secrets should be kept private, do not share config files with cookie_secret stored in plaintext (you can read the value from a file). """ ) def _cookie_secret_default(self): if os.path.exists(self.cookie_secret_file): with io.open(self.cookie_secret_file, 'rb') as f: return f.read() else: secret = base64.encodestring(os.urandom(1024)) self._write_cookie_secret_file(secret) return secret def _write_cookie_secret_file(self, secret): """write my secret to my secret_file""" self.log.info("Writing notebook server cookie secret to %s", self.cookie_secret_file) with io.open(self.cookie_secret_file, 'wb') as f: f.write(secret) try: os.chmod(self.cookie_secret_file, 0o600) except OSError: self.log.warn( "Could not set permissions on %s", self.cookie_secret_file ) password = Unicode(u'', config=True, help="""Hashed password to use for web authentication. To generate, type in a python/IPython shell: from IPython.lib import passwd; passwd() The string should be of the form type:salt:hashed-password. """ ) open_browser = Bool(True, config=True, help="""Whether to open in a browser after starting. The specific browser used is platform dependent and determined by the python standard library `webbrowser` module, unless it is overridden using the --browser (NotebookApp.browser) configuration option. """) browser = Unicode(u'', config=True, help="""Specify what command to use to invoke a web browser when opening the notebook. If not specified, the default browser will be determined by the `webbrowser` standard library module, which allows setting of the BROWSER environment variable to override it. 
""") webapp_settings = Dict(config=True, help="DEPRECATED, use tornado_settings" ) def _webapp_settings_changed(self, name, old, new): self.log.warn("\n webapp_settings is deprecated, use tornado_settings.\n") self.tornado_settings = new tornado_settings = Dict(config=True, help="Supply overrides for the tornado.web.Application that the " "IPython notebook uses.") ssl_options = Dict(config=True, help="""Supply SSL options for the tornado HTTPServer. See the tornado docs for details.""") jinja_environment_options = Dict(config=True, help="Supply extra arguments that will be passed to Jinja environment.") jinja_template_vars = Dict( config=True, help="Extra variables to supply to jinja templates when rendering.", ) enable_mathjax = Bool(True, config=True, help="""Whether to enable MathJax for typesetting math/TeX MathJax is the javascript library IPython uses to render math/LaTeX. It is very large, so you may want to disable it if you have a slow internet connection, or for offline use of the notebook. When disabled, equations etc. will appear as their untransformed TeX source. """ ) def _enable_mathjax_changed(self, name, old, new): """set mathjax url to empty if mathjax is disabled""" if not new: self.mathjax_url = u'' base_url = Unicode('/', config=True, help='''The base URL for the notebook server. Leading and trailing slashes can be omitted, and will automatically be added. ''') def _base_url_changed(self, name, old, new): if not new.startswith('/'): self.base_url = '/'+new elif not new.endswith('/'): self.base_url = new+'/' base_project_url = Unicode('/', config=True, help="""DEPRECATED use base_url""") def _base_project_url_changed(self, name, old, new): self.log.warn("base_project_url is deprecated, use base_url") self.base_url = new extra_static_paths = List(Unicode, config=True, help="""Extra paths to search for serving static files. 
This allows adding javascript/css to be available from the notebook server machine, or overriding individual files in the IPython""" ) def _extra_static_paths_default(self): return [os.path.join(self.profile_dir.location, 'static')] @property def static_file_path(self): """return extra paths + the default location""" return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH] extra_template_paths = List(Unicode, config=True, help="""Extra paths to search for serving jinja templates. Can be used to override templates from IPython.html.templates.""" ) def _extra_template_paths_default(self): return [] @property def template_file_path(self): """return extra paths + the default locations""" return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST extra_nbextensions_path = List(Unicode, config=True, help="""extra paths to look for Javascript notebook extensions""" ) @property def nbextensions_path(self): """The path to look for Javascript notebook extensions""" return self.extra_nbextensions_path + [os.path.join(get_ipython_dir(), 'nbextensions')] + SYSTEM_NBEXTENSIONS_DIRS websocket_url = Unicode("", config=True, help="""The base URL for websockets, if it differs from the HTTP server (hint: it almost certainly doesn't). 
Should be in the form of an HTTP origin: ws[s]://hostname[:port] """ ) mathjax_url = Unicode("", config=True, help="""The url for MathJax.js.""" ) def _mathjax_url_default(self): if not self.enable_mathjax: return u'' static_url_prefix = self.tornado_settings.get("static_url_prefix", url_path_join(self.base_url, "static") ) # try local mathjax, either in nbextensions/mathjax or static/mathjax for (url_prefix, search_path) in [ (url_path_join(self.base_url, "nbextensions"), self.nbextensions_path), (static_url_prefix, self.static_file_path), ]: self.log.debug("searching for local mathjax in %s", search_path) try: mathjax = filefind(os.path.join('mathjax', 'MathJax.js'), search_path) except IOError: continue else: url = url_path_join(url_prefix, u"mathjax/MathJax.js") self.log.info("Serving local MathJax from %s at %s", mathjax, url) return url # no local mathjax, serve from CDN url = u"https://cdn.mathjax.org/mathjax/latest/MathJax.js" self.log.info("Using MathJax from CDN: %s", url) return url def _mathjax_url_changed(self, name, old, new): if new and not self.enable_mathjax: # enable_mathjax=False overrides mathjax_url self.mathjax_url = u'' else: self.log.info("Using MathJax: %s", new) contents_manager_class = Type( default_value=FileContentsManager, klass=ContentsManager, config=True, help='The notebook manager class to use.' ) kernel_manager_class = Type( default_value=MappingKernelManager, config=True, help='The kernel manager class to use.' ) session_manager_class = Type( default_value=SessionManager, config=True, help='The session manager class to use.' ) cluster_manager_class = Type( default_value=ClusterManager, config=True, help='The cluster manager class to use.' ) config_manager_class = Type( default_value=ConfigManager, config = True, help='The config manager class to use' ) kernel_spec_manager = Instance(KernelSpecManager) kernel_spec_manager_class = Type( default_value=KernelSpecManager, config=True, help=""" The kernel spec manager class to use. 
Should be a subclass of `IPython.kernel.kernelspec.KernelSpecManager`. The Api of KernelSpecManager is provisional and might change without warning between this version of IPython and the next stable one. """ ) login_handler_class = Type( default_value=LoginHandler, klass=web.RequestHandler, config=True, help='The login handler class to use.', ) logout_handler_class = Type( default_value=LogoutHandler, klass=web.RequestHandler, config=True, help='The logout handler class to use.', ) trust_xheaders = Bool(False, config=True, help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers" "sent by the upstream reverse proxy. Necessary if the proxy handles SSL") ) info_file = Unicode() def _info_file_default(self): info_file = "nbserver-%s.json"%os.getpid() return os.path.join(self.profile_dir.security_dir, info_file) pylab = Unicode('disabled', config=True, help=""" DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib. """ ) def _pylab_changed(self, name, old, new): """when --pylab is specified, display a warning and exit""" if new != 'warn': backend = ' %s' % new else: backend = '' self.log.error("Support for specifying --pylab on the command line has been removed.") self.log.error( "Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.".format(backend) ) self.exit(1) notebook_dir = Unicode(config=True, help="The directory to use for notebooks and kernels." ) def _notebook_dir_default(self): if self.file_to_run: return os.path.dirname(os.path.abspath(self.file_to_run)) else: return py3compat.getcwd() def _notebook_dir_changed(self, name, old, new): """Do a bit of validation of the notebook dir.""" if not os.path.isabs(new): # If we receive a non-absolute path, make it absolute. 
self.notebook_dir = os.path.abspath(new) return if not os.path.isdir(new): raise TraitError("No such notebook dir: %r" % new) # setting App.notebook_dir implies setting notebook and kernel dirs as well self.config.FileContentsManager.root_dir = new self.config.MappingKernelManager.root_dir = new server_extensions = List(Unicode(), config=True, help=("Python modules to load as notebook server extensions. " "This is an experimental API, and may change in future releases.") ) reraise_server_extension_failures = Bool( False, config=True, help="Reraise exceptions encountered loading server extensions?", ) def parse_command_line(self, argv=None): super(NotebookApp, self).parse_command_line(argv) if self.extra_args: arg0 = self.extra_args[0] f = os.path.abspath(arg0) self.argv.remove(arg0) if not os.path.exists(f): self.log.critical("No such file or directory: %s", f) self.exit(1) # Use config here, to ensure that it takes higher priority than # anything that comes from the profile. c = Config() if os.path.isdir(f): c.NotebookApp.notebook_dir = f elif os.path.isfile(f): c.NotebookApp.file_to_run = f self.update_config(c) def init_kernel_argv(self): """add the profile-dir to arguments to be passed to IPython kernels""" # FIXME: remove special treatment of IPython kernels # Kernel should get *absolute* path to profile directory self.ipython_kernel_argv = ["--profile-dir", self.profile_dir.location] def init_configurables(self): self.kernel_spec_manager = self.kernel_spec_manager_class( parent=self, ipython_dir=self.ipython_dir, ) self.kernel_manager = self.kernel_manager_class( parent=self, log=self.log, ipython_kernel_argv=self.ipython_kernel_argv, connection_dir=self.profile_dir.security_dir, ) self.contents_manager = self.contents_manager_class( parent=self, log=self.log, ) self.session_manager = self.session_manager_class( parent=self, log=self.log, kernel_manager=self.kernel_manager, contents_manager=self.contents_manager, ) self.cluster_manager = 
self.cluster_manager_class( parent=self, log=self.log, ) self.config_manager = self.config_manager_class( parent=self, log=self.log, profile_dir=self.profile_dir.location, ) def init_logging(self): # This prevents double log messages because tornado use a root logger that # self.log is a child of. The logging module dipatches log messages to a log # and all of its ancenstors until propagate is set to False. self.log.propagate = False for log in app_log, access_log, gen_log: # consistent log output name (NotebookApp instead of tornado.access, etc.) log.name = self.log.name # hook up tornado 3's loggers to our app handlers logger = logging.getLogger('tornado') logger.propagate = True logger.parent = self.log logger.setLevel(self.log.level) def init_webapp(self): """initialize tornado webapp and httpserver""" self.tornado_settings['allow_origin'] = self.allow_origin if self.allow_origin_pat: self.tornado_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat) self.tornado_settings['allow_credentials'] = self.allow_credentials # ensure default_url starts with base_url if not self.default_url.startswith(self.base_url): self.default_url = url_path_join(self.base_url, self.default_url) self.web_app = NotebookWebApplication( self, self.kernel_manager, self.contents_manager, self.cluster_manager, self.session_manager, self.kernel_spec_manager, self.config_manager, self.log, self.base_url, self.default_url, self.tornado_settings, self.jinja_environment_options ) ssl_options = self.ssl_options if self.certfile: ssl_options['certfile'] = self.certfile if self.keyfile: ssl_options['keyfile'] = self.keyfile if not ssl_options: # None indicates no SSL config ssl_options = None else: # Disable SSLv3, since its use is discouraged. 
        # NOTE(review): ssl.PROTOCOL_TLSv1 does disable SSLv3, but it also
        # pins the server to TLS 1.0 *only* (no TLS 1.1/1.2).  The tornado
        # ssl_options dict cannot express PROTOCOL_SSLv23 + OP_NO_SSLv3; an
        # ssl.SSLContext would be needed for that — confirm before changing,
        # as this restriction looks deliberate.
        ssl_options['ssl_version']=ssl.PROTOCOL_TLSv1
        # Give the configured login handler a chance to validate/warn about
        # the security configuration before the server starts.
        self.login_handler_class.validate_security(self, ssl_options=ssl_options)
        self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
                                                 xheaders=self.trust_xheaders)

        # Try the requested port first, then up to port_retries alternatives
        # (sequential then random — see random_ports()).  "In use" and
        # "permission denied" errors are retried; anything else is fatal.
        success = None
        for port in random_ports(self.port, self.port_retries+1):
            try:
                self.http_server.listen(port, self.ip)
            except socket.error as e:
                if e.errno == errno.EADDRINUSE:
                    self.log.info('The port %i is already in use, trying another random port.' % port)
                    continue
                elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
                    self.log.warn("Permission to listen on port %i denied" % port)
                    continue
                else:
                    raise
            else:
                # record the port we actually bound to
                self.port = port
                success = True
                break
        if not success:
            self.log.critical('ERROR: the notebook server could not be started because '
                              'no available port could be found.')
            self.exit(1)

    @property
    def display_url(self):
        """URL shown to the user; names all interfaces when ip is unset."""
        ip = self.ip if self.ip else '[all ip addresses on your system]'
        return self._url(ip)

    @property
    def connection_url(self):
        """A URL that can actually be connected to (falls back to localhost)."""
        ip = self.ip if self.ip else 'localhost'
        return self._url(ip)

    def _url(self, ip):
        """Build the server's base URL for the given ip (https iff certfile set)."""
        proto = 'https' if self.certfile else 'http'
        return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)

    def init_terminals(self):
        """Enable the terminals feature if its (optional) dependencies import."""
        try:
            from .terminal import initialize
            initialize(self.web_app)
            self.web_app.settings['terminals_available'] = True
        except ImportError as e:
            # missing terminal deps are expected on Windows, so log quietly there
            log = self.log.debug if sys.platform == 'win32' else self.log.warn
            log("Terminals not available (error was %s)", e)

    def init_signal(self):
        """Install signal handlers for shutdown (SIGINT/SIGTERM) and info dumps."""
        if not sys.platform.startswith('win'):
            signal.signal(signal.SIGINT, self._handle_sigint)
        signal.signal(signal.SIGTERM, self._signal_stop)
        if hasattr(signal, 'SIGUSR1'):
            # Windows doesn't support SIGUSR1
            signal.signal(signal.SIGUSR1, self._signal_info)
        if hasattr(signal, 'SIGINFO'):
            # only on BSD-based systems
            signal.signal(signal.SIGINFO, self._signal_info)

    def _handle_sigint(self, sig, frame):
        """SIGINT handler spawns confirmation dialog"""
        # register more forceful signal handler for ^C^C case
        signal.signal(signal.SIGINT, self._signal_stop)
        # request confirmation dialog in bg thread, to avoid
        # blocking the App
        thread = threading.Thread(target=self._confirm_exit)
        thread.daemon = True
        thread.start()

    def _restore_sigint_handler(self):
        """callback for restoring original SIGINT handler"""
        signal.signal(signal.SIGINT, self._handle_sigint)

    def _confirm_exit(self):
        """confirm shutdown on ^C

        A second ^C, or answering 'y' within 5s will cause shutdown,
        otherwise original SIGINT handler will be restored.

        This doesn't work on Windows.
        """
        info = self.log.info
        info('interrupted')
        print(self.notebook_info())
        sys.stdout.write("Shutdown this notebook server (y/[n])? ")
        sys.stdout.flush()
        # wait up to 5s for a line on stdin
        r,w,x = select.select([sys.stdin], [], [], 5)
        if r:
            line = sys.stdin.readline()
            if line.lower().startswith('y') and 'n' not in line.lower():
                self.log.critical("Shutdown confirmed")
                ioloop.IOLoop.current().stop()
                return
        else:
            print("No answer for 5s:", end=' ')
        print("resuming operation...")
        # no answer, or answer is no:
        # set it back to original SIGINT handler
        # use IOLoop.add_callback because signal.signal must be called
        # from main thread
        ioloop.IOLoop.current().add_callback(self._restore_sigint_handler)

    def _signal_stop(self, sig, frame):
        """Unconditional shutdown (second ^C, or SIGTERM)."""
        self.log.critical("received signal %s, stopping", sig)
        ioloop.IOLoop.current().stop()

    def _signal_info(self, sig, frame):
        """Print server status in response to SIGUSR1/SIGINFO."""
        print(self.notebook_info())

    def init_components(self):
        """Check the components submodule, and warn if it's unclean"""
        status = submodule.check_submodule_status()
        if status == 'missing':
            self.log.warn("components submodule missing, running `git submodule update`")
            submodule.update_submodules(submodule.ipython_parent())
        elif status == 'unclean':
            self.log.warn("components submodule unclean, you may see 404s on static/components")
            self.log.warn("run `setup.py submodule` or `git submodule update` to update")

    def init_server_extensions(self):
        """Load any extensions specified by config.
Import the module, then call the load_jupyter_server_extension function, if one exists. The extension API is experimental, and may change in future releases. """ for modulename in self.server_extensions: try: mod = importlib.import_module(modulename) func = getattr(mod, 'load_jupyter_server_extension', None) if func is not None: func(self) except Exception: if self.reraise_server_extension_failures: raise self.log.warn("Error loading server extension %s", modulename, exc_info=True) @catch_config_error def initialize(self, argv=None): super(NotebookApp, self).initialize(argv) self.init_logging() self.init_kernel_argv() self.init_configurables() self.init_components() self.init_webapp() self.init_terminals() self.init_signal() self.init_server_extensions() def cleanup_kernels(self): """Shutdown all kernels. The kernels will shutdown themselves when this process no longer exists, but explicit shutdown allows the KernelManagers to cleanup the connection files. """ self.log.info('Shutting down kernels') self.kernel_manager.shutdown_all() def notebook_info(self): "Return the current working directory and the server url information" info = self.contents_manager.info_string() + "\n" info += "%d active kernels \n" % len(self.kernel_manager._kernels) return info + "The IPython Notebook is running at: %s" % self.display_url def server_info(self): """Return a JSONable dict of information about this server.""" return {'url': self.connection_url, 'hostname': self.ip if self.ip else 'localhost', 'port': self.port, 'secure': bool(self.certfile), 'base_url': self.base_url, 'notebook_dir': os.path.abspath(self.notebook_dir), 'pid': os.getpid() } def write_server_info_file(self): """Write the result of server_info() to the JSON file info_file.""" with open(self.info_file, 'w') as f: json.dump(self.server_info(), f, indent=2) def remove_server_info_file(self): """Remove the nbserver-<pid>.json file created for this server. 
Ignores the error raised when the file has already been removed. """ try: os.unlink(self.info_file) except OSError as e: if e.errno != errno.ENOENT: raise def start(self): """ Start the IPython Notebook server app, after initialization This method takes no arguments so all configuration and initialization must be done prior to calling this method.""" if self.subapp is not None: return self.subapp.start() info = self.log.info for line in self.notebook_info().split("\n"): info(line) info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).") self.write_server_info_file() if self.open_browser or self.file_to_run: try: browser = webbrowser.get(self.browser or None) except webbrowser.Error as e: self.log.warn('No web browser found: %s.' % e) browser = None if self.file_to_run: if not os.path.exists(self.file_to_run): self.log.critical("%s does not exist" % self.file_to_run) self.exit(1) relpath = os.path.relpath(self.file_to_run, self.notebook_dir) uri = url_path_join('notebooks', *relpath.split(os.sep)) else: uri = 'tree' if browser: b = lambda : browser.open(url_path_join(self.connection_url, uri), new=2) threading.Thread(target=b).start() self.io_loop = ioloop.IOLoop.current() if sys.platform.startswith('win'): # add no-op to wake every 5s # to handle signals that may be ignored by the inner loop pc = ioloop.PeriodicCallback(lambda : None, 5000) pc.start() try: self.io_loop.start() except KeyboardInterrupt: info("Interrupted...") finally: self.cleanup_kernels() self.remove_server_info_file() def stop(self): def _stop(): self.http_server.stop() self.io_loop.stop() self.io_loop.add_callback(_stop) def list_running_servers(profile='default'): """Iterate over the server info files of running notebook servers. Given a profile name, find nbserver-* files in the security directory of that profile, and yield dicts of their information, each one pertaining to a currently running notebook server instance. 
""" pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), name=profile) for file in os.listdir(pd.security_dir): if file.startswith('nbserver-'): with io.open(os.path.join(pd.security_dir, file), encoding='utf-8') as f: info = json.load(f) # Simple check whether that process is really still running # Also remove leftover files from IPython 2.x without a pid field if ('pid' in info) and check_pid(info['pid']): yield info else: # If the process has died, try to delete its info file try: os.unlink(file) except OSError: pass # TODO: This should warn or log or something #----------------------------------------------------------------------------- # Main entry point #----------------------------------------------------------------------------- launch_new_instance = NotebookApp.launch_instance
./CrossVul/dataset_final_sorted/CWE-79/py/good_1727_0
crossvul-python_data_good_447_0
"""Tornado handlers for nbconvert.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import io import os import zipfile from tornado import web, escape from tornado.log import app_log from ..base.handlers import ( IPythonHandler, FilesRedirectHandler, path_regex, ) from nbformat import from_dict from ipython_genutils.py3compat import cast_bytes from ipython_genutils import text def find_resource_files(output_files_dir): files = [] for dirpath, dirnames, filenames in os.walk(output_files_dir): files.extend([os.path.join(dirpath, f) for f in filenames]) return files def respond_zip(handler, name, output, resources): """Zip up the output and resource files and respond with the zip file. Returns True if it has served a zip file, False if there are no resource files, in which case we serve the plain output file. """ # Check if we have resource files we need to zip output_files = resources.get('outputs', None) if not output_files: return False # Headers zip_filename = os.path.splitext(name)[0] + '.zip' handler.set_attachment_header(zip_filename) handler.set_header('Content-Type', 'application/zip') handler.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0') # Prepare the zip file buffer = io.BytesIO() zipf = zipfile.ZipFile(buffer, mode='w', compression=zipfile.ZIP_DEFLATED) output_filename = os.path.splitext(name)[0] + resources['output_extension'] zipf.writestr(output_filename, cast_bytes(output, 'utf-8')) for filename, data in output_files.items(): zipf.writestr(os.path.basename(filename), data) zipf.close() handler.finish(buffer.getvalue()) return True def get_exporter(format, **kwargs): """get an exporter, raising appropriate errors""" # if this fails, will raise 500 try: from nbconvert.exporters.base import get_exporter except ImportError as e: raise web.HTTPError(500, "Could not import nbconvert: %s" % e) try: Exporter = get_exporter(format) except KeyError: # should this be 400? 
raise web.HTTPError(404, u"No exporter for format: %s" % format) try: return Exporter(**kwargs) except Exception as e: app_log.exception("Could not construct Exporter: %s", Exporter) raise web.HTTPError(500, "Could not construct Exporter: %s" % e) class NbconvertFileHandler(IPythonHandler): SUPPORTED_METHODS = ('GET',) @property def content_security_policy(self): # In case we're serving HTML/SVG, confine any Javascript to a unique # origin so it can't interact with the notebook server. return super(NbconvertFileHandler, self).content_security_policy + \ "; sandbox allow-scripts" @web.authenticated def get(self, format, path): exporter = get_exporter(format, config=self.config, log=self.log) path = path.strip('/') # If the notebook relates to a real file (default contents manager), # give its path to nbconvert. if hasattr(self.contents_manager, '_get_os_path'): os_path = self.contents_manager._get_os_path(path) ext_resources_dir, basename = os.path.split(os_path) else: ext_resources_dir = None model = self.contents_manager.get(path=path) name = model['name'] if model['type'] != 'notebook': # not a notebook, redirect to files return FilesRedirectHandler.redirect_to_files(self, path) nb = model['content'] self.set_header('Last-Modified', model['last_modified']) # create resources dictionary mod_date = model['last_modified'].strftime(text.date_format) nb_title = os.path.splitext(name)[0] resource_dict = { "metadata": { "name": nb_title, "modified_date": mod_date }, "config_dir": self.application.settings['config_dir'] } if ext_resources_dir: resource_dict['metadata']['path'] = ext_resources_dir try: output, resources = exporter.from_notebook_node( nb, resources=resource_dict ) except Exception as e: self.log.exception("nbconvert failed: %s", e) raise web.HTTPError(500, "nbconvert failed: %s" % e) if respond_zip(self, name, output, resources): return # Force download if requested if self.get_argument('download', 'false').lower() == 'true': filename = 
os.path.splitext(name)[0] + resources['output_extension'] self.set_attachment_header(filename) # MIME type if exporter.output_mimetype: self.set_header('Content-Type', '%s; charset=utf-8' % exporter.output_mimetype) self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0') self.finish(output) class NbconvertPostHandler(IPythonHandler): SUPPORTED_METHODS = ('POST',) @property def content_security_policy(self): # In case we're serving HTML/SVG, confine any Javascript to a unique # origin so it can't interact with the notebook server. return super(NbconvertPostHandler, self).content_security_policy + \ "; sandbox allow-scripts" @web.authenticated def post(self, format): exporter = get_exporter(format, config=self.config) model = self.get_json_body() name = model.get('name', 'notebook.ipynb') nbnode = from_dict(model['content']) try: output, resources = exporter.from_notebook_node(nbnode, resources={ "metadata": {"name": name[:name.rfind('.')],}, "config_dir": self.application.settings['config_dir'], }) except Exception as e: raise web.HTTPError(500, "nbconvert failed: %s" % e) if respond_zip(self, name, output, resources): return # MIME type if exporter.output_mimetype: self.set_header('Content-Type', '%s; charset=utf-8' % exporter.output_mimetype) self.finish(output) #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- _format_regex = r"(?P<format>\w+)" default_handlers = [ (r"/nbconvert/%s" % _format_regex, NbconvertPostHandler), (r"/nbconvert/%s%s" % (_format_regex, path_regex), NbconvertFileHandler), ]
./CrossVul/dataset_final_sorted/CWE-79/py/good_447_0
crossvul-python_data_good_5191_1
from __future__ import unicode_literals import os import re import sys import types from django.conf import settings from django.core.urlresolvers import Resolver404, resolve from django.http import ( HttpRequest, HttpResponse, HttpResponseNotFound, build_request_repr, ) from django.template import Context, Engine, TemplateDoesNotExist from django.template.defaultfilters import force_escape, pprint from django.utils import lru_cache, six, timezone from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_bytes, smart_text from django.utils.html import escape from django.utils.module_loading import import_string from django.utils.translation import ugettext as _ # Minimal Django templates engine to render the error templates # regardless of the project's TEMPLATES setting. DEBUG_ENGINE = Engine(debug=True) HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE') CLEANSED_SUBSTITUTE = '********************' def linebreak_iter(template_source): yield 0 p = template_source.find('\n') while p >= 0: yield p + 1 p = template_source.find('\n', p + 1) yield len(template_source) + 1 class CallableSettingWrapper(object): """ Object to wrap callable appearing in settings * Not to call in the debug page (#21345). * Not to break the debug page if the callable forbidding to set attributes (#23070). """ def __init__(self, callable_setting): self._wrapped = callable_setting def __repr__(self): return repr(self._wrapped) def cleanse_setting(key, value): """Cleanse an individual setting key/value of sensitive content. If the value is a dictionary, recursively cleanse the keys in that dictionary. """ try: if HIDDEN_SETTINGS.search(key): cleansed = CLEANSED_SUBSTITUTE else: if isinstance(value, dict): cleansed = {k: cleanse_setting(k, v) for k, v in value.items()} else: cleansed = value except TypeError: # If the key isn't regex-able, just return as-is. 
cleansed = value if callable(cleansed): # For fixing #21345 and #23070 cleansed = CallableSettingWrapper(cleansed) return cleansed def get_safe_settings(): "Returns a dictionary of the settings module, with sensitive settings blurred out." settings_dict = {} for k in dir(settings): if k.isupper(): settings_dict[k] = cleanse_setting(k, getattr(settings, k)) return settings_dict def technical_500_response(request, exc_type, exc_value, tb, status_code=500): """ Create a technical server error response. The last three arguments are the values returned from sys.exc_info() and friends. """ reporter = ExceptionReporter(request, exc_type, exc_value, tb) if request.is_ajax(): text = reporter.get_traceback_text() return HttpResponse(text, status=status_code, content_type='text/plain') else: html = reporter.get_traceback_html() return HttpResponse(html, status=status_code, content_type='text/html') @lru_cache.lru_cache() def get_default_exception_reporter_filter(): # Instantiate the default filter for the first time and cache it. return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)() def get_exception_reporter_filter(request): default_filter = get_default_exception_reporter_filter() return getattr(request, 'exception_reporter_filter', default_filter) class ExceptionReporterFilter(object): """ Base for all exception reporter filter classes. All overridable hooks contain lenient default behaviors. """ def get_request_repr(self, request): if request is None: return repr(None) else: return build_request_repr(request, POST_override=self.get_post_parameters(request)) def get_post_parameters(self, request): if request is None: return {} else: return request.POST def get_traceback_frame_variables(self, request, tb_frame): return list(six.iteritems(tb_frame.f_locals)) class SafeExceptionReporterFilter(ExceptionReporterFilter): """ Use annotations made by the sensitive_post_parameters and sensitive_variables decorators to filter out sensitive information. 
""" def is_active(self, request): """ This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis. """ return settings.DEBUG is False def get_cleansed_multivaluedict(self, request, multivaluedict): """ Replaces the keys in a MultiValueDict marked as sensitive with stars. This mitigates leaking sensitive POST parameters if something like request.POST['nonexistent_key'] throws an exception (#21098). """ sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: multivaluedict = multivaluedict.copy() for param in sensitive_post_parameters: if param in multivaluedict: multivaluedict[param] = CLEANSED_SUBSTITUTE return multivaluedict def get_post_parameters(self, request): """ Replaces the values of POST parameters marked as sensitive with stars (*********). """ if request is None: return {} else: sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: cleansed = request.POST.copy() if sensitive_post_parameters == '__ALL__': # Cleanse all parameters. for k, v in cleansed.items(): cleansed[k] = CLEANSED_SUBSTITUTE return cleansed else: # Cleanse only the specified parameters. for param in sensitive_post_parameters: if param in cleansed: cleansed[param] = CLEANSED_SUBSTITUTE return cleansed else: return request.POST def cleanse_special_types(self, request, value): if isinstance(value, HttpRequest): # Cleanse the request's POST parameters. 
value = self.get_request_repr(value) elif isinstance(value, MultiValueDict): # Cleanse MultiValueDicts (request.POST is the one we usually care about) value = self.get_cleansed_multivaluedict(request, value) return value def get_traceback_frame_variables(self, request, tb_frame): """ Replaces the values of variables marked as sensitive with stars (*********). """ # Loop through the frame's callers to see if the sensitive_variables # decorator was used. current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in current_frame.f_locals): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. wrapper = current_frame.f_locals['sensitive_variables_wrapper'] sensitive_variables = getattr(wrapper, 'sensitive_variables', None) break current_frame = current_frame.f_back cleansed = {} if self.is_active(request) and sensitive_variables: if sensitive_variables == '__ALL__': # Cleanse all variables for name, value in tb_frame.f_locals.items(): cleansed[name] = CLEANSED_SUBSTITUTE else: # Cleanse specified variables for name, value in tb_frame.f_locals.items(): if name in sensitive_variables: value = CLEANSED_SUBSTITUTE else: value = self.cleanse_special_types(request, value) cleansed[name] = value else: # Potentially cleanse the request and any MultiValueDicts if they # are one of the frame variables. for name, value in tb_frame.f_locals.items(): cleansed[name] = self.cleanse_special_types(request, value) if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in tb_frame.f_locals): # For good measure, obfuscate the decorated function's arguments in # the sensitive_variables decorator's frame, in case the variables # associated with those arguments were meant to be obfuscated from # the decorated function's frame. 
cleansed['func_args'] = CLEANSED_SUBSTITUTE cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE return cleansed.items() class ExceptionReporter(object): """ A class to organize and coordinate reporting on exceptions. """ def __init__(self, request, exc_type, exc_value, tb, is_email=False): self.request = request self.filter = get_exception_reporter_filter(self.request) self.exc_type = exc_type self.exc_value = exc_value self.tb = tb self.is_email = is_email self.template_info = None self.template_does_not_exist = False self.loader_debug_info = None # Handle deprecated string exceptions if isinstance(self.exc_type, six.string_types): self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type) self.exc_type = type(self.exc_value) def format_path_status(self, path): if not os.path.exists(path): return "File does not exist" if not os.path.isfile(path): return "Not a file" if not os.access(path, os.R_OK): return "File is not readable" return "File exists" def get_traceback_data(self): """Return a dictionary containing traceback information.""" try: default_template_engine = Engine.get_default() except Exception: # Since the debug view must never crash, catch all exceptions. # If Django can't find a default template engine, get_default() # raises ImproperlyConfigured. If some template engines fail to # load, any exception may be raised. default_template_engine = None # TODO: add support for multiple template engines (#24120). # TemplateDoesNotExist should carry all the information. # Replaying the search process isn't a good design. if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist): if default_template_engine is None: template_loaders = [] else: self.template_does_not_exist = True self.loader_debug_info = [] # If Django fails in get_template_loaders, provide an empty list # for the following loop to not fail. 
try: template_loaders = default_template_engine.template_loaders except Exception: template_loaders = [] for loader in template_loaders: try: source_list_func = loader.get_template_sources # NOTE: This assumes exc_value is the name of the template that # the loader attempted to load. template_list = [{ 'name': t, 'status': self.format_path_status(t), } for t in source_list_func(str(self.exc_value))] except AttributeError: template_list = [] loader_name = loader.__module__ + '.' + loader.__class__.__name__ self.loader_debug_info.append({ 'loader': loader_name, 'templates': template_list, }) # TODO: add support for multiple template engines (#24119). if (default_template_engine is not None and default_template_engine.debug and hasattr(self.exc_value, 'django_template_source')): self.get_template_exception_info() frames = self.get_traceback_frames() for i, frame in enumerate(frames): if 'vars' in frame: frame_vars = [] for k, v in frame['vars']: v = pprint(v) # The force_escape filter assume unicode, make sure that works if isinstance(v, six.binary_type): v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input # Trim large blobs of data if len(v) > 4096: v = '%s... 
<trimmed %d bytes string>' % (v[0:4096], len(v)) frame_vars.append((k, force_escape(v))) frame['vars'] = frame_vars frames[i] = frame unicode_hint = '' if self.exc_type and issubclass(self.exc_type, UnicodeError): start = getattr(self.exc_value, 'start', None) end = getattr(self.exc_value, 'end', None) if start is not None and end is not None: unicode_str = self.exc_value.args[1] unicode_hint = smart_text( unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace' ) from django import get_version c = { 'is_email': self.is_email, 'unicode_hint': unicode_hint, 'frames': frames, 'request': self.request, 'filtered_POST': self.filter.get_post_parameters(self.request), 'settings': get_safe_settings(), 'sys_executable': sys.executable, 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3], 'server_time': timezone.now(), 'django_version_info': get_version(), 'sys_path': sys.path, 'template_info': self.template_info, 'template_does_not_exist': self.template_does_not_exist, 'loader_debug_info': self.loader_debug_info, } # Check whether exception info is available if self.exc_type: c['exception_type'] = self.exc_type.__name__ if self.exc_value: c['exception_value'] = smart_text(self.exc_value, errors='replace') if frames: c['lastframe'] = frames[-1] return c def get_traceback_html(self): "Return HTML version of debug 500 HTTP error page." t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE) c = Context(self.get_traceback_data(), use_l10n=False) return t.render(c) def get_traceback_text(self): "Return plain text version of debug 500 HTTP error page." 
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE) c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False) return t.render(c) def get_template_exception_info(self): origin, (start, end) = self.exc_value.django_template_source template_source = origin.reload() context_lines = 10 line = 0 upto = 0 source_lines = [] before = during = after = "" for num, next in enumerate(linebreak_iter(template_source)): if start >= upto and end <= next: line = num before = escape(template_source[upto:start]) during = escape(template_source[start:end]) after = escape(template_source[end:next]) source_lines.append((num, escape(template_source[upto:next]))) upto = next total = len(source_lines) top = max(1, line - context_lines) bottom = min(total, line + 1 + context_lines) # In some rare cases, exc_value.args might be empty. try: message = self.exc_value.args[0] except IndexError: message = '(Could not get exception message)' self.template_info = { 'message': message, 'source_lines': source_lines[top:bottom], 'before': before, 'during': during, 'after': after, 'top': top, 'bottom': bottom, 'total': total, 'line': line, 'name': origin.name, } def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None): """ Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context). """ source = None if loader is not None and hasattr(loader, "get_source"): try: source = loader.get_source(module_name) except ImportError: pass if source is not None: source = source.splitlines() if source is None: try: with open(filename, 'rb') as fp: source = fp.read().splitlines() except (OSError, IOError): pass if source is None: return None, [], None, [] # If we just read the source from a file, or if the loader did not # apply tokenize.detect_encoding to decode the source into a Unicode # string, then we should do that ourselves. 
if isinstance(source[0], six.binary_type): encoding = 'ascii' for line in source[:2]: # File coding may be specified. Match pattern from PEP-263 # (http://www.python.org/dev/peps/pep-0263/) match = re.search(br'coding[:=]\s*([-\w.]+)', line) if match: encoding = match.group(1).decode('ascii') break source = [six.text_type(sline, encoding, 'replace') for sline in source] lower_bound = max(0, lineno - context_lines) upper_bound = lineno + context_lines pre_context = source[lower_bound:lineno] context_line = source[lineno] post_context = source[lineno + 1:upper_bound] return lower_bound, pre_context, context_line, post_context def get_traceback_frames(self): frames = [] tb = self.tb while tb is not None: # Support for __traceback_hide__ which is used by a few libraries # to hide internal frames. if tb.tb_frame.f_locals.get('__traceback_hide__'): tb = tb.tb_next continue filename = tb.tb_frame.f_code.co_filename function = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno - 1 loader = tb.tb_frame.f_globals.get('__loader__') module_name = tb.tb_frame.f_globals.get('__name__') or '' pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file( filename, lineno, 7, loader, module_name, ) if pre_context_lineno is not None: frames.append({ 'tb': tb, 'type': 'django' if module_name.startswith('django.') else 'user', 'filename': filename, 'function': function, 'lineno': lineno + 1, 'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame), 'id': id(tb), 'pre_context': pre_context, 'context_line': context_line, 'post_context': post_context, 'pre_context_lineno': pre_context_lineno + 1, }) tb = tb.tb_next return frames def format_exception(self): """ Return the same data as from traceback.format_exception. 
""" import traceback frames = self.get_traceback_frames() tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames] list = ['Traceback (most recent call last):\n'] list += traceback.format_list(tb) list += traceback.format_exception_only(self.exc_type, self.exc_value) return list def technical_404_response(request, exception): "Create a technical 404 error response. The exception should be the Http404." try: error_url = exception.args[0]['path'] except (IndexError, TypeError, KeyError): error_url = request.path_info[1:] # Trim leading slash try: tried = exception.args[0]['tried'] except (IndexError, TypeError, KeyError): tried = [] else: if (not tried # empty URLconf or (request.path == '/' and len(tried) == 1 # default URLconf and len(tried[0]) == 1 and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')): return default_urlconf(request) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ caller = '' try: resolver_match = resolve(request.path) except Resolver404: pass else: obj = resolver_match.func if hasattr(obj, '__name__'): caller = obj.__name__ elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'): caller = obj.__class__.__name__ if hasattr(obj, '__module__'): module = obj.__module__ caller = '%s.%s' % (module, caller) t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE) c = Context({ 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'reason': force_bytes(exception, errors='replace'), 'request': request, 'settings': get_safe_settings(), 'raising_view_name': caller, }) return HttpResponseNotFound(t.render(c), content_type='text/html') def default_urlconf(request): "Create an empty URLconf 404 error response." 
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE) c = Context({ "title": _("Welcome to Django"), "heading": _("It worked!"), "subheading": _("Congratulations on your first Django-powered page."), "instructions": _("Of course, you haven't actually done any work yet. " "Next, start your first app by running <code>python manage.py startapp [app_label]</code>."), "explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your " "Django settings file and you haven't configured any URLs. Get to work!"), }) return HttpResponse(t.render(c), content_type='text/html') # # Templates are embedded in the file so that we know the error handler will # always work even if the template loader is broken. # TECHNICAL_500_TEMPLATE = (""" <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"> <title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}""" """{% if request %} at {{ request.path_info|escape }}{% endif %}</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; } h2 { margin-bottom:.8em; } h2 span { font-size:80%; color:#666; font-weight:normal; } h3 { margin:1em 0 .5em 0; } h4 { margin:0 0 .5em 0; font-weight: normal; } code, pre { font-size: 100%; white-space: pre-wrap; } table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; } tbody td, tbody th { vertical-align:top; padding:2px 3px; } thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; } tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; } table.vars { margin:5px 0 2px 40px; } table.vars td, table.req td { font-family:monospace; } table td.code { width:100%; } table td.code pre 
{ overflow:hidden; } table.source th { color:#666; } table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; } ul.traceback { list-style-type:none; color: #222; } ul.traceback li.frame { padding-bottom:1em; color:#666; } ul.traceback li.user { background-color:#e0e0e0; color:#000 } div.context { padding:10px 0; overflow:hidden; } div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; } div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; } div.context ol li pre { display:inline; } div.context ol.context-line li { color:#505050; background-color:#dfdfdf; } div.context ol.context-line li span { position:absolute; right:32px; } .user div.context ol.context-line li { background-color:#bbb; color:#000; } .user div.context ol li { color:#666; } div.commands { margin-left: 40px; } div.commands a { color:#555; text-decoration:none; } .user div.commands a { color: black; } #summary { background: #ffc; } #summary h2 { font-weight: normal; color: #666; } #explanation { background:#eee; } #template, #template-not-exist { background:#f6f6f6; } #template-not-exist ul { margin: 0 0 0 20px; } #unicode-hint { background:#eee; } #traceback { background:#eee; } #requestinfo { background:#f6f6f6; padding-left:120px; } #summary table { border:none; background:transparent; } #requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; } #requestinfo h3 { margin-bottom:-1em; } .error { background: #ffc; } .specific { color:#cc3300; font-weight:bold; } h2 span.commands { font-size:.7em;} span.commands a:link {color:#5E5694;} pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; } </style> {% if not is_email %} <script type="text/javascript"> //<!-- function getElementsByClassName(oElm, strTagName, strClassName){ // Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com var arrElements = 
(strTagName == "*" && document.all)? document.all : oElm.getElementsByTagName(strTagName); var arrReturnElements = new Array(); strClassName = strClassName.replace(/\-/g, "\\-"); var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)"); var oElement; for(var i=0; i<arrElements.length; i++){ oElement = arrElements[i]; if(oRegExp.test(oElement.className)){ arrReturnElements.push(oElement); } } return (arrReturnElements) } function hideAll(elems) { for (var e = 0; e < elems.length; e++) { elems[e].style.display = 'none'; } } window.onload = function() { hideAll(getElementsByClassName(document, 'table', 'vars')); hideAll(getElementsByClassName(document, 'ol', 'pre-context')); hideAll(getElementsByClassName(document, 'ol', 'post-context')); hideAll(getElementsByClassName(document, 'div', 'pastebin')); } function toggle() { for (var i = 0; i < arguments.length; i++) { var e = document.getElementById(arguments[i]); if (e) { e.style.display = e.style.display == 'none' ? 'block': 'none'; } } return false; } function varToggle(link, id) { toggle('v' + id); var s = link.getElementsByTagName('span')[0]; var uarr = String.fromCharCode(0x25b6); var darr = String.fromCharCode(0x25bc); s.textContent = s.textContent == uarr ? darr : uarr; return false; } function switchPastebinFriendly(link) { s1 = "Switch to copy-and-paste view"; s2 = "Switch back to interactive view"; link.textContent = link.textContent.trim() == s1 ? 
s2: s1; toggle('browserTraceback', 'pastebinTraceback'); return false; } //--> </script> {% endif %} </head> <body> <div id="summary"> <h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}""" """{% if request %} at {{ request.path_info|escape }}{% endif %}</h1> <pre class="exception_value">""" """{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}""" """</pre> <table class="meta"> {% if request %} <tr> <th>Request Method:</th> <td>{{ request.META.REQUEST_METHOD }}</td> </tr> <tr> <th>Request URL:</th> <td>{{ request.build_absolute_uri|escape }}</td> </tr> {% endif %} <tr> <th>Django Version:</th> <td>{{ django_version_info }}</td> </tr> {% if exception_type %} <tr> <th>Exception Type:</th> <td>{{ exception_type }}</td> </tr> {% endif %} {% if exception_type and exception_value %} <tr> <th>Exception Value:</th> <td><pre>{{ exception_value|force_escape }}</pre></td> </tr> {% endif %} {% if lastframe %} <tr> <th>Exception Location:</th> <td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td> </tr> {% endif %} <tr> <th>Python Executable:</th> <td>{{ sys_executable|escape }}</td> </tr> <tr> <th>Python Version:</th> <td>{{ sys_version_info }}</td> </tr> <tr> <th>Python Path:</th> <td><pre>{{ sys_path|pprint }}</pre></td> </tr> <tr> <th>Server time:</th> <td>{{server_time|date:"r"}}</td> </tr> </table> </div> {% if unicode_hint %} <div id="unicode-hint"> <h2>Unicode error hint</h2> <p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p> </div> {% endif %} {% if template_does_not_exist %} <div id="template-not-exist"> <h2>Template-loader postmortem</h2> {% if loader_debug_info %} <p>Django tried loading these templates, in this order:</p> <ul> {% for loader in loader_debug_info %} <li>Using loader <code>{{ loader.loader }}</code>: <ul> {% for t in loader.templates %}<li><code>{{ t.name }}</code> ({{ 
t.status }})</li>{% endfor %} </ul> </li> {% endfor %} </ul> {% else %} <p>Django couldn't find any templates because your <code>'loaders'</code> option is empty!</p> {% endif %} </div> {% endif %} {% if template_info %} <div id="template"> <h2>Error during template rendering</h2> <p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p> <h3>{{ template_info.message }}</h3> <table class="source{% if template_info.top %} cut-top{% endif %} {% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}"> {% for source_line in template_info.source_lines %} {% ifequal source_line.0 template_info.line %} <tr class="error"><th>{{ source_line.0 }}</th> <td> {{ template_info.before }} <span class="specific">{{ template_info.during }}</span> {{ template_info.after }} </td> </tr> {% else %} <tr><th>{{ source_line.0 }}</th> <td>{{ source_line.1 }}</td></tr> {% endifequal %} {% endfor %} </table> </div> {% endif %} {% if frames %} <div id="traceback"> <h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);"> Switch to copy-and-paste view</a></span>{% endif %} </h2> {% autoescape off %} <div id="browserTraceback"> <ul class="traceback"> {% for frame in frames %} <li class="frame {{ frame.type }}"> <code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code> {% if frame.context_line %} <div class="context" id="c{{ frame.id }}"> {% if frame.pre_context and not is_email %} <ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}"> {% for line in frame.pre_context %} <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li> {% endfor %} </ol> {% endif %} <ol start="{{ frame.lineno }}" class="context-line"> <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre> {{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% 
endif %}</li></ol> {% if frame.post_context and not is_email %} <ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}"> {% for line in frame.post_context %} <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li> {% endfor %} </ol> {% endif %} </div> {% endif %} {% if frame.vars %} <div class="commands"> {% if is_email %} <h2>Local Vars</h2> {% else %} <a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>&#x25b6;</span> Local vars</a> {% endif %} </div> <table class="vars" id="v{{ frame.id }}"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in frame.vars|dictsort:"0" %} <tr> <td>{{ var.0|force_escape }}</td> <td class="code"><pre>{{ var.1 }}</pre></td> </tr> {% endfor %} </tbody> </table> {% endif %} </li> {% endfor %} </ul> </div> {% endautoescape %} <form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post"> {% if not is_email %} <div id="pastebinTraceback" class="pastebin"> <input type="hidden" name="language" value="PythonConsole"> <input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}"> <input type="hidden" name="source" value="Django Dpaste Agent"> <input type="hidden" name="poster" value="Django"> <textarea name="content" id="traceback_area" cols="140" rows="25"> Environment: {% if request %} Request Method: {{ request.META.REQUEST_METHOD }} Request URL: {{ request.build_absolute_uri|escape }} {% endif %} Django Version: {{ django_version_info }} Python Version: {{ sys_version_info }} Installed Applications: {{ settings.INSTALLED_APPS|pprint }} Installed Middleware: {{ settings.MIDDLEWARE_CLASSES|pprint }} {% if template_does_not_exist %}Template Loader Error: {% if loader_debug_info %}Django tried loading these templates, in this order: {% for loader in loader_debug_info %}Using loader {{ loader.loader }}: {% for t in loader.templates %}{{ 
t.name }} ({{ t.status }}) {% endfor %}{% endfor %} {% else %}Django couldn't find any templates because your 'loaders' option is empty! {% endif %} {% endif %}{% if template_info %} Template error: In template {{ template_info.name }}, error at line {{ template_info.line }} {{ template_info.message }}{% for source_line in template_info.source_lines %} {% ifequal source_line.0 template_info.line %} {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }} {% else %} {{ source_line.0 }} : {{ source_line.1 }} {% endifequal %}{% endfor %}{% endif %} Traceback: {% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }} {% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %} {% endfor %} Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %} Exception Value: {{ exception_value|force_escape }} </textarea> <br><br> <input type="submit" value="Share this traceback on a public Web site"> </div> </form> </div> {% endif %} {% endif %} <div id="requestinfo"> <h2>Request information</h2> {% if request %} <h3 id="get-info">GET</h3> {% if request.GET %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.GET.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No GET data</p> {% endif %} <h3 id="post-info">POST</h3> {% if filtered_POST %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in filtered_POST.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No POST data</p> {% endif %} <h3 id="files-info">FILES</h3> {% if request.FILES %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in 
request.FILES.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No FILES data</p> {% endif %} <h3 id="cookie-info">COOKIES</h3> {% if request.COOKIES %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.COOKIES.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No cookie data</p> {% endif %} <h3 id="meta-info">META</h3> <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.META.items|dictsort:"0" %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>Request data not supplied</p> {% endif %} <h3 id="settings-info">Settings</h3> <h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4> <table class="req"> <thead> <tr> <th>Setting</th> <th>Value</th> </tr> </thead> <tbody> {% for var in settings.items|dictsort:"0" %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> </div> {% if not is_email %} <div id="explanation"> <p> You're seeing this error because you have <code>DEBUG = True</code> in your Django settings file. Change that to <code>False</code>, and Django will display a standard page generated by the handler for this status code. 
</p> </div> {% endif %} </body> </html> """) TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %} {% firstof exception_value 'No exception message supplied' %} {% if request %} Request Method: {{ request.META.REQUEST_METHOD }} Request URL: {{ request.build_absolute_uri }}{% endif %} Django Version: {{ django_version_info }} Python Executable: {{ sys_executable }} Python Version: {{ sys_version_info }} Python Path: {{ sys_path }} Server time: {{server_time|date:"r"}} Installed Applications: {{ settings.INSTALLED_APPS|pprint }} Installed Middleware: {{ settings.MIDDLEWARE_CLASSES|pprint }} {% if template_does_not_exist %}Template loader Error: {% if loader_debug_info %}Django tried loading these templates, in this order: {% for loader in loader_debug_info %}Using loader {{ loader.loader }}: {% for t in loader.templates %}{{ t.name }} ({{ t.status }}) {% endfor %}{% endfor %} {% else %}Django couldn't find any templates because your 'loaders' option is empty! {% endif %} {% endif %}{% if template_info %} Template error: In template {{ template_info.name }}, error at line {{ template_info.line }} {{ template_info.message }}{% for source_line in template_info.source_lines %} {% ifequal source_line.0 template_info.line %} {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }} {% else %} {{ source_line.0 }} : {{ source_line.1 }} {% endifequal %}{% endfor %}{% endif %}{% if frames %} Traceback: {% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }} {% if frame.context_line %} {{ frame.lineno }}. 
{{ frame.context_line }}{% endif %} {% endfor %} {% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %} {% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %} {% if request %}Request information: GET:{% for k, v in request.GET.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %} POST:{% for k, v in filtered_POST.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %} FILES:{% for k, v in request.FILES.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %} COOKIES:{% for k, v in request.COOKIES.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %} META:{% for k, v in request.META.items|dictsort:"0" %} {{ k }} = {{ v|stringformat:"r" }}{% endfor %} {% else %}Request data not supplied {% endif %} Settings: Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %} {{ k }} = {{ v|stringformat:"r" }}{% endfor %} You're seeing this error because you have DEBUG = True in your Django settings file. Change that to False, and Django will display a standard page generated by the handler for this status code. 
""" TECHNICAL_404_TEMPLATE = """ <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <title>Page not found at {{ request.path_info|escape }}</title> <meta name="robots" content="NONE,NOARCHIVE"> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; background:#eee; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; margin-bottom:.4em; } h1 span { font-size:60%; color:#666; font-weight:normal; } table { border:none; border-collapse: collapse; width:100%; } td, th { vertical-align:top; padding:2px 3px; } th { width:12em; text-align:right; color:#666; padding-right:.5em; } #info { background:#f6f6f6; } #info ol { margin: 0.5em 4em; } #info ol li { font-family: monospace; } #summary { background: #ffc; } #explanation { background:#eee; border-bottom: 0px none; } </style> </head> <body> <div id="summary"> <h1>Page not found <span>(404)</span></h1> <table class="meta"> <tr> <th>Request Method:</th> <td>{{ request.META.REQUEST_METHOD }}</td> </tr> <tr> <th>Request URL:</th> <td>{{ request.build_absolute_uri|escape }}</td> </tr> {% if raising_view_name %} <tr> <th>Raised by:</th> <td>{{ raising_view_name }}</td> </tr> {% endif %} </table> </div> <div id="info"> {% if urlpatterns %} <p> Using the URLconf defined in <code>{{ urlconf }}</code>, Django tried these URL patterns, in this order: </p> <ol> {% for pattern in urlpatterns %} <li> {% for pat in pattern %} {{ pat.regex.pattern }} {% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %} {% endfor %} </li> {% endfor %} </ol> <p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p> {% else %} <p>{{ reason }}</p> {% endif %} </div> <div id="explanation"> <p> You're seeing this error because you have <code>DEBUG = True</code> in your Django settings file. 
Change that to <code>False</code>, and Django will display a standard 404 page. </p> </div> </body> </html> """ DEFAULT_URLCONF_TEMPLATE = """ <!DOCTYPE html> <html lang="en"><head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; } h2 { margin-bottom:.8em; } h2 span { font-size:80%; color:#666; font-weight:normal; } h3 { margin:1em 0 .5em 0; } h4 { margin:0 0 .5em 0; font-weight: normal; } table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; } tbody td, tbody th { vertical-align:top; padding:2px 3px; } thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; } tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; } #summary { background: #e0ebff; } #summary h2 { font-weight: normal; color: #666; } #explanation { background:#eee; } #instructions { background:#f6f6f6; } #summary table { border:none; background:transparent; } </style> </head> <body> <div id="summary"> <h1>{{ heading }}</h1> <h2>{{ subheading }}</h2> </div> <div id="instructions"> <p> {{ instructions|safe }} </p> </div> <div id="explanation"> <p> {{ explanation|safe }} </p> </div> </body></html> """
./CrossVul/dataset_final_sorted/CWE-79/py/good_5191_1
crossvul-python_data_bad_5525_1
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Zope-specific Python Expression Handler

Handler for Python expressions that uses the RestrictedPython package.

$Id$
"""

from AccessControl import safe_builtins
from AccessControl.ZopeGuards import guarded_getattr, get_safe_globals
from RestrictedPython import compile_restricted_eval
from zope.tales.tales import CompilerError
from zope.tales.pythonexpr import PythonExpr


class PythonExpr(PythonExpr):
    """TALES ``python:`` expression compiled and evaluated under
    RestrictedPython so page-template authors cannot escape the
    AccessControl sandbox.

    Subclasses (and shadows) ``zope.tales.pythonexpr.PythonExpr``,
    replacing its plain ``compile``/``eval`` with restricted variants.
    """

    # Shared evaluation globals for every compiled expression: the
    # AccessControl-safe builtins plus the guarded attribute-access hook
    # that RestrictedPython bytecode calls for every ``obj.attr``.
    _globals = get_safe_globals()
    _globals['_getattr_'] = guarded_getattr
    _globals['__debug__' ] = __debug__

    def __init__(self, name, expr, engine):
        """Compile *expr* with RestrictedPython.

        :param name: expression type name (e.g. ``'python'``); unused here
            beyond the base-class signature.
        :param expr: the expression source text.
        :param engine: TALES engine; its compiler-error class is raised on
            compile failure.
        :raises: ``engine.getCompilerError()`` when RestrictedPython reports
            compile errors.
        """
        # Newlines are legal in TAL attribute values; collapse them so the
        # text compiles as a single expression.
        self.text = self.expr = text = expr.strip().replace('\n', ' ')
        # Unicode expressions are not handled properly by RestrictedPython
        # We convert the expression to UTF-8 (ajung)
        if isinstance(text, unicode):
            text = text.encode('utf-8')
        # compile_restricted_eval returns (code, errors, warnings, used-names).
        code, err, warn, use = compile_restricted_eval(text, self.__class__.__name__)
        if err:
            raise engine.getCompilerError()('Python expression error:\n%s' % '\n'.join(err))
        # Names the expression references; the base class binds these from
        # the TALES context at call time.
        self._varnames = use.keys()
        self._code = code

    def __call__(self, econtext):
        """Evaluate the compiled expression against the TALES context.

        The sandbox globals are applied *after* the context bindings so the
        guard functions (``_getattr_`` etc.) cannot be shadowed by template
        variables.
        """
        __traceback_info__ = self.text
        vars = self._bind_used_names(econtext, {})
        vars.update(self._globals)
        return eval(self._code, vars, {})


class _SecureModuleImporter:
    """Mapping-style importer (``modules['a.b.c']``) that routes through the
    AccessControl-guarded ``__import__`` so only whitelisted modules load."""

    __allow_access_to_unprotected_subobjects__ = True

    def __getitem__(self, module):
        # Guarded __import__ returns the top-level package; walk the dotted
        # path manually to reach the requested submodule.
        mod = safe_builtins['__import__'](module)
        path = module.split('.')
        for name in path[1:]:
            mod = getattr(mod, name)
        return mod


from DocumentTemplate.DT_Util import TemplateDict, InstanceDict
from AccessControl.DTML import RestrictedDTML


class Rtd(RestrictedDTML, TemplateDict):
    """Restricted DTML namespace stack with a ``this`` slot for the
    acquisition context."""
    this = None


def call_with_ns(f, ns, arg=1):
    """Call DTML-style callable *f* with a TemplateDict built from the TALES
    namespace *ns*.

    :param f: the callable; invoked as ``f(td)`` or, when ``arg == 2``, in
        the two-argument DTML convention ``f(None, td)``.
    :param ns: TALES namespace dict; ``context``/``here`` and ``request``
        are extracted from it.
    :param arg: calling convention selector (1 or 2).

    The three pushes (request, instance dict, namespace) are popped in
    ``finally`` so the shared stack is always rebalanced.
    """
    td = Rtd()
    # prefer 'context' to 'here'; fall back to 'None'
    this = ns.get('context', ns.get('here'))
    td.this = this
    request = ns.get('request', {})
    td._push(request)
    td._push(InstanceDict(td.this, td))
    td._push(ns)
    try:
        if arg==2:
            return f(None, td)
        else:
            return f(td)
    finally:
        td._pop(3)
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5525_1
crossvul-python_data_bad_4389_0
from typing import Any, List

import bleach

from .rest_api import ValidationError


# Tags that are considered safe for content entered by normal users.
allowed_tags_strict = [
    "a", "img",  # links and images
    "br", "p", "span", "blockquote",  # text layout
    "strike", "del", "ins", "strong", "u", "em", "sup", "sub", "pre",  # text formatting
    "h1", "h2", "h3", "h4", "h5", "h6",  # headings
    "ol", "ul", "li",  # lists
    "table", "caption", "thead", "tbody", "th", "tr", "td",  # tables
    "div",
]

# Additional tags that only trusted users (admins) may use.
allowed_tags_permissive = allowed_tags_strict + [
    "video",
]


def allow_all(tag: str, name: str, value: str) -> bool:
    # Retained for backward compatibility with importers of this module.
    # SECURITY: do NOT use as a bleach attribute filter — accepting every
    # attribute lets event handlers like ``onerror``/``onclick`` through,
    # which defeats sanitization entirely (stored XSS).
    return True


# SECURITY FIX: previously this was ``allowed_attributes = allow_all``,
# which told bleach to keep *every* attribute on every allowed tag,
# including javascript event handlers. Use an explicit whitelist instead;
# bleach accepts a dict mapping tag name (or "*" wildcard) to a list of
# permitted attribute names. "style" stays allowed because its values are
# still filtered through ``allowed_styles`` below.
allowed_attributes = {
    "*": ["class", "style", "title"],
    "a": ["href", "rel", "target"],
    "img": ["src", "alt", "width", "height"],
    "th": ["scope", "colspan", "rowspan"],
    "td": ["colspan", "rowspan"],
    "ol": ["start", "type"],
    "video": ["src", "controls", "width", "height", "poster", "preload"],
}

# CSS properties that survive sanitization of the ``style`` attribute.
allowed_styles = [
    "color",
    "background-color",
    "height",
    "width",
    "text-align",
    "vertical-align",
    "float",
    "text-decoration",
    "margin",
    "padding",
    "line-height",
    "max-width",
    "min-width",
    "max-height",
    "min-height",
    "overflow",
    "word-break",
    "word-wrap",
]


def validate_html_strict(html: str) -> str:
    """
    This method takes a string and escapes all non-whitelisted html entries.
    Every field of a model that is loaded trusted in the DOM should be
    validated.
    During copy and paste from Word maybe some tabs are spread over the
    html. Remove them.
    """
    return base_validate_html(html, allowed_tags_strict)


def validate_html_permissive(html: str) -> str:
    """
    See validate_html_strict, but allows some more tags, like iframes and
    videos. Do not use on validation for normal users, only for admins!
    """
    return base_validate_html(html, allowed_tags_permissive)


def base_validate_html(html: str, allowed_tags: List[str]) -> str:
    """
    For internal use only.

    Strips tab characters (Word paste artifacts), then escapes every tag,
    attribute and inline style not on the whitelists above.
    """
    html = html.replace("\t", "")
    return bleach.clean(
        html, tags=allowed_tags, attributes=allowed_attributes, styles=allowed_styles
    )


def validate_json(json: Any, max_depth: int) -> Any:
    """
    Traverses through the JSON structure (dicts and lists) and runs
    validate_html_strict on every found string. Give max-depth to protect
    against stack-overflows. This should be the maximum nested depth of the
    object expected.

    :raises ValidationError: when the structure is nested deeper than
        ``max_depth``.
    """
    if max_depth == 0:
        raise ValidationError({"detail": "The JSON is too nested."})

    if isinstance(json, dict):
        return {key: validate_json(value, max_depth - 1) for key, value in json.items()}
    if isinstance(json, list):
        return [validate_json(item, max_depth - 1) for item in json]
    if isinstance(json, str):
        return validate_html_strict(json)

    return json
./CrossVul/dataset_final_sorted/CWE-79/py/bad_4389_0
crossvul-python_data_good_1644_7
import json

from tornado import web

from ...base.handlers import APIHandler, json_errors


class NbconvertRootHandler(APIHandler):
    """API handler that lists the available nbconvert export formats.

    GET returns a JSON object mapping each exporter name to a small info
    dict (currently only its output MIME type).
    """

    SUPPORTED_METHODS = ('GET',)

    @web.authenticated
    @json_errors
    def get(self):
        # nbconvert is an optional dependency; surface a clear 500 if the
        # import fails rather than a bare traceback.
        try:
            from IPython.nbconvert.exporters.export import exporter_map
        except ImportError as e:
            raise web.HTTPError(500, "Could not import nbconvert: %s" % e)
        payload = {
            fmt: {'output_mimetype': exporter.output_mimetype}
            for fmt, exporter in exporter_map.items()
        }
        self.finish(json.dumps(payload))


default_handlers = [
    (r"/api/nbconvert", NbconvertRootHandler),
]
./CrossVul/dataset_final_sorted/CWE-79/py/good_1644_7
crossvul-python_data_bad_4953_0
from __future__ import unicode_literals

import base64
import calendar
import datetime
import re
import sys
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate

from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.functional import keep_lazy_text
from django.utils.six.moves.urllib.parse import (
    quote, quote_plus, unquote, unquote_plus, urlencode as original_urlencode,
    urlparse,
)

# Matches one (optionally weak, "W/"-prefixed) double-quoted ETag and
# captures its contents; backslash escapes are allowed inside the quotes.
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')

MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
# Named-group fragments used to assemble the three HTTP date regexes below.
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
# The three date formats HTTP RFC2616 section 3.3.1 requires servers to accept.
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))

# RFC 3986 delimiter character classes (kept as native str on both Pythons).
RFC3986_GENDELIMS = str(":/?#[]@")
RFC3986_SUBDELIMS = str("!$&'()*+,;=")

# Default ports per scheme.
PROTOCOL_TO_PORT = {
    'http': 80,
    'https': 443,
}


@keep_lazy_text
def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned
    string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_text(quote(force_str(url), force_str(safe)))


@keep_lazy_text
def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_text(quote_plus(force_str(url), force_str(safe)))


@keep_lazy_text
def urlunquote(quoted_url):
    """
    A wrapper for Python's urllib.unquote() function that can operate on
    the result of django.utils.http.urlquote().
    """
    return force_text(unquote(force_str(quoted_url)))


@keep_lazy_text
def urlunquote_plus(quoted_url):
    """
    A wrapper for Python's urllib.unquote_plus() function that can operate on
    the result of django.utils.http.urlquote_plus().
    """
    return force_text(unquote_plus(force_str(quoted_url)))


def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings
    and then encoded as per normal.
    """
    # Normalize the input to a sequence of (key, value) pairs; a
    # MultiValueDict's lists() keeps multiple values per key.
    if isinstance(query, MultiValueDict):
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    return original_urlencode(
        [(force_str(k),
         [force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
            for k, v in query],
        doseq)


def cookie_date(epoch_seconds=None):
    """
    Formats the time to ensure compatibility with Netscape's cookie standard.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    # formatdate() emits RFC 1123 ("Wdy, DD Mon YYYY ..."); splice in the
    # dashes the Netscape cookie format requires.
    rfcdate = formatdate(epoch_seconds)
    return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])


def http_date(epoch_seconds=None):
    """
    Formats the time to match the RFC1123 date format as specified by HTTP
    RFC2616 section 3.3.1.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    return formatdate(epoch_seconds, usegmt=True)


def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC2616 section 3.3.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Returns an integer expressed in seconds since the epoch, in UTC.
    """
    # emails.Util.parsedate does the job for RFC1123 dates; unfortunately
    # RFC2616 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # RFC850 uses two-digit years; pick the century per the usual
            # 70-pivot convention.
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        min = int(m.group('min'))
        sec = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, min, sec)
        return calendar.timegm(result.utctimetuple())
    except Exception:
        # Re-raise as ValueError while preserving the original traceback.
        six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])


def parse_http_date_safe(date):
    """
    Same as parse_http_date, but returns None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        pass


# Base 36 functions: useful for generating compact URLs

def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is long than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value will fit into an int to avoid
    # returning a long (#15067). The long type was removed in Python 3.
    if six.PY2 and value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value


def int_to_base36(i):
    """
    Converts an integer to a base36 string
    """
    char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if six.PY2:
        if not isinstance(i, six.integer_types):
            raise TypeError("Non-integer base36 conversion input.")
        if i > sys.maxint:
            raise ValueError("Base36 conversion input too large.")
    if i < 36:
        return char_set[i]
    b36 = ''
    # Repeated divmod builds the digits least-significant first; prepend to
    # keep the final string in the right order.
    while i != 0:
        i, n = divmod(i, 36)
        b36 = char_set[n] + b36
    return b36


def urlsafe_base64_encode(s):
    """
    Encodes a bytestring in base64 for use in URLs, stripping any trailing
    equal signs.
    """
    return base64.urlsafe_b64encode(s).rstrip(b'\n=')


def urlsafe_base64_decode(s):
    """
    Decodes a base64 encoded string, adding back any trailing equal signs that
    might have been stripped.
    """
    s = force_bytes(s)
    try:
        # Re-pad to a multiple of 4 before decoding.
        return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
    except (LookupError, BinasciiError) as e:
        raise ValueError(e)


def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
    return etags


def quote_etag(etag):
    """
    Wraps a string in double quotes escaping contents as necessary.
    """
    return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')


def unquote_etag(etag):
    """
    Unquote an ETag string; i.e. revert quote_etag().
    """
    return etag.strip('"').replace('\\"', '"').replace('\\\\', '\\') if etag else etag


def is_same_domain(host, pattern):
    """
    Return ``True`` if the host is either an exact match or a match
    to the wildcard pattern.

    Any pattern beginning with a period matches a domain and all of its
    subdomains. (e.g. ``.example.com`` matches ``example.com`` and
    ``foo.example.com``). Anything else is an exact string match.
    """
    if not pattern:
        return False

    # NOTE(review): only the pattern is lowercased here — callers appear to
    # be expected to pass an already-normalized host; confirm at call sites.
    pattern = pattern.lower()
    return (
        pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
        pattern == host
    )


def is_safe_url(url, host=None):
    """
    Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
    a different host and uses a safe scheme).

    Always returns ``False`` on an empty url.
    """
    if url is not None:
        url = url.strip()
    if not url:
        return False
    # Chrome treats \ completely as /
    url = url.replace('\\', '/')
    # Chrome considers any URL with more than two slashes to be absolute, but
    # urlparse is not so flexible. Treat any url with three slashes as unsafe.
    if url.startswith('///'):
        return False
    url_info = urlparse(url)
    # Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but, a path component. However,
    # Chrome will still consider example.com to be the hostname, so we must not
    # allow this syntax.
    if not url_info.netloc and url_info.scheme:
        return False
    # Forbid URLs that start with control characters. Some browsers (like
    # Chrome) ignore quite a few control characters at the start of a
    # URL and might consider the URL as scheme relative.
    if unicodedata.category(url[0])[0] == 'C':
        return False
    # Safe when it is relative (no netloc) or stays on the given host, AND
    # is either scheme-relative or uses plain http/https.
    return ((not url_info.netloc or url_info.netloc == host) and
            (not url_info.scheme or url_info.scheme in ['http', 'https']))
./CrossVul/dataset_final_sorted/CWE-79/py/bad_4953_0
crossvul-python_data_bad_5788_1
from __future__ import unicode_literals

import calendar
import datetime
import re
import sys

try:
    from urllib import parse as urllib_parse
except ImportError:     # Python 2
    import urllib as urllib_parse
    import urlparse
    urllib_parse.urlparse = urlparse.urlparse

from email.utils import formatdate

from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six

# Matches a (possibly weak, ``W/``-prefixed) double-quoted ETag value as
# defined by RFC 2616; group 1 is the quoted content with escapes intact.
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')

MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
# Building blocks for the three HTTP date formats of RFC 2616 sec. 3.3.1.
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))


def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned
    string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_text(urllib_parse.quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)


def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_text(urllib_parse.quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)


def urlunquote(quoted_url):
    """
    A wrapper for Python's urllib.unquote() function that can operate on
    the result of django.utils.http.urlquote().
    """
    return force_text(urllib_parse.unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)


def urlunquote_plus(quoted_url):
    """
    A wrapper for Python's urllib.unquote_plus() function that can operate on
    the result of django.utils.http.urlquote_plus().
    """
    return force_text(urllib_parse.unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)


def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings
    and then encoded as per normal.
    """
    # Normalize the three accepted shapes (MultiValueDict, mapping, sequence
    # of pairs) down to an iterable of (key, value) pairs.
    if isinstance(query, MultiValueDict):
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    return urllib_parse.urlencode(
        [(force_str(k),
          [force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
         for k, v in query],
        doseq)


def cookie_date(epoch_seconds=None):
    """
    Formats the time to ensure compatibility with Netscape's cookie standard.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    rfcdate = formatdate(epoch_seconds)
    return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])


def http_date(epoch_seconds=None):
    """
    Formats the time to match the RFC1123 date format as specified by HTTP
    RFC2616 section 3.3.1.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    rfcdate = formatdate(epoch_seconds)
    return '%s GMT' % rfcdate[:25]


def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC2616 section 3.3.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Returns an integer expressed in seconds since the epoch, in UTC.

    Raises ValueError if the string matches none of the formats or encodes an
    invalid date.
    """
    # emails.Util.parsedate does the job for RFC1123 dates; unfortunately
    # RFC2616 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # RFC850 uses two-digit years; pivot at 70 per common practice.
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        min = int(m.group('min'))
        sec = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, min, sec)
        return calendar.timegm(result.utctimetuple())
    except Exception:
        raise ValueError("%r is not a valid date" % date)


def parse_http_date_safe(date):
    """
    Same as parse_http_date, but returns None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        pass


# Base 36 functions: useful for generating compact URLs

def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is long than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value will fit into an int to avoid
    # returning a long (#15067). The long type was removed in Python 3.
    if not six.PY3 and value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value


def int_to_base36(i):
    """
    Converts an integer to a base36 string.

    Raises ValueError for negative input and, on Python 2, TypeError for
    non-integer input or ValueError for values that don't fit in an int.
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    factor = 0
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if not six.PY3:
        if not isinstance(i, six.integer_types):
            raise TypeError("Non-integer base36 conversion input.")
        if i > sys.maxint:
            raise ValueError("Base36 conversion input too large.")
    # Find starting factor (highest power of 36 not exceeding i).
    while True:
        factor += 1
        if i < 36 ** factor:
            factor -= 1
            break
    base36 = []
    # Construct base36 representation, most significant digit first.
    while factor >= 0:
        j = 36 ** factor
        base36.append(digits[i // j])
        i = i % j
        factor -= 1
    return ''.join(base36)


def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
    return etags


def quote_etag(etag):
    """
    Wraps a string in double quotes escaping contents as necessary.
    """
    return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')


def same_origin(url1, url2):
    """
    Checks if two URLs are 'same-origin', i.e. share scheme, hostname and
    port.
    """
    p1, p2 = urllib_parse.urlparse(url1), urllib_parse.urlparse(url2)
    return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)


def is_safe_url(url, host=None):
    """
    Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
    a different host and uses a safe scheme).

    Always returns ``False`` on an empty url.
    """
    if not url:
        return False
    url_info = urllib_parse.urlparse(url)
    # SECURITY: checking only the netloc is not enough. A URL such as
    # 'javascript:alert(1)' has an empty netloc, so the previous
    # netloc-only test classified it as safe and allowed script-scheme
    # redirects (XSS). Restrict to http/https (or scheme-less) URLs in
    # addition to requiring the host to match (CVE-2013-6044).
    return ((not url_info.netloc or url_info.netloc == host) and
            (not url_info.scheme or url_info.scheme in ['http', 'https']))
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5788_1
crossvul-python_data_bad_5191_1
from __future__ import unicode_literals import os import re import sys import types from django.conf import settings from django.core.urlresolvers import Resolver404, resolve from django.http import ( HttpRequest, HttpResponse, HttpResponseNotFound, build_request_repr, ) from django.template import Context, Engine, TemplateDoesNotExist from django.template.defaultfilters import force_escape, pprint from django.utils import lru_cache, six, timezone from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_bytes, smart_text from django.utils.html import escape from django.utils.module_loading import import_string from django.utils.translation import ugettext as _ # Minimal Django templates engine to render the error templates # regardless of the project's TEMPLATES setting. DEBUG_ENGINE = Engine(debug=True) HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE') CLEANSED_SUBSTITUTE = '********************' def linebreak_iter(template_source): yield 0 p = template_source.find('\n') while p >= 0: yield p + 1 p = template_source.find('\n', p + 1) yield len(template_source) + 1 class CallableSettingWrapper(object): """ Object to wrap callable appearing in settings * Not to call in the debug page (#21345). * Not to break the debug page if the callable forbidding to set attributes (#23070). """ def __init__(self, callable_setting): self._wrapped = callable_setting def __repr__(self): return repr(self._wrapped) def cleanse_setting(key, value): """Cleanse an individual setting key/value of sensitive content. If the value is a dictionary, recursively cleanse the keys in that dictionary. """ try: if HIDDEN_SETTINGS.search(key): cleansed = CLEANSED_SUBSTITUTE else: if isinstance(value, dict): cleansed = {k: cleanse_setting(k, v) for k, v in value.items()} else: cleansed = value except TypeError: # If the key isn't regex-able, just return as-is. 
cleansed = value if callable(cleansed): # For fixing #21345 and #23070 cleansed = CallableSettingWrapper(cleansed) return cleansed def get_safe_settings(): "Returns a dictionary of the settings module, with sensitive settings blurred out." settings_dict = {} for k in dir(settings): if k.isupper(): settings_dict[k] = cleanse_setting(k, getattr(settings, k)) return settings_dict def technical_500_response(request, exc_type, exc_value, tb, status_code=500): """ Create a technical server error response. The last three arguments are the values returned from sys.exc_info() and friends. """ reporter = ExceptionReporter(request, exc_type, exc_value, tb) if request.is_ajax(): text = reporter.get_traceback_text() return HttpResponse(text, status=status_code, content_type='text/plain') else: html = reporter.get_traceback_html() return HttpResponse(html, status=status_code, content_type='text/html') @lru_cache.lru_cache() def get_default_exception_reporter_filter(): # Instantiate the default filter for the first time and cache it. return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)() def get_exception_reporter_filter(request): default_filter = get_default_exception_reporter_filter() return getattr(request, 'exception_reporter_filter', default_filter) class ExceptionReporterFilter(object): """ Base for all exception reporter filter classes. All overridable hooks contain lenient default behaviors. """ def get_request_repr(self, request): if request is None: return repr(None) else: return build_request_repr(request, POST_override=self.get_post_parameters(request)) def get_post_parameters(self, request): if request is None: return {} else: return request.POST def get_traceback_frame_variables(self, request, tb_frame): return list(six.iteritems(tb_frame.f_locals)) class SafeExceptionReporterFilter(ExceptionReporterFilter): """ Use annotations made by the sensitive_post_parameters and sensitive_variables decorators to filter out sensitive information. 
""" def is_active(self, request): """ This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis. """ return settings.DEBUG is False def get_cleansed_multivaluedict(self, request, multivaluedict): """ Replaces the keys in a MultiValueDict marked as sensitive with stars. This mitigates leaking sensitive POST parameters if something like request.POST['nonexistent_key'] throws an exception (#21098). """ sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: multivaluedict = multivaluedict.copy() for param in sensitive_post_parameters: if param in multivaluedict: multivaluedict[param] = CLEANSED_SUBSTITUTE return multivaluedict def get_post_parameters(self, request): """ Replaces the values of POST parameters marked as sensitive with stars (*********). """ if request is None: return {} else: sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: cleansed = request.POST.copy() if sensitive_post_parameters == '__ALL__': # Cleanse all parameters. for k, v in cleansed.items(): cleansed[k] = CLEANSED_SUBSTITUTE return cleansed else: # Cleanse only the specified parameters. for param in sensitive_post_parameters: if param in cleansed: cleansed[param] = CLEANSED_SUBSTITUTE return cleansed else: return request.POST def cleanse_special_types(self, request, value): if isinstance(value, HttpRequest): # Cleanse the request's POST parameters. 
value = self.get_request_repr(value) elif isinstance(value, MultiValueDict): # Cleanse MultiValueDicts (request.POST is the one we usually care about) value = self.get_cleansed_multivaluedict(request, value) return value def get_traceback_frame_variables(self, request, tb_frame): """ Replaces the values of variables marked as sensitive with stars (*********). """ # Loop through the frame's callers to see if the sensitive_variables # decorator was used. current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in current_frame.f_locals): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. wrapper = current_frame.f_locals['sensitive_variables_wrapper'] sensitive_variables = getattr(wrapper, 'sensitive_variables', None) break current_frame = current_frame.f_back cleansed = {} if self.is_active(request) and sensitive_variables: if sensitive_variables == '__ALL__': # Cleanse all variables for name, value in tb_frame.f_locals.items(): cleansed[name] = CLEANSED_SUBSTITUTE else: # Cleanse specified variables for name, value in tb_frame.f_locals.items(): if name in sensitive_variables: value = CLEANSED_SUBSTITUTE else: value = self.cleanse_special_types(request, value) cleansed[name] = value else: # Potentially cleanse the request and any MultiValueDicts if they # are one of the frame variables. for name, value in tb_frame.f_locals.items(): cleansed[name] = self.cleanse_special_types(request, value) if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in tb_frame.f_locals): # For good measure, obfuscate the decorated function's arguments in # the sensitive_variables decorator's frame, in case the variables # associated with those arguments were meant to be obfuscated from # the decorated function's frame. 
cleansed['func_args'] = CLEANSED_SUBSTITUTE cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE return cleansed.items() class ExceptionReporter(object): """ A class to organize and coordinate reporting on exceptions. """ def __init__(self, request, exc_type, exc_value, tb, is_email=False): self.request = request self.filter = get_exception_reporter_filter(self.request) self.exc_type = exc_type self.exc_value = exc_value self.tb = tb self.is_email = is_email self.template_info = None self.template_does_not_exist = False self.loader_debug_info = None # Handle deprecated string exceptions if isinstance(self.exc_type, six.string_types): self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type) self.exc_type = type(self.exc_value) def format_path_status(self, path): if not os.path.exists(path): return "File does not exist" if not os.path.isfile(path): return "Not a file" if not os.access(path, os.R_OK): return "File is not readable" return "File exists" def get_traceback_data(self): """Return a dictionary containing traceback information.""" try: default_template_engine = Engine.get_default() except Exception: # Since the debug view must never crash, catch all exceptions. # If Django can't find a default template engine, get_default() # raises ImproperlyConfigured. If some template engines fail to # load, any exception may be raised. default_template_engine = None # TODO: add support for multiple template engines (#24120). # TemplateDoesNotExist should carry all the information. # Replaying the search process isn't a good design. if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist): if default_template_engine is None: template_loaders = [] else: self.template_does_not_exist = True self.loader_debug_info = [] # If Django fails in get_template_loaders, provide an empty list # for the following loop to not fail. 
try: template_loaders = default_template_engine.template_loaders except Exception: template_loaders = [] for loader in template_loaders: try: source_list_func = loader.get_template_sources # NOTE: This assumes exc_value is the name of the template that # the loader attempted to load. template_list = [{ 'name': t, 'status': self.format_path_status(t), } for t in source_list_func(str(self.exc_value))] except AttributeError: template_list = [] loader_name = loader.__module__ + '.' + loader.__class__.__name__ self.loader_debug_info.append({ 'loader': loader_name, 'templates': template_list, }) # TODO: add support for multiple template engines (#24119). if (default_template_engine is not None and default_template_engine.debug and hasattr(self.exc_value, 'django_template_source')): self.get_template_exception_info() frames = self.get_traceback_frames() for i, frame in enumerate(frames): if 'vars' in frame: frame_vars = [] for k, v in frame['vars']: v = pprint(v) # The force_escape filter assume unicode, make sure that works if isinstance(v, six.binary_type): v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input # Trim large blobs of data if len(v) > 4096: v = '%s... 
<trimmed %d bytes string>' % (v[0:4096], len(v)) frame_vars.append((k, force_escape(v))) frame['vars'] = frame_vars frames[i] = frame unicode_hint = '' if self.exc_type and issubclass(self.exc_type, UnicodeError): start = getattr(self.exc_value, 'start', None) end = getattr(self.exc_value, 'end', None) if start is not None and end is not None: unicode_str = self.exc_value.args[1] unicode_hint = smart_text( unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace' ) from django import get_version c = { 'is_email': self.is_email, 'unicode_hint': unicode_hint, 'frames': frames, 'request': self.request, 'filtered_POST': self.filter.get_post_parameters(self.request), 'settings': get_safe_settings(), 'sys_executable': sys.executable, 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3], 'server_time': timezone.now(), 'django_version_info': get_version(), 'sys_path': sys.path, 'template_info': self.template_info, 'template_does_not_exist': self.template_does_not_exist, 'loader_debug_info': self.loader_debug_info, } # Check whether exception info is available if self.exc_type: c['exception_type'] = self.exc_type.__name__ if self.exc_value: c['exception_value'] = smart_text(self.exc_value, errors='replace') if frames: c['lastframe'] = frames[-1] return c def get_traceback_html(self): "Return HTML version of debug 500 HTTP error page." t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE) c = Context(self.get_traceback_data(), use_l10n=False) return t.render(c) def get_traceback_text(self): "Return plain text version of debug 500 HTTP error page." 
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE) c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False) return t.render(c) def get_template_exception_info(self): origin, (start, end) = self.exc_value.django_template_source template_source = origin.reload() context_lines = 10 line = 0 upto = 0 source_lines = [] before = during = after = "" for num, next in enumerate(linebreak_iter(template_source)): if start >= upto and end <= next: line = num before = escape(template_source[upto:start]) during = escape(template_source[start:end]) after = escape(template_source[end:next]) source_lines.append((num, escape(template_source[upto:next]))) upto = next total = len(source_lines) top = max(1, line - context_lines) bottom = min(total, line + 1 + context_lines) # In some rare cases, exc_value.args might be empty. try: message = self.exc_value.args[0] except IndexError: message = '(Could not get exception message)' self.template_info = { 'message': message, 'source_lines': source_lines[top:bottom], 'before': before, 'during': during, 'after': after, 'top': top, 'bottom': bottom, 'total': total, 'line': line, 'name': origin.name, } def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None): """ Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context). """ source = None if loader is not None and hasattr(loader, "get_source"): try: source = loader.get_source(module_name) except ImportError: pass if source is not None: source = source.splitlines() if source is None: try: with open(filename, 'rb') as fp: source = fp.read().splitlines() except (OSError, IOError): pass if source is None: return None, [], None, [] # If we just read the source from a file, or if the loader did not # apply tokenize.detect_encoding to decode the source into a Unicode # string, then we should do that ourselves. 
if isinstance(source[0], six.binary_type): encoding = 'ascii' for line in source[:2]: # File coding may be specified. Match pattern from PEP-263 # (http://www.python.org/dev/peps/pep-0263/) match = re.search(br'coding[:=]\s*([-\w.]+)', line) if match: encoding = match.group(1).decode('ascii') break source = [six.text_type(sline, encoding, 'replace') for sline in source] lower_bound = max(0, lineno - context_lines) upper_bound = lineno + context_lines pre_context = source[lower_bound:lineno] context_line = source[lineno] post_context = source[lineno + 1:upper_bound] return lower_bound, pre_context, context_line, post_context def get_traceback_frames(self): frames = [] tb = self.tb while tb is not None: # Support for __traceback_hide__ which is used by a few libraries # to hide internal frames. if tb.tb_frame.f_locals.get('__traceback_hide__'): tb = tb.tb_next continue filename = tb.tb_frame.f_code.co_filename function = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno - 1 loader = tb.tb_frame.f_globals.get('__loader__') module_name = tb.tb_frame.f_globals.get('__name__') or '' pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file( filename, lineno, 7, loader, module_name, ) if pre_context_lineno is not None: frames.append({ 'tb': tb, 'type': 'django' if module_name.startswith('django.') else 'user', 'filename': filename, 'function': function, 'lineno': lineno + 1, 'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame), 'id': id(tb), 'pre_context': pre_context, 'context_line': context_line, 'post_context': post_context, 'pre_context_lineno': pre_context_lineno + 1, }) tb = tb.tb_next return frames def format_exception(self): """ Return the same data as from traceback.format_exception. 
""" import traceback frames = self.get_traceback_frames() tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames] list = ['Traceback (most recent call last):\n'] list += traceback.format_list(tb) list += traceback.format_exception_only(self.exc_type, self.exc_value) return list def technical_404_response(request, exception): "Create a technical 404 error response. The exception should be the Http404." try: error_url = exception.args[0]['path'] except (IndexError, TypeError, KeyError): error_url = request.path_info[1:] # Trim leading slash try: tried = exception.args[0]['tried'] except (IndexError, TypeError, KeyError): tried = [] else: if (not tried # empty URLconf or (request.path == '/' and len(tried) == 1 # default URLconf and len(tried[0]) == 1 and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')): return default_urlconf(request) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ caller = '' try: resolver_match = resolve(request.path) except Resolver404: pass else: obj = resolver_match.func if hasattr(obj, '__name__'): caller = obj.__name__ elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'): caller = obj.__class__.__name__ if hasattr(obj, '__module__'): module = obj.__module__ caller = '%s.%s' % (module, caller) t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE) c = Context({ 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'reason': force_bytes(exception, errors='replace'), 'request': request, 'settings': get_safe_settings(), 'raising_view_name': caller, }) return HttpResponseNotFound(t.render(c), content_type='text/html') def default_urlconf(request): "Create an empty URLconf 404 error response." 
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE) c = Context({ "title": _("Welcome to Django"), "heading": _("It worked!"), "subheading": _("Congratulations on your first Django-powered page."), "instructions": _("Of course, you haven't actually done any work yet. " "Next, start your first app by running <code>python manage.py startapp [app_label]</code>."), "explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your " "Django settings file and you haven't configured any URLs. Get to work!"), }) return HttpResponse(t.render(c), content_type='text/html') # # Templates are embedded in the file so that we know the error handler will # always work even if the template loader is broken. # TECHNICAL_500_TEMPLATE = (""" <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"> <title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}""" """{% if request %} at {{ request.path_info|escape }}{% endif %}</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; } h2 { margin-bottom:.8em; } h2 span { font-size:80%; color:#666; font-weight:normal; } h3 { margin:1em 0 .5em 0; } h4 { margin:0 0 .5em 0; font-weight: normal; } code, pre { font-size: 100%; white-space: pre-wrap; } table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; } tbody td, tbody th { vertical-align:top; padding:2px 3px; } thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; } tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; } table.vars { margin:5px 0 2px 40px; } table.vars td, table.req td { font-family:monospace; } table td.code { width:100%; } table td.code pre 
{ overflow:hidden; } table.source th { color:#666; } table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; } ul.traceback { list-style-type:none; color: #222; } ul.traceback li.frame { padding-bottom:1em; color:#666; } ul.traceback li.user { background-color:#e0e0e0; color:#000 } div.context { padding:10px 0; overflow:hidden; } div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; } div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; } div.context ol li pre { display:inline; } div.context ol.context-line li { color:#505050; background-color:#dfdfdf; } div.context ol.context-line li span { position:absolute; right:32px; } .user div.context ol.context-line li { background-color:#bbb; color:#000; } .user div.context ol li { color:#666; } div.commands { margin-left: 40px; } div.commands a { color:#555; text-decoration:none; } .user div.commands a { color: black; } #summary { background: #ffc; } #summary h2 { font-weight: normal; color: #666; } #explanation { background:#eee; } #template, #template-not-exist { background:#f6f6f6; } #template-not-exist ul { margin: 0 0 0 20px; } #unicode-hint { background:#eee; } #traceback { background:#eee; } #requestinfo { background:#f6f6f6; padding-left:120px; } #summary table { border:none; background:transparent; } #requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; } #requestinfo h3 { margin-bottom:-1em; } .error { background: #ffc; } .specific { color:#cc3300; font-weight:bold; } h2 span.commands { font-size:.7em;} span.commands a:link {color:#5E5694;} pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; } </style> {% if not is_email %} <script type="text/javascript"> //<!-- function getElementsByClassName(oElm, strTagName, strClassName){ // Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com var arrElements = 
(strTagName == "*" && document.all)? document.all : oElm.getElementsByTagName(strTagName); var arrReturnElements = new Array(); strClassName = strClassName.replace(/\-/g, "\\-"); var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)"); var oElement; for(var i=0; i<arrElements.length; i++){ oElement = arrElements[i]; if(oRegExp.test(oElement.className)){ arrReturnElements.push(oElement); } } return (arrReturnElements) } function hideAll(elems) { for (var e = 0; e < elems.length; e++) { elems[e].style.display = 'none'; } } window.onload = function() { hideAll(getElementsByClassName(document, 'table', 'vars')); hideAll(getElementsByClassName(document, 'ol', 'pre-context')); hideAll(getElementsByClassName(document, 'ol', 'post-context')); hideAll(getElementsByClassName(document, 'div', 'pastebin')); } function toggle() { for (var i = 0; i < arguments.length; i++) { var e = document.getElementById(arguments[i]); if (e) { e.style.display = e.style.display == 'none' ? 'block': 'none'; } } return false; } function varToggle(link, id) { toggle('v' + id); var s = link.getElementsByTagName('span')[0]; var uarr = String.fromCharCode(0x25b6); var darr = String.fromCharCode(0x25bc); s.innerHTML = s.innerHTML == uarr ? darr : uarr; return false; } function switchPastebinFriendly(link) { s1 = "Switch to copy-and-paste view"; s2 = "Switch back to interactive view"; link.innerHTML = link.innerHTML.trim() == s1 ? 
s2: s1; toggle('browserTraceback', 'pastebinTraceback'); return false; } //--> </script> {% endif %} </head> <body> <div id="summary"> <h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}""" """{% if request %} at {{ request.path_info|escape }}{% endif %}</h1> <pre class="exception_value">""" """{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}""" """</pre> <table class="meta"> {% if request %} <tr> <th>Request Method:</th> <td>{{ request.META.REQUEST_METHOD }}</td> </tr> <tr> <th>Request URL:</th> <td>{{ request.build_absolute_uri|escape }}</td> </tr> {% endif %} <tr> <th>Django Version:</th> <td>{{ django_version_info }}</td> </tr> {% if exception_type %} <tr> <th>Exception Type:</th> <td>{{ exception_type }}</td> </tr> {% endif %} {% if exception_type and exception_value %} <tr> <th>Exception Value:</th> <td><pre>{{ exception_value|force_escape }}</pre></td> </tr> {% endif %} {% if lastframe %} <tr> <th>Exception Location:</th> <td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td> </tr> {% endif %} <tr> <th>Python Executable:</th> <td>{{ sys_executable|escape }}</td> </tr> <tr> <th>Python Version:</th> <td>{{ sys_version_info }}</td> </tr> <tr> <th>Python Path:</th> <td><pre>{{ sys_path|pprint }}</pre></td> </tr> <tr> <th>Server time:</th> <td>{{server_time|date:"r"}}</td> </tr> </table> </div> {% if unicode_hint %} <div id="unicode-hint"> <h2>Unicode error hint</h2> <p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p> </div> {% endif %} {% if template_does_not_exist %} <div id="template-not-exist"> <h2>Template-loader postmortem</h2> {% if loader_debug_info %} <p>Django tried loading these templates, in this order:</p> <ul> {% for loader in loader_debug_info %} <li>Using loader <code>{{ loader.loader }}</code>: <ul> {% for t in loader.templates %}<li><code>{{ t.name }}</code> ({{ 
t.status }})</li>{% endfor %} </ul> </li> {% endfor %} </ul> {% else %} <p>Django couldn't find any templates because your <code>'loaders'</code> option is empty!</p> {% endif %} </div> {% endif %} {% if template_info %} <div id="template"> <h2>Error during template rendering</h2> <p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p> <h3>{{ template_info.message }}</h3> <table class="source{% if template_info.top %} cut-top{% endif %} {% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}"> {% for source_line in template_info.source_lines %} {% ifequal source_line.0 template_info.line %} <tr class="error"><th>{{ source_line.0 }}</th> <td> {{ template_info.before }} <span class="specific">{{ template_info.during }}</span> {{ template_info.after }} </td> </tr> {% else %} <tr><th>{{ source_line.0 }}</th> <td>{{ source_line.1 }}</td></tr> {% endifequal %} {% endfor %} </table> </div> {% endif %} {% if frames %} <div id="traceback"> <h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);"> Switch to copy-and-paste view</a></span>{% endif %} </h2> {% autoescape off %} <div id="browserTraceback"> <ul class="traceback"> {% for frame in frames %} <li class="frame {{ frame.type }}"> <code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code> {% if frame.context_line %} <div class="context" id="c{{ frame.id }}"> {% if frame.pre_context and not is_email %} <ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}"> {% for line in frame.pre_context %} <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li> {% endfor %} </ol> {% endif %} <ol start="{{ frame.lineno }}" class="context-line"> <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre> {{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% 
endif %}</li></ol> {% if frame.post_context and not is_email %} <ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}"> {% for line in frame.post_context %} <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li> {% endfor %} </ol> {% endif %} </div> {% endif %} {% if frame.vars %} <div class="commands"> {% if is_email %} <h2>Local Vars</h2> {% else %} <a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>&#x25b6;</span> Local vars</a> {% endif %} </div> <table class="vars" id="v{{ frame.id }}"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in frame.vars|dictsort:"0" %} <tr> <td>{{ var.0|force_escape }}</td> <td class="code"><pre>{{ var.1 }}</pre></td> </tr> {% endfor %} </tbody> </table> {% endif %} </li> {% endfor %} </ul> </div> {% endautoescape %} <form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post"> {% if not is_email %} <div id="pastebinTraceback" class="pastebin"> <input type="hidden" name="language" value="PythonConsole"> <input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}"> <input type="hidden" name="source" value="Django Dpaste Agent"> <input type="hidden" name="poster" value="Django"> <textarea name="content" id="traceback_area" cols="140" rows="25"> Environment: {% if request %} Request Method: {{ request.META.REQUEST_METHOD }} Request URL: {{ request.build_absolute_uri|escape }} {% endif %} Django Version: {{ django_version_info }} Python Version: {{ sys_version_info }} Installed Applications: {{ settings.INSTALLED_APPS|pprint }} Installed Middleware: {{ settings.MIDDLEWARE_CLASSES|pprint }} {% if template_does_not_exist %}Template Loader Error: {% if loader_debug_info %}Django tried loading these templates, in this order: {% for loader in loader_debug_info %}Using loader {{ loader.loader }}: {% for t in loader.templates %}{{ 
t.name }} ({{ t.status }}) {% endfor %}{% endfor %} {% else %}Django couldn't find any templates because your 'loaders' option is empty! {% endif %} {% endif %}{% if template_info %} Template error: In template {{ template_info.name }}, error at line {{ template_info.line }} {{ template_info.message }}{% for source_line in template_info.source_lines %} {% ifequal source_line.0 template_info.line %} {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }} {% else %} {{ source_line.0 }} : {{ source_line.1 }} {% endifequal %}{% endfor %}{% endif %} Traceback: {% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }} {% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %} {% endfor %} Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %} Exception Value: {{ exception_value|force_escape }} </textarea> <br><br> <input type="submit" value="Share this traceback on a public Web site"> </div> </form> </div> {% endif %} {% endif %} <div id="requestinfo"> <h2>Request information</h2> {% if request %} <h3 id="get-info">GET</h3> {% if request.GET %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.GET.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No GET data</p> {% endif %} <h3 id="post-info">POST</h3> {% if filtered_POST %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in filtered_POST.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No POST data</p> {% endif %} <h3 id="files-info">FILES</h3> {% if request.FILES %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in 
request.FILES.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No FILES data</p> {% endif %} <h3 id="cookie-info">COOKIES</h3> {% if request.COOKIES %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.COOKIES.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No cookie data</p> {% endif %} <h3 id="meta-info">META</h3> <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.META.items|dictsort:"0" %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>Request data not supplied</p> {% endif %} <h3 id="settings-info">Settings</h3> <h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4> <table class="req"> <thead> <tr> <th>Setting</th> <th>Value</th> </tr> </thead> <tbody> {% for var in settings.items|dictsort:"0" %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> </div> {% if not is_email %} <div id="explanation"> <p> You're seeing this error because you have <code>DEBUG = True</code> in your Django settings file. Change that to <code>False</code>, and Django will display a standard page generated by the handler for this status code. 
</p> </div> {% endif %} </body> </html> """) TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %} {% firstof exception_value 'No exception message supplied' %} {% if request %} Request Method: {{ request.META.REQUEST_METHOD }} Request URL: {{ request.build_absolute_uri }}{% endif %} Django Version: {{ django_version_info }} Python Executable: {{ sys_executable }} Python Version: {{ sys_version_info }} Python Path: {{ sys_path }} Server time: {{server_time|date:"r"}} Installed Applications: {{ settings.INSTALLED_APPS|pprint }} Installed Middleware: {{ settings.MIDDLEWARE_CLASSES|pprint }} {% if template_does_not_exist %}Template loader Error: {% if loader_debug_info %}Django tried loading these templates, in this order: {% for loader in loader_debug_info %}Using loader {{ loader.loader }}: {% for t in loader.templates %}{{ t.name }} ({{ t.status }}) {% endfor %}{% endfor %} {% else %}Django couldn't find any templates because your 'loaders' option is empty! {% endif %} {% endif %}{% if template_info %} Template error: In template {{ template_info.name }}, error at line {{ template_info.line }} {{ template_info.message }}{% for source_line in template_info.source_lines %} {% ifequal source_line.0 template_info.line %} {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }} {% else %} {{ source_line.0 }} : {{ source_line.1 }} {% endifequal %}{% endfor %}{% endif %}{% if frames %} Traceback: {% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }} {% if frame.context_line %} {{ frame.lineno }}. 
{{ frame.context_line }}{% endif %} {% endfor %} {% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %} {% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %} {% if request %}Request information: GET:{% for k, v in request.GET.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %} POST:{% for k, v in filtered_POST.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %} FILES:{% for k, v in request.FILES.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %} COOKIES:{% for k, v in request.COOKIES.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %} META:{% for k, v in request.META.items|dictsort:"0" %} {{ k }} = {{ v|stringformat:"r" }}{% endfor %} {% else %}Request data not supplied {% endif %} Settings: Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %} {{ k }} = {{ v|stringformat:"r" }}{% endfor %} You're seeing this error because you have DEBUG = True in your Django settings file. Change that to False, and Django will display a standard page generated by the handler for this status code. 
""" TECHNICAL_404_TEMPLATE = """ <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <title>Page not found at {{ request.path_info|escape }}</title> <meta name="robots" content="NONE,NOARCHIVE"> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; background:#eee; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; margin-bottom:.4em; } h1 span { font-size:60%; color:#666; font-weight:normal; } table { border:none; border-collapse: collapse; width:100%; } td, th { vertical-align:top; padding:2px 3px; } th { width:12em; text-align:right; color:#666; padding-right:.5em; } #info { background:#f6f6f6; } #info ol { margin: 0.5em 4em; } #info ol li { font-family: monospace; } #summary { background: #ffc; } #explanation { background:#eee; border-bottom: 0px none; } </style> </head> <body> <div id="summary"> <h1>Page not found <span>(404)</span></h1> <table class="meta"> <tr> <th>Request Method:</th> <td>{{ request.META.REQUEST_METHOD }}</td> </tr> <tr> <th>Request URL:</th> <td>{{ request.build_absolute_uri|escape }}</td> </tr> {% if raising_view_name %} <tr> <th>Raised by:</th> <td>{{ raising_view_name }}</td> </tr> {% endif %} </table> </div> <div id="info"> {% if urlpatterns %} <p> Using the URLconf defined in <code>{{ urlconf }}</code>, Django tried these URL patterns, in this order: </p> <ol> {% for pattern in urlpatterns %} <li> {% for pat in pattern %} {{ pat.regex.pattern }} {% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %} {% endfor %} </li> {% endfor %} </ol> <p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p> {% else %} <p>{{ reason }}</p> {% endif %} </div> <div id="explanation"> <p> You're seeing this error because you have <code>DEBUG = True</code> in your Django settings file. 
Change that to <code>False</code>, and Django will display a standard 404 page. </p> </div> </body> </html> """ DEFAULT_URLCONF_TEMPLATE = """ <!DOCTYPE html> <html lang="en"><head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; } h2 { margin-bottom:.8em; } h2 span { font-size:80%; color:#666; font-weight:normal; } h3 { margin:1em 0 .5em 0; } h4 { margin:0 0 .5em 0; font-weight: normal; } table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; } tbody td, tbody th { vertical-align:top; padding:2px 3px; } thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; } tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; } #summary { background: #e0ebff; } #summary h2 { font-weight: normal; color: #666; } #explanation { background:#eee; } #instructions { background:#f6f6f6; } #summary table { border:none; background:transparent; } </style> </head> <body> <div id="summary"> <h1>{{ heading }}</h1> <h2>{{ subheading }}</h2> </div> <div id="instructions"> <p> {{ instructions|safe }} </p> </div> <div id="explanation"> <p> {{ explanation|safe }} </p> </div> </body></html> """
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5191_1
crossvul-python_data_bad_5729_0
""" Form Widget classes specific to the Django admin site. """ from __future__ import unicode_literals import copy from django import forms from django.contrib.admin.templatetags.admin_static import static from django.core.urlresolvers import reverse from django.forms.widgets import RadioFieldRenderer from django.forms.util import flatatt from django.utils.html import escape, format_html, format_html_join, smart_urlquote from django.utils.text import Truncator from django.utils.translation import ugettext as _ from django.utils.safestring import mark_safe from django.utils.encoding import force_text from django.utils import six class FilteredSelectMultiple(forms.SelectMultiple): """ A SelectMultiple with a JavaScript filter interface. Note that the resulting JavaScript assumes that the jsi18n catalog has been loaded in the page """ @property def media(self): js = ["core.js", "SelectBox.js", "SelectFilter2.js"] return forms.Media(js=[static("admin/js/%s" % path) for path in js]) def __init__(self, verbose_name, is_stacked, attrs=None, choices=()): self.verbose_name = verbose_name self.is_stacked = is_stacked super(FilteredSelectMultiple, self).__init__(attrs, choices) def render(self, name, value, attrs=None, choices=()): if attrs is None: attrs = {} attrs['class'] = 'selectfilter' if self.is_stacked: attrs['class'] += 'stacked' output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)] output.append('<script type="text/javascript">addEvent(window, "load", function(e) {') # TODO: "id_" is hard-coded here. This should instead use the correct # API to determine the ID dynamically. 
output.append('SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n' % (name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), static('admin/'))) return mark_safe(''.join(output)) class AdminDateWidget(forms.DateInput): @property def media(self): js = ["calendar.js", "admin/DateTimeShortcuts.js"] return forms.Media(js=[static("admin/js/%s" % path) for path in js]) def __init__(self, attrs=None, format=None): final_attrs = {'class': 'vDateField', 'size': '10'} if attrs is not None: final_attrs.update(attrs) super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format) class AdminTimeWidget(forms.TimeInput): @property def media(self): js = ["calendar.js", "admin/DateTimeShortcuts.js"] return forms.Media(js=[static("admin/js/%s" % path) for path in js]) def __init__(self, attrs=None, format=None): final_attrs = {'class': 'vTimeField', 'size': '8'} if attrs is not None: final_attrs.update(attrs) super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format) class AdminSplitDateTime(forms.SplitDateTimeWidget): """ A SplitDateTime Widget that has some admin-specific styling. """ def __init__(self, attrs=None): widgets = [AdminDateWidget, AdminTimeWidget] # Note that we're calling MultiWidget, not SplitDateTimeWidget, because # we want to define widgets. 
forms.MultiWidget.__init__(self, widgets, attrs) def format_output(self, rendered_widgets): return format_html('<p class="datetime">{0} {1}<br />{2} {3}</p>', _('Date:'), rendered_widgets[0], _('Time:'), rendered_widgets[1]) class AdminRadioFieldRenderer(RadioFieldRenderer): def render(self): """Outputs a <ul> for this set of radio fields.""" return format_html('<ul{0}>\n{1}\n</ul>', flatatt(self.attrs), format_html_join('\n', '<li>{0}</li>', ((force_text(w),) for w in self))) class AdminRadioSelect(forms.RadioSelect): renderer = AdminRadioFieldRenderer class AdminFileWidget(forms.ClearableFileInput): template_with_initial = ('<p class="file-upload">%s</p>' % forms.ClearableFileInput.template_with_initial) template_with_clear = ('<span class="clearable-file-input">%s</span>' % forms.ClearableFileInput.template_with_clear) def url_params_from_lookup_dict(lookups): """ Converts the type of lookups specified in a ForeignKey limit_choices_to attribute to a dictionary of query parameters """ params = {} if lookups and hasattr(lookups, 'items'): items = [] for k, v in lookups.items(): if isinstance(v, (tuple, list)): v = ','.join([str(x) for x in v]) elif isinstance(v, bool): # See django.db.fields.BooleanField.get_prep_lookup v = ('0', '1')[v] else: v = six.text_type(v) items.append((k, v)) params.update(dict(items)) return params class ForeignKeyRawIdWidget(forms.TextInput): """ A Widget for displaying ForeignKeys in the "raw_id" interface rather than in a <select> box. 
""" def __init__(self, rel, admin_site, attrs=None, using=None): self.rel = rel self.admin_site = admin_site self.db = using super(ForeignKeyRawIdWidget, self).__init__(attrs) def render(self, name, value, attrs=None): rel_to = self.rel.to if attrs is None: attrs = {} extra = [] if rel_to in self.admin_site._registry: # The related object is registered with the same AdminSite related_url = reverse('admin:%s_%s_changelist' % (rel_to._meta.app_label, rel_to._meta.module_name), current_app=self.admin_site.name) params = self.url_parameters() if params: url = '?' + '&amp;'.join(['%s=%s' % (k, v) for k, v in params.items()]) else: url = '' if "class" not in attrs: attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript code looks for this hook. # TODO: "lookup_id_" is hard-coded here. This should instead use # the correct API to determine the ID dynamically. extra.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> ' % (related_url, url, name)) extra.append('<img src="%s" width="16" height="16" alt="%s" /></a>' % (static('admin/img/selector-search.gif'), _('Lookup'))) output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)] + extra if value: output.append(self.label_for_value(value)) return mark_safe(''.join(output)) def base_url_parameters(self): return url_params_from_lookup_dict(self.rel.limit_choices_to) def url_parameters(self): from django.contrib.admin.views.main import TO_FIELD_VAR params = self.base_url_parameters() params.update({TO_FIELD_VAR: self.rel.get_related_field().name}) return params def label_for_value(self, value): key = self.rel.get_related_field().name try: obj = self.rel.to._default_manager.using(self.db).get(**{key: value}) return '&nbsp;<strong>%s</strong>' % escape(Truncator(obj).words(14, truncate='...')) except (ValueError, self.rel.to.DoesNotExist): return '' class ManyToManyRawIdWidget(ForeignKeyRawIdWidget): """ A Widget for displaying ManyToMany ids 
in the "raw_id" interface rather than in a <select multiple> box. """ def render(self, name, value, attrs=None): if attrs is None: attrs = {} if self.rel.to in self.admin_site._registry: # The related object is registered with the same AdminSite attrs['class'] = 'vManyToManyRawIdAdminField' if value: value = ','.join([force_text(v) for v in value]) else: value = '' return super(ManyToManyRawIdWidget, self).render(name, value, attrs) def url_parameters(self): return self.base_url_parameters() def label_for_value(self, value): return '' def value_from_datadict(self, data, files, name): value = data.get(name) if value: return value.split(',') def _has_changed(self, initial, data): if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True for pk1, pk2 in zip(initial, data): if force_text(pk1) != force_text(pk2): return True return False class RelatedFieldWidgetWrapper(forms.Widget): """ This class is a wrapper to a given widget to add the add icon for the admin interface. """ def __init__(self, widget, rel, admin_site, can_add_related=None): self.is_hidden = widget.is_hidden self.needs_multipart_form = widget.needs_multipart_form self.attrs = widget.attrs self.choices = widget.choices self.widget = widget self.rel = rel # Backwards compatible check for whether a user can add related # objects. 
if can_add_related is None: can_add_related = rel.to in admin_site._registry self.can_add_related = can_add_related # so we can check if the related object is registered with this AdminSite self.admin_site = admin_site def __deepcopy__(self, memo): obj = copy.copy(self) obj.widget = copy.deepcopy(self.widget, memo) obj.attrs = self.widget.attrs memo[id(self)] = obj return obj @property def media(self): return self.widget.media def render(self, name, value, *args, **kwargs): rel_to = self.rel.to info = (rel_to._meta.app_label, rel_to._meta.object_name.lower()) self.widget.choices = self.choices output = [self.widget.render(name, value, *args, **kwargs)] if self.can_add_related: related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name) # TODO: "add_id_" is hard-coded here. This should instead use the # correct API to determine the ID dynamically. output.append('<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % (related_url, name)) output.append('<img src="%s" width="10" height="10" alt="%s"/></a>' % (static('admin/img/icon_addlink.gif'), _('Add Another'))) return mark_safe(''.join(output)) def build_attrs(self, extra_attrs=None, **kwargs): "Helper function for building an attribute dictionary." 
self.attrs = self.widget.build_attrs(extra_attrs=None, **kwargs) return self.attrs def value_from_datadict(self, data, files, name): return self.widget.value_from_datadict(data, files, name) def _has_changed(self, initial, data): return self.widget._has_changed(initial, data) def id_for_label(self, id_): return self.widget.id_for_label(id_) class AdminTextareaWidget(forms.Textarea): def __init__(self, attrs=None): final_attrs = {'class': 'vLargeTextField'} if attrs is not None: final_attrs.update(attrs) super(AdminTextareaWidget, self).__init__(attrs=final_attrs) class AdminTextInputWidget(forms.TextInput): def __init__(self, attrs=None): final_attrs = {'class': 'vTextField'} if attrs is not None: final_attrs.update(attrs) super(AdminTextInputWidget, self).__init__(attrs=final_attrs) class AdminURLFieldWidget(forms.TextInput): def __init__(self, attrs=None): final_attrs = {'class': 'vURLField'} if attrs is not None: final_attrs.update(attrs) super(AdminURLFieldWidget, self).__init__(attrs=final_attrs) def render(self, name, value, attrs=None): html = super(AdminURLFieldWidget, self).render(name, value, attrs) if value: value = force_text(self._format_value(value)) final_attrs = {'href': mark_safe(smart_urlquote(value))} html = format_html( '<p class="url">{0} <a {1}>{2}</a><br />{3} {4}</p>', _('Currently:'), flatatt(final_attrs), value, _('Change:'), html ) return html class AdminIntegerFieldWidget(forms.TextInput): class_name = 'vIntegerField' def __init__(self, attrs=None): final_attrs = {'class': self.class_name} if attrs is not None: final_attrs.update(attrs) super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs) class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget): class_name = 'vBigIntegerField' class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput): def __init__(self, attrs=None): final_attrs = {'class': 'vCommaSeparatedIntegerField'} if attrs is not None: final_attrs.update(attrs) super(AdminCommaSeparatedIntegerFieldWidget, 
self).__init__(attrs=final_attrs)
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5729_0
crossvul-python_data_good_1644_6
"""Tornado handlers for kernel specifications.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import glob import json import os pjoin = os.path.join from tornado import web from ...base.handlers import APIHandler, json_errors from ...utils import url_path_join def kernelspec_model(handler, name): """Load a KernelSpec by name and return the REST API model""" ksm = handler.kernel_spec_manager spec = ksm.get_kernel_spec(name) d = {'name': name} d['spec'] = spec.to_dict() d['resources'] = resources = {} resource_dir = spec.resource_dir for resource in ['kernel.js', 'kernel.css']: if os.path.exists(pjoin(resource_dir, resource)): resources[resource] = url_path_join( handler.base_url, 'kernelspecs', name, resource ) for logo_file in glob.glob(pjoin(resource_dir, 'logo-*')): fname = os.path.basename(logo_file) no_ext, _ = os.path.splitext(fname) resources[no_ext] = url_path_join( handler.base_url, 'kernelspecs', name, fname ) return d class MainKernelSpecHandler(APIHandler): SUPPORTED_METHODS = ('GET',) @web.authenticated @json_errors def get(self): ksm = self.kernel_spec_manager km = self.kernel_manager model = {} model['default'] = km.default_kernel_name model['kernelspecs'] = specs = {} for kernel_name in ksm.find_kernel_specs(): try: d = kernelspec_model(self, kernel_name) except Exception: self.log.error("Failed to load kernel spec: '%s'", kernel_name, exc_info=True) continue specs[kernel_name] = d self.set_header("Content-Type", 'application/json') self.finish(json.dumps(model)) class KernelSpecHandler(APIHandler): SUPPORTED_METHODS = ('GET',) @web.authenticated @json_errors def get(self, kernel_name): try: model = kernelspec_model(self, kernel_name) except KeyError: raise web.HTTPError(404, u'Kernel spec %s not found' % kernel_name) self.set_header("Content-Type", 'application/json') self.finish(json.dumps(model)) # URL to handler mappings kernel_name_regex = r"(?P<kernel_name>\w+)" default_handlers = [ 
(r"/api/kernelspecs", MainKernelSpecHandler), (r"/api/kernelspecs/%s" % kernel_name_regex, KernelSpecHandler), ]
./CrossVul/dataset_final_sorted/CWE-79/py/good_1644_6
crossvul-python_data_bad_3149_0
# -*-python-*- # # Copyright (C) 1999-2016 The ViewCVS Group. All Rights Reserved. # # By using this file, you agree to the terms and conditions set forth in # the LICENSE.html file which can be found at the top level of the ViewVC # distribution or at http://viewvc.org/license-1.html. # # For more information, visit http://viewvc.org/ # # ----------------------------------------------------------------------- # # viewvc: View CVS/SVN repositories via a web browser # # ----------------------------------------------------------------------- __version__ = '1.1.26-dev' # this comes from our library; measure the startup time import debug debug.t_start('startup') debug.t_start('imports') # standard modules that we know are in the path or builtin import sys import os import fnmatch import gzip import mimetypes import re import rfc822 import stat import string import struct import tempfile import time import types import urllib # These modules come from our library (the stub has set up the path) import accept import compat import config import ezt import popen import sapi import vcauth import vclib import vclib.ccvs import vclib.svn try: import idiff except (SyntaxError, ImportError): idiff = None debug.t_end('imports') ######################################################################### checkout_magic_path = '*checkout*' # According to RFC 1738 the '~' character is unsafe in URLs. # But for compatibility with URLs bookmarked with old releases of ViewCVS: oldstyle_checkout_magic_path = '~checkout~' docroot_magic_path = '*docroot*' viewcvs_mime_type = 'text/vnd.viewcvs-markup' alt_mime_type = 'text/x-cvsweb-markup' view_roots_magic = '*viewroots*' # Put here the variables we need in order to hold our state - they # will be added (with their current value) to (almost) any link/query # string you construct. 
_sticky_vars = [ 'hideattic', 'sortby', 'sortdir', 'logsort', 'diff_format', 'search', 'limit_changes', ] # for reading/writing between a couple descriptors CHUNK_SIZE = 8192 # for rcsdiff processing of header _RCSDIFF_IS_BINARY = 'binary-diff' _RCSDIFF_ERROR = 'error' # special characters that don't need to be URL encoded _URL_SAFE_CHARS = "/*~" class Request: def __init__(self, server, cfg): self.server = server self.cfg = cfg self.script_name = _normalize_path(server.getenv('SCRIPT_NAME', '')) self.browser = server.getenv('HTTP_USER_AGENT', 'unknown') # process the Accept-Language: header, and load the key/value # files, given the selected language hal = server.getenv('HTTP_ACCEPT_LANGUAGE','') try: self.lang_selector = accept.language(hal) except accept.AcceptLanguageParseError: self.lang_selector = accept.language('en') self.language = self.lang_selector.select_from(cfg.general.languages) self.kv = cfg.load_kv_files(self.language) # check for an authenticated username self.username = server.getenv('REMOTE_USER') # if we allow compressed output, see if the client does too self.gzip_compress_level = 0 if cfg.options.allow_compress: http_accept_encoding = os.environ.get("HTTP_ACCEPT_ENCODING", "") if "gzip" in filter(None, map(lambda x: string.strip(x), string.split(http_accept_encoding, ","))): self.gzip_compress_level = 9 # make this configurable? def run_viewvc(self): cfg = self.cfg # This function first parses the query string and sets the following # variables. Then it executes the request. 
self.view_func = None # function to call to process the request self.repos = None # object representing current repository self.rootname = None # name of current root (as used in viewvc.conf) self.roottype = None # current root type ('svn' or 'cvs') self.rootpath = None # physical path to current root self.pathtype = None # type of path, either vclib.FILE or vclib.DIR self.where = None # path to file or directory in current root self.query_dict = {} # validated and cleaned up query options self.path_parts = None # for convenience, equals where.split('/') self.pathrev = None # current path revision or tag self.auth = None # authorizer module in use # redirect if we're loading from a valid but irregular URL # These redirects aren't neccessary to make ViewVC work, it functions # just fine without them, but they make it easier for server admins to # implement access restrictions based on URL needs_redirect = 0 # Process the query params for name, values in self.server.params().items(): # we only care about the first value value = values[0] # patch up old queries that use 'cvsroot' to look like they used 'root' if name == 'cvsroot': name = 'root' needs_redirect = 1 # same for 'only_with_tag' and 'pathrev' if name == 'only_with_tag': name = 'pathrev' needs_redirect = 1 # redirect view=rev to view=revision, too if name == 'view' and value == 'rev': value = 'revision' needs_redirect = 1 # validate the parameter _validate_param(name, value) # if we're here, then the parameter is okay self.query_dict[name] = value # Resolve the view parameter into a handler function. self.view_func = _views.get(self.query_dict.get('view', None), self.view_func) # Process PATH_INFO component of query string path_info = self.server.getenv('PATH_INFO', '') # clean it up. this removes duplicate '/' characters and any that may # exist at the front or end of the path. 
### we might want to redirect to the cleaned up URL path_parts = _path_parts(path_info) if path_parts: # handle magic path prefixes if path_parts[0] == docroot_magic_path: # if this is just a simple hunk of doc, then serve it up self.where = _path_join(path_parts[1:]) return view_doc(self) elif path_parts[0] in (checkout_magic_path, oldstyle_checkout_magic_path): path_parts.pop(0) self.view_func = view_checkout if not cfg.options.checkout_magic: needs_redirect = 1 # handle tarball magic suffixes if self.view_func is download_tarball: if (self.query_dict.get('parent')): del path_parts[-1] elif path_parts[-1][-7:] == ".tar.gz": path_parts[-1] = path_parts[-1][:-7] # Figure out root name self.rootname = self.query_dict.get('root') if self.rootname == view_roots_magic: del self.query_dict['root'] self.rootname = "" needs_redirect = 1 elif self.rootname is None: if cfg.options.root_as_url_component: if path_parts: self.rootname = path_parts.pop(0) else: self.rootname = "" elif self.view_func != view_roots: self.rootname = cfg.general.default_root elif cfg.options.root_as_url_component: needs_redirect = 1 # Take care of old-new roots mapping for old_root, new_root in cfg.general.renamed_roots.items(): if self.rootname == old_root: self.rootname = new_root needs_redirect = 1 self.where = _path_join(path_parts) self.path_parts = path_parts if self.rootname: roottype, rootpath = locate_root(cfg, self.rootname) if roottype: # Overlay root-specific options. 
cfg.overlay_root_options(self.rootname) # Setup an Authorizer for this rootname and username debug.t_start('setup-authorizer') self.auth = setup_authorizer(cfg, self.username) debug.t_end('setup-authorizer') # Create the repository object debug.t_start('select-repos') try: if roottype == 'cvs': self.rootpath = vclib.ccvs.canonicalize_rootpath(rootpath) self.repos = vclib.ccvs.CVSRepository(self.rootname, self.rootpath, self.auth, cfg.utilities, cfg.options.use_rcsparse) # required so that spawned rcs programs correctly expand # $CVSHeader$ os.environ['CVSROOT'] = self.rootpath elif roottype == 'svn': self.rootpath = vclib.svn.canonicalize_rootpath(rootpath) self.repos = vclib.svn.SubversionRepository(self.rootname, self.rootpath, self.auth, cfg.utilities, cfg.options.svn_config_dir) else: raise vclib.ReposNotFound() except vclib.ReposNotFound: pass debug.t_end('select-repos') if self.repos is None: raise debug.ViewVCException( 'The root "%s" is unknown. If you believe the value is ' 'correct, then please double-check your configuration.' % self.rootname, "404 Not Found") if self.repos: debug.t_start('select-repos') self.repos.open() debug.t_end('select-repos') type = self.repos.roottype() if type == vclib.SVN: self.roottype = 'svn' elif type == vclib.CVS: self.roottype = 'cvs' else: raise debug.ViewVCException( 'The root "%s" has an unknown type ("%s"). Expected "cvs" or "svn".' % (self.rootname, type), "500 Internal Server Error") # If this is using an old-style 'rev' parameter, redirect to new hotness. # Subversion URLs will now use 'pathrev'; CVS ones use 'revision'. if self.repos and self.query_dict.has_key('rev'): if self.roottype == 'svn' \ and not self.query_dict.has_key('pathrev') \ and not self.view_func == view_revision: self.query_dict['pathrev'] = self.query_dict['rev'] del self.query_dict['rev'] else: # elif not self.query_dict.has_key('revision'): ? 
        self.query_dict['revision'] = self.query_dict['rev']
        del self.query_dict['rev']
      needs_redirect = 1

    if self.repos and self.view_func is not redirect_pathrev:
      # If this is an intended-to-be-hidden CVSROOT path, complain.
      if cfg.options.hide_cvsroot \
         and is_cvsroot_path(self.roottype, path_parts):
        raise debug.ViewVCException("Unknown location: /%s" % self.where,
                                    "404 Not Found")

      # Make sure path exists
      self.pathrev = pathrev = self.query_dict.get('pathrev')
      self.pathtype = _repos_pathtype(self.repos, path_parts, pathrev)

      if self.pathtype is None:
        # Path doesn't exist, see if it could be an old-style ViewVC URL
        # with a fake suffix.
        result = _strip_suffix('.diff', path_parts, pathrev, vclib.FILE, \
                               self.repos, view_diff) or \
                 _strip_suffix('.tar.gz', path_parts, pathrev, vclib.DIR, \
                               self.repos, download_tarball) or \
                 _strip_suffix('root.tar.gz', path_parts, pathrev, vclib.DIR,\
                               self.repos, download_tarball) or \
                 _strip_suffix(self.rootname + '-root.tar.gz', \
                               path_parts, pathrev, vclib.DIR, \
                               self.repos, download_tarball) or \
                 _strip_suffix('root', \
                               path_parts, pathrev, vclib.DIR, \
                               self.repos, download_tarball) or \
                 _strip_suffix(self.rootname + '-root', \
                               path_parts, pathrev, vclib.DIR, \
                               self.repos, download_tarball)
        if result:
          self.path_parts, self.pathtype, self.view_func = result
          self.where = _path_join(self.path_parts)
          needs_redirect = 1
        else:
          raise debug.ViewVCException("Unknown location: /%s" % self.where,
                                      "404 Not Found")

      # If we have an old ViewCVS Attic URL which is still valid, redirect
      if self.roottype == 'cvs':
        attic_parts = None
        if (self.pathtype == vclib.FILE and len(self.path_parts) > 1
            and self.path_parts[-2] == 'Attic'):
          attic_parts = self.path_parts[:-2] + self.path_parts[-1:]
        elif (self.pathtype == vclib.DIR and len(self.path_parts) > 0
              and self.path_parts[-1] == 'Attic'):
          attic_parts = self.path_parts[:-1]
        if attic_parts:
          self.path_parts = attic_parts
          self.where = _path_join(attic_parts)
          needs_redirect = 1

    if self.view_func is None:
      # view parameter is not set, try looking at pathtype and the
      # other parameters
      if not self.rootname:
        self.view_func = view_roots
      elif self.pathtype == vclib.DIR:
        # ViewCVS 0.9.2 used to put ?tarball=1 at the end of tarball urls
        if self.query_dict.has_key('tarball'):
          self.view_func = download_tarball
        else:
          self.view_func = view_directory
      elif self.pathtype == vclib.FILE:
        if self.query_dict.has_key('r1') and self.query_dict.has_key('r2'):
          self.view_func = view_diff
        elif self.query_dict.has_key('annotate'):
          self.view_func = view_annotate
        elif self.query_dict.has_key('graph'):
          if not self.query_dict.has_key('makeimage'):
            self.view_func = view_cvsgraph
          else:
            self.view_func = view_cvsgraph_image
        elif self.query_dict.has_key('revision') \
                 or cfg.options.default_file_view != "log":
          if cfg.options.default_file_view == "markup" \
             or self.query_dict.get('content-type', None) \
                in (viewcvs_mime_type, alt_mime_type):
            self.view_func = view_markup
          else:
            self.view_func = view_checkout
        else:
          self.view_func = view_log

    # If we've chosen the roots or revision view, our effective
    # location is not really "inside" the repository, so we have no
    # path and therefore no path parts or type, either.
    if self.view_func is view_revision or self.view_func is view_roots:
      self.where = ''
      self.path_parts = []
      self.pathtype = None

    # if we have a directory and the request didn't end in "/", then redirect
    # so that it does.
    if (self.pathtype == vclib.DIR and path_info[-1:] != '/'
        and self.view_func is not download_tarball
        and self.view_func is not redirect_pathrev):
      needs_redirect = 1

    # startup is done now.
    debug.t_end('startup')

    # If we need to redirect, do so. Otherwise, handle our requested view.
    if needs_redirect:
      self.server.redirect(self.get_url())
    else:
      debug.t_start('view-func')
      self.view_func(self)
      debug.t_end('view-func')

  def get_url(self, escape=0, partial=0, prefix=0, **args):
    """Constructs a link to another ViewVC page just like the get_link
    function except that it returns a single URL instead of a URL
    split into components.  If PREFIX is set, include the protocol and
    server name portions of the URL."""

    url, params = apply(self.get_link, (), args)
    qs = compat.urlencode(params)
    if qs:
      result = urllib.quote(url, _URL_SAFE_CHARS) + '?' + qs
    else:
      result = urllib.quote(url, _URL_SAFE_CHARS)

    if partial:
      # leave the URL ready to have more parameters appended by the caller
      result = result + (qs and '&' or '?')
    if escape:
      result = self.server.escape(result)
    if prefix:
      result = '%s://%s%s' % \
               (self.server.getenv("HTTPS") == "on" and "https" or "http",
                self.server.getenv("HTTP_HOST"),
                result)
    return result

  def get_form(self, **args):
    """Constructs a link to another ViewVC page just like the get_link
    function except that it returns a base URL suitable for use as an
    HTML form action, and an iterable object with .name and .value
    attributes representing stuff that should be in <input type=hidden>
    tags with the link parameters."""

    url, params = apply(self.get_link, (), args)
    action = self.server.escape(urllib.quote(url, _URL_SAFE_CHARS))
    hidden_values = []
    for name, value in params.items():
      hidden_values.append(_item(name=self.server.escape(name),
                                 value=self.server.escape(value)))
    return action, hidden_values

  def get_link(self, view_func=None, where=None, pathtype=None, params=None):
    """Constructs a link pointing to another ViewVC page. All arguments
    correspond to members of the Request object. If they are set to
    None they take values from the current page. Return value is a base
    URL and a dictionary of parameters"""

    cfg = self.cfg

    if view_func is None:
      view_func = self.view_func

    if params is None:
      params = self.query_dict.copy()
    else:
      params = params.copy()

    # must specify both where and pathtype or neither
    assert (where is None) == (pathtype is None)

    # if we are asking for the revision info view, we don't need any
    # path information
    if (view_func is view_revision or view_func is view_roots
        or view_func is redirect_pathrev):
      where = pathtype = None
    elif where is None:
      where = self.where
      pathtype = self.pathtype

    # no need to add sticky variables for views with no links
    sticky_vars = not (view_func is view_checkout
                       or view_func is download_tarball)

    # The logic used to construct the URL is an inverse of the
    # logic used to interpret URLs in Request.run_viewvc

    url = self.script_name

    # add checkout magic if necessary
    if view_func is view_checkout and cfg.options.checkout_magic:
      url = url + '/' + checkout_magic_path

    # add root to url
    rootname = None
    if view_func is not view_roots:
      if cfg.options.root_as_url_component:
        # remove root from parameter list if present
        try:
          rootname = params['root']
        except KeyError:
          rootname = self.rootname
        else:
          del params['root']

        # add root path component
        if rootname is not None:
          url = url + '/' + rootname

      else:
        # add root to parameter list
        try:
          rootname = params['root']
        except KeyError:
          rootname = params['root'] = self.rootname

        # no need to specify default root
        if rootname == cfg.general.default_root:
          del params['root']

    # add 'pathrev' value to parameter list
    if (self.pathrev is not None
        and not params.has_key('pathrev')
        and view_func is not view_revision
        and rootname == self.rootname):
      params['pathrev'] = self.pathrev

    # add path
    if where:
      url = url + '/' + where

    # add trailing slash for a directory
    if pathtype == vclib.DIR:
      url = url + '/'
    # normalize top level URLs for use in Location headers and A tags
    elif not url:
      url = '/'

    # no need to explicitly specify directory view for a directory
    if view_func is view_directory and pathtype == vclib.DIR:
      view_func = None

    # no need to explicitly specify roots view when in root_as_url
    # mode or there's no default root
    if view_func is view_roots and (cfg.options.root_as_url_component
                                    or not cfg.general.default_root):
      view_func = None

    # no need to explicitly specify annotate view when
    # there's an annotate parameter
    if view_func is view_annotate and params.get('annotate') is not None:
      view_func = None

    # no need to explicitly specify diff view when
    # there's r1 and r2 parameters
    if (view_func is view_diff and params.get('r1') is not None
        and params.get('r2') is not None):
      view_func = None

    # no need to explicitly specify checkout view when it's the default
    # view or when checkout_magic is enabled
    if view_func is view_checkout:
      if ((cfg.options.default_file_view == "co" and pathtype == vclib.FILE)
          or cfg.options.checkout_magic):
        view_func = None

    # no need to explicitly specify markup view when it's the default view
    if view_func is view_markup:
      if (cfg.options.default_file_view == "markup" \
          and pathtype == vclib.FILE):
        view_func = None

    # set the view parameter
    view_code = _view_codes.get(view_func)
    if view_code and not (params.has_key('view') and params['view'] is None):
      params['view'] = view_code

    # add sticky values to parameter list
    if sticky_vars:
      for name in _sticky_vars:
        value = self.query_dict.get(name)
        if value is not None and not params.has_key(name):
          params[name] = value

    # remove null values from parameter list
    for name, value in params.items():
      if value is None:
        del params[name]

    return url, params


def _path_parts(path):
  """Split up a repository path into a list of path components"""
  # clean it up. this removes duplicate '/' characters and any that may
  # exist at the front or end of the path.
  return filter(None, string.split(path, '/'))


def _normalize_path(path):
  """Collapse leading slashes in the script name

  You only get multiple slashes in the script name when users accidentally
  type urls like http://abc.com//viewvc.cgi/, but we correct for it
  because we output the script name in links and web browsers
  interpret //viewvc.cgi/ as http://viewvc.cgi/
  """
  i = 0
  for c in path:
    if c != '/':
      break
    i = i + 1
  if i:
    # keep exactly one leading slash
    return path[i-1:]
  return path


def _validate_param(name, value):
  """Validate whether the given value is acceptable for the param name.

  If the value is not allowed, then an error response is generated, and
  this function throws an exception.  Otherwise, it simply returns None.
  """
  # First things first -- check that we have a legal parameter name.
  try:
    validator = _legal_params[name]
  except KeyError:
    raise debug.ViewVCException(
      'An illegal parameter name was provided.',
      '400 Bad Request')

  # Is there a validator?  Is it a regex or a function?  Validate if
  # we can, returning without incident on valid input.
  if validator is None:
    # None means "anything goes" for this parameter
    return
  elif hasattr(validator, 'match'):
    if validator.match(value):
      return
  else:
    if validator(value):
      return

  # If we get here, the input value isn't valid.
  raise debug.ViewVCException(
    'An illegal value was provided for the "%s" parameter.' % (name),
    '400 Bad Request')


def _validate_regex(value):
  # Accept VALUE only if it compiles as a regular expression.
  ### we need to watch the flow of these parameters through the system
  ### to ensure they don't hit the page unescaped. otherwise, these
  ### parameters could constitute a CSS (cross-site scripting) attack.
  try:
    re.compile(value)
    return True
  except:
    # NOTE(review): bare except also hides non-re errors -- intentional
    # best-effort rejection; any failure to compile means "invalid".
    return None


def _validate_view(value):
  # Return true iff VALUE is one of our allowed views.
  return _views.has_key(value)


def _validate_mimetype(value):
  # For security purposes, we only allow mimetypes from a predefined set
  # thereof.
  return value in (viewcvs_mime_type, alt_mime_type, 'text/plain')


# obvious things here. note that we don't need uppercase for alpha.
_re_validate_alpha = re.compile('^[a-z]+$')
_re_validate_number = re.compile('^[0-9]+$')
_re_validate_boolint = re.compile('^[01]$')

# when comparing two revs, we sometimes construct REV:SYMBOL, so ':' is needed
_re_validate_revnum = re.compile('^[-_.a-zA-Z0-9:~\\[\\]/]*$')

# date time values
_re_validate_datetime = re.compile(r'^(\d\d\d\d-\d\d-\d\d(\s+\d\d:\d\d'
                                   '(:\d\d)?)?)?$')

# the legal query parameters and their validation functions
# (a value of None means the parameter accepts any value)
_legal_params = {
  'root'          : None,
  'view'          : _validate_view,
  'search'        : _validate_regex,
  'p1'            : None,
  'p2'            : None,

  'hideattic'     : _re_validate_boolint,
  'limit_changes' : _re_validate_number,
  'sortby'        : _re_validate_alpha,
  'sortdir'       : _re_validate_alpha,
  'logsort'       : _re_validate_alpha,
  'diff_format'   : _re_validate_alpha,
  'pathrev'       : _re_validate_revnum,
  'dir_pagestart' : _re_validate_number,
  'log_pagestart' : _re_validate_number,
  'annotate'      : _re_validate_revnum,
  'graph'         : _re_validate_revnum,
  'makeimage'     : _re_validate_boolint,
  'r1'            : _re_validate_revnum,
  'tr1'           : _re_validate_revnum,
  'r2'            : _re_validate_revnum,
  'tr2'           : _re_validate_revnum,
  'revision'      : _re_validate_revnum,
  'content-type'  : _validate_mimetype,

  # for query
  'file_match'    : _re_validate_alpha,
  'branch_match'  : _re_validate_alpha,
  'who_match'     : _re_validate_alpha,
  'comment_match' : _re_validate_alpha,
  'dir'           : None,
  'file'          : None,
  'branch'        : None,
  'who'           : None,
  'comment'       : None,
  'querysort'     : _re_validate_alpha,
  'date'          : _re_validate_alpha,
  'hours'         : _re_validate_number,
  'mindate'       : _re_validate_datetime,
  'maxdate'       : _re_validate_datetime,
  'format'        : _re_validate_alpha,

  # for redirect_pathrev
  'orig_path'     : None,
  'orig_pathtype' : None,
  'orig_pathrev'  : None,
  'orig_view'     : None,

  # deprecated
  'parent'        : _re_validate_boolint,
  'rev'           : _re_validate_revnum,
  'tarball'       : _re_validate_boolint,
  'hidecvsroot'   : _re_validate_boolint,
  }


def _path_join(path_parts):
  """Join a list of path components into a '/'-separated repository path."""
  return string.join(path_parts, '/')


def _strip_suffix(suffix, path_parts, rev, pathtype, repos, view_func):
  """strip the suffix from a repository path if the resulting path is of
  the specified type, otherwise return None"""
  if not path_parts:
    return None
  l = len(suffix)
  if path_parts[-1][-l:] == suffix:
    # copy before mutating; callers keep their original list
    path_parts = path_parts[:]
    if len(path_parts[-1]) == l:
      del path_parts[-1]
    else:
      path_parts[-1] = path_parts[-1][:-l]
    t = _repos_pathtype(repos, path_parts, rev)
    if pathtype == t:
      return path_parts, t, view_func
  return None


def _repos_pathtype(repos, path_parts, rev):
  """Return the type of a repository path, or None if the path doesn't
  exist"""
  try:
    return repos.itemtype(path_parts, rev)
  except vclib.ItemNotFound:
    return None


def _orig_path(request, rev_param='revision', path_param=None):
  "Get original path of requested file at old revision before copies or moves"
  # The 'pathrev' variable is interpreted by nearly all ViewVC views to
  # provide a browsable snapshot of a repository at some point in its history.
  # 'pathrev' is a tag name for CVS repositories and a revision number for
  # Subversion repositories. It's automatically propagated between pages by
  # logic in the Request.get_link() function which adds it to links like a
  # sticky variable. When 'pathrev' is set, directory listings only include
  # entries that exist in the specified revision or tag. Similarly, log pages
  # will only show revisions preceding the point in history specified by
  # 'pathrev.' Markup, checkout, and annotate pages show the 'pathrev'
  # revision of files by default when no other revision is specified.
  #
  # In Subversion repositories, paths are always considered to refer to the
  # pathrev revision. For example, if there is a "circle.jpg" in revision 3,
  # which is renamed and modified as "square.jpg" in revision 4, the original
  # circle image is visible at the following URLs:
  #
  #     *checkout*/circle.jpg?pathrev=3
  #     *checkout*/square.jpg?revision=3
  #     *checkout*/square.jpg?revision=3&pathrev=4
  #
  # Note that the following:
  #
  #     *checkout*/circle.jpg?rev=3
  #
  # now gets redirected to one of the following URLs:
  #
  #     *checkout*/circle.jpg?pathrev=3 (for Subversion)
  #     *checkout*/circle.jpg?revision=3 (for CVS)
  #
  rev = request.query_dict.get(rev_param, request.pathrev)
  path = request.query_dict.get(path_param, request.where)

  if rev is not None and hasattr(request.repos, '_getrev'):
    try:
      pathrev = request.repos._getrev(request.pathrev)
      rev = request.repos._getrev(rev)
    except vclib.InvalidRevision:
      raise debug.ViewVCException('Invalid revision', '404 Not Found')
    return _path_parts(request.repos.get_location(path, pathrev, rev)), rev
  return _path_parts(path), rev


def setup_authorizer(cfg, username, rootname=None):
  """Setup the authorizer. If ROOTNAME is provided, assume that
  per-root options have not been overlayed. Otherwise, assume they
  have (and fetch the authorizer for the configured root)."""

  if rootname is None:
    authorizer = cfg.options.authorizer
    params = cfg.get_authorizer_params()
  else:
    authorizer, params = cfg.get_authorizer_and_params_hack(rootname)

  # No configured authorizer? No problem.
  if not authorizer:
    return None

  # First, try to load a module with the configured name.
  import imp
  fp = None
  try:
    try:
      fp, path, desc = imp.find_module("%s" % (authorizer), vcauth.__path__)
      my_auth = imp.load_module('viewvc', fp, path, desc)
    except ImportError:
      raise debug.ViewVCException(
        'Invalid authorizer (%s) specified for root "%s"' \
        % (authorizer, rootname),
        '500 Internal Server Error')
  finally:
    # imp.find_module opens the module file; always close it
    if fp:
      fp.close()

  # Add a rootname mapping callback function to the parameters.
def _root_lookup_func(cb_rootname): return locate_root(cfg, cb_rootname) # Finally, instantiate our Authorizer. return my_auth.ViewVCAuthorizer(_root_lookup_func, username, params) def check_freshness(request, mtime=None, etag=None, weak=0): cfg = request.cfg # See if we are supposed to disable etags (for debugging, usually) if not cfg.options.generate_etags: return 0 request_etag = request_mtime = None if etag is not None: if weak: etag = 'W/"%s"' % etag else: etag = '"%s"' % etag request_etag = request.server.getenv('HTTP_IF_NONE_MATCH') if mtime is not None: try: request_mtime = request.server.getenv('HTTP_IF_MODIFIED_SINCE') request_mtime = rfc822.mktime_tz(rfc822.parsedate_tz(request_mtime)) except: request_mtime = None # if we have an etag, use that for freshness checking. # if not available, then we use the last-modified time. # if not available, then the document isn't fresh. if etag is not None: isfresh = (request_etag == etag) elif mtime is not None: isfresh = (request_mtime >= mtime) else: isfresh = 0 # require revalidation after the configured amount of time if cfg and cfg.options.http_expiration_time >= 0: expiration = compat.formatdate(time.time() + cfg.options.http_expiration_time) request.server.addheader('Expires', expiration) request.server.addheader('Cache-Control', 'max-age=%d' % cfg.options.http_expiration_time) if isfresh: request.server.header(status='304 Not Modified') else: if etag is not None: request.server.addheader('ETag', etag) if mtime is not None: request.server.addheader('Last-Modified', compat.formatdate(mtime)) return isfresh def get_view_template(cfg, view_name, language="en"): # See if the configuration specifies a template for this view. If # not, use the default template path for this view. tname = vars(cfg.templates).get(view_name) or view_name + ".ezt" # Template paths are relative to the configurated template_dir (if # any, "templates" otherwise), so build the template path as such. 
  tname = os.path.join(cfg.options.template_dir or "templates", tname)

  # Allow per-language template selection.
  tname = string.replace(tname, '%lang%', language)

  # Finally, construct the whole template path.
  tname = cfg.path(tname)

  debug.t_start('ezt-parse')
  template = ezt.Template(tname)
  debug.t_end('ezt-parse')

  return template


def get_writeready_server_file(request, content_type=None, encoding=None,
                               content_length=None, allow_compress=True):
  """Return a file handle to a response body stream, after outputting
  any queued special headers (on REQUEST.server) and (optionally) a
  'Content-Type' header whose value is CONTENT_TYPE and character set
  is ENCODING.

  If CONTENT_LENGTH is provided and compression is not in use, also
  generate a 'Content-Length' header for this response.

  Callers may use ALLOW_COMPRESS to disable compression where it would
  otherwise be allowed.  (Such as when transmitting an
  already-compressed response.)

  After this function is called, it is too late to add new headers to
  the response."""

  if allow_compress and request.gzip_compress_level:
    request.server.addheader('Content-Encoding', 'gzip')
  elif content_length is not None:
    # length is only meaningful when the body isn't gzip-wrapped
    request.server.addheader('Content-Length', content_length)

  if content_type and encoding:
    request.server.header("%s; charset=%s" % (content_type, encoding))
  elif content_type:
    request.server.header(content_type)
  else:
    request.server.header()

  if allow_compress and request.gzip_compress_level:
    fp = gzip.GzipFile('', 'wb', request.gzip_compress_level,
                       request.server.file())
  else:
    fp = request.server.file()

  return fp


def generate_page(request, view_name, data, content_type=None):
  """Render the template for VIEW_NAME with DATA straight to the
  response stream for REQUEST."""
  server_fp = get_writeready_server_file(request, content_type)
  template = get_view_template(request.cfg, view_name, request.language)
  template.generate(server_fp, data)


def nav_path(request):
  """Return current path as list of items with "name" and "href" members

  The href members are view_directory links for directories and view_log
  links for files, but are set to None when the link would point to
  the current view"""

  if not request.repos:
    return []

  is_dir = request.pathtype == vclib.DIR

  # add root item
  items = []
  root_item = _item(name=request.server.escape(request.repos.name), href=None)
  if request.path_parts or request.view_func is not view_directory:
    root_item.href = request.get_url(view_func=view_directory,
                                     where='', pathtype=vclib.DIR,
                                     params={}, escape=1)
  items.append(root_item)

  # add path part items
  path_parts = []
  for part in request.path_parts:
    path_parts.append(part)
    is_last = len(path_parts) == len(request.path_parts)

    item = _item(name=part, href=None)

    if not is_last or (is_dir and request.view_func is not view_directory):
      item.href = request.get_url(view_func=view_directory,
                                  where=_path_join(path_parts),
                                  pathtype=vclib.DIR,
                                  params={}, escape=1)
    elif not is_dir and request.view_func is not view_log:
      item.href = request.get_url(view_func=view_log,
                                  where=_path_join(path_parts),
                                  pathtype=vclib.FILE,
                                  params={}, escape=1)
    items.append(item)

  return items


def prep_tags(request, tags):
  """Return a name-sorted list of items (with "name" and "href" members)
  linking each tag in TAGS to the current page with ?pathrev=TAGNAME."""

  # build the current page's URL with the pathrev parameter left dangling
  url, params = request.get_link(params={'pathrev': None})
  params = compat.urlencode(params)
  if params:
    url = urllib.quote(url, _URL_SAFE_CHARS) + '?' + params + '&pathrev='
  else:
    url = urllib.quote(url, _URL_SAFE_CHARS) + '?pathrev='
  url = request.server.escape(url)

  links = [ ]
  for tag in tags:
    links.append(_item(name=tag.name, href=url+tag.name))
  links.sort(lambda a, b: cmp(a.name, b.name))
  return links


def guess_mime(filename):
  """Return the guessed MIME type of FILENAME (or None if unknown)."""
  return mimetypes.guess_type(filename)[0]


def is_viewable_image(mime_type):
  """Return true iff MIME_TYPE is an image type we can display inline."""
  return mime_type and mime_type in ('image/gif', 'image/jpeg', 'image/png')


def is_text(mime_type):
  """Return true iff MIME_TYPE is textual (or unknown)."""
  return not mime_type or mime_type[:5] == 'text/'


def is_cvsroot_path(roottype, path_parts):
  """Return true iff PATH_PARTS names the CVSROOT directory of a CVS root."""
  return roottype == 'cvs' and path_parts and path_parts[0] == 'CVSROOT'


def is_plain_text(mime_type):
  """Return true iff MIME_TYPE is text/plain (or unknown)."""
  return not mime_type or mime_type == 'text/plain'


def default_view(mime_type, cfg):
  "Determine whether file should be viewed through markup page or sent raw"
  # If the mime type is text/anything or a supported image format we view
  # through the markup page. If the mime type is something else, we send
  # it directly to the browser. That way users can see things like flash
  # animations, pdfs, word documents, multimedia, etc, which wouldn't be
  # very useful marked up. If the mime type is totally unknown (happens when
  # we encounter an unrecognized file extension) we also view it through
  # the markup page since that's better than sending it text/plain.
  if ('markup' in cfg.options.allowed_views and
      (is_viewable_image(mime_type) or is_text(mime_type))):
    return view_markup
  return view_checkout


def is_binary_file_mime_type(mime_type, cfg):
  """Return True iff MIME_TYPE is set and matches one of the binary
  file mime type patterns in CFG."""
  if mime_type:
    for pattern in cfg.options.binary_mime_types:
      if fnmatch.fnmatch(mime_type, pattern):
        return True
  return False


def get_file_view_info(request, where, rev=None, mime_type=None, pathrev=-1):
  """Return an object holding common hrefs and a viewability flag used
  for various views of FILENAME at revision REV whose MIME type is
  MIME_TYPE.
  The object's members include:
     view_href
     download_href
     download_text_href
     annotate_href
     revision_href
     prefer_markup
  """

  rev = rev and str(rev) or None
  mime_type = mime_type or guess_mime(where)
  if pathrev == -1: # cheesy default value, since we need to preserve None
    pathrev = request.pathrev

  view_href = None
  download_href = None
  download_text_href = None
  annotate_href = None
  revision_href = None

  if 'markup' in request.cfg.options.allowed_views:
    view_href = request.get_url(view_func=view_markup,
                                where=where,
                                pathtype=vclib.FILE,
                                params={'revision': rev,
                                        'pathrev': pathrev},
                                escape=1)
  if 'co' in request.cfg.options.allowed_views:
    download_href = request.get_url(view_func=view_checkout,
                                    where=where,
                                    pathtype=vclib.FILE,
                                    params={'revision': rev,
                                            'pathrev': pathrev},
                                    escape=1)
    if not is_plain_text(mime_type):
      download_text_href = request.get_url(view_func=view_checkout,
                                           where=where,
                                           pathtype=vclib.FILE,
                                           params={'content-type': 'text/plain',
                                                   'revision': rev,
                                                   'pathrev': pathrev},
                                           escape=1)
  if 'annotate' in request.cfg.options.allowed_views:
    annotate_href = request.get_url(view_func=view_annotate,
                                    where=where,
                                    pathtype=vclib.FILE,
                                    params={'annotate': rev,
                                            'pathrev': pathrev},
                                    escape=1)
  if request.roottype == 'svn':
    revision_href = request.get_url(view_func=view_revision,
                                    params={'revision': rev},
                                    escape=1)

  # binary files get neither markup, text-download, nor annotate links
  is_binary_file = is_binary_file_mime_type(mime_type, request.cfg)
  if is_binary_file:
    download_text_href = annotate_href = view_href = None
    prefer_markup = False
  else:
    prefer_markup = default_view(mime_type, request.cfg) == view_markup

  return _item(view_href=view_href,
               download_href=download_href,
               download_text_href=download_text_href,
               annotate_href=annotate_href,
               revision_href=revision_href,
               prefer_markup=ezt.boolean(prefer_markup))


# Matches URLs
_re_rewrite_url = re.compile('((http|https|ftp|file|svn|svn\+ssh)'
                             '(://[-a-zA-Z0-9%.~:_/]+)((\?|\&)'
                             '([-a-zA-Z0-9%.~:_]+)=([-a-zA-Z0-9%.~:_])+)*'
                             '(#([-a-zA-Z0-9%.~:_]+)?)?)')

# Matches email addresses
_re_rewrite_email = re.compile('([-a-zA-Z0-9_.\+]+)@'
                               '(([-a-zA-Z0-9]+\.)+[A-Za-z]{2,4})')

# Matches revision references
_re_rewrite_svnrevref = re.compile(r'\b(r|rev #?|revision #?)([0-9]+)\b')


class ViewVCHtmlFormatterTokens:
  """Holds an ordered list of tokens (text or match objects paired with
  converter callbacks) produced by ViewVCHtmlFormatter.tokenize_text()."""

  def __init__(self, tokens):
    self.tokens = tokens

  def get_result(self, maxlen=0):
    """Format the tokens per the registered set of formatters, and
    limited to MAXLEN visible characters (or unlimited if MAXLEN is
    0).  Return a 3-tuple containing the formatted result string, the
    number of visible characters in the result string, and a boolean
    flag indicating whether or not S was truncated."""
    out = ''
    out_len = 0
    for token in self.tokens:
      chunk, chunk_len = token.converter(token.match, token.userdata, maxlen)
      out = out + chunk
      out_len = out_len + chunk_len
      if maxlen:
        maxlen = maxlen - chunk_len
        if maxlen <= 0:
          # visible budget exhausted -- flag the truncation
          return out, out_len, 1
    return out, out_len, 0


class ViewVCHtmlFormatter:
  """Format a string as HTML-encoded output with customizable markup
  rules, for example turning strings that look like URLs into anchor links.

  NOTE:  While there might appear to be some unused portions of this
  interface, there is a good chance that there are consumers outside
  of ViewVC itself that make use of these things.
  """

  def __init__(self):
    self._formatters = []

  def format_url(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as
           linkified URL, with no more than MAXLEN characters in the
           non-HTML-tag bits.  If MAXLEN is 0, there is no maximum.
         - the number of non-HTML-tag characters returned.
    """
    s = mobj.group(0)
    trunc_s = maxlen and s[:maxlen] or s
    return '<a href="%s">%s</a>' % (sapi.escape(s),
                                    sapi.escape(trunc_s)), \
           len(trunc_s)

  def format_email(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as
           linkified email address, with no more than MAXLEN
           characters in the non-HTML-tag bits.  If MAXLEN is 0,
           there is no maximum.
- the number of non-HTML-tag characters returned. """ s = mobj.group(0) trunc_s = maxlen and s[:maxlen] or s return '<a href="mailto:%s">%s</a>' % (urllib.quote(s), self._entity_encode(trunc_s)), \ len(trunc_s) def format_email_obfuscated(self, mobj, userdata, maxlen=0): """Return a 2-tuple containing: - the text represented by MatchObject MOBJ, formatted as an entity-encoded email address, with no more than MAXLEN characters in the non-HTML-tag bits. If MAXLEN is 0, there is no maximum. - the number of non-HTML-tag characters returned. """ s = mobj.group(0) trunc_s = maxlen and s[:maxlen] or s return self._entity_encode(trunc_s), len(trunc_s) def format_email_truncated(self, mobj, userdata, maxlen=0): """Return a 2-tuple containing: - the text represented by MatchObject MOBJ, formatted as an HTML-escaped truncated email address of no more than MAXLEN characters. If MAXLEN is 0, there is no maximum. - the number of characters returned. """ s = mobj.group(1) s_len = len(s) if (maxlen == 0) or (s_len < (maxlen - 1)): return self._entity_encode(s) + '&#64;&hellip;', s_len + 2 elif s_len < maxlen: return self._entity_encode(s) + '&#64;', s_len + 1 else: trunc_s = mobj.group(1)[:maxlen] return self._entity_encode(trunc_s), len(trunc_s) def format_svnrevref(self, mobj, userdata, maxlen=0): """Return a 2-tuple containing: - the text represented by MatchObject MOBJ, formatted as an linkified URL to a ViewVC Subversion revision view, with no more than MAXLEN characters in the non-HTML-tag portions. If MAXLEN is 0, there is no maximum. - the number of characters returned. USERDATA is a function that accepts a revision reference and returns a URL to that revision. 
""" s = mobj.group(0) revref = mobj.group(2) trunc_s = maxlen and s[:maxlen] or s revref_url = userdata(revref) return '<a href="%s">%s</a>' % (sapi.escape(revref_url), sapi.escape(trunc_s)), \ len(trunc_s) def format_custom_url(self, mobj, userdata, maxlen=0): """Return a 2-tuple containing: - the text represented by MatchObject MOBJ, formatted as an linkified URL created by substituting match groups 0-9 into USERDATA (which is a format string that uses \N to represent the substitution locations) and with no more than MAXLEN characters in the non-HTML-tag portions. If MAXLEN is 0, there is no maximum. - the number of characters returned. """ format = userdata text = mobj.group(0) url = format for i in range(9): try: repl = mobj.group(i) except: repl = '' url = url.replace('\%d' % (i), repl) trunc_s = maxlen and text[:maxlen] or text return '<a href="%s">%s</a>' % (sapi.escape(url), sapi.escape(trunc_s)), \ len(trunc_s) def format_text(self, s, unused, maxlen=0): """Return a 2-tuple containing: - the text S, HTML-escaped, containing no more than MAXLEN characters. If MAXLEN is 0, there is no maximum. - the number of characters returned. """ trunc_s = maxlen and s[:maxlen] or s return sapi.escape(trunc_s), len(trunc_s) def add_formatter(self, regexp, conv, userdata=None): """Register a formatter which finds instances of strings matching REGEXP, and using the function CONV and USERDATA to format them. CONV is a function which accepts three parameters: - the MatchObject which holds the string portion to be formatted, - the USERDATA object, - the maximum number of characters from that string to use for human-readable output (or 0 to indicate no maximum). """ if type(regexp) == type(''): regexp = re.compile(regexp) self._formatters.append([regexp, conv, userdata]) def get_result(self, s, maxlen=0): """Format S per the set of formatters registered with this object, and limited to MAXLEN visible characters (or unlimited if MAXLEN is 0). 
    Return a 3-tuple containing the formatted result string, the
    number of visible characters in the result string, and a boolean
    flag indicating whether or not S was truncated.
    """
    return self.tokenize_text(s).get_result(maxlen)

  def tokenize_text(self, s):
    """Return a ViewVCHtmlFormatterTokens object containing the
    tokens created when parsing the string S.  Callers can use that
    object's get_result() function to retrieve HTML-formatted text.
    """
    tokens = []
    # We could just have a "while s:" here instead of "for line: while
    # line:", but for really large log messages with heavy
    # tokenization, the cost in both performance and memory
    # consumption of the approach taken was atrocious.
    for line in string.split(string.replace(s, '\r\n', '\n'), '\n'):
      line = line + '\n'
      while line:
        best_match = best_conv = best_userdata = None
        for test in self._formatters:
          match = test[0].search(line)
          # If we find a match and (a) it's our first one, or (b) it
          # matches text earlier than our previous best match, or (c) it
          # matches text at the same location as our previous best match
          # but extends to cover more text than that match, then this is
          # our new best match.
          #
          # Implied here is that when multiple formatters match exactly
          # the same text, the first formatter in the registration list
          # wins.
          if match \
             and ((best_match is None) \
                  or (match.start() < best_match.start())
                  or ((match.start() == best_match.start()) \
                      and (match.end() > best_match.end()))):
            best_match = match
            best_conv = test[1]
            best_userdata = test[2]
        # If we found a match...
        if best_match:
          # ... add any non-matching stuff first, then the matching bit.
          start = best_match.start()
          end = best_match.end()
          if start > 0:
            # Leading plain text becomes a format_text token.
            tokens.append(_item(match=line[:start],
                                converter=self.format_text,
                                userdata=None))
          tokens.append(_item(match=best_match,
                              converter=best_conv,
                              userdata=best_userdata))
          line = line[end:]
        else:
          # Otherwise, just add the rest of the string.
tokens.append(_item(match=line, converter=self.format_text, userdata=None)) line = '' return ViewVCHtmlFormatterTokens(tokens) def _entity_encode(self, s): return string.join(map(lambda x: '&#%d;' % (ord(x)), s), '') class LogFormatter: def __init__(self, request, log): self.request = request self.log = log or '' self.tokens = None self.cache = {} # (maxlen, htmlize) => resulting_log def get(self, maxlen=0, htmlize=1): cfg = self.request.cfg # Prefer the cache. if self.cache.has_key((maxlen, htmlize)): return self.cache[(maxlen, htmlize)] # If we are HTML-izing... if htmlize: # ...and we don't yet have ViewVCHtmlFormatter() object tokens... if not self.tokens: # ... then get them. lf = ViewVCHtmlFormatter() # Rewrite URLs. lf.add_formatter(_re_rewrite_url, lf.format_url) # Rewrite Subversion revision references. if self.request.roottype == 'svn': def revision_to_url(rev): return self.request.get_url(view_func=view_revision, params={'revision': rev}, escape=0) lf.add_formatter(_re_rewrite_svnrevref, lf.format_svnrevref, revision_to_url) # Rewrite email addresses. if cfg.options.mangle_email_addresses == 2: lf.add_formatter(_re_rewrite_email, lf.format_email_truncated) elif cfg.options.mangle_email_addresses == 1: lf.add_formatter(_re_rewrite_email, lf.format_email_obfuscated) else: lf.add_formatter(_re_rewrite_email, lf.format_email) # Add custom rewrite handling per configuration. for rule in cfg.options.custom_log_formatting: rule = rule.replace('\\:', '\x01') regexp, format = map(lambda x: x.strip(), rule.split(':', 1)) regexp = regexp.replace('\x01', ':') format = format.replace('\x01', ':') lf.add_formatter(re.compile(regexp), lf.format_custom_url, format) # Tokenize the log message. self.tokens = lf.tokenize_text(self.log) # Use our formatter to ... you know ... format. log, log_len, truncated = self.tokens.get_result(maxlen) result_log = log + (truncated and '&hellip;' or '') # But if we're not HTML-izing... 
else: # ...then do much more simplistic transformations as necessary. log = self.log if cfg.options.mangle_email_addresses == 2: log = re.sub(_re_rewrite_email, r'\1@...', log) result_log = maxlen and log[:maxlen] or log # In either case, populate the cache and return the results. self.cache[(maxlen, htmlize)] = result_log return result_log _time_desc = { 1 : 'second', 60 : 'minute', 3600 : 'hour', 86400 : 'day', 604800 : 'week', 2628000 : 'month', 31536000 : 'year', } def get_time_text(request, interval, num): "Get some time text, possibly internationalized." ### some languages have even harder pluralization rules. we'll have to ### deal with those on demand if num == 0: return '' text = _time_desc[interval] if num == 1: attr = text + '_singular' fmt = '%d ' + text else: attr = text + '_plural' fmt = '%d ' + text + 's' try: fmt = getattr(request.kv.i18n.time, attr) except AttributeError: pass return fmt % num def little_time(request): try: return request.kv.i18n.time.little_time except AttributeError: return 'very little time' def html_time(request, secs, extended=0): secs = long(time.time()) - secs if secs < 2: return little_time(request) breaks = _time_desc.keys() breaks.sort() i = 0 while i < len(breaks): if secs < 2 * breaks[i]: break i = i + 1 value = breaks[i - 1] s = get_time_text(request, value, secs / value) if extended and i > 1: secs = secs % value value = breaks[i - 2] ext = get_time_text(request, value, secs / value) if ext: ### this is not i18n compatible. 
pass on it for now s = s + ', ' + ext return s def common_template_data(request, revision=None, mime_type=None): """Return a ezt.TemplateData instance with data dictionary items common to most ViewVC views.""" cfg = request.cfg # Initialize data dictionary members (sorted alphanumerically) data = ezt.TemplateData({ 'annotate_href' : None, 'cfg' : cfg, 'docroot' : cfg.options.docroot is None \ and request.script_name + '/' + docroot_magic_path \ or cfg.options.docroot, 'download_href' : None, 'download_text_href' : None, 'graph_href': None, 'kv' : request.kv, 'lockinfo' : None, 'log_href' : None, 'nav_path' : nav_path(request), 'pathtype' : None, 'prefer_markup' : ezt.boolean(0), 'queryform_href' : None, 'rev' : None, 'revision_href' : None, 'rootname' : request.rootname \ and request.server.escape(request.rootname) or None, 'rootpath' : request.rootpath, 'roots_href' : None, 'roottype' : request.roottype, 'rss_href' : None, 'tarball_href' : None, 'up_href' : None, 'username' : request.username, 'view' : _view_codes[request.view_func], 'view_href' : None, 'vsn' : __version__, 'where' : request.server.escape(request.where), }) rev = revision if not rev: rev = request.query_dict.get('annotate') if not rev: rev = request.query_dict.get('revision') if not rev and request.roottype == 'svn': rev = request.query_dict.get('pathrev') try: data['rev'] = hasattr(request.repos, '_getrev') \ and request.repos._getrev(rev) or rev except vclib.InvalidRevision: raise debug.ViewVCException('Invalid revision', '404 Not Found') if request.pathtype == vclib.DIR: data['pathtype'] = 'dir' elif request.pathtype == vclib.FILE: data['pathtype'] = 'file' if request.path_parts: dir = _path_join(request.path_parts[:-1]) data['up_href'] = request.get_url(view_func=view_directory, where=dir, pathtype=vclib.DIR, params={}, escape=1) if 'roots' in cfg.options.allowed_views: data['roots_href'] = request.get_url(view_func=view_roots, escape=1, params={}) if request.pathtype == vclib.FILE: fvi = 
get_file_view_info(request, request.where, data['rev'], mime_type) data['view_href'] = fvi.view_href data['download_href'] = fvi.download_href data['download_text_href'] = fvi.download_text_href data['annotate_href'] = fvi.annotate_href data['revision_href'] = fvi.revision_href data['prefer_markup'] = fvi.prefer_markup data['log_href'] = request.get_url(view_func=view_log, params={}, escape=1) if request.roottype == 'cvs' and cfg.options.use_cvsgraph: data['graph_href'] = request.get_url(view_func=view_cvsgraph, params={}, escape=1) file_data = request.repos.listdir(request.path_parts[:-1], request.pathrev, {}) def _only_this_file(item): return item.name == request.path_parts[-1] entries = filter(_only_this_file, file_data) if len(entries) == 1: request.repos.dirlogs(request.path_parts[:-1], request.pathrev, entries, {}) data['lockinfo'] = entries[0].lockinfo elif request.pathtype == vclib.DIR: data['view_href'] = request.get_url(view_func=view_directory, params={}, escape=1) if 'tar' in cfg.options.allowed_views: data['tarball_href'] = request.get_url(view_func=download_tarball, params={}, escape=1) if request.roottype == 'svn': data['revision_href'] = request.get_url(view_func=view_revision, params={'revision': data['rev']}, escape=1) data['log_href'] = request.get_url(view_func=view_log, params={}, escape=1) if is_querydb_nonempty_for_root(request): if request.pathtype == vclib.DIR: params = {} if request.roottype == 'cvs' and request.pathrev: params['branch'] = request.pathrev data['queryform_href'] = request.get_url(view_func=view_queryform, params=params, escape=1) data['rss_href'] = request.get_url(view_func=view_query, params={'date': 'month', 'format': 'rss'}, escape=1) elif request.pathtype == vclib.FILE: parts = _path_parts(request.where) where = _path_join(parts[:-1]) data['rss_href'] = request.get_url(view_func=view_query, where=where, pathtype=request.pathtype, params={'date': 'month', 'format': 'rss', 'file': parts[-1], 'file_match': 'exact'}, 
                                            escape=1)
  return data

def retry_read(src, reqlen=CHUNK_SIZE):
  # Read a chunk from SRC, retrying (with a 1-second sleep) while the
  # stream reports "not yet at EOF" but delivers no data.
  # NOTE(review): the REQLEN parameter appears unused -- the read below
  # always requests CHUNK_SIZE bytes; confirm whether that's intended.
  while 1:
    chunk = src.read(CHUNK_SIZE)
    if not chunk:
      # need to check for eof methods because the cStringIO file objects
      # returned by ccvs don't provide them
      if hasattr(src, 'eof') and src.eof() is None:
        time.sleep(1)
        continue
    return chunk

def copy_stream(src, dst, htmlize=0):
  # Copy SRC to DST chunk by chunk, HTML-escaping each chunk when
  # HTMLIZE is true.
  while 1:
    chunk = retry_read(src)
    if not chunk:
      break
    if htmlize:
      chunk = sapi.escape(chunk)
    dst.write(chunk)

class MarkupPipeWrapper:
  """An EZT callback that outputs a filepointer, plus some optional
  pre- and post- text."""

  def __init__(self, fp, pretext=None, posttext=None, htmlize=0):
    self.fp = fp
    self.pretext = pretext
    self.posttext = posttext
    self.htmlize = htmlize

  def __call__(self, ctx):
    # EZT invokes this with a template context; we stream our wrapped
    # filepointer into the template's output and close it when done.
    if self.pretext:
      ctx.fp.write(self.pretext)
    copy_stream(self.fp, ctx.fp, self.htmlize)
    self.fp.close()
    if self.posttext:
      ctx.fp.write(self.posttext)

_re_rewrite_escaped_url = re.compile('((http|https|ftp|file|svn|svn\+ssh)'
                                     '(://[-a-zA-Z0-9%.~:_/]+)'
                                     '((\?|\&amp;amp;|\&amp;|\&)'
                                     '([-a-zA-Z0-9%.~:_]+)=([-a-zA-Z0-9%.~:_])+)*'
                                     '(#([-a-zA-Z0-9%.~:_]+)?)?)')

def markup_escaped_urls(s):
  # Return a copy of S with all URL references -- which are expected
  # to be already HTML-escaped -- wrapped in <a href=""></a>.
  def _url_repl(match_obj):
    url = match_obj.group(0)
    # Undo one level of double-escaping for the href attribute value.
    unescaped_url = string.replace(url, "&amp;amp;", "&amp;")
    return "<a href=\"%s\">%s</a>" % (unescaped_url, url)
  return re.sub(_re_rewrite_escaped_url, _url_repl, s)

def detect_encoding(text_block):
  """Return the encoding used by TEXT_BLOCK as detected by the chardet
  Python module.  (Currently, this is used only when syntax
  highlighting is not enabled/available; otherwise, Pygments does this
  work for us.)"""

  # Does the TEXT_BLOCK start with a BOM?
for bom, encoding in [('\xef\xbb\xbf', 'utf-8'), ('\xff\xfe', 'utf-16'), ('\xfe\xff', 'utf-16be'), ('\xff\xfe\0\0', 'utf-32'), ('\0\0\xfe\xff', 'utf-32be'), ]: if text_block[:len(bom)] == bom: return encoding # If no recognized BOM, see if chardet can help us. try: import chardet # If chardet can confidently claimed a match, we'll use its # findings. (And if that match is 'ascii' -- which is a subset of # utf-8 -- we'll just call it 'utf-8' and score a zero transform.) resp = chardet.detect(text_block) if resp.get('confidence') == 1.0: encoding = resp.get('encoding') if encoding is "ascii": encoding = "utf-8" return encoding except: pass # By default ... we have no idea. return None def transcode_text(text, encoding=None): """If ENCODING is provided and not 'utf-8', transcode TEXT from ENCODING to UTF-8.""" if not encoding or encoding == 'utf-8': return text try: return unicode(text, encoding, 'replace').encode('utf-8', 'replace') except: pass return text def markup_stream(request, cfg, blame_data, file_lines, filename, mime_type, encoding, colorize): """Return the contents of a versioned file as a list of vclib.Annotation objects, each representing one line of the file's contents. Use BLAME_DATA as the annotation information for the file if provided. Use FILE_LINES as the lines of file content text themselves. MIME_TYPE is the MIME content type of the file; ENCODING is its character encoding. If COLORIZE is true, attempt to apply syntax coloration to the file contents, and use the HTML-marked-up results as the text in the return vclib.Annotation objects.""" # Nothing to mark up? So be it. if not file_lines: return [] # Determine if we should (and can) use Pygments to highlight our # output. Reasons not to include a) being told not to by the # configuration, b) not being able to import the Pygments modules, # and c) Pygments not having a lexer for our file's format. 
pygments_lexer = None if colorize: from pygments import highlight from pygments.formatters import HtmlFormatter from pygments.lexers import ClassNotFound, \ get_lexer_by_name, \ get_lexer_for_mimetype, \ get_lexer_for_filename, \ guess_lexer if not encoding: encoding = 'guess' if cfg.options.detect_encoding: try: import chardet encoding = 'chardet' except (SyntaxError, ImportError): pass # First, see if there's a Pygments lexer associated with MIME_TYPE. if mime_type: try: pygments_lexer = get_lexer_for_mimetype(mime_type, encoding=encoding, tabsize=cfg.options.tabsize, stripnl=False) except ClassNotFound: pygments_lexer = None # If we've no lexer thus far, try to find one based on the FILENAME. if not pygments_lexer: try: pygments_lexer = get_lexer_for_filename(filename, encoding=encoding, tabsize=cfg.options.tabsize, stripnl=False) except ClassNotFound: pygments_lexer = None # Still no lexer? If we've reason to believe this is a text # file, try to guess the lexer based on the file's content. if not pygments_lexer and is_text(mime_type) and file_lines: try: pygments_lexer = guess_lexer(file_lines[0], encoding=encoding, tabsize=cfg.options.tabsize, stripnl=False) except ClassNotFound: pygments_lexer = None # If we aren't highlighting, just return an amalgamation of the # BLAME_DATA (if any) and the FILE_LINES. if not pygments_lexer: # If allowed by configuration, try to detect the source encoding # for this file. We'll assemble a block of data from the file # contents to do so... 1024 bytes should be enough. if not encoding and cfg.options.detect_encoding: block_size = 0 text_block = '' for i in range(len(file_lines)): text_block = text_block + file_lines[i] if len(text_block) >= 1024: break encoding = detect_encoding(text_block) # Built output data comprised of marked-up and possibly-transcoded # source text lines wrapped in (possibly dummy) vclib.Annotation # objects. 
lines = [] file_lines = transcode_text(string.join(file_lines, ''), encoding) if file_lines[-1] == '\n': file_lines = file_lines[:-1] file_lines = string.split(file_lines, '\n') for i in range(len(file_lines)): line = file_lines[i] if cfg.options.tabsize > 0: line = string.expandtabs(line, cfg.options.tabsize) line = markup_escaped_urls(sapi.escape(line)) if blame_data: blame_item = blame_data[i] blame_item.text = line else: blame_item = vclib.Annotation(line, i + 1, None, None, None, None) blame_item.diff_href = None lines.append(blame_item) return lines # If we get here, we're highlighting something. class PygmentsSink: def __init__(self, blame_data): if blame_data: self.has_blame_data = 1 self.blame_data = blame_data else: self.has_blame_data = 0 self.blame_data = [] self.line_no = 0 def write(self, buf): ### FIXME: Don't bank on write() being called once per line buf = markup_escaped_urls(string.rstrip(buf, '\n\r')) if self.has_blame_data: self.blame_data[self.line_no].text = buf else: item = vclib.Annotation(buf, self.line_no + 1, None, None, None, None) item.diff_href = None self.blame_data.append(item) self.line_no = self.line_no + 1 ps = PygmentsSink(blame_data) highlight(string.join(file_lines, ''), pygments_lexer, HtmlFormatter(nowrap=True, classprefix="pygments-", encoding='utf-8'), ps) return ps.blame_data def make_time_string(date, cfg): """Returns formatted date string in either local time or UTC. The passed in 'date' variable is seconds since epoch. 
""" if date is None: return None if cfg.options.use_localtime: tm = time.localtime(date) else: tm = time.gmtime(date) if cfg.options.iso8601_timestamps: if cfg.options.use_localtime: if tm[8] and time.daylight: tz = -time.altzone else: tz = -time.timezone tz = float(tz) / 3600.0 tz = string.replace(str.format('{0:+06.2f}', tz), '.', ':') else: tz = 'Z' return time.strftime('%Y-%m-%dT%H:%M:%S', tm) + tz else: return time.asctime(tm) + ' ' + \ (cfg.options.use_localtime and time.tzname[tm[8]] or 'UTC') def make_rss_time_string(date, cfg): """Returns formatted date string in UTC, formatted for RSS. The passed in 'date' variable is seconds since epoch. """ if date is None: return None return time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime(date)) + ' UTC' def make_comma_sep_list_string(items): return string.join(map(lambda x: x.name, items), ', ') def get_itemprops(request, path_parts, rev): itemprops = request.repos.itemprops(path_parts, rev) propnames = itemprops.keys() propnames.sort() props = [] for name in propnames: lf = LogFormatter(request, itemprops[name]) value = lf.get(maxlen=0, htmlize=1) undisplayable = ezt.boolean(0) # skip non-utf8 property names try: unicode(name, 'utf8') except: continue # note non-utf8 property values try: unicode(value, 'utf8') except: value = None undisplayable = ezt.boolean(1) props.append(_item(name=name, value=value, undisplayable=undisplayable)) return props def parse_mime_type(mime_type): mime_parts = map(lambda x: x.strip(), string.split(mime_type, ';')) type_subtype = mime_parts[0].lower() parameters = {} for part in mime_parts[1:]: name, value = string.split(part, '=', 1) parameters[name] = value return type_subtype, parameters def calculate_mime_type(request, path_parts, rev): """Return a 2-tuple carrying the MIME content type and character encoding for the file represented by PATH_PARTS in REV. 
Use REQUEST for repository access as necessary.""" if not path_parts: return None, None mime_type = encoding = None if request.roottype == 'svn' \ and (not request.cfg.options.svn_ignore_mimetype): try: itemprops = request.repos.itemprops(path_parts, rev) mime_type = itemprops.get('svn:mime-type') if mime_type: mime_type, parameters = parse_mime_type(mime_type) return mime_type, parameters.get('charset') except: pass return guess_mime(path_parts[-1]), None def assert_viewable_filesize(cfg, filesize): if cfg.options.max_filesize_kbytes \ and filesize != -1 \ and filesize > (1024 * cfg.options.max_filesize_kbytes): raise debug.ViewVCException('Display of files larger than %d KB ' 'disallowed by configuration' % (cfg.options.max_filesize_kbytes), '403 Forbidden') def markup_or_annotate(request, is_annotate): cfg = request.cfg path, rev = _orig_path(request, is_annotate and 'annotate' or 'revision') lines = fp = image_src_href = None annotation = 'none' revision = None mime_type, encoding = calculate_mime_type(request, path, rev) # Is this display blocked by 'binary_mime_types' configuration? if is_binary_file_mime_type(mime_type, cfg): raise debug.ViewVCException('Display of binary file content disabled ' 'by configuration', '403 Forbidden') # Is this a viewable image type? if is_viewable_image(mime_type) \ and 'co' in cfg.options.allowed_views: fp, revision = request.repos.openfile(path, rev, {}) fp.close() if check_freshness(request, None, revision, weak=1): return if is_annotate: annotation = 'binary' image_src_href = request.get_url(view_func=view_checkout, params={'revision': rev}, escape=1) # Not a viewable image. else: filesize = request.repos.filesize(path, rev) # If configuration disallows display of large files, try to honor # that request. assert_viewable_filesize(cfg, filesize) # If this was an annotation request, try to annotate this file. # If something goes wrong, that's okay -- we'll gracefully revert # to a plain markup display. 
blame_data = None if is_annotate: try: blame_source, revision = request.repos.annotate(path, rev, False) if check_freshness(request, None, revision, weak=1): return # Create BLAME_DATA list from BLAME_SOURCE, adding diff_href # items to each relevant "line". blame_data = [] for item in blame_source: item.diff_href = None if item.prev_rev: item.diff_href = request.get_url(view_func=view_diff, params={'r1': item.prev_rev, 'r2': item.rev}, escape=1, partial=1) blame_data.append(item) annotation = 'annotated' except vclib.NonTextualFileContents: annotation = 'binary' except: annotation = 'error' # Grab the file contents. fp, revision = request.repos.openfile(path, rev, {'cvs_oldkeywords' : 1}) if check_freshness(request, None, revision, weak=1): fp.close() return # If we're limiting by filesize but couldn't pull off the cheap # check above, we'll try to do so line by line here (while # building our file_lines array). if cfg.options.max_filesize_kbytes and filesize == -1: file_lines = [] filesize = 0 while 1: line = fp.readline() if not line: break filesize = filesize + len(line) assert_viewable_filesize(cfg, filesize) file_lines.append(line) else: file_lines = fp.readlines() fp.close() # Do we have a differing number of file content lines and # annotation items? That's no good. Call it an error and don't # bother attempting the annotation display. if blame_data and (len(file_lines) != len(blame_data)): annotation = 'error' blame_data = None # Try to markup the file contents/annotation. If we get an error # and we were colorizing the stream, try once more without the # colorization enabled. 
colorize = cfg.options.enable_syntax_coloration try: lines = markup_stream(request, cfg, blame_data, file_lines, path[-1], mime_type, encoding, colorize) except: if colorize: lines = markup_stream(request, cfg, blame_data, file_lines, path[-1], mime_type, encoding, False) else: raise debug.ViewVCException('Error displaying file contents', '500 Internal Server Error') data = common_template_data(request, revision, mime_type) data.merge(ezt.TemplateData({ 'mime_type' : mime_type, 'log' : None, 'date' : None, 'ago' : None, 'author' : None, 'branches' : None, 'tags' : None, 'branch_points' : None, 'changed' : None, 'size' : None, 'state' : None, 'vendor_branch' : None, 'prev' : None, 'orig_path' : None, 'orig_href' : None, 'image_src_href' : image_src_href, 'lines' : lines, 'properties' : get_itemprops(request, path, rev), 'annotation' : annotation, })) if cfg.options.show_log_in_markup: options = { 'svn_latest_log': 1, ### FIXME: Use of this magical value is uncool. 'svn_cross_copies': 1, } revs = request.repos.itemlog(path, revision, vclib.SORTBY_REV, 0, 1, options) entry = revs[-1] lf = LogFormatter(request, entry.log) data['date'] = make_time_string(entry.date, cfg) data['author'] = entry.author data['changed'] = entry.changed data['log'] = lf.get(maxlen=0, htmlize=1) data['size'] = entry.size if entry.date is not None: data['ago'] = html_time(request, entry.date, 1) if request.roottype == 'cvs': branch = entry.branch_number prev = entry.prev or entry.parent data['state'] = entry.dead and 'dead' data['prev'] = prev and prev.string data['vendor_branch'] = ezt.boolean(branch and branch[2] % 2 == 1) ### TODO: Should this be using prep_tags() instead? 
data['branches'] = make_comma_sep_list_string(entry.branches) data['tags'] = make_comma_sep_list_string(entry.tags) data['branch_points']= make_comma_sep_list_string(entry.branch_points) if path != request.path_parts: orig_path = _path_join(path) data['orig_path'] = orig_path data['orig_href'] = request.get_url(view_func=view_log, where=orig_path, pathtype=vclib.FILE, params={'pathrev': revision}, escape=1) generate_page(request, "file", data) def view_markup(request): if 'markup' not in request.cfg.options.allowed_views: raise debug.ViewVCException('Markup view is disabled', '403 Forbidden') if request.pathtype != vclib.FILE: raise debug.ViewVCException('Unsupported feature: markup view on ' 'directory', '400 Bad Request') markup_or_annotate(request, 0) def view_annotate(request): if 'annotate' not in request.cfg.options.allowed_views: raise debug.ViewVCException('Annotation view is disabled', '403 Forbidden') if request.pathtype != vclib.FILE: raise debug.ViewVCException('Unsupported feature: annotate view on ' 'directory', '400 Bad Request') markup_or_annotate(request, 1) def revcmp(rev1, rev2): rev1 = map(int, string.split(rev1, '.')) rev2 = map(int, string.split(rev2, '.')) return cmp(rev1, rev2) def sort_file_data(file_data, roottype, sortdir, sortby, group_dirs): # convert sortdir into a sign bit s = sortdir == "down" and -1 or 1 # in cvs, revision numbers can't be compared meaningfully between # files, so try to do the right thing and compare dates instead if roottype == "cvs" and sortby == "rev": sortby = "date" def file_sort_sortby(file1, file2, sortby): # sort according to sortby if sortby == 'rev': return s * revcmp(file1.rev, file2.rev) elif sortby == 'date': return s * cmp(file2.date, file1.date) # latest date is first elif sortby == 'log': return s * cmp(file1.log, file2.log) elif sortby == 'author': return s * cmp(file1.author, file2.author) return s * cmp(file1.name, file2.name) def file_sort_cmp(file1, file2, sortby=sortby, group_dirs=group_dirs, 
s=s): # if we're grouping directories together, sorting is pretty # simple. a directory sorts "higher" than a non-directory, and # two directories are sorted as normal. if group_dirs: if file1.kind == vclib.DIR: if file2.kind == vclib.DIR: # two directories, no special handling. return file_sort_sortby(file1, file2, sortby) else: # file1 is a directory, it sorts first. return -1 elif file2.kind == vclib.DIR: # file2 is a directory, it sorts first. return 1 # we should have data on these. if not, then it is because we requested # a specific tag and that tag is not present on the file. if file1.rev is not None and file2.rev is not None: return file_sort_sortby(file1, file2, sortby) elif file1.rev is not None: return -1 elif file2.rev is not None: return 1 # sort by file name return s * cmp(file1.name, file2.name) file_data.sort(file_sort_cmp) def icmp(x, y): """case insensitive comparison""" return cmp(string.lower(x), string.lower(y)) def view_roots(request): if 'roots' not in request.cfg.options.allowed_views: raise debug.ViewVCException('Root listing view is disabled', '403 Forbidden') # add in the roots for the selection roots = [] expand_root_parents(request.cfg) allroots = list_roots(request) if len(allroots): rootnames = allroots.keys() rootnames.sort(icmp) for rootname in rootnames: root_path, root_type, lastmod = allroots[rootname] href = request.get_url(view_func=view_directory, where='', pathtype=vclib.DIR, params={'root': rootname}, escape=1) if root_type == vclib.SVN: log_href = request.get_url(view_func=view_log, where='', pathtype=vclib.DIR, params={'root': rootname}, escape=1) else: log_href = None roots.append(_item(name=request.server.escape(rootname), type=root_type, path=root_path, author=lastmod and lastmod.author or None, ago=lastmod and lastmod.ago or None, date=lastmod and lastmod.date or None, log=lastmod and lastmod.log or None, short_log=lastmod and lastmod.short_log or None, rev=lastmod and lastmod.rev or None, href=href, 
log_href=log_href)) data = common_template_data(request) data.merge(ezt.TemplateData({ 'roots' : roots, })) generate_page(request, "roots", data) def view_directory(request): cfg = request.cfg # For Subversion repositories, the revision acts as a weak validator for # the directory listing (to take into account template changes or # revision property changes). if request.roottype == 'svn': try: rev = request.repos._getrev(request.pathrev) except vclib.InvalidRevision: raise debug.ViewVCException('Invalid revision', '404 Not Found') tree_rev = request.repos.created_rev(request.where, rev) if check_freshness(request, None, str(tree_rev), weak=1): return # List current directory options = {} if request.roottype == 'cvs': hideattic = int(request.query_dict.get('hideattic', cfg.options.hide_attic)) options["cvs_subdirs"] = (cfg.options.show_subdir_lastmod and cfg.options.show_logs) file_data = request.repos.listdir(request.path_parts, request.pathrev, options) # sort with directories first, and using the "sortby" criteria sortby = request.query_dict.get('sortby', cfg.options.sort_by) or 'file' sortdir = request.query_dict.get('sortdir', 'up') # when paging and sorting by filename, we can greatly improve # performance by "cheating" -- first, we sort (we already have the # names), then we just fetch dirlogs for the needed entries. # however, when sorting by other properties or not paging, we've no # choice but to fetch dirlogs for everything. 
debug.t_start("dirlogs") if cfg.options.dir_pagesize and sortby == 'file': dirlogs_first = int(request.query_dict.get('dir_pagestart', 0)) if dirlogs_first > len(file_data): dirlogs_first = 0 dirlogs_last = dirlogs_first + cfg.options.dir_pagesize for file in file_data: file.rev = None file.date = None file.log = None file.author = None file.size = None file.lockinfo = None file.dead = None sort_file_data(file_data, request.roottype, sortdir, sortby, cfg.options.sort_group_dirs) # request dirlogs only for the slice of files in "this page" request.repos.dirlogs(request.path_parts, request.pathrev, file_data[dirlogs_first:dirlogs_last], options) else: request.repos.dirlogs(request.path_parts, request.pathrev, file_data, options) sort_file_data(file_data, request.roottype, sortdir, sortby, cfg.options.sort_group_dirs) debug.t_end("dirlogs") # If a regex is specified, build a compiled form thereof for filtering searchstr = None search_re = request.query_dict.get('search', '') if cfg.options.use_re_search and search_re: searchstr = re.compile(search_re) # loop through entries creating rows and changing these values rows = [ ] num_displayed = 0 num_dead = 0 # set some values to be used inside loop where = request.where where_prefix = where and where + '/' for file in file_data: row = _item(author=None, log=None, short_log=None, state=None, size=None, log_file=None, log_rev=None, graph_href=None, mime_type=None, date=None, ago=None, view_href=None, log_href=None, revision_href=None, annotate_href=None, download_href=None, download_text_href=None, prefer_markup=ezt.boolean(0)) if request.roottype == 'cvs' and file.absent: continue if cfg.options.hide_errorful_entries and file.errors: continue row.rev = file.rev row.author = file.author row.state = (request.roottype == 'cvs' and file.dead) and 'dead' or '' if file.date is not None: row.date = make_time_string(file.date, cfg) row.ago = html_time(request, file.date) if cfg.options.show_logs: debug.t_start("dirview_logformat") 
lf = LogFormatter(request, file.log) row.log = lf.get(maxlen=0, htmlize=1) row.short_log = lf.get(maxlen=cfg.options.short_log_len, htmlize=1) debug.t_end("dirview_logformat") row.lockinfo = file.lockinfo row.anchor = request.server.escape(file.name) row.name = request.server.escape(file.name) row.pathtype = (file.kind == vclib.FILE and 'file') or \ (file.kind == vclib.DIR and 'dir') row.errors = file.errors if file.kind == vclib.DIR: if cfg.options.hide_cvsroot \ and is_cvsroot_path(request.roottype, request.path_parts + [file.name]): continue row.view_href = request.get_url(view_func=view_directory, where=where_prefix+file.name, pathtype=vclib.DIR, params={}, escape=1) if request.roottype == 'svn': row.revision_href = request.get_url(view_func=view_revision, params={'revision': file.rev}, escape=1) if request.roottype == 'cvs' and file.rev is not None: row.rev = None if cfg.options.show_logs: row.log_file = file.newest_file row.log_rev = file.rev if request.roottype == 'svn': row.log_href = request.get_url(view_func=view_log, where=where_prefix + file.name, pathtype=vclib.DIR, params={}, escape=1) elif file.kind == vclib.FILE: if searchstr is not None: if request.roottype == 'cvs' and (file.errors or file.dead): continue if not search_file(request.repos, request.path_parts + [file.name], request.pathrev, searchstr): continue if request.roottype == 'cvs' and file.dead: num_dead = num_dead + 1 if hideattic: continue num_displayed = num_displayed + 1 file_where = where_prefix + file.name if request.roottype == 'svn': row.size = file.size row.mime_type, encoding = calculate_mime_type(request, _path_parts(file_where), file.rev) fvi = get_file_view_info(request, file_where, file.rev, row.mime_type) row.view_href = fvi.view_href row.download_href = fvi.download_href row.download_text_href = fvi.download_text_href row.annotate_href = fvi.annotate_href row.revision_href = fvi.revision_href row.prefer_markup = fvi.prefer_markup row.log_href = 
request.get_url(view_func=view_log, where=file_where, pathtype=vclib.FILE, params={}, escape=1) if cfg.options.use_cvsgraph and request.roottype == 'cvs': row.graph_href = request.get_url(view_func=view_cvsgraph, where=file_where, pathtype=vclib.FILE, params={}, escape=1) rows.append(row) # Prepare the data that will be passed to the template, based on the # common template data. data = common_template_data(request) data.merge(ezt.TemplateData({ 'entries' : rows, 'sortby' : sortby, 'sortdir' : sortdir, 'search_re' : request.server.escape(search_re), 'dir_pagestart' : None, 'sortby_file_href' : request.get_url(params={'sortby': 'file', 'sortdir': None}, escape=1), 'sortby_rev_href' : request.get_url(params={'sortby': 'rev', 'sortdir': None}, escape=1), 'sortby_date_href' : request.get_url(params={'sortby': 'date', 'sortdir': None}, escape=1), 'sortby_author_href' : request.get_url(params={'sortby': 'author', 'sortdir': None}, escape=1), 'sortby_log_href' : request.get_url(params={'sortby': 'log', 'sortdir': None}, escape=1), 'files_shown' : num_displayed, 'num_dead' : num_dead, 'youngest_rev' : None, 'youngest_rev_href' : None, 'selection_form' : None, 'attic_showing' : None, 'show_attic_href' : None, 'hide_attic_href' : None, 'branch_tags': None, 'plain_tags': None, 'properties': get_itemprops(request, request.path_parts, request.pathrev), 'tree_rev' : None, 'tree_rev_href' : None, 'dir_paging_action' : None, 'dir_paging_hidden_values' : [], 'search_re_action' : None, 'search_re_hidden_values' : [], # Populated by paging()/paging_sws() 'picklist' : [], 'picklist_len' : 0, # Populated by pathrev_form() 'pathrev_action' : None, 'pathrev_hidden_values' : [], 'pathrev_clear_action' : None, 'pathrev_clear_hidden_values' : [], 'pathrev' : None, 'lastrev' : None, })) # clicking on sort column reverses sort order if sortdir == 'down': revsortdir = None # 'up' else: revsortdir = 'down' if sortby in ['file', 'rev', 'date', 'log', 'author']: data['sortby_%s_href' % sortby] = 
request.get_url(params={'sortdir': revsortdir}, escape=1) # CVS doesn't support sorting by rev if request.roottype == "cvs": data['sortby_rev_href'] = None # set cvs-specific fields if request.roottype == 'cvs': plain_tags = options['cvs_tags'] plain_tags.sort(icmp) plain_tags.reverse() data['plain_tags']= plain_tags branch_tags = options['cvs_branches'] branch_tags.sort(icmp) branch_tags.reverse() data['branch_tags']= branch_tags data['attic_showing'] = ezt.boolean(not hideattic) data['show_attic_href'] = request.get_url(params={'hideattic': 0}, escape=1) data['hide_attic_href'] = request.get_url(params={'hideattic': 1}, escape=1) # set svn-specific fields elif request.roottype == 'svn': data['tree_rev'] = tree_rev data['tree_rev_href'] = request.get_url(view_func=view_revision, params={'revision': tree_rev}, escape=1) data['youngest_rev'] = request.repos.get_youngest_revision() data['youngest_rev_href'] = request.get_url(view_func=view_revision, params={}, escape=1) if cfg.options.dir_pagesize: data['dir_paging_action'], data['dir_paging_hidden_values'] = \ request.get_form(params={'dir_pagestart': None}) pathrev_form(request, data) if cfg.options.use_re_search: data['search_re_action'], data['search_re_hidden_values'] = \ request.get_form(params={'search': None}) if cfg.options.dir_pagesize: data['dir_pagestart'] = int(request.query_dict.get('dir_pagestart',0)) data['entries'] = paging(data, 'entries', data['dir_pagestart'], 'name', cfg.options.dir_pagesize) generate_page(request, "directory", data) def paging(data, key, pagestart, local_name, pagesize): # Implement paging # Create the picklist picklist = data['picklist'] = [] for i in range(0, len(data[key]), pagesize): pick = _item(start=None, end=None, count=None, more=ezt.boolean(0)) pick.start = getattr(data[key][i], local_name) pick.count = i pick.page = (i / pagesize) + 1 try: pick.end = getattr(data[key][i+pagesize-1], local_name) except IndexError: pick.end = getattr(data[key][-1], local_name) 
picklist.append(pick) data['picklist_len'] = len(picklist) # Need to fix # pagestart can be greater than the length of data[key] if you # select a tag or search while on a page other than the first. # Should reset to the first page, this test won't do that every # time that it is needed. # Problem might go away if we don't hide non-matching files when # selecting for tags or searching. if pagestart > len(data[key]): pagestart = 0 pageend = pagestart + pagesize # Slice return data[key][pagestart:pageend] def paging_sws(data, key, pagestart, local_name, pagesize, extra_pages, offset): """Implement sliding window-style paging.""" # Create the picklist last_requested = pagestart + (extra_pages * pagesize) picklist = data['picklist'] = [] has_more = ezt.boolean(0) for i in range(0, len(data[key]), pagesize): pick = _item(start=None, end=None, count=None, more=ezt.boolean(0)) pick.start = getattr(data[key][i], local_name) pick.count = offset + i pick.page = (pick.count / pagesize) + 1 try: pick.end = getattr(data[key][i+pagesize-1], local_name) except IndexError: pick.end = getattr(data[key][-1], local_name) picklist.append(pick) if pick.count >= last_requested: pick.more = ezt.boolean(1) break data['picklist_len'] = len(picklist) first = pagestart - offset # FIXME: first can be greater than the length of data[key] if # you select a tag or search while on a page other than the first. # Should reset to the first page, but this test won't do that every # time that it is needed. Problem might go away if we don't hide # non-matching files when selecting for tags or searching. 
if first > len(data[key]): pagestart = 0 pageend = first + pagesize # Slice return data[key][first:pageend] def pathrev_form(request, data): lastrev = None if request.roottype == 'svn': data['pathrev_action'], data['pathrev_hidden_values'] = \ request.get_form(view_func=redirect_pathrev, params={'pathrev': None, 'orig_path': request.where, 'orig_pathtype': request.pathtype, 'orig_pathrev': request.pathrev, 'orig_view': _view_codes.get(request.view_func)}) if request.pathrev: youngest = request.repos.get_youngest_revision() lastrev = request.repos.last_rev(request.where, request.pathrev, youngest)[0] if lastrev == youngest: lastrev = None data['pathrev'] = request.pathrev data['lastrev'] = lastrev action, hidden_values = request.get_form(params={'pathrev': lastrev}) if request.roottype != 'svn': data['pathrev_action'] = action data['pathrev_hidden_values'] = hidden_values data['pathrev_clear_action'] = action data['pathrev_clear_hidden_values'] = hidden_values return lastrev def redirect_pathrev(request): assert request.roottype == 'svn' new_pathrev = request.query_dict.get('pathrev') or None path = request.query_dict.get('orig_path', '') pathtype = request.query_dict.get('orig_pathtype') pathrev = request.query_dict.get('orig_pathrev') view = _views.get(request.query_dict.get('orig_view')) youngest = request.repos.get_youngest_revision() # go out of the way to allow revision numbers higher than youngest try: new_pathrev = int(new_pathrev) except ValueError: new_pathrev = youngest except TypeError: pass else: if new_pathrev > youngest: new_pathrev = youngest if _repos_pathtype(request.repos, _path_parts(path), new_pathrev): pathrev = new_pathrev else: pathrev, path = request.repos.last_rev(path, pathrev, new_pathrev) # allow clearing sticky revision by submitting empty string if new_pathrev is None and pathrev == youngest: pathrev = None request.server.redirect(request.get_url(view_func=view, where=path, pathtype=pathtype, params={'pathrev': pathrev})) def 
view_log(request): cfg = request.cfg diff_format = request.query_dict.get('diff_format', cfg.options.diff_format) pathtype = request.pathtype if pathtype is vclib.DIR: if request.roottype == 'cvs': raise debug.ViewVCException('Unsupported feature: log view on CVS ' 'directory', '400 Bad Request') mime_type = encoding = None else: mime_type, encoding = calculate_mime_type(request, request.path_parts, request.pathrev) options = {} options['svn_show_all_dir_logs'] = 1 ### someday make this optional? options['svn_cross_copies'] = cfg.options.cross_copies logsort = request.query_dict.get('logsort', cfg.options.log_sort) if request.roottype == "svn": sortby = vclib.SORTBY_DEFAULT logsort = None else: if logsort == 'date': sortby = vclib.SORTBY_DATE elif logsort == 'rev': sortby = vclib.SORTBY_REV else: sortby = vclib.SORTBY_DEFAULT first = last = 0 log_pagestart = None if cfg.options.log_pagesize: log_pagestart = int(request.query_dict.get('log_pagestart', 0)) total = cfg.options.log_pagesextra * cfg.options.log_pagesize first = log_pagestart - min(log_pagestart, total) last = log_pagestart + (total + cfg.options.log_pagesize) + 1 show_revs = request.repos.itemlog(request.path_parts, request.pathrev, sortby, first, last - first, options) # selected revision selected_rev = request.query_dict.get('r1') entries = [ ] name_printed = { } cvs = request.roottype == 'cvs' for rev in show_revs: entry = _item() entry.rev = rev.string entry.state = (cvs and rev.dead and 'dead') entry.author = rev.author entry.changed = rev.changed entry.date = make_time_string(rev.date, cfg) entry.ago = None if rev.date is not None: entry.ago = html_time(request, rev.date, 1) entry.size = rev.size entry.lockinfo = rev.lockinfo entry.branch_point = None entry.next_main = None entry.orig_path = None entry.copy_path = None lf = LogFormatter(request, rev.log or '') entry.log = lf.get(maxlen=0, htmlize=1) entry.view_href = None entry.download_href = None entry.download_text_href = None 
entry.annotate_href = None entry.revision_href = None entry.sel_for_diff_href = None entry.diff_to_sel_href = None entry.diff_to_prev_href = None entry.diff_to_branch_href = None entry.diff_to_main_href = None if request.roottype == 'cvs': prev = rev.prev or rev.parent entry.prev = prev and prev.string branch = rev.branch_number entry.vendor_branch = ezt.boolean(branch and branch[2] % 2 == 1) entry.branches = prep_tags(request, rev.branches) entry.tags = prep_tags(request, rev.tags) entry.branch_points = prep_tags(request, rev.branch_points) entry.tag_names = map(lambda x: x.name, rev.tags) if branch and not name_printed.has_key(branch): entry.branch_names = map(lambda x: x.name, rev.branches) name_printed[branch] = 1 else: entry.branch_names = [ ] if rev.parent and rev.parent is not prev and not entry.vendor_branch: entry.branch_point = rev.parent.string # if it's the last revision on a branch then diff against the # last revision on the higher branch (e.g. change is committed and # brought over to -stable) if not rev.next and rev.parent and rev.parent.next: r = rev.parent.next while r.next: r = r.next entry.next_main = r.string elif request.roottype == 'svn': entry.prev = rev.prev and rev.prev.string entry.branches = entry.tags = entry.branch_points = [ ] entry.tag_names = entry.branch_names = [ ] entry.vendor_branch = None if rev.filename != request.where: entry.orig_path = rev.filename entry.copy_path = rev.copy_path entry.copy_rev = rev.copy_rev if entry.orig_path: entry.orig_href = request.get_url(view_func=view_log, where=entry.orig_path, pathtype=vclib.FILE, params={'pathrev': rev.string}, escape=1) if rev.copy_path: entry.copy_href = request.get_url(view_func=view_log, where=rev.copy_path, pathtype=vclib.FILE, params={'pathrev': rev.copy_rev}, escape=1) # view/download links if pathtype is vclib.FILE: fvi = get_file_view_info(request, request.where, rev.string, mime_type) entry.view_href = fvi.view_href entry.download_href = fvi.download_href 
entry.download_text_href = fvi.download_text_href entry.annotate_href = fvi.annotate_href entry.revision_href = fvi.revision_href entry.prefer_markup = fvi.prefer_markup else: entry.revision_href = request.get_url(view_func=view_revision, params={'revision': rev.string}, escape=1) entry.view_href = request.get_url(view_func=view_directory, where=rev.filename, pathtype=vclib.DIR, params={'pathrev': rev.string}, escape=1) # calculate diff links if selected_rev != entry.rev: entry.sel_for_diff_href = \ request.get_url(view_func=view_log, params={'r1': entry.rev, 'log_pagestart': log_pagestart}, escape=1) if entry.prev is not None: entry.diff_to_prev_href = \ request.get_url(view_func=view_diff, params={'r1': entry.prev, 'r2': entry.rev, 'diff_format': None}, escape=1) if selected_rev and \ selected_rev != str(entry.rev) and \ selected_rev != str(entry.prev) and \ selected_rev != str(entry.branch_point) and \ selected_rev != str(entry.next_main): entry.diff_to_sel_href = \ request.get_url(view_func=view_diff, params={'r1': selected_rev, 'r2': entry.rev, 'diff_format': None}, escape=1) if entry.next_main: entry.diff_to_main_href = \ request.get_url(view_func=view_diff, params={'r1': entry.next_main, 'r2': entry.rev, 'diff_format': None}, escape=1) if entry.branch_point: entry.diff_to_branch_href = \ request.get_url(view_func=view_diff, params={'r1': entry.branch_point, 'r2': entry.rev, 'diff_format': None}, escape=1) # Save our escaping until the end so stuff above works if entry.orig_path: entry.orig_path = request.server.escape(entry.orig_path) if entry.copy_path: entry.copy_path = request.server.escape(entry.copy_path) entries.append(entry) diff_select_action, diff_select_hidden_values = \ request.get_form(view_func=view_diff, params={'r1': None, 'r2': None, 'tr1': None, 'tr2': None, 'diff_format': None}) logsort_action, logsort_hidden_values = \ request.get_form(params={'logsort': None}) data = common_template_data(request) data.merge(ezt.TemplateData({ 
'default_branch' : None, 'mime_type' : mime_type, 'rev_selected' : selected_rev, 'diff_format' : diff_format, 'logsort' : logsort, 'human_readable' : ezt.boolean(diff_format in ('f', 'h', 'l')), 'log_pagestart' : None, 'log_paging_action' : None, 'log_paging_hidden_values' : [], 'entries': entries, 'head_prefer_markup' : ezt.boolean(0), 'head_view_href' : None, 'head_download_href': None, 'head_download_text_href': None, 'head_annotate_href': None, 'tag_prefer_markup' : ezt.boolean(0), 'tag_view_href' : None, 'tag_download_href': None, 'tag_download_text_href': None, 'tag_annotate_href': None, 'diff_select_action' : diff_select_action, 'diff_select_hidden_values' : diff_select_hidden_values, 'logsort_action' : logsort_action, 'logsort_hidden_values' : logsort_hidden_values, 'tags' : [], 'branch_tags' : [], 'plain_tags' : [], # Populated by paging()/paging_sws() 'picklist' : [], 'picklist_len' : 0, # Populated by pathrev_form() 'pathrev_action' : None, 'pathrev_hidden_values' : [], 'pathrev_clear_action' : None, 'pathrev_clear_hidden_values' : [], 'pathrev' : None, 'lastrev' : None, })) lastrev = pathrev_form(request, data) if pathtype is vclib.FILE: if not request.pathrev or lastrev is None: fvi = get_file_view_info(request, request.where, None, mime_type, None) data['head_view_href']= fvi.view_href data['head_download_href']= fvi.download_href data['head_download_text_href']= fvi.download_text_href data['head_annotate_href']= fvi.annotate_href data['head_prefer_markup']= fvi.prefer_markup if request.pathrev and request.roottype == 'cvs': fvi = get_file_view_info(request, request.where, None, mime_type) data['tag_view_href']= fvi.view_href data['tag_download_href']= fvi.download_href data['tag_download_text_href']= fvi.download_text_href data['tag_annotate_href']= fvi.annotate_href data['tag_prefer_markup']= fvi.prefer_markup else: data['head_view_href'] = request.get_url(view_func=view_directory, params={}, escape=1) taginfo = options.get('cvs_tags', {}) tagitems 
= taginfo.items() tagitems.sort() tagitems.reverse() main = taginfo.get('MAIN') if main: # Default branch may have multiple names so we list them branches = [] for branch in main.aliases: # Don't list MAIN if branch is not main: branches.append(branch) data['default_branch'] = prep_tags(request, branches) for tag, rev in tagitems: if rev.co_rev: data['tags'].append(_item(rev=rev.co_rev.string, name=tag)) if rev.is_branch: data['branch_tags'].append(tag) else: data['plain_tags'].append(tag) if cfg.options.log_pagesize: data['log_paging_action'], data['log_paging_hidden_values'] = \ request.get_form(params={'log_pagestart': None, 'r1': selected_rev, }) data['log_pagestart'] = int(request.query_dict.get('log_pagestart',0)) data['entries'] = paging_sws(data, 'entries', data['log_pagestart'], 'rev', cfg.options.log_pagesize, cfg.options.log_pagesextra, first) generate_page(request, "log", data) def view_checkout(request): cfg = request.cfg if 'co' not in cfg.options.allowed_views: raise debug.ViewVCException('Checkout view is disabled', '403 Forbidden') if request.pathtype != vclib.FILE: raise debug.ViewVCException('Unsupported feature: checkout view on ' 'directory', '400 Bad Request') path, rev = _orig_path(request) fp, revision = request.repos.openfile(path, rev, {}) # The revision number acts as a strong validator. if not check_freshness(request, None, revision): mime_type, encoding = calculate_mime_type(request, path, rev) mime_type = request.query_dict.get('content-type') \ or mime_type \ or 'text/plain' server_fp = get_writeready_server_file(request, mime_type, encoding) copy_stream(fp, server_fp) fp.close() def view_cvsgraph_image(request): "output the image rendered by cvsgraph" # this function is derived from cgi/cvsgraphmkimg.cgi cfg = request.cfg if not cfg.options.use_cvsgraph: raise debug.ViewVCException('Graph view is disabled', '403 Forbidden') # If cvsgraph can't find its supporting libraries, uncomment and set # accordingly. 
Do the same in view_cvsgraph(). #os.environ['LD_LIBRARY_PATH'] = '/usr/lib:/usr/local/lib:/path/to/cvsgraph' rcsfile = request.repos.rcsfile(request.path_parts) fp = popen.popen(cfg.utilities.cvsgraph or 'cvsgraph', ("-c", cfg.path(cfg.options.cvsgraph_conf), "-r", request.repos.rootpath, rcsfile), 'rb', 0) copy_stream(fp, get_writeready_server_file(request, 'image/png')) fp.close() def view_cvsgraph(request): "output a page containing an image rendered by cvsgraph" cfg = request.cfg if not cfg.options.use_cvsgraph: raise debug.ViewVCException('Graph view is disabled', '403 Forbidden') # If cvsgraph can't find its supporting libraries, uncomment and set # accordingly. Do the same in view_cvsgraph_image(). #os.environ['LD_LIBRARY_PATH'] = '/usr/lib:/usr/local/lib:/path/to/cvsgraph' imagesrc = request.get_url(view_func=view_cvsgraph_image, escape=1) mime_type = guess_mime(request.where) view = default_view(mime_type, cfg) up_where = _path_join(request.path_parts[:-1]) # Create an image map rcsfile = request.repos.rcsfile(request.path_parts) fp = popen.popen(cfg.utilities.cvsgraph or 'cvsgraph', ("-i", "-c", cfg.path(cfg.options.cvsgraph_conf), "-r", request.repos.rootpath, "-x", "x", "-3", request.get_url(view_func=view_log, params={}, escape=1), "-4", request.get_url(view_func=view, params={'revision': None}, escape=1, partial=1), "-5", request.get_url(view_func=view_diff, params={'r1': None, 'r2': None}, escape=1, partial=1), "-6", request.get_url(view_func=view_directory, where=up_where, pathtype=vclib.DIR, params={'pathrev': None}, escape=1, partial=1), rcsfile), 'rb', 0) data = common_template_data(request) data.merge(ezt.TemplateData({ 'imagemap' : fp, 'imagesrc' : imagesrc, })) generate_page(request, "graph", data) def search_file(repos, path_parts, rev, search_re): """Return 1 iff the contents of the file at PATH_PARTS in REPOS as of revision REV matches regular expression SEARCH_RE.""" # Read in each line of a checked-out file, and then use re.search to # 
search line. fp = repos.openfile(path_parts, rev, {})[0] matches = 0 while 1: line = fp.readline() if not line: break if search_re.search(line): matches = 1 fp.close() break return matches def view_doc(request): """Serve ViewVC static content locally. Using this avoids the need for modifying the setup of the web server. """ cfg = request.cfg document = request.where filename = cfg.path(os.path.join(cfg.options.template_dir, "docroot", document)) # Stat the file to get content length and last-modified date. try: info = os.stat(filename) except OSError, v: raise debug.ViewVCException('Static file "%s" not available (%s)' % (document, str(v)), '404 Not Found') content_length = str(info[stat.ST_SIZE]) last_modified = info[stat.ST_MTIME] # content_length + mtime makes a pretty good etag. if check_freshness(request, last_modified, "%s-%s" % (content_length, last_modified)): return try: fp = open(filename, "rb") except IOError, v: raise debug.ViewVCException('Static file "%s" not available (%s)' % (document, str(v)), '404 Not Found') if document[-3:] == 'png': mime_type = 'image/png' elif document[-3:] == 'jpg': mime_type = 'image/jpeg' elif document[-3:] == 'gif': mime_type = 'image/gif' elif document[-3:] == 'css': mime_type = 'text/css' else: # assume HTML: mime_type = None copy_stream(fp, get_writeready_server_file(request, mime_type, content_length=content_length)) fp.close() def rcsdiff_date_reformat(date_str, cfg): if date_str is None: return None try: date = compat.cvs_strptime(date_str) except ValueError: return date_str return make_time_string(compat.timegm(date), cfg) _re_extract_rev = re.compile(r'^[-+*]{3} [^\t]+\t([^\t]+)\t((\d+\.)*\d+)$') _re_extract_info = re.compile(r'@@ \-([0-9]+).*\+([0-9]+).*@@(.*)') class DiffSource: def __init__(self, fp, cfg): self.fp = fp self.cfg = cfg self.save_line = None self.line_number = None self.prev_line_number = None # keep track of where we are during an iteration self.idx = -1 self.last = None # these will be set once 
we start reading self.state = 'no-changes' self.left_col = [ ] self.right_col = [ ] def __getitem__(self, idx): if idx == self.idx: return self.last if idx != self.idx + 1: raise DiffSequencingError() # keep calling _get_row until it gives us something. sometimes, it # doesn't return a row immediately because it is accumulating changes. # when it is out of data, _get_row will raise IndexError. while 1: item = self._get_row() if item: self.idx = idx self.last = item return item def _format_text(self, text): text = string.rstrip(text, '\r\n') if self.cfg.options.tabsize > 0: text = string.expandtabs(text, self.cfg.options.tabsize) hr_breakable = self.cfg.options.hr_breakable # in the code below, "\x01" will be our stand-in for "&". We don't want # to insert "&" because it would get escaped by sapi.escape(). Similarly, # we use "\x02" as a stand-in for "<br>" if hr_breakable > 1 and len(text) > hr_breakable: text = re.sub('(' + ('.' * hr_breakable) + ')', '\\1\x02', text) if hr_breakable: # make every other space "breakable" text = string.replace(text, ' ', ' \x01nbsp;') else: text = string.replace(text, ' ', '\x01nbsp;') text = sapi.escape(text) text = string.replace(text, '\x01', '&') text = string.replace(text, '\x02', '<span style="color:red">\</span><br />') return text def _get_row(self): if self.state[:5] == 'flush': item = self._flush_row() if item: return item self.state = 'dump' if self.save_line: line = self.save_line self.save_line = None else: line = self.fp.readline() if not line: if self.state == 'no-changes': self.state = 'done' return _item(type='no-changes') # see if there are lines to flush if self.left_col or self.right_col: # move into the flushing state self.state = 'flush-' + self.state return None # nothing more to return raise IndexError if line[:2] == '@@': self.state = 'dump' self.left_col = [ ] self.right_col = [ ] match = _re_extract_info.match(line) self.line_number = int(match.group(2)) - 1 self.prev_line_number = int(match.group(1)) - 1 
      return _item(type='header',
                   line_info_left=match.group(1),
                   line_info_right=match.group(2),
                   line_info_extra=self._format_text(match.group(3)))

    if line[0] == '\\':
      # \ No newline at end of file

      # move into the flushing state. note: it doesn't matter if we really
      # have data to flush or not; that will be figured out later
      self.state = 'flush-' + self.state
      return None

    diff_code = line[0]
    output = self._format_text(line[1:])

    if diff_code == '+':
      if self.state == 'dump':
        # a plain addition outside a change group: emit it immediately
        self.line_number = self.line_number + 1
        return _item(type='add', right=output, line_number=self.line_number)

      # accumulate the added line; it will pair up with removed lines
      # when the group is flushed
      self.state = 'pre-change-add'
      self.right_col.append(output)
      return None

    if diff_code == '-':
      self.state = 'pre-change-remove'
      self.left_col.append(output)
      return None  # early exit to avoid line in

    # context line reached while a change group is pending
    if self.left_col or self.right_col:
      # save the line for processing again later, and move into the
      # flushing state
      self.save_line = line
      self.state = 'flush-' + self.state
      return None

    self.line_number = self.line_number + 1
    self.prev_line_number = self.prev_line_number + 1
    return _item(type='context', left=output, right=output,
                 line_number=self.line_number)

  def _flush_row(self):
    """Emit one accumulated 'remove' or 'change' row, or None when drained.

    Called repeatedly while self.state is 'flush-*'; pops one line from
    the pending left/right columns per call.
    """
    if not self.left_col and not self.right_col:
      # nothing more to flush
      return None

    if self.state == 'flush-pre-change-remove':
      # pure removals: emit them one at a time against the left column
      self.prev_line_number = self.prev_line_number + 1
      return _item(type='remove', left=self.left_col.pop(0),
                   line_number=self.prev_line_number)

    # state == flush-pre-change-add
    # paired change row: may have a left side, a right side, or both
    item = _item(type='change',
                 have_left=ezt.boolean(0),
                 have_right=ezt.boolean(0))
    if self.left_col:
      self.prev_line_number = self.prev_line_number + 1
      item.have_left = ezt.boolean(1)
      item.left = self.left_col.pop(0)
      item.line_number = self.prev_line_number
    if self.right_col:
      self.line_number = self.line_number + 1
      item.have_right = ezt.boolean(1)
      item.right = self.right_col.pop(0)
      item.line_number = self.line_number
    return item

# Raised when DiffSource is indexed out of order (see __getitem__).
class DiffSequencingError(Exception):
  pass

def diff_parse_headers(fp, diff_type, path1, path2, rev1, rev2,
sym1=None, sym2=None): date1 = date2 = log_rev1 = log_rev2 = flag = None header_lines = [] if diff_type == vclib.UNIFIED: f1 = '--- ' f2 = '+++ ' elif diff_type == vclib.CONTEXT: f1 = '*** ' f2 = '--- ' else: f1 = f2 = None # If we're parsing headers, then parse and tweak the diff headers, # collecting them in an array until we've read and handled them all. if f1 and f2: parsing = 1 len_f1 = len(f1) len_f2 = len(f2) while parsing: line = fp.readline() if not line: break if line[:len(f1)] == f1: match = _re_extract_rev.match(line) if match: date1 = match.group(1) log_rev1 = match.group(2) line = '%s%s\t%s\t%s%s\n' % (f1, path1, date1, log_rev1, sym1 and ' ' + sym1 or '') elif line[:len(f2)] == f2: match = _re_extract_rev.match(line) if match: date2 = match.group(1) log_rev2 = match.group(2) line = '%s%s\t%s\t%s%s\n' % (f2, path2, date2, log_rev2, sym2 and ' ' + sym2 or '') parsing = 0 elif line[:3] == 'Bin': flag = _RCSDIFF_IS_BINARY parsing = 0 elif (string.find(line, 'not found') != -1 or string.find(line, 'illegal option') != -1): flag = _RCSDIFF_ERROR parsing = 0 header_lines.append(line) if (log_rev1 and log_rev1 != rev1): raise debug.ViewVCException('rcsdiff found revision %s, but expected ' 'revision %s' % (log_rev1, rev1), '500 Internal Server Error') if (log_rev2 and log_rev2 != rev2): raise debug.ViewVCException('rcsdiff found revision %s, but expected ' 'revision %s' % (log_rev2, rev2), '500 Internal Server Error') return date1, date2, flag, string.join(header_lines, '') def _get_diff_path_parts(request, query_key, rev, base_rev): repos = request.repos if request.query_dict.has_key(query_key): parts = _path_parts(request.query_dict[query_key]) elif request.roottype == 'svn': try: parts = _path_parts(repos.get_location(request.where, repos._getrev(base_rev), repos._getrev(rev))) except vclib.InvalidRevision: raise debug.ViewVCException('Invalid path(s) or revision(s) passed ' 'to diff', '400 Bad Request') except vclib.ItemNotFound: raise 
debug.ViewVCException('Invalid path(s) or revision(s) passed '
                      'to diff', '400 Bad Request')
  else:
    parts = request.path_parts
  return parts


def setup_diff(request):
  """Parse and validate the diff-related query arguments.

  Reads r1/r2 (and the tr1/tr2 'text field' fallbacks) from the request,
  splits optional ':symbol' suffixes off the revision strings, resolves
  Subversion revision specifiers, and orders the two sides so that the
  older revision comes first.

  Returns a 6-tuple (p1, p2, rev1, rev2, sym1, sym2) of path-part lists,
  revision strings, and optional tag/symbol names.

  Raises debug.ViewVCException (400) on missing or invalid revisions.
  """
  query_dict = request.query_dict

  rev1 = r1 = query_dict['r1']
  rev2 = r2 = query_dict['r2']
  sym1 = sym2 = None

  # hack on the diff revisions
  if r1 == 'text':
    rev1 = query_dict.get('tr1', None)
    if not rev1:
      raise debug.ViewVCException('Missing revision from the diff '
                                  'form text field', '400 Bad Request')
  else:
    # "REV:SYMBOL" — split the symbolic tag off the revision.
    idx = string.find(r1, ':')
    if idx == -1:
      rev1 = r1
    else:
      rev1 = r1[:idx]
      sym1 = r1[idx+1:]

  if r2 == 'text':
    rev2 = query_dict.get('tr2', None)
    if not rev2:
      raise debug.ViewVCException('Missing revision from the diff '
                                  'form text field', '400 Bad Request')
    sym2 = ''
  else:
    idx = string.find(r2, ':')
    if idx == -1:
      rev2 = r2
    else:
      rev2 = r2[:idx]
      sym2 = r2[idx+1:]

  if request.roottype == 'svn':
    # Normalize Subversion revision specifiers to revision numbers.
    try:
      rev1 = str(request.repos._getrev(rev1))
      rev2 = str(request.repos._getrev(rev2))
    except vclib.InvalidRevision:
      raise debug.ViewVCException('Invalid revision(s) passed to diff',
                                  '400 Bad Request')

  p1 = _get_diff_path_parts(request, 'p1', rev1, request.pathrev)
  p2 = _get_diff_path_parts(request, 'p2', rev2, request.pathrev)

  # Swap the two sides if needed so the older revision is first.
  try:
    if revcmp(rev1, rev2) > 0:
      rev1, rev2 = rev2, rev1
      sym1, sym2 = sym2, sym1
      p1, p2 = p2, p1
  except ValueError:
    raise debug.ViewVCException('Invalid revision(s) passed to diff',
                                '400 Bad Request')

  return p1, p2, rev1, rev2, sym1, sym2


def view_patch(request):
  """Emit a raw (text/plain) patch between two revisions of a path."""
  if 'diff' not in request.cfg.options.allowed_views:
    raise debug.ViewVCException('Diff generation is disabled',
                                '403 Forbidden')
  cfg = request.cfg
  query_dict = request.query_dict
  p1, p2, rev1, rev2, sym1, sym2 = setup_diff(request)

  # Refuse to diff binary content when so configured.
  mime_type1, encoding1 = calculate_mime_type(request, p1, rev1)
  mime_type2, encoding2 = calculate_mime_type(request, p2, rev2)
  if is_binary_file_mime_type(mime_type1, cfg) or \
     is_binary_file_mime_type(mime_type2, cfg):
    raise debug.ViewVCException('Display of binary file content disabled '
                                'by configuration', '403 Forbidden')

  # In the absence of a format dictation in the CGI params, we'll let
  # use the configured diff format, allowing 'c' to mean 'c' and
  # anything else to mean 'u'.
  format = query_dict.get('diff_format',
                          cfg.options.diff_format == 'c' and 'c' or 'u')
  if format == 'c':
    diff_type = vclib.CONTEXT
  elif format == 'u':
    diff_type = vclib.UNIFIED
  else:
    raise debug.ViewVCException('Diff format %s not understood' % format,
                                '400 Bad Request')

  try:
    fp = request.repos.rawdiff(p1, rev1, p2, rev2, diff_type)
  except vclib.InvalidRevision:
    raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
                                'to diff', '400 Bad Request')

  path_left = _path_join(p1)
  path_right = _path_join(p2)
  date1, date2, flag, headers = diff_parse_headers(fp, diff_type,
                                                   path_left, path_right,
                                                   rev1, rev2, sym1, sym2)

  server_fp = get_writeready_server_file(request, 'text/plain')
  server_fp.write(headers)
  copy_stream(fp, server_fp)
  fp.close()


def view_diff(request):
  """Render the HTML diff view (side-by-side, unified, or raw) between
  two revisions of a path, using the templating machinery."""
  if 'diff' not in request.cfg.options.allowed_views:
    raise debug.ViewVCException('Diff generation is disabled',
                                '403 Forbidden')
  cfg = request.cfg
  query_dict = request.query_dict
  p1, p2, rev1, rev2, sym1, sym2 = setup_diff(request)

  # Refuse to diff binary content when so configured.
  mime_type1, encoding1 = calculate_mime_type(request, p1, rev1)
  mime_type2, encoding2 = calculate_mime_type(request, p2, rev2)
  if is_binary_file_mime_type(mime_type1, cfg) or \
     is_binary_file_mime_type(mime_type2, cfg):
    raise debug.ViewVCException('Display of binary file content disabled '
                                'by configuration', '403 Forbidden')

  # since templates are in use and subversion allows changes to the dates,
  # we can't provide a strong etag
  if check_freshness(request, None, '%s-%s' % (rev1, rev2), weak=1):
    return

  # TODO: Is the slice necessary, or is limit enough?
  log_entry1 = request.repos.itemlog(p1, rev1, vclib.SORTBY_REV,
                                     0, 1, {})[-1]
  log_entry2 = request.repos.itemlog(p2, rev2, vclib.SORTBY_REV,
                                     0, 1, {})[-1]
  ago1 = log_entry1.date is not None \
         and html_time(request, log_entry1.date, 1) or None
  ago2 = log_entry2.date is not None \
         and html_time(request, log_entry2.date, 2) or None

  diff_type = None
  diff_options = {}
  human_readable = 0

  # Map the requested format code to a vclib diff type plus options.
  format = query_dict.get('diff_format', cfg.options.diff_format)
  if format == 'c':
    diff_type = vclib.CONTEXT
  elif format == 's':
    diff_type = vclib.SIDE_BY_SIDE
  elif format == 'l':
    diff_type = vclib.UNIFIED
    diff_options['context'] = 15
    human_readable = 1
  elif format == 'f':
    diff_type = vclib.UNIFIED
    diff_options['context'] = None
    human_readable = 1
  elif format == 'h':
    diff_type = vclib.UNIFIED
    human_readable = 1
  elif format == 'u':
    diff_type = vclib.UNIFIED
  else:
    raise debug.ViewVCException('Diff format %s not understood' % format,
                                '400 Bad Request')

  if human_readable or format == 'u':
    diff_options['funout'] = cfg.options.hr_funout
  if human_readable:
    diff_options['ignore_white'] = cfg.options.hr_ignore_white
    diff_options['ignore_keyword_subst'] = cfg.options.hr_ignore_keyword_subst

  try:
    fp = sidebyside = unified = None
    # Prefer the intraline differ (idiff) when configured and applicable;
    # otherwise fall back to the repository's raw diff stream.
    if (cfg.options.hr_intraline and idiff
        and ((human_readable and idiff.sidebyside)
             or (not human_readable and diff_type == vclib.UNIFIED))):
      f1 = request.repos.openfile(p1, rev1, {})[0]
      try:
        lines_left = f1.readlines()
      finally:
        f1.close()
      f2 = request.repos.openfile(p2, rev2, {})[0]
      try:
        lines_right = f2.readlines()
      finally:
        f2.close()
      if human_readable:
        sidebyside = idiff.sidebyside(lines_left, lines_right,
                                      diff_options.get("context", 5))
      else:
        unified = idiff.unified(lines_left, lines_right,
                                diff_options.get("context", 2))
    else:
      fp = request.repos.rawdiff(p1, rev1, p2, rev2, diff_type,
                                 diff_options)
  except vclib.InvalidRevision:
    raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
                                'to diff', '400 Bad Request')

  path_left = _path_join(p1)
  path_right = _path_join(p2)

  date1 = date2 = raw_diff_fp = None
  changes = []
  if fp:
    date1, date2, flag, headers = diff_parse_headers(fp, diff_type,
                                                     path_left, path_right,
                                                     rev1, rev2, sym1, sym2)
    if human_readable:
      if flag is not None:
        changes = [ _item(type=flag) ]
      else:
        changes = DiffSource(fp, cfg)
    else:
      raw_diff_fp = MarkupPipeWrapper(fp, request.server.escape(headers),
                                      None, 1)

  no_format_params = request.query_dict.copy()
  no_format_params['diff_format'] = None
  diff_format_action, diff_format_hidden_values = \
    request.get_form(params=no_format_params)

  # Template data for the left-hand (older) side of the diff.
  fvi = get_file_view_info(request, path_left, rev1)
  left = _item(date=make_time_string(log_entry1.date, cfg),
               author=log_entry1.author,
               log=LogFormatter(request,
                                log_entry1.log).get(maxlen=0, htmlize=1),
               size=log_entry1.size,
               ago=ago1,
               path=path_left,
               rev=rev1,
               tag=sym1,
               view_href=fvi.view_href,
               download_href=fvi.download_href,
               download_text_href=fvi.download_text_href,
               annotate_href=fvi.annotate_href,
               revision_href=fvi.revision_href,
               prefer_markup=fvi.prefer_markup)

  # Template data for the right-hand (newer) side of the diff.
  fvi = get_file_view_info(request, path_right, rev2)
  right = _item(date=make_time_string(log_entry2.date, cfg),
                author=log_entry2.author,
                log=LogFormatter(request,
                                 log_entry2.log).get(maxlen=0, htmlize=1),
                size=log_entry2.size,
                ago=ago2,
                path=path_right,
                rev=rev2,
                tag=sym2,
                view_href=fvi.view_href,
                download_href=fvi.download_href,
                download_text_href=fvi.download_text_href,
                annotate_href=fvi.annotate_href,
                revision_href=fvi.revision_href,
                prefer_markup=fvi.prefer_markup)

  data = common_template_data(request)
  data.merge(ezt.TemplateData({
    'left' : left,
    'right' : right,
    'raw_diff' : raw_diff_fp,
    'changes' : changes,
    'sidebyside': sidebyside,
    'unified': unified,
    'diff_format' : request.query_dict.get('diff_format',
                                           cfg.options.diff_format),
    'patch_href' : request.get_url(view_func=view_patch,
                                   params=no_format_params,
                                   escape=1),
    'diff_format_action' : diff_format_action,
    'diff_format_hidden_values' : diff_format_hidden_values,
    }))
  generate_page(request, "diff", data)


def
generate_tarball_header(out, name, size=0, mode=None, mtime=0,
                        uid=0, gid=0, typeflag=None, linkname='',
                        uname='viewvc', gname='viewvc',
                        devmajor=1, devminor=0, prefix=None,
                        magic='ustar', version='00', chksum=None):
  """Write a single 512-byte ustar tar header block to OUT for NAME.

  Modes and type flags default from the name ('/'-suffixed names are
  directories) and LINKNAME (non-empty means symlink).  Names or link
  targets of 100 characters or more are emitted via GNU tar 'L'/'K'
  LongLink extension records.  If CHKSUM is not given, it is computed
  over the header with the checksum field blanked to spaces, per the
  tar format.
  """
  if not mode:
    if name[-1:] == '/':
      mode = 0755
    else:
      mode = 0644

  if not typeflag:
    if linkname:
      typeflag = '2' # symbolic link
    elif name[-1:] == '/':
      typeflag = '5' # directory
    else:
      typeflag = '0' # regular file

  if not prefix:
    prefix = ''

  # generate a GNU tar extension header for a long name.
  if len(name) >= 100:
    generate_tarball_header(out, '././@LongLink', len(name),
                            0, 0, 0, 0, 'L')
    out.write(name)
    out.write('\0' * (511 - ((len(name) + 511) % 512)))

  # generate a GNU tar extension header for a long symlink name.
  if len(linkname) >= 100:
    generate_tarball_header(out, '././@LongLink', len(linkname),
                            0, 0, 0, 0, 'K')
    out.write(linkname)
    out.write('\0' * (511 - ((len(linkname) + 511) % 512)))

  # First header section: name through mtime (octal, NUL-padded fields).
  block1 = struct.pack('100s 8s 8s 8s 12s 12s',
                       name,
                       '%07o' % mode,
                       '%07o' % uid,
                       '%07o' % gid,
                       '%011o' % size,
                       '%011o' % mtime)

  # Second header section: typeflag through prefix.
  block2 = struct.pack('c 100s 6s 2s 32s 32s 8s 8s 155s',
                       typeflag,
                       linkname,
                       magic,
                       version,
                       uname,
                       gname,
                       '%07o' % devmajor,
                       '%07o' % devminor,
                       prefix)

  if not chksum:
    # Per the tar spec, the checksum is computed with the 8-byte
    # checksum field itself treated as all spaces.
    dummy_chksum = '        '
    block = block1 + dummy_chksum + block2
    chksum = 0
    for i in range(len(block)):
      chksum = chksum + ord(block[i])

  block = block1 + struct.pack('8s', '%07o' % chksum) + block2
  block = block + '\0' * (512 - len(block))
  out.write(block)


def generate_tarball(out, request, reldir, stack, dir_mtime=None):
  """Recursively write tar records for the tree at RELDIR (relative to
  the request path) to OUT.

  STACK holds directory names whose headers haven't been written yet
  (CVS directories are emitted lazily, only once a contained file is
  seen).  DIR_MTIME, when given, overrides the computed directory
  timestamp (used for Subversion recursion).
  """
  # get directory info from repository
  rep_path = request.path_parts + reldir
  entries = request.repos.listdir(rep_path, request.pathrev, {})
  request.repos.dirlogs(rep_path, request.pathrev, entries, {})
  entries.sort(lambda a, b: cmp(a.name, b.name))

  # figure out corresponding path in tar file.  everything gets put
  # underneath a single top level directory named after the repository
  # directory being tarred
  if request.path_parts:
    tar_dir = request.path_parts[-1] + '/'
  else:
    tar_dir = request.rootname + '/'
  if reldir:
    tar_dir = tar_dir + _path_join(reldir) + '/'

  cvs = request.roottype == 'cvs'

  # If our caller doesn't dictate a datestamp to use for the current
  # directory, its datestamps will be the youngest of the datestamps
  # of versioned items in that subdirectory.  We'll be ignoring dead
  # or busted items and, in CVS, subdirs.
  if dir_mtime is None:
    dir_mtime = 0
    for file in entries:
      if cvs and (file.kind != vclib.FILE or file.rev is None or file.dead):
        continue
      if (file.date is not None) and (file.date > dir_mtime):
        dir_mtime = file.date

  # Push current directory onto the stack.
  stack.append(tar_dir)

  # If this is Subversion, we generate a header for this directory
  # regardless of its contents.  For CVS it will only get into the
  # tarball if it has files underneath it, which we determine later.
  if not cvs:
    generate_tarball_header(out, tar_dir, mtime=dir_mtime)

  # Run through the files in this directory, skipping busted and
  # unauthorized ones.
  for file in entries:
    if file.kind != vclib.FILE:
      continue
    if cvs and (file.rev is None or file.dead):
      continue

    # If we get here, we've seen at least one valid file in the
    # current directory.  For CVS, we need to make sure there are
    # directory parents to contain it, so we flush the stack.
    if cvs:
      for dir in stack:
        generate_tarball_header(out, dir, mtime=dir_mtime)
      del stack[:]

    # Calculate the mode for the file.  Sure, we could look directly
    # at the ,v file in CVS, but that's a layering violation we'd like
    # to avoid as much as possible.
    if request.repos.isexecutable(rep_path + [file.name], request.pathrev):
      mode = 0755
    else:
      mode = 0644

    # Is this thing a symlink?
    #
    # ### FIXME: A better solution would be to have vclib returning
    # ### symlinks with a new vclib.SYMLINK path type.
    symlink_target = None
    if hasattr(request.repos, 'get_symlink_target'):
      symlink_target = request.repos.get_symlink_target(
        rep_path + [file.name], request.pathrev)

    # If the object is a symlink, generate the appropriate header.
    # Otherwise, we're dealing with a regular file.
    if symlink_target:
      generate_tarball_header(out, tar_dir + file.name, 0, mode,
                              file.date is not None and file.date or 0,
                              typeflag='2', linkname=symlink_target)
    else:
      filesize = request.repos.filesize(rep_path + [file.name],
                                        request.pathrev)

      if filesize == -1:
        # Bummer.  We have to calculate the filesize manually.
        fp = request.repos.openfile(rep_path + [file.name],
                                    request.pathrev, {})[0]
        filesize = 0
        while 1:
          chunk = retry_read(fp)
          if not chunk:
            break
          filesize = filesize + len(chunk)
        fp.close()

      # Write the tarball header...
      generate_tarball_header(out, tar_dir + file.name, filesize, mode,
                              file.date is not None and file.date or 0)

      # ...the file's contents ...
      fp = request.repos.openfile(rep_path + [file.name],
                                  request.pathrev, {})[0]
      while 1:
        chunk = retry_read(fp)
        if not chunk:
          break
        out.write(chunk)
      fp.close()

      # ... and then add the block padding.
      out.write('\0' * (511 - (filesize + 511) % 512))

  # Recurse into subdirectories, skipping busted and unauthorized (or
  # configured-to-be-hidden) ones.
  for file in entries:
    if file.errors or file.kind != vclib.DIR:
      continue
    if request.cfg.options.hide_cvsroot \
       and is_cvsroot_path(request.roottype, rep_path + [file.name]):
      continue
    mtime = request.roottype == 'svn' and file.date or None
    generate_tarball(out, request, reldir + [file.name], stack, mtime)

  # Pop the current directory from the stack.
  del stack[-1:]


def download_tarball(request):
  """Stream a gzipped tarball of the requested directory tree."""
  cfg = request.cfg

  if 'tar' not in request.cfg.options.allowed_views:
    raise debug.ViewVCException('Tarball generation is disabled',
                                '403 Forbidden')

  # If debugging, we just need to open up the specified tar path for
  # writing.
  # Otherwise, we get a writeable server output stream --
  # disabling any default compression thereupon -- and wrap that in
  # our own gzip stream wrapper.
  if debug.TARFILE_PATH:
    fp = open(debug.TARFILE_PATH, 'w')
  else:
    tarfile = request.rootname
    if request.path_parts:
      tarfile = "%s-%s" % (tarfile, request.path_parts[-1])
    request.server.addheader('Content-Disposition',
                             'attachment; filename="%s.tar.gz"' % (tarfile))
    server_fp = get_writeready_server_file(request, 'application/x-gzip',
                                           allow_compress=False)
    request.server.flush()
    fp = gzip.GzipFile('', 'wb', 9, server_fp)

  ### FIXME: For Subversion repositories, we can get the real mtime of the
  ### top-level directory here.
  generate_tarball(fp, request, [], [])

  # Tar archives end with two zero-filled 512-byte blocks.
  fp.write('\0' * 1024)
  fp.close()

  if debug.TARFILE_PATH:
    request.server.header('')
    print """
<html>
<body>
<p>Tarball '%s' successfully generated!</p>
</body>
</html>""" % (debug.TARFILE_PATH)


def view_revision(request):
  """Render the Subversion revision view: metadata, revision
  properties, and the (possibly truncated) list of changed paths."""
  if request.roottype != "svn":
    raise debug.ViewVCException("Revision view not supported for CVS "
                                "repositories at this time.",
                                "400 Bad Request")

  cfg = request.cfg
  query_dict = request.query_dict
  try:
    rev = request.repos._getrev(query_dict.get('revision'))
  except vclib.InvalidRevision:
    raise debug.ViewVCException('Invalid revision', '404 Not Found')
  youngest_rev = request.repos.get_youngest_revision()

  # The revision number acts as a weak validator (but we tell browsers
  # not to cache the youngest revision).
  if rev != youngest_rev and check_freshness(request, None, str(rev),
                                             weak=1):
    return

  # Fetch the revision information.
  date, author, msg, revprops, changes = request.repos.revinfo(rev)
  date_str = make_time_string(date, cfg)

  # Fix up the revprops list (rather like get_itemprops()).
  propnames = revprops.keys()
  propnames.sort()
  props = []
  for name in propnames:
    lf = LogFormatter(request, revprops[name])
    value = lf.get(maxlen=0, htmlize=1)
    undisplayable = ezt.boolean(0)
    # skip non-utf8 property names
    try:
      unicode(name, 'utf8')
    except:
      continue
    # note non-utf8 property values
    try:
      unicode(value, 'utf8')
    except:
      value = None
      undisplayable = ezt.boolean(1)
    props.append(_item(name=name, value=value,
                       undisplayable=undisplayable))

  # Sort the changes list by path.
  def changes_sort_by_path(a, b):
    return cmp(a.path_parts, b.path_parts)
  changes.sort(changes_sort_by_path)

  # Handle limit_changes parameter
  cfg_limit_changes = cfg.options.limit_changes
  limit_changes = int(query_dict.get('limit_changes', cfg_limit_changes))
  more_changes = None
  more_changes_href = None
  first_changes = None
  first_changes_href = None
  num_changes = len(changes)
  if limit_changes and len(changes) > limit_changes:
    # Truncate the display, offering a link to the full list.
    more_changes = len(changes) - limit_changes
    params = query_dict.copy()
    params['limit_changes'] = 0
    more_changes_href = request.get_url(params=params, escape=1)
    changes = changes[:limit_changes]
  elif cfg_limit_changes and len(changes) > cfg_limit_changes:
    # Unlimited view requested; offer a link back to the limited one.
    first_changes = cfg_limit_changes
    params = query_dict.copy()
    params['limit_changes'] = None
    first_changes_href = request.get_url(params=params, escape=1)

  # Add the hrefs, types, and prev info
  for change in changes:
    change.view_href = change.diff_href = change.type = \
      change.log_href = None

    # If the path is newly added, don't claim text or property
    # modifications.
    if (change.action == vclib.ADDED or change.action == vclib.REPLACED) \
       and not change.copied:
      change.text_changed = 0
      change.props_changed = 0

    # Calculate the view link URLs (for which we must have a pathtype).
    if change.pathtype:
      view_func = None
      if change.pathtype is vclib.FILE \
         and 'markup' in cfg.options.allowed_views:
        view_func = view_markup
      elif change.pathtype is vclib.DIR:
        view_func = view_directory

      path = _path_join(change.path_parts)
      base_path = _path_join(change.base_path_parts)
      if change.action == vclib.DELETED:
        # A deleted path only exists at its pre-deletion location/rev.
        link_rev = str(change.base_rev)
        link_where = base_path
      else:
        link_rev = str(rev)
        link_where = path

      change.view_href = request.get_url(view_func=view_func,
                                         where=link_where,
                                         pathtype=change.pathtype,
                                         params={'pathrev' : link_rev},
                                         escape=1)
      change.log_href = request.get_url(view_func=view_log,
                                        where=link_where,
                                        pathtype=change.pathtype,
                                        params={'pathrev' : link_rev},
                                        escape=1)

      if change.pathtype is vclib.FILE and change.text_changed:
        change.diff_href = request.get_url(view_func=view_diff,
                                           where=path,
                                           pathtype=change.pathtype,
                                           params={'pathrev' : str(rev),
                                                   'r1' : str(rev),
                                                   'r2' : str(change.base_rev),
                                                   },
                                           escape=1)

    # use same variable names as the log template
    change.path = _path_join(change.path_parts)
    change.copy_path = _path_join(change.base_path_parts)
    change.copy_rev = change.base_rev
    change.text_mods = ezt.boolean(change.text_changed)
    change.prop_mods = ezt.boolean(change.props_changed)
    change.is_copy = ezt.boolean(change.copied)
    change.pathtype = (change.pathtype == vclib.FILE and 'file') \
                      or (change.pathtype == vclib.DIR and 'dir') \
                      or None
    del change.path_parts
    del change.base_path_parts
    del change.base_rev
    del change.text_changed
    del change.props_changed
    del change.copied

  prev_rev_href = next_rev_href = None
  if rev > 0:
    prev_rev_href = request.get_url(view_func=view_revision,
                                    where=None,
                                    pathtype=None,
                                    params={'revision': str(rev - 1)},
                                    escape=1)
  if rev < request.repos.get_youngest_revision():
    next_rev_href = request.get_url(view_func=view_revision,
                                    where=None,
                                    pathtype=None,
                                    params={'revision': str(rev + 1)},
                                    escape=1)
  jump_rev_action, jump_rev_hidden_values = \
    request.get_form(params={'revision': None})

  lf = LogFormatter(request, msg)
  data = common_template_data(request)
  data.merge(ezt.TemplateData({
    'rev' : str(rev),
    'author' : author,
    'date' : date_str,
    'log' : lf.get(maxlen=0, htmlize=1),
    'properties' : props,
    'ago' : date is not None and html_time(request, date, 1) or None,
    'changes' : changes,
    'prev_href' : prev_rev_href,
    'next_href' : next_rev_href,
    'num_changes' : num_changes,
    'limit_changes': limit_changes,
    'more_changes': more_changes,
    'more_changes_href': more_changes_href,
    'first_changes': first_changes,
    'first_changes_href': first_changes_href,
    'jump_rev_action' : jump_rev_action,
    'jump_rev_hidden_values' : jump_rev_hidden_values,
    'revision_href' : request.get_url(view_func=view_revision,
                                      where=None,
                                      pathtype=None,
                                      params={'revision': str(rev)},
                                      escape=1),
    }))
  if rev == youngest_rev:
    request.server.addheader("Cache-control", "no-store")
  generate_page(request, "revision", data)


def is_query_supported(request):
  """Returns true if querying is supported for the given path."""
  return request.cfg.cvsdb.enabled \
         and request.pathtype == vclib.DIR \
         and request.roottype in ['cvs', 'svn']


def is_querydb_nonempty_for_root(request):
  """Return 1 iff commits database integration is supported *and* the
  current root is found in that database.  Only does this check if
  check_database is set to 1."""
  if request.cfg.cvsdb.enabled and request.roottype in ['cvs', 'svn']:
    if request.cfg.cvsdb.check_database_for_root:
      global cvsdb
      import cvsdb
      db = cvsdb.ConnectDatabaseReadOnly(request.cfg)
      repos_root, repos_dir = cvsdb.FindRepository(db, request.rootpath)
      if repos_root:
        return 1
    else:
      return 1
  return 0


def validate_query_args(request):
  # Do some additional input validation of query form arguments beyond
  # what is offered by the CGI param validation loop in Request.run_viewvc().
for arg_base in ['branch', 'file', 'comment', 'who']: # First, make sure the the XXX_match args have valid values: arg_match = arg_base + '_match' arg_match_value = request.query_dict.get(arg_match, 'exact') if not arg_match_value in ('exact', 'like', 'glob', 'regex', 'notregex'): raise debug.ViewVCException( 'An illegal value was provided for the "%s" parameter.' % (arg_match), '400 Bad Request') # Now, for those args which are supposed to be regular expressions (per # their corresponding XXX_match values), make sure they are. if arg_match_value == 'regex' or arg_match_value == 'notregex': arg_base_value = request.query_dict.get(arg_base) if arg_base_value: try: re.compile(arg_base_value) except: raise debug.ViewVCException( 'An illegal value was provided for the "%s" parameter.' % (arg_base), '400 Bad Request') def view_queryform(request): if not is_query_supported(request): raise debug.ViewVCException('Can not query project root "%s" at "%s".' % (request.rootname, request.where), '403 Forbidden') # Do some more precise input validation. 
validate_query_args(request) query_action, query_hidden_values = \ request.get_form(view_func=view_query, params={'limit_changes': None}) limit_changes = \ int(request.query_dict.get('limit_changes', request.cfg.options.limit_changes)) def escaped_query_dict_get(itemname, itemdefault=''): return request.server.escape(request.query_dict.get(itemname, itemdefault)) data = common_template_data(request) data.merge(ezt.TemplateData({ 'branch' : escaped_query_dict_get('branch', ''), 'branch_match' : escaped_query_dict_get('branch_match', 'exact'), 'dir' : escaped_query_dict_get('dir', ''), 'file' : escaped_query_dict_get('file', ''), 'file_match' : escaped_query_dict_get('file_match', 'exact'), 'who' : escaped_query_dict_get('who', ''), 'who_match' : escaped_query_dict_get('who_match', 'exact'), 'comment' : escaped_query_dict_get('comment', ''), 'comment_match' : escaped_query_dict_get('comment_match', 'exact'), 'querysort' : escaped_query_dict_get('querysort', 'date'), 'date' : escaped_query_dict_get('date', 'hours'), 'hours' : escaped_query_dict_get('hours', '2'), 'mindate' : escaped_query_dict_get('mindate', ''), 'maxdate' : escaped_query_dict_get('maxdate', ''), 'query_action' : query_action, 'query_hidden_values' : query_hidden_values, 'limit_changes' : limit_changes, 'dir_href' : request.get_url(view_func=view_directory, params={}, escape=1), })) generate_page(request, "query_form", data) def parse_date(datestr): """Parse a date string from the query form.""" match = re.match(r'^(\d\d\d\d)-(\d\d)-(\d\d)(?:\ +' '(\d\d):(\d\d)(?::(\d\d))?)?$', datestr) if match: year = int(match.group(1)) month = int(match.group(2)) day = int(match.group(3)) hour = match.group(4) if hour is not None: hour = int(hour) else: hour = 0 minute = match.group(5) if minute is not None: minute = int(minute) else: minute = 0 second = match.group(6) if second is not None: second = int(second) else: second = 0 # return a "seconds since epoch" value assuming date given in UTC tm = (year, month, 
day, hour, minute, second, 0, 0, 0) return compat.timegm(tm) else: return None def english_query(request): """Generate a sentance describing the query.""" cfg = request.cfg ret = [ 'Checkins ' ] dir = request.query_dict.get('dir', '') if dir: ret.append('to ') if ',' in dir: ret.append('subdirectories') else: ret.append('subdirectory') ret.append(' <em>%s</em> ' % request.server.escape(dir)) file = request.query_dict.get('file', '') if file: if len(ret) != 1: ret.append('and ') ret.append('to file <em>%s</em> ' % request.server.escape(file)) who = request.query_dict.get('who', '') branch = request.query_dict.get('branch', '') if branch: ret.append('on branch <em>%s</em> ' % request.server.escape(branch)) else: ret.append('on all branches ') comment = request.query_dict.get('comment', '') if comment: ret.append('with comment <i>%s</i> ' % request.server.escape(comment)) if who: ret.append('by <em>%s</em> ' % request.server.escape(who)) date = request.query_dict.get('date', 'hours') if date == 'hours': ret.append('in the last %s hours' \ % request.server.escape(request.query_dict.get('hours', '2'))) elif date == 'day': ret.append('in the last day') elif date == 'week': ret.append('in the last week') elif date == 'month': ret.append('in the last month') elif date == 'all': ret.append('since the beginning of time') elif date == 'explicit': mindate = request.query_dict.get('mindate', '') maxdate = request.query_dict.get('maxdate', '') if mindate and maxdate: w1, w2 = 'between', 'and' else: w1, w2 = 'since', 'before' if mindate: mindate = make_time_string(parse_date(mindate), cfg) ret.append('%s <em>%s</em> ' % (w1, mindate)) if maxdate: maxdate = make_time_string(parse_date(maxdate), cfg) ret.append('%s <em>%s</em> ' % (w2, maxdate)) return string.join(ret, '') def prev_rev(rev): """Returns a string representing the previous revision of the argument.""" r = string.split(rev, '.') # decrement final revision component r[-1] = str(int(r[-1]) - 1) # prune if we pass the 
beginning of the branch if len(r) > 2 and r[-1] == '0': r = r[:-2] return string.join(r, '.') def build_commit(request, files, max_files, dir_strip, format): """Return a commit object build from the information in FILES, or None if no allowed files are present in the set. DIR_STRIP is the path prefix to remove from the commit object's set of files. If MAX_FILES is non-zero, it is used to limit the number of files returned in the commit object. FORMAT is the requested output format of the query request.""" cfg = request.cfg author = files[0].GetAuthor() date = files[0].GetTime() desc = files[0].GetDescription() commit_rev = files[0].GetRevision() len_strip = len(dir_strip) commit_files = [] num_allowed = 0 plus_count = 0 minus_count = 0 found_unreadable = 0 for f in files: dirname = f.GetDirectory() filename = f.GetFile() if dir_strip: assert dirname[:len_strip] == dir_strip assert len(dirname) == len_strip or dirname[len(dir_strip)] == '/' dirname = dirname[len_strip+1:] where = dirname and ("%s/%s" % (dirname, filename)) or filename rev = f.GetRevision() rev_prev = prev_rev(rev) commit_time = f.GetTime() if commit_time: commit_time = make_time_string(commit_time, cfg) change_type = f.GetTypeString() # In CVS, we can actually look at deleted revisions; in Subversion # we can't -- we'll look at the previous revision instead. exam_rev = rev if request.roottype == 'svn' and change_type == 'Remove': exam_rev = rev_prev # Check path access (since the commits database logic bypasses the # vclib layer and, thus, the vcauth stuff that layer uses). path_parts = _path_parts(where) if path_parts: # Skip files in CVSROOT if asked to hide such. if cfg.options.hide_cvsroot \ and is_cvsroot_path(request.roottype, path_parts): found_unreadable = 1 continue # We have to do a rare authz check here because this data comes # from the CVSdb, not from the vclib providers. # # WARNING: The Subversion CVSdb integration logic is weak, weak, # weak. 
      # weak.  It has no ability to track copies, so complex
      # situations like a copied directory with a deleted subfile (all
      # in the same revision) are very ... difficult.  We've no choice
      # but to omit as unauthorized paths the authorization logic
      # can't find.
      try:
        readable = vclib.check_path_access(request.repos, path_parts,
                                           None, exam_rev)
      except vclib.ItemNotFound:
        readable = 0
      if not readable:
        found_unreadable = 1
        continue

    # Build the various links for this file's row in the output.
    if request.roottype == 'svn':
      params = { 'pathrev': exam_rev }
    else:
      params = { 'revision': exam_rev,
                 'pathrev': f.GetBranch() or None }
    dir_href = request.get_url(view_func=view_directory,
                               where=dirname, pathtype=vclib.DIR,
                               params=params, escape=1)
    log_href = request.get_url(view_func=view_log,
                               where=where, pathtype=vclib.FILE,
                               params=params, escape=1)
    diff_href = view_href = download_href = None
    if 'markup' in cfg.options.allowed_views:
      view_href = request.get_url(view_func=view_markup,
                                  where=where, pathtype=vclib.FILE,
                                  params=params, escape=1)
    if 'co' in cfg.options.allowed_views:
      download_href = request.get_url(view_func=view_checkout,
                                      where=where, pathtype=vclib.FILE,
                                      params=params, escape=1)
    if change_type == 'Change':
      diff_href_params = params.copy()
      diff_href_params.update({
        'r1': rev_prev,
        'r2': rev,
        'diff_format': None
        })
      diff_href = request.get_url(view_func=view_diff,
                                  where=where, pathtype=vclib.FILE,
                                  params=diff_href_params, escape=1)
    mime_type, encoding = calculate_mime_type(request, path_parts,
                                              exam_rev)
    prefer_markup = ezt.boolean(default_view(mime_type, cfg)
                                == view_markup)

    # Update plus/minus line change count.
    plus = int(f.GetPlusCount())
    minus = int(f.GetMinusCount())
    plus_count = plus_count + plus
    minus_count = minus_count + minus

    # Authorized files beyond MAX_FILES still count toward the totals
    # above, but aren't included in the returned file list.
    num_allowed = num_allowed + 1
    if max_files and num_allowed > max_files:
      continue

    commit_files.append(_item(date=commit_time,
                              dir=request.server.escape(dirname),
                              file=request.server.escape(filename),
                              author=request.server.escape(f.GetAuthor()),
                              rev=rev,
                              branch=f.GetBranch(),
                              plus=plus,
                              minus=minus,
                              type=change_type,
                              dir_href=dir_href,
                              log_href=log_href,
                              view_href=view_href,
                              download_href=download_href,
                              prefer_markup=prefer_markup,
                              diff_href=diff_href))

  # No files survived authz checks?  Let's just pretend this
  # little commit didn't happen, shall we?
  if not len(commit_files):
    return None

  commit = _item(num_files=len(commit_files), files=commit_files,
                 plus=plus_count, minus=minus_count)
  commit.limited_files = ezt.boolean(num_allowed > len(commit_files))

  # We'll mask log messages in commits which contain unreadable paths,
  # but even that is kinda iffy.  If a person searches for
  # '/some/hidden/path' across log messages, then gets a response set
  # that shows commits lacking log message, said person can reasonably
  # assume that the log messages contained the hidden path, and that
  # this is likely because they are referencing a real path in the
  # repository -- a path the user isn't supposed to even know about.
  if found_unreadable:
    commit.log = None
    commit.short_log = None
  else:
    lf = LogFormatter(request, desc)
    htmlize = (format != 'rss')
    commit.log = lf.get(maxlen=0, htmlize=htmlize)
    commit.short_log = lf.get(maxlen=cfg.options.short_log_len,
                              htmlize=htmlize)
  commit.author = request.server.escape(author)
  commit.rss_date = make_rss_time_string(date, request.cfg)
  if request.roottype == 'svn':
    commit.rev = commit_rev
    commit.rss_url = '%s://%s%s' % \
      (request.server.getenv("HTTPS") == "on" and "https" or "http",
       request.server.getenv("HTTP_HOST"),
       request.get_url(view_func=view_revision,
                       params={'revision': commit.rev},
                       escape=1))
  else:
    commit.rev = None
    commit.rss_url = None
  return commit


def query_backout(request, commits):
  """Emit a text/plain shell script which backs out the changes in
  COMMITS ('cvs update -j' or 'svn merge' per file, by root type)."""
  server_fp = get_writeready_server_file(request, 'text/plain')
  if not commits:
    server_fp.write("""\
# No changes were selected by the query.
# There is nothing to back out.
""")
    return
  server_fp.write("""\
# This page can be saved as a shell script and executed.
# It should be run at the top of your work area.  It will update
# your working copy to back out the changes selected by the
# query.
""")
  for commit in commits:
    for fileinfo in commit.files:
      if request.roottype == 'cvs':
        server_fp.write('cvs update -j %s -j %s %s/%s\n'
                        % (fileinfo.rev, prev_rev(fileinfo.rev),
                           fileinfo.dir, fileinfo.file))
      elif request.roottype == 'svn':
        server_fp.write('svn merge -r %s:%s %s/%s\n'
                        % (fileinfo.rev, prev_rev(fileinfo.rev),
                           fileinfo.dir, fileinfo.file))


def view_query(request):
  """Run a commit-database query built from the request's form data and
  render the results (HTML, RSS, or backout-script output)."""
  if not is_query_supported(request):
    raise debug.ViewVCException('Can not query project root "%s" at "%s".'
                                % (request.rootname, request.where),
                                '403 Forbidden')

  cfg = request.cfg

  # Do some more precise input validation.
  validate_query_args(request)

  # get form data
  branch = request.query_dict.get('branch', '')
  branch_match = request.query_dict.get('branch_match', 'exact')
  dir = request.query_dict.get('dir', '')
  file = request.query_dict.get('file', '')
  file_match = request.query_dict.get('file_match', 'exact')
  who = request.query_dict.get('who', '')
  who_match = request.query_dict.get('who_match', 'exact')
  comment = request.query_dict.get('comment', '')
  comment_match = request.query_dict.get('comment_match', 'exact')
  querysort = request.query_dict.get('querysort', 'date')
  date = request.query_dict.get('date', 'hours')
  hours = request.query_dict.get('hours', '2')
  mindate = request.query_dict.get('mindate', '')
  maxdate = request.query_dict.get('maxdate', '')
  format = request.query_dict.get('format')
  limit_changes = int(request.query_dict.get('limit_changes',
                                             cfg.options.limit_changes))

  match_types = { 'exact':1, 'like':1, 'glob':1, 'regex':1, 'notregex':1 }
  sort_types = { 'date':1, 'author':1, 'file':1 }
  date_types = { 'hours':1, 'day':1, 'week':1, 'month':1,
                 'all':1, 'explicit':1 }

  # parse various fields, validating or converting them
  if not match_types.has_key(branch_match): branch_match = 'exact'
  if not match_types.has_key(file_match): file_match = 'exact'
  if not match_types.has_key(who_match): who_match = 'exact'
  if not match_types.has_key(comment_match): comment_match = 'exact'
  if not sort_types.has_key(querysort): querysort = 'date'
  if not date_types.has_key(date): date = 'hours'
  mindate = parse_date(mindate)
  maxdate = parse_date(maxdate)

  global cvsdb
  import cvsdb

  db = cvsdb.ConnectDatabaseReadOnly(cfg)
  repos_root, repos_dir = cvsdb.FindRepository(db, request.rootpath)
  if not repos_root:
    raise debug.ViewVCException(
      "The root '%s' was not found in the commit database "
      % request.rootname)

  # create the database query from the form data
  query = cvsdb.CreateCheckinQuery()
  query.SetRepository(repos_root)

  # treat "HEAD" specially ...
  if branch_match == 'exact' and branch == 'HEAD':
    query.SetBranch('')
  elif branch:
    query.SetBranch(branch, branch_match)

  if dir:
    # Each named subdirectory matches itself exactly plus everything
    # beneath it (LIKE 'path/%').
    for subdir in string.split(dir, ','):
      path = (_path_join(repos_dir + request.path_parts
                         + _path_parts(string.strip(subdir))))
      query.SetDirectory(path, 'exact')
      query.SetDirectory('%s/%%' % cvsdb.EscapeLike(path), 'like')
  else:
    where = _path_join(repos_dir + request.path_parts)
    if where: # if we are in a subdirectory ...
      query.SetDirectory(where, 'exact')
      query.SetDirectory('%s/%%' % cvsdb.EscapeLike(where), 'like')
  if file:
    query.SetFile(file, file_match)
  if who:
    query.SetAuthor(who, who_match)
  if comment:
    query.SetComment(comment, comment_match)
  query.SetSortMethod(querysort)
  if date == 'hours':
    query.SetFromDateHoursAgo(int(hours))
  elif date == 'day':
    query.SetFromDateDaysAgo(1)
  elif date == 'week':
    query.SetFromDateDaysAgo(7)
  elif date == 'month':
    query.SetFromDateDaysAgo(31)
  elif date == 'all':
    pass
  elif date == 'explicit':
    if mindate is not None:
      query.SetFromDateObject(mindate)
    if maxdate is not None:
      query.SetToDateObject(maxdate)

  # Set the admin-defined (via configuration) row limits.  This is to avoid
  # slamming the database server with a monster query.
  if format == 'rss':
    query.SetLimit(cfg.cvsdb.rss_row_limit)
  else:
    query.SetLimit(cfg.cvsdb.row_limit)

  # run the query
  db.RunQuery(query)
  commit_list = query.GetCommitList()
  row_limit_reached = query.GetLimitReached()

  # gather commits
  commits = []
  plus_count = 0
  minus_count = 0
  mod_time = -1
  if commit_list:
    files = []
    limited_files = 0
    current_desc = commit_list[0].GetDescriptionID()
    current_rev = commit_list[0].GetRevision()
    dir_strip = _path_join(repos_dir)

    for commit in commit_list:
      commit_desc = commit.GetDescriptionID()
      commit_rev = commit.GetRevision()

      # base modification time on the newest commit
      if commit.GetTime() > mod_time:
        mod_time = commit.GetTime()

      # For CVS, group commits with the same commit message.
      # For Subversion, group them only if they have the same revision number
      if request.roottype == 'cvs':
        if current_desc == commit_desc:
          files.append(commit)
          continue
      else:
        if current_rev == commit_rev:
          files.append(commit)
          continue

      # append this grouping
      commit_item = build_commit(request, files, limit_changes,
                                 dir_strip, format)
      if commit_item:
        # update running plus/minus totals
        plus_count = plus_count + commit_item.plus
        minus_count = minus_count + commit_item.minus
        commits.append(commit_item)

      files = [ commit ]
      limited_files = 0
      current_desc = commit_desc
      current_rev = commit_rev

    # we need to tack on our last commit grouping, if any
    commit_item = build_commit(request, files, limit_changes,
                               dir_strip, format)
    if commit_item:
      # update running plus/minus totals
      plus_count = plus_count + commit_item.plus
      minus_count = minus_count + commit_item.minus
      commits.append(commit_item)

  # only show the branch column if we are querying all branches
  # or doing a non-exact branch match on a CVS repository.
  show_branch = ezt.boolean(request.roottype == 'cvs'
                            and (branch == ''
                                 or branch_match != 'exact'))

  # backout link
  params = request.query_dict.copy()
  params['format'] = 'backout'
  backout_href = request.get_url(params=params, escape=1)

  # link to zero limit_changes value
  params = request.query_dict.copy()
  params['limit_changes'] = 0
  limit_changes_href = request.get_url(params=params, escape=1)

  # if we got any results, use the newest commit as the modification time
  if mod_time >= 0:
    if check_freshness(request, mod_time):
      return

  if format == 'backout':
    query_backout(request, commits)
    return

  data = common_template_data(request)
  data.merge(ezt.TemplateData({
    'sql': request.server.escape(db.CreateSQLQueryString(query)),
    'english_query': english_query(request),
    'queryform_href': request.get_url(view_func=view_queryform, escape=1),
    'backout_href': backout_href,
    'plus_count': plus_count,
    'minus_count': minus_count,
    'show_branch': show_branch,
    'querysort': querysort,
    'commits': commits,
    'row_limit_reached' : ezt.boolean(row_limit_reached),
    'limit_changes': limit_changes,
    'limit_changes_href': limit_changes_href,
    'rss_link_href': request.get_url(view_func=view_query,
                                     params={'date': 'month'},
                                     escape=1, prefix=1),
    }))
  if format == 'rss':
    generate_page(request, "rss", data, "application/rss+xml")
  else:
    generate_page(request, "query_results", data)


# Map of view-name CGI parameter values to their handler functions.
_views = {
  'annotate':  view_annotate,
  'co':        view_checkout,
  'diff':      view_diff,
  'dir':       view_directory,
  'graph':     view_cvsgraph,
  'graphimg':  view_cvsgraph_image,
  'log':       view_log,
  'markup':    view_markup,
  'patch':     view_patch,
  'query':     view_query,
  'queryform': view_queryform,
  'revision':  view_revision,
  'roots':     view_roots,
  'tar':       download_tarball,
  'redirect_pathrev': redirect_pathrev,
}

# Reverse mapping: view function -> view-name code (for URL generation).
_view_codes = {}
for code, view in _views.items():
  _view_codes[view] = code


def list_roots(request):
  cfg = request.cfg
  allroots = { }

  # Add the viewable Subversion roots
  for root in cfg.general.svn_roots.keys():
    auth = setup_authorizer(cfg, request.username,
root) try: repos = vclib.svn.SubversionRepository(root, cfg.general.svn_roots[root], auth, cfg.utilities, cfg.options.svn_config_dir) lastmod = None if cfg.options.show_roots_lastmod: try: repos.open() youngest_rev = repos.youngest date, author, msg, revprops, changes = repos.revinfo(youngest_rev) date_str = make_time_string(date, cfg) ago = html_time(request, date) lf = LogFormatter(request, msg) log = lf.get(maxlen=0, htmlize=1) short_log = lf.get(maxlen=cfg.options.short_log_len, htmlize=1) lastmod = _item(ago=ago, author=author, date=date_str, log=log, short_log=short_log, rev=str(youngest_rev)) except: lastmod = None except vclib.ReposNotFound: continue allroots[root] = [cfg.general.svn_roots[root], 'svn', lastmod] # Add the viewable CVS roots for root in cfg.general.cvs_roots.keys(): auth = setup_authorizer(cfg, request.username, root) try: vclib.ccvs.CVSRepository(root, cfg.general.cvs_roots[root], auth, cfg.utilities, cfg.options.use_rcsparse) except vclib.ReposNotFound: continue allroots[root] = [cfg.general.cvs_roots[root], 'cvs', None] return allroots def expand_root_parents(cfg): """Expand the configured root parents into individual roots.""" # Each item in root_parents is a "directory : repo_type" string. for pp in cfg.general.root_parents: pos = string.rfind(pp, ':') if pos < 0: raise debug.ViewVCException( 'The path "%s" in "root_parents" does not include a ' 'repository type. Expected "cvs" or "svn".' % (pp)) repo_type = string.strip(pp[pos+1:]) pp = os.path.normpath(string.strip(pp[:pos])) if repo_type == 'cvs': roots = vclib.ccvs.expand_root_parent(pp) if cfg.options.hide_cvsroot and roots.has_key('CVSROOT'): del roots['CVSROOT'] cfg.general.cvs_roots.update(roots) elif repo_type == 'svn': roots = vclib.svn.expand_root_parent(pp) cfg.general.svn_roots.update(roots) else: raise debug.ViewVCException( 'The path "%s" in "root_parents" has an unrecognized ' 'repository type ("%s"). Expected "cvs" or "svn".' 
% (pp, repo_type)) def find_root_in_parents(cfg, rootname, roottype): """Return the rootpath for configured ROOTNAME of ROOTTYPE.""" # Easy out: caller wants rootname "CVSROOT", and we're hiding those. if rootname == 'CVSROOT' and cfg.options.hide_cvsroot: return None for pp in cfg.general.root_parents: pos = string.rfind(pp, ':') if pos < 0: continue repo_type = string.strip(pp[pos+1:]) if repo_type != roottype: continue pp = os.path.normpath(string.strip(pp[:pos])) rootpath = None if roottype == 'cvs': rootpath = vclib.ccvs.find_root_in_parent(pp, rootname) elif roottype == 'svn': rootpath = vclib.svn.find_root_in_parent(pp, rootname) if rootpath is not None: return rootpath return None def locate_root(cfg, rootname): """Return a 2-tuple ROOTTYPE, ROOTPATH for configured ROOTNAME.""" if cfg.general.cvs_roots.has_key(rootname): return 'cvs', cfg.general.cvs_roots[rootname] path_in_parent = find_root_in_parents(cfg, rootname, 'cvs') if path_in_parent: cfg.general.cvs_roots[rootname] = path_in_parent return 'cvs', path_in_parent if cfg.general.svn_roots.has_key(rootname): return 'svn', cfg.general.svn_roots[rootname] path_in_parent = find_root_in_parents(cfg, rootname, 'svn') if path_in_parent: cfg.general.svn_roots[rootname] = path_in_parent return 'svn', path_in_parent return None, None def load_config(pathname=None, server=None): """Load the ViewVC configuration file. SERVER is the server object that will be using this configuration. Consult the environment for the variable VIEWVC_CONF_PATHNAME and VIEWCVS_CONF_PATHNAME (its legacy name) and, if set, use its value as the path of the configuration file; otherwise, use PATHNAME (if provided). Failing all else, use a hardcoded default configuration path.""" debug.t_start('load-config') # See if the environment contains overrides to the configuration # path. If we have a SERVER object, consult its environment; use # the OS environment otherwise. 
env_get = server and server.getenv or os.environ.get env_pathname = (env_get("VIEWVC_CONF_PATHNAME") or env_get("VIEWCVS_CONF_PATHNAME")) # Try to find the configuration pathname by searching these ordered # locations: the environment, the passed-in PATHNAME, the hard-coded # default. pathname = (env_pathname or pathname or os.path.join(os.path.dirname(os.path.dirname(__file__)), "viewvc.conf")) # Load the configuration! cfg = config.Config() cfg.set_defaults() cfg.load_config(pathname, env_get("HTTP_HOST")) # Load mime types file(s), but reverse the order -- our # configuration uses a most-to-least preferred approach, but the # 'mimetypes' package wants things the other way around. if cfg.general.mime_types_files: files = cfg.general.mime_types_files[:] files.reverse() files = map(lambda x, y=pathname: os.path.join(os.path.dirname(y), x), files) mimetypes.init(files) debug.t_end('load-config') return cfg def view_error(server, cfg): exc_dict = debug.GetExceptionData() status = exc_dict['status'] if exc_dict['msg']: exc_dict['msg'] = server.escape(exc_dict['msg']) if exc_dict['stacktrace']: exc_dict['stacktrace'] = server.escape(exc_dict['stacktrace']) handled = 0 # use the configured error template if possible try: if cfg and not server.headerSent: server.header(status=status) template = get_view_template(cfg, "error") template.generate(server.file(), exc_dict) handled = 1 except: pass # but fallback to the old exception printer if no configuration is # available, or if something went wrong if not handled: debug.PrintException(server, exc_dict) def main(server, cfg): try: debug.t_start('main') try: # build a Request object, which contains info about the HTTP request request = Request(server, cfg) request.run_viewvc() except SystemExit, e: return except: view_error(server, cfg) finally: debug.t_end('main') debug.t_dump(server.file()) debug.DumpChildren(server) class _item: def __init__(self, **kw): vars(self).update(kw)
./CrossVul/dataset_final_sorted/CWE-79/py/bad_3149_0
crossvul-python_data_bad_1456_0
# # Copyright (c) 2008--2015 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # import os from types import ListType from spacewalk.common import rhnFlags from spacewalk.common.rhnLog import log_debug, log_error from spacewalk.common.rhnConfig import CFG from spacewalk.common.rhnException import rhnFault from spacewalk.common.rhnTranslate import _ from spacewalk.server import rhnSQL from rhnLib import parseRPMFilename # # Functions that deal with the database # # New client # Returns a package path, given a server_id, package filename and channel label def get_package_path(server_id, pkg_spec, channel): log_debug(3, server_id, pkg_spec, channel) if isinstance(pkg_spec, ListType): pkg = pkg_spec[:4] # Insert EPOCH pkg.insert(1, None) else: pkg = parseRPMFilename(pkg_spec) if pkg is None: log_debug(4, "Error", "Requested weird package", pkg_spec) raise rhnFault(17, _("Invalid RPM package %s requested") % pkg_spec) statement = """ select p.id, p.path path, pe.epoch epoch from rhnPackageArch pa, rhnChannelPackage cp, rhnPackage p, rhnPackageEVR pe, rhnServerChannel sc, rhnPackageName pn, rhnChannel c where 1=1 and c.label = :channel and pn.name = :name and sc.server_id = :server_id and pe.version = :ver and pe.release = :rel and c.id = sc.channel_id and c.id = cp.channel_id and pa.label = :arch and pn.id = p.name_id and p.id = cp.package_id and p.evr_id = pe.id and sc.channel_id = cp.channel_id and p.package_arch_id = pa.id """ h = 
rhnSQL.prepare(statement) pkg = map(str, pkg) h.execute(name=pkg[0], ver=pkg[2], rel=pkg[3], arch=pkg[4], channel=channel, server_id=server_id) rs = h.fetchall_dict() if not rs: log_debug(4, "Error", "Non-existant package requested", server_id, pkg_spec, channel) raise rhnFault(17, _("Invalid RPM package %s requested") % pkg_spec) # It is unlikely for this query to return more than one row, # but it is possible # (having two packages with the same n, v, r, a and different epoch in # the same channel is prohibited by the RPM naming scheme; but extra # care won't hurt) max_row = rs[0] for each in rs[1:]: # Compare the epoch as string if _none2emptyString(each['epoch']) > _none2emptyString(max_row['epoch']): max_row = each # Set the flag for the proxy download accelerator rhnFlags.set("Download-Accelerator-Path", max_row['path']) return check_package_file(max_row['path'], max_row['id'], pkg_spec), max_row['id'] def check_package_file(rel_path, logpkg, raisepkg): if rel_path is None: log_error("Package path null for package id", logpkg) raise rhnFault(17, _("Invalid RPM package %s requested") % raisepkg) filePath = "%s/%s" % (CFG.MOUNT_POINT, rel_path) if not os.access(filePath, os.R_OK): # Package not found on the filesystem log_error("Package not found", filePath) raise rhnFault(17, _("Package not found")) return filePath def unlink_package_file(path): try: os.unlink(path) except OSError: log_debug(1, "Error unlinking %s;" % path) dirname = os.path.dirname(path) base_dirs = (CFG.MOUNT_POINT + '/' + CFG.PREPENDED_DIR, CFG.MOUNT_POINT) while dirname not in base_dirs: try: os.rmdir(dirname) except OSError, e: if e.errno == 39: # OSError: [Errno 39] Directory not empty break else: raise dirname = os.path.dirname(dirname) def get_all_package_paths(server_id, pkg_spec, channel): """ return the remote path if available and localpath for the requested package with respect to package id """ log_debug(3, server_id, pkg_spec, channel) remotepath = None # get the path and 
package localpath, pkg_id = get_package_path(server_id, pkg_spec, channel) return remotepath, localpath # New client # Returns the path to a source rpm def get_source_package_path(server_id, pkgFilename, channel): log_debug(3, server_id, pkgFilename, channel) rs = __query_source_package_path_by_name(server_id, pkgFilename, channel) if rs is None: log_debug(4, "Error", "Non-existant package requested", server_id, pkgFilename, channel) raise rhnFault(17, _("Invalid RPM package %s requested") % pkgFilename) # Set the flag for the proxy download accelerator rhnFlags.set("Download-Accelerator-Path", rs['path']) return check_package_file(rs['path'], pkgFilename, pkgFilename) # 0 or 1: is this source in this channel? def package_source_in_channel(server_id, pkgFilename, channel): log_debug(3, server_id, pkgFilename, channel) rs = __query_source_package_path_by_name(server_id, pkgFilename, channel) if rs is None: return 0 return 1 # The query used both in get_source_package_path and package_source_in_channel def __query_source_package_path_by_name(server_id, pkgFilename, channel): statement = """ select unique ps.path from rhnSourceRPM sr, rhnPackageSource ps, rhnPackage p, rhnChannelPackage cp, rhnChannel c, rhnServerChannel sc where sc.server_id = :server_id and sc.channel_id = cp.channel_id and cp.channel_id = c.id and c.label = :channel and cp.package_id = p.id and p.source_rpm_id = sr.id and sr.name = :name and p.source_rpm_id = ps.source_rpm_id and ((p.org_id is null and ps.org_id is null) or p.org_id = ps.org_id) """ h = rhnSQL.prepare(statement) h.execute(name=pkgFilename, channel=channel, server_id=server_id) return h.fetchone_dict() def get_info_for_package(pkg, channel_id, org_id): log_debug(3, pkg) pkg = map(str, pkg) params = {'name': pkg[0], 'ver': pkg[1], 'rel': pkg[2], 'epoch': pkg[3], 'arch': pkg[4], 'channel_id': channel_id, 'org_id': org_id} # yum repo has epoch="0" not only when epoch is "0" but also if it's NULL if pkg[3] == '0' or pkg[3] == '': 
epochStatement = "(epoch is null or epoch = :epoch)" else: epochStatement = "epoch = :epoch" if params['org_id']: orgStatement = "org_id = :org_id" else: orgStatement = "org_id is null" statement = """ select p.path, cp.channel_id, cv.checksum_type, cv.checksum from rhnPackage p join rhnPackageName pn on p.name_id = pn.id join rhnPackageEVR pe on p.evr_id = pe.id join rhnPackageArch pa on p.package_arch_id = pa.id left join rhnChannelPackage cp on p.id = cp.package_id and cp.channel_id = :channel_id join rhnChecksumView cv on p.checksum_id = cv.id where pn.name = :name and pe.version = :ver and pe.release = :rel and %s and pa.label = :arch and %s order by cp.channel_id nulls last """ % (epochStatement, orgStatement) h = rhnSQL.prepare(statement) h.execute(**params) ret = h.fetchone_dict() if not ret: return {'path': None, 'channel_id': None, 'checksum_type': None, 'checksum': None, } return ret def _none2emptyString(foo): if foo is None: return "" return str(foo) if __name__ == '__main__': """Test code. """ from spacewalk.common.rhnLog import initLOG initLOG("stdout", 1) rhnSQL.initDB() print # new client print get_package_path(1000463284, 'kernel-2.4.2-2.i686.rpm', 'redhat-linux-i386-7.1') print get_source_package_path(1000463284, 'kernel-2.4.2-2.i686.rpm', 'redhat-linux-i386-7.1')
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1456_0
crossvul-python_data_good_3890_3
import hashlib import json import os import uuid from django import forms from django.conf import settings from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ValidationError from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator from django.core.serializers.json import DjangoJSONEncoder from django.db import models from django.shortcuts import redirect, render from modelcluster.contrib.taggit import ClusterTaggableManager from modelcluster.fields import ParentalKey, ParentalManyToManyField from modelcluster.models import ClusterableModel from taggit.managers import TaggableManager from taggit.models import TaggedItemBase from wagtail.admin.edit_handlers import ( FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface) from wagtail.admin.forms import WagtailAdminPageForm from wagtail.admin.mail import send_mail from wagtail.contrib.forms.forms import FormBuilder from wagtail.contrib.forms.models import ( FORM_FIELD_CHOICES, AbstractEmailForm, AbstractFormField, AbstractFormSubmission) from wagtail.contrib.settings.models import BaseSetting, register_setting from wagtail.contrib.sitemaps import Sitemap from wagtail.contrib.table_block.blocks import TableBlock from wagtail.core.blocks import CharBlock, RawHTMLBlock, RichTextBlock, StructBlock from wagtail.core.fields import RichTextField, StreamField from wagtail.core.models import Orderable, Page, PageManager, PageQuerySet from wagtail.documents.edit_handlers import DocumentChooserPanel from wagtail.documents.models import AbstractDocument, Document from wagtail.images.blocks import ImageChooserBlock from wagtail.images.edit_handlers import ImageChooserPanel from wagtail.images.models import AbstractImage, AbstractRendition, Image from wagtail.search import index from wagtail.snippets.edit_handlers import SnippetChooserPanel from 
wagtail.snippets.models import register_snippet from wagtail.utils.decorators import cached_classmethod from .forms import FormClassAdditionalFieldPageForm, ValidatedPageForm from .views import CustomSubmissionsListView EVENT_AUDIENCE_CHOICES = ( ('public', "Public"), ('private', "Private"), ) COMMON_PANELS = ( FieldPanel('slug'), FieldPanel('seo_title'), FieldPanel('show_in_menus'), FieldPanel('search_description'), ) # Link fields class LinkFields(models.Model): link_external = models.URLField("External link", blank=True) link_page = models.ForeignKey( 'wagtailcore.Page', null=True, blank=True, related_name='+', on_delete=models.CASCADE ) link_document = models.ForeignKey( 'wagtaildocs.Document', null=True, blank=True, related_name='+', on_delete=models.CASCADE ) @property def link(self): if self.link_page: return self.link_page.url elif self.link_document: return self.link_document.url else: return self.link_external panels = [ FieldPanel('link_external'), PageChooserPanel('link_page'), DocumentChooserPanel('link_document'), ] class Meta: abstract = True # Carousel items class CarouselItem(LinkFields): image = models.ForeignKey( 'wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+' ) embed_url = models.URLField("Embed URL", blank=True) caption = models.CharField(max_length=255, blank=True) panels = [ ImageChooserPanel('image'), FieldPanel('embed_url'), FieldPanel('caption'), MultiFieldPanel(LinkFields.panels, "Link"), ] class Meta: abstract = True # Related links class RelatedLink(LinkFields): title = models.CharField(max_length=255, help_text="Link title") panels = [ FieldPanel('title'), MultiFieldPanel(LinkFields.panels, "Link"), ] class Meta: abstract = True # Simple page class SimplePage(Page): content = models.TextField() content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('content'), ] def get_admin_display_title(self): return "%s (simple page)" % super().get_admin_display_title() # Page with 
Excluded Fields when copied class PageWithExcludedCopyField(Page): content = models.TextField() # Exclude this field from being copied special_field = models.CharField( blank=True, max_length=255, default='Very Special') exclude_fields_in_copy = ['special_field'] content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('special_field'), FieldPanel('content'), ] class PageWithOldStyleRouteMethod(Page): """ Prior to Wagtail 0.4, the route() method on Page returned an HttpResponse rather than a Page instance. As subclasses of Page may override route, we need to continue accepting this convention (albeit as a deprecated API). """ content = models.TextField() template = 'tests/simple_page.html' def route(self, request, path_components): return self.serve(request) # File page class FilePage(Page): file_field = models.FileField() FilePage.content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('file_field'), ] # Event page class EventPageCarouselItem(Orderable, CarouselItem): page = ParentalKey('tests.EventPage', related_name='carousel_items', on_delete=models.CASCADE) class EventPageRelatedLink(Orderable, RelatedLink): page = ParentalKey('tests.EventPage', related_name='related_links', on_delete=models.CASCADE) class EventPageSpeakerAward(Orderable, models.Model): speaker = ParentalKey('tests.EventPageSpeaker', related_name='awards', on_delete=models.CASCADE) name = models.CharField("Award name", max_length=255) date_awarded = models.DateField(null=True, blank=True) panels = [ FieldPanel('name'), FieldPanel('date_awarded'), ] class EventPageSpeaker(Orderable, LinkFields, ClusterableModel): page = ParentalKey('tests.EventPage', related_name='speakers', related_query_name='speaker', on_delete=models.CASCADE) first_name = models.CharField("Name", max_length=255, blank=True) last_name = models.CharField("Surname", max_length=255, blank=True) image = models.ForeignKey( 'wagtailimages.Image', null=True, blank=True, 
on_delete=models.SET_NULL, related_name='+' ) @property def name_display(self): return self.first_name + " " + self.last_name panels = [ FieldPanel('first_name'), FieldPanel('last_name'), ImageChooserPanel('image'), MultiFieldPanel(LinkFields.panels, "Link"), InlinePanel('awards', label="Awards"), ] class EventCategory(models.Model): name = models.CharField("Name", max_length=255) def __str__(self): return self.name # Override the standard WagtailAdminPageForm to add validation on start/end dates # that appears as a non-field error class EventPageForm(WagtailAdminPageForm): def clean(self): cleaned_data = super().clean() # Make sure that the event starts before it ends start_date = cleaned_data['date_from'] end_date = cleaned_data['date_to'] if start_date and end_date and start_date > end_date: raise ValidationError('The end date must be after the start date') return cleaned_data class EventPage(Page): date_from = models.DateField("Start date", null=True) date_to = models.DateField( "End date", null=True, blank=True, help_text="Not required if event is on a single day" ) time_from = models.TimeField("Start time", null=True, blank=True) time_to = models.TimeField("End time", null=True, blank=True) audience = models.CharField(max_length=255, choices=EVENT_AUDIENCE_CHOICES) location = models.CharField(max_length=255) body = RichTextField(blank=True) cost = models.CharField(max_length=255) signup_link = models.URLField(blank=True) feed_image = models.ForeignKey( 'wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+' ) categories = ParentalManyToManyField(EventCategory, blank=True) search_fields = [ index.SearchField('get_audience_display'), index.SearchField('location'), index.SearchField('body'), index.FilterField('url_path'), ] password_required_template = 'tests/event_page_password_required.html' base_form_class = EventPageForm EventPage.content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('date_from'), 
FieldPanel('date_to'), FieldPanel('time_from'), FieldPanel('time_to'), FieldPanel('location'), FieldPanel('audience'), FieldPanel('cost'), FieldPanel('signup_link'), InlinePanel('carousel_items', label="Carousel items"), FieldPanel('body', classname="full"), InlinePanel('speakers', label="Speakers", heading="Speaker lineup"), InlinePanel('related_links', label="Related links"), FieldPanel('categories'), # InlinePanel related model uses `pk` not `id` InlinePanel('head_counts', label='Head Counts'), ] EventPage.promote_panels = [ MultiFieldPanel(COMMON_PANELS, "Common page configuration"), ImageChooserPanel('feed_image'), ] class HeadCountRelatedModelUsingPK(models.Model): """Related model that uses a custom primary key (pk) not id""" custom_id = models.AutoField(primary_key=True) event_page = ParentalKey( EventPage, on_delete=models.CASCADE, related_name='head_counts' ) head_count = models.IntegerField() panels = [FieldPanel('head_count')] # Override the standard WagtailAdminPageForm to add field that is not in model # so that we can test additional potential issues like comparing versions class FormClassAdditionalFieldPage(Page): location = models.CharField(max_length=255) body = RichTextField(blank=True) content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('location'), FieldPanel('body'), FieldPanel('code'), # not in model, see set base_form_class ] base_form_class = FormClassAdditionalFieldPageForm # Just to be able to test multi table inheritance class SingleEventPage(EventPage): excerpt = models.TextField( max_length=255, blank=True, null=True, help_text="Short text to describe what is this action about" ) # Give this page model a custom URL routing scheme def get_url_parts(self, request=None): url_parts = super().get_url_parts(request=request) if url_parts is None: return None else: site_id, root_url, page_path = url_parts return (site_id, root_url, page_path + 'pointless-suffix/') def route(self, request, path_components): if 
path_components == ['pointless-suffix']: # treat this as equivalent to a request for this page return super().route(request, []) else: # fall back to default routing rules return super().route(request, path_components) def get_admin_display_title(self): return "%s (single event)" % super().get_admin_display_title() SingleEventPage.content_panels = [FieldPanel('excerpt')] + EventPage.content_panels # "custom" sitemap object class EventSitemap(Sitemap): pass # Event index (has a separate AJAX template, and a custom template context) class EventIndex(Page): intro = RichTextField(blank=True) ajax_template = 'tests/includes/event_listing.html' def get_events(self): return self.get_children().live().type(EventPage) def get_paginator(self): return Paginator(self.get_events(), 4) def get_context(self, request, page=1): # Pagination paginator = self.get_paginator() try: events = paginator.page(page) except PageNotAnInteger: events = paginator.page(1) except EmptyPage: events = paginator.page(paginator.num_pages) # Update context context = super().get_context(request) context['events'] = events return context def route(self, request, path_components): if self.live and len(path_components) == 1: try: return self.serve(request, page=int(path_components[0])) except (TypeError, ValueError): pass return super().route(request, path_components) def get_static_site_paths(self): # Get page count page_count = self.get_paginator().num_pages # Yield a path for each page for page in range(page_count): yield '/%d/' % (page + 1) # Yield from superclass for path in super().get_static_site_paths(): yield path def get_sitemap_urls(self, request=None): # Add past events url to sitemap return super().get_sitemap_urls(request=request) + [ { 'location': self.full_url + 'past/', 'lastmod': self.latest_revision_created_at } ] def get_cached_paths(self): return super().get_cached_paths() + [ '/past/' ] EventIndex.content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('intro', 
classname="full"), ] class FormField(AbstractFormField): page = ParentalKey('FormPage', related_name='form_fields', on_delete=models.CASCADE) class FormPage(AbstractEmailForm): def get_context(self, request): context = super().get_context(request) context['greeting'] = "hello world" return context FormPage.content_panels = [ FieldPanel('title', classname="full title"), InlinePanel('form_fields', label="Form fields"), MultiFieldPanel([ FieldPanel('to_address', classname="full"), FieldPanel('from_address', classname="full"), FieldPanel('subject', classname="full"), ], "Email") ] # FormPage with a non-HTML extension class JadeFormField(AbstractFormField): page = ParentalKey('JadeFormPage', related_name='form_fields', on_delete=models.CASCADE) class JadeFormPage(AbstractEmailForm): template = "tests/form_page.jade" JadeFormPage.content_panels = [ FieldPanel('title', classname="full title"), InlinePanel('form_fields', label="Form fields"), MultiFieldPanel([ FieldPanel('to_address', classname="full"), FieldPanel('from_address', classname="full"), FieldPanel('subject', classname="full"), ], "Email") ] # Form page that redirects to a different page class RedirectFormField(AbstractFormField): page = ParentalKey('FormPageWithRedirect', related_name='form_fields', on_delete=models.CASCADE) class FormPageWithRedirect(AbstractEmailForm): thank_you_redirect_page = models.ForeignKey( 'wagtailcore.Page', null=True, blank=True, on_delete=models.SET_NULL, related_name='+', ) def get_context(self, request): context = super(FormPageWithRedirect, self).get_context(request) context['greeting'] = "hello world" return context def render_landing_page(self, request, form_submission=None, *args, **kwargs): """ Renders the landing page OR if a receipt_page_redirect is chosen redirects to this page. 
""" if self.thank_you_redirect_page: return redirect(self.thank_you_redirect_page.url, permanent=False) return super(FormPageWithRedirect, self).render_landing_page(request, form_submission, *args, **kwargs) FormPageWithRedirect.content_panels = [ FieldPanel('title', classname="full title"), PageChooserPanel('thank_you_redirect_page'), InlinePanel('form_fields', label="Form fields"), MultiFieldPanel([ FieldPanel('to_address', classname="full"), FieldPanel('from_address', classname="full"), FieldPanel('subject', classname="full"), ], "Email") ] # FormPage with a custom FormSubmission class FormPageWithCustomSubmission(AbstractEmailForm): """ This Form page: * Have custom submission model * Have custom related_name (see `FormFieldWithCustomSubmission.page`) * Saves reference to a user * Doesn't render html form, if submission for current user is present """ intro = RichTextField(blank=True) thank_you_text = RichTextField(blank=True) def get_context(self, request, *args, **kwargs): context = super().get_context(request) context['greeting'] = "hello world" return context def get_form_fields(self): return self.custom_form_fields.all() def get_data_fields(self): data_fields = [ ('username', 'Username'), ] data_fields += super().get_data_fields() return data_fields def get_submission_class(self): return CustomFormPageSubmission def process_form_submission(self, form): form_submission = self.get_submission_class().objects.create( form_data=json.dumps(form.cleaned_data, cls=DjangoJSONEncoder), page=self, user=form.user ) if self.to_address: addresses = [x.strip() for x in self.to_address.split(',')] content = '\n'.join([x[1].label + ': ' + str(form.data.get(x[0])) for x in form.fields.items()]) send_mail(self.subject, content, addresses, self.from_address,) # process_form_submission should now return the created form_submission return form_submission def serve(self, request, *args, **kwargs): if self.get_submission_class().objects.filter(page=self, 
user__pk=request.user.pk).exists(): return render( request, self.template, self.get_context(request) ) return super().serve(request, *args, **kwargs) FormPageWithCustomSubmission.content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('intro', classname="full"), InlinePanel('custom_form_fields', label="Form fields"), FieldPanel('thank_you_text', classname="full"), MultiFieldPanel([ FieldPanel('to_address', classname="full"), FieldPanel('from_address', classname="full"), FieldPanel('subject', classname="full"), ], "Email") ] class FormFieldWithCustomSubmission(AbstractFormField): page = ParentalKey(FormPageWithCustomSubmission, on_delete=models.CASCADE, related_name='custom_form_fields') class CustomFormPageSubmission(AbstractFormSubmission): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE) def get_data(self): form_data = super().get_data() form_data.update({ 'username': self.user.username, }) return form_data # Custom form page with custom submission listing view and form submission class FormFieldForCustomListViewPage(AbstractFormField): page = ParentalKey( 'FormPageWithCustomSubmissionListView', related_name='form_fields', on_delete=models.CASCADE ) class FormPageWithCustomSubmissionListView(AbstractEmailForm): """Form Page with customised submissions listing view""" intro = RichTextField(blank=True) thank_you_text = RichTextField(blank=True) submissions_list_view_class = CustomSubmissionsListView def get_submission_class(self): return CustomFormPageSubmission def get_data_fields(self): data_fields = [ ('username', 'Username'), ] data_fields += super().get_data_fields() return data_fields content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('intro', classname="full"), InlinePanel('form_fields', label="Form fields"), FieldPanel('thank_you_text', classname="full"), MultiFieldPanel([ FieldPanel('to_address', classname="full"), FieldPanel('from_address', classname="full"), FieldPanel('subject', 
classname="full"), ], "Email") ] # FormPage with cutom FormBuilder EXTENDED_CHOICES = FORM_FIELD_CHOICES + (('ipaddress', 'IP Address'),) class ExtendedFormField(AbstractFormField): """Override the field_type field with extended choices.""" page = ParentalKey( 'FormPageWithCustomFormBuilder', related_name='form_fields', on_delete=models.CASCADE) field_type = models.CharField( verbose_name='field type', max_length=16, choices=EXTENDED_CHOICES) class CustomFormBuilder(FormBuilder): """ A custom FormBuilder that has an 'ipaddress' field with customised create_singleline_field with shorter max_length """ def create_singleline_field(self, field, options): options['max_length'] = 120 # usual default is 255 return forms.CharField(**options) def create_ipaddress_field(self, field, options): return forms.GenericIPAddressField(**options) class FormPageWithCustomFormBuilder(AbstractEmailForm): """ A Form page that has a custom form builder and uses a custom form field model with additional field_type choices. 
""" form_builder = CustomFormBuilder content_panels = [ FieldPanel('title', classname="full title"), InlinePanel('form_fields', label="Form fields"), MultiFieldPanel([ FieldPanel('to_address', classname="full"), FieldPanel('from_address', classname="full"), FieldPanel('subject', classname="full"), ], "Email") ] # Snippets class AdvertPlacement(models.Model): page = ParentalKey('wagtailcore.Page', related_name='advert_placements', on_delete=models.CASCADE) advert = models.ForeignKey('tests.Advert', related_name='+', on_delete=models.CASCADE) colour = models.CharField(max_length=255) class AdvertTag(TaggedItemBase): content_object = ParentalKey('Advert', related_name='tagged_items', on_delete=models.CASCADE) class Advert(ClusterableModel): url = models.URLField(null=True, blank=True) text = models.CharField(max_length=255) tags = TaggableManager(through=AdvertTag, blank=True) panels = [ FieldPanel('url'), FieldPanel('text'), FieldPanel('tags'), ] def __str__(self): return self.text register_snippet(Advert) class AdvertWithCustomPrimaryKey(ClusterableModel): advert_id = models.CharField(max_length=255, primary_key=True) url = models.URLField(null=True, blank=True) text = models.CharField(max_length=255) panels = [ FieldPanel('url'), FieldPanel('text'), ] def __str__(self): return self.text register_snippet(AdvertWithCustomPrimaryKey) class AdvertWithCustomUUIDPrimaryKey(ClusterableModel): advert_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) url = models.URLField(null=True, blank=True) text = models.CharField(max_length=255) panels = [ FieldPanel('url'), FieldPanel('text'), ] def __str__(self): return self.text register_snippet(AdvertWithCustomUUIDPrimaryKey) class AdvertWithTabbedInterface(models.Model): url = models.URLField(null=True, blank=True) text = models.CharField(max_length=255) something_else = models.CharField(max_length=255) advert_panels = [ FieldPanel('url'), FieldPanel('text'), ] other_panels = [ 
FieldPanel('something_else'), ] edit_handler = TabbedInterface([ ObjectList(advert_panels, heading='Advert'), ObjectList(other_panels, heading='Other'), ]) def __str__(self): return self.text class Meta: ordering = ('text',) register_snippet(AdvertWithTabbedInterface) class StandardIndex(Page): """ Index for the site """ parent_page_types = [Page] # A custom panel setup where all Promote fields are placed in the Content tab instead; # we use this to test that the 'promote' tab is left out of the output when empty StandardIndex.content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('seo_title'), FieldPanel('slug'), InlinePanel('advert_placements', label="Adverts"), ] StandardIndex.promote_panels = [] class StandardChild(Page): pass # Test overriding edit_handler with a custom one StandardChild.edit_handler = TabbedInterface([ ObjectList(StandardChild.content_panels, heading='Content'), ObjectList(StandardChild.promote_panels, heading='Promote'), ObjectList(StandardChild.settings_panels, heading='Settings', classname='settings'), ObjectList([], heading='Dinosaurs'), ], base_form_class=WagtailAdminPageForm) class BusinessIndex(Page): """ Can be placed anywhere, can only have Business children """ subpage_types = ['tests.BusinessChild', 'tests.BusinessSubIndex'] class BusinessSubIndex(Page): """ Can be placed under BusinessIndex, and have BusinessChild children """ # BusinessNowherePage is 'incorrectly' added here as a possible child. # The rules on BusinessNowherePage prevent it from being a child here though. 
subpage_types = ['tests.BusinessChild', 'tests.BusinessNowherePage'] parent_page_types = ['tests.BusinessIndex', 'tests.BusinessChild'] class BusinessChild(Page): """ Can only be placed under Business indexes, no children allowed """ subpage_types = [] parent_page_types = ['tests.BusinessIndex', BusinessSubIndex] class BusinessNowherePage(Page): """ Not allowed to be placed anywhere """ parent_page_types = [] class TaggedPageTag(TaggedItemBase): content_object = ParentalKey('tests.TaggedPage', related_name='tagged_items', on_delete=models.CASCADE) class TaggedPage(Page): tags = ClusterTaggableManager(through=TaggedPageTag, blank=True) TaggedPage.content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('tags'), ] class SingletonPage(Page): @classmethod def can_create_at(cls, parent): # You can only create one of these! return super(SingletonPage, cls).can_create_at(parent) \ and not cls.objects.exists() class SingletonPageViaMaxCount(Page): max_count = 1 class PageChooserModel(models.Model): page = models.ForeignKey('wagtailcore.Page', help_text='help text', on_delete=models.CASCADE) class EventPageChooserModel(models.Model): page = models.ForeignKey('tests.EventPage', help_text='more help text', on_delete=models.CASCADE) class SnippetChooserModel(models.Model): advert = models.ForeignKey(Advert, help_text='help text', on_delete=models.CASCADE) panels = [ SnippetChooserPanel('advert'), ] class SnippetChooserModelWithCustomPrimaryKey(models.Model): advertwithcustomprimarykey = models.ForeignKey(AdvertWithCustomPrimaryKey, help_text='help text', on_delete=models.CASCADE) panels = [ SnippetChooserPanel('advertwithcustomprimarykey'), ] class CustomImage(AbstractImage): caption = models.CharField(max_length=255, blank=True) fancy_caption = RichTextField(blank=True) not_editable_field = models.CharField(max_length=255, blank=True) admin_form_fields = Image.admin_form_fields + ( 'caption', 'fancy_caption', ) class CustomRendition(AbstractRendition): image 
= models.ForeignKey(CustomImage, related_name='renditions', on_delete=models.CASCADE) class Meta: unique_together = ( ('image', 'filter_spec', 'focal_point_key'), ) class CustomDocument(AbstractDocument): description = models.TextField(blank=True) fancy_description = RichTextField(blank=True) admin_form_fields = Document.admin_form_fields + ( 'description', 'fancy_description' ) class StreamModel(models.Model): body = StreamField([ ('text', CharBlock()), ('rich_text', RichTextBlock()), ('image', ImageChooserBlock()), ]) class ExtendedImageChooserBlock(ImageChooserBlock): """ Example of Block with custom get_api_representation method. If the request has an 'extended' query param, it returns a dict of id and title, otherwise, it returns the default value. """ def get_api_representation(self, value, context=None): image_id = super().get_api_representation(value, context=context) if 'request' in context and context['request'].query_params.get('extended', False): return { 'id': image_id, 'title': value.title } return image_id class StreamPage(Page): body = StreamField([ ('text', CharBlock()), ('rich_text', RichTextBlock()), ('image', ExtendedImageChooserBlock()), ('product', StructBlock([ ('name', CharBlock()), ('price', CharBlock()), ])), ('raw_html', RawHTMLBlock()), ]) api_fields = ('body',) content_panels = [ FieldPanel('title'), StreamFieldPanel('body'), ] class DefaultStreamPage(Page): body = StreamField([ ('text', CharBlock()), ('rich_text', RichTextBlock()), ('image', ImageChooserBlock()), ], default='') content_panels = [ FieldPanel('title'), StreamFieldPanel('body'), ] class MTIBasePage(Page): is_creatable = False class Meta: verbose_name = "MTI Base page" class MTIChildPage(MTIBasePage): # Should be creatable by default, no need to set anything pass class AbstractPage(Page): class Meta: abstract = True @register_setting class TestSetting(BaseSetting): title = models.CharField(max_length=100) email = models.EmailField(max_length=50) 
@register_setting(icon="tag") class IconSetting(BaseSetting): pass class NotYetRegisteredSetting(BaseSetting): pass @register_setting class FileUploadSetting(BaseSetting): file = models.FileField() class BlogCategory(models.Model): name = models.CharField(unique=True, max_length=80) class BlogCategoryBlogPage(models.Model): category = models.ForeignKey(BlogCategory, related_name="+", on_delete=models.CASCADE) page = ParentalKey('ManyToManyBlogPage', related_name='categories', on_delete=models.CASCADE) panels = [ FieldPanel('category'), ] class ManyToManyBlogPage(Page): """ A page type with two different kinds of M2M relation. We don't formally support these, but we don't want them to cause hard breakages either. """ body = RichTextField(blank=True) adverts = models.ManyToManyField(Advert, blank=True) blog_categories = models.ManyToManyField( BlogCategory, through=BlogCategoryBlogPage, blank=True) # make first_published_at editable on this page model settings_panels = Page.settings_panels + [ FieldPanel('first_published_at'), ] class OneToOnePage(Page): """ A Page containing a O2O relation. """ body = RichTextBlock(blank=True) page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE) class GenericSnippetPage(Page): """ A page containing a reference to an arbitrary snippet (or any model for that matter) linked by a GenericForeignKey """ snippet_content_type = models.ForeignKey(ContentType, on_delete=models.SET_NULL, null=True) snippet_object_id = models.PositiveIntegerField(null=True) snippet_content_object = GenericForeignKey('snippet_content_type', 'snippet_object_id') class CustomImageFilePath(AbstractImage): def get_upload_to(self, filename): """Create a path that's file-system friendly. By hashing the file's contents we guarantee an equal distribution of files within our root directories. 
        This also gives us a better chance of uploading images with the same filename, but
        different contents - this isn't guaranteed as we're only using the first three characters of the checksum.
        """
        original_filepath = super().get_upload_to(filename)
        folder_name, filename = original_filepath.split(os.path.sep)

        # Ensure that we consume the entire file, we can't guarantee that
        # the stream has not be partially (or entirely) consumed by
        # another process
        original_position = self.file.tell()
        self.file.seek(0)
        hash256 = hashlib.sha256()

        # Hash the upload in small chunks so a large file never has to be
        # held in memory all at once.
        while True:
            data = self.file.read(256)
            if not data:
                break
            hash256.update(data)
        checksum = hash256.hexdigest()

        # Restore the stream position so later consumers of the file object
        # see it exactly as it was before hashing.
        self.file.seek(original_position)

        # First three checksum characters become an intermediate directory,
        # spreading uploads evenly across subfolders.
        return os.path.join(folder_name, checksum[:3], filename)


class CustomPageQuerySet(PageQuerySet):
    # Custom queryset used to exercise PageManager.from_queryset below.
    def about_spam(self):
        # Convenience filter: pages whose title mentions 'spam'.
        return self.filter(title__contains='spam')


# Manager class combining PageManager behaviour with the custom queryset.
CustomManager = PageManager.from_queryset(CustomPageQuerySet)


class CustomManagerPage(Page):
    # Page type whose default manager is the custom manager above.
    objects = CustomManager()


class MyBasePage(Page):
    """
    A base Page model, used to set site-wide defaults and overrides.
""" objects = CustomManager() class Meta: abstract = True class MyCustomPage(MyBasePage): pass class ValidatedPage(Page): foo = models.CharField(max_length=255) base_form_class = ValidatedPageForm content_panels = Page.content_panels + [ FieldPanel('foo'), ] class DefaultRichTextFieldPage(Page): body = RichTextField() content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('body'), ] class DefaultRichBlockFieldPage(Page): body = StreamField([ ('rich_text', RichTextBlock()), ]) content_panels = Page.content_panels + [ StreamFieldPanel('body') ] class CustomRichTextFieldPage(Page): body = RichTextField(editor='custom') content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('body'), ] class CustomRichBlockFieldPage(Page): body = StreamField([ ('rich_text', RichTextBlock(editor='custom')), ]) content_panels = [ FieldPanel('title', classname="full title"), StreamFieldPanel('body'), ] class RichTextFieldWithFeaturesPage(Page): body = RichTextField(features=['quotation', 'embed', 'made-up-feature']) content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('body'), ] # a page that only contains RichTextField within an InlinePanel, # to test that the inline child's form media gets pulled through class SectionedRichTextPageSection(Orderable): page = ParentalKey('tests.SectionedRichTextPage', related_name='sections', on_delete=models.CASCADE) body = RichTextField() panels = [ FieldPanel('body') ] class SectionedRichTextPage(Page): content_panels = [ FieldPanel('title', classname="full title"), InlinePanel('sections') ] class InlineStreamPageSection(Orderable): page = ParentalKey('tests.InlineStreamPage', related_name='sections', on_delete=models.CASCADE) body = StreamField([ ('text', CharBlock()), ('rich_text', RichTextBlock()), ('image', ImageChooserBlock()), ]) panels = [ StreamFieldPanel('body') ] class InlineStreamPage(Page): content_panels = [ FieldPanel('title', classname="full title"), InlinePanel('sections') ] 
class TableBlockStreamPage(Page): table = StreamField([('table', TableBlock())]) content_panels = [StreamFieldPanel('table')] class UserProfile(models.Model): # Wagtail's schema must be able to coexist alongside a custom UserProfile model user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE) favourite_colour = models.CharField(max_length=255) class PanelSettings(TestSetting): panels = [ FieldPanel('title') ] class TabbedSettings(TestSetting): edit_handler = TabbedInterface([ ObjectList([ FieldPanel('title') ], heading='First tab'), ObjectList([ FieldPanel('email') ], heading='Second tab'), ]) class AlwaysShowInMenusPage(Page): show_in_menus_default = True # test for AddField migrations on StreamFields using various default values class AddedStreamFieldWithoutDefaultPage(Page): body = StreamField([ ('title', CharBlock()) ]) class AddedStreamFieldWithEmptyStringDefaultPage(Page): body = StreamField([ ('title', CharBlock()) ], default='') class AddedStreamFieldWithEmptyListDefaultPage(Page): body = StreamField([ ('title', CharBlock()) ], default=[]) # test customising edit handler definitions on a per-request basis class PerUserContentPanels(ObjectList): def _replace_children_with_per_user_config(self): self.children = self.instance.basic_content_panels if self.request.user.is_superuser: self.children = self.instance.superuser_content_panels self.children = [ child.bind_to(model=self.model, instance=self.instance, request=self.request, form=self.form) for child in self.children] def on_instance_bound(self): # replace list of children when both instance and request are available if self.request: self._replace_children_with_per_user_config() else: super().on_instance_bound() def on_request_bound(self): # replace list of children when both instance and request are available if self.instance: self._replace_children_with_per_user_config() else: super().on_request_bound() class PerUserPageMixin: basic_content_panels = [] superuser_content_panels = 
[] @cached_classmethod def get_edit_handler(cls): tabs = [] if cls.basic_content_panels and cls.superuser_content_panels: tabs.append(PerUserContentPanels(heading='Content')) if cls.promote_panels: tabs.append(ObjectList(cls.promote_panels, heading='Promote')) if cls.settings_panels: tabs.append(ObjectList(cls.settings_panels, heading='Settings', classname='settings')) edit_handler = TabbedInterface(tabs, base_form_class=cls.base_form_class) return edit_handler.bind_to(model=cls) class SecretPage(PerUserPageMixin, Page): boring_data = models.TextField() secret_data = models.TextField() basic_content_panels = Page.content_panels + [ FieldPanel('boring_data'), ] superuser_content_panels = basic_content_panels + [ FieldPanel('secret_data'), ] class SimpleParentPage(Page): # `BusinessIndex` has been added to bring it in line with other tests subpage_types = ['tests.SimpleChildPage', BusinessIndex] class SimpleChildPage(Page): # `Page` has been added to bring it in line with other tests parent_page_types = ['tests.SimpleParentPage', Page] max_count_per_parent = 1 class PersonPage(Page): first_name = models.CharField( max_length=255, verbose_name='First Name', ) last_name = models.CharField( max_length=255, verbose_name='Last Name', ) content_panels = Page.content_panels + [ MultiFieldPanel([ FieldPanel('first_name'), FieldPanel('last_name'), ], 'Person'), InlinePanel('addresses', label='Address'), ] class Meta: verbose_name = 'Person' verbose_name_plural = 'Persons' class Address(index.Indexed, ClusterableModel, Orderable): address = models.CharField( max_length=255, verbose_name='Address', ) tags = ClusterTaggableManager( through='tests.AddressTag', blank=True, ) person = ParentalKey( to='tests.PersonPage', related_name='addresses', verbose_name='Person' ) panels = [ FieldPanel('address'), FieldPanel('tags'), ] class Meta: verbose_name = 'Address' verbose_name_plural = 'Addresses' class AddressTag(TaggedItemBase): content_object = ParentalKey( to='tests.Address', 
on_delete=models.CASCADE, related_name='tagged_items' )
# Dataset artifact markers left over from file concatenation (not Python
# source); commented out so they cannot be mistaken for code:
# ./CrossVul/dataset_final_sorted/CWE-79/py/good_3890_3
# crossvul-python_data_bad_5790_0
from __future__ import with_statement
import os
import re
import urllib

from django.conf import settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.auth.models import User
from django.core import mail
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.test import TestCase
from django.test.utils import override_settings

from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
    SetPasswordForm, PasswordResetForm)


class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the follow test cases.
    """
    fixtures = ['authtestdata.json']
    urls = 'django.contrib.auth.tests.urls'

    def setUp(self):
        # Pin language settings and point template loading at the test
        # templates directory; the originals are restored in tearDown.
        self.old_LANGUAGES = settings.LANGUAGES
        self.old_LANGUAGE_CODE = settings.LANGUAGE_CODE
        settings.LANGUAGES = (('en', 'English'),)
        settings.LANGUAGE_CODE = 'en'
        self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
        settings.TEMPLATE_DIRS = (
            os.path.join(os.path.dirname(__file__), 'templates'),
        )

    def tearDown(self):
        # Restore the settings saved in setUp.
        settings.LANGUAGES = self.old_LANGUAGES
        settings.LANGUAGE_CODE = self.old_LANGUAGE_CODE
        settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS

    def login(self, password='password'):
        # Log the fixture user in, asserting the redirect to
        # LOGIN_REDIRECT_URL and that a session key was established.
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
        self.assertTrue(SESSION_KEY in self.client.session)

    def assertContainsEscaped(self, response, text, **kwargs):
        # Like assertContains, but HTML-escapes the expected text first so
        # it matches what the template rendered.
        return self.assertContains(response, escape(force_unicode(text)), **kwargs)

AuthViewsTestCase = override_settings(USE_TZ=False)(AuthViewsTestCase)


class AuthViewNamedURLTests(AuthViewsTestCase):
    urls = 'django.contrib.auth.urls'

    def test_named_urls(self):
        "Named URLs should be reversible"
        expected_named_urls = [
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {
                'uidb36': 'aaaaaaa',
                'token': '1111-aaaaa',
            }),
            ('password_reset_complete', [], {}),
        ]
        for name, args, kwargs in expected_named_urls:
            try:
                reverse(name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)


class PasswordResetTest(AuthViewsTestCase):

    def test_email_not_found(self):
        "Error is raised if the provided email address isn't currently registered"
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertContainsEscaped(response, PasswordResetForm.error_messages['unknown'])
        # No reset email may be sent for an unknown address.
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)

    @override_settings(ALLOWED_HOSTS=['adminsite.com'])
    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
response = self.client.post('/admin_password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='adminsite.com' ) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertTrue("http://adminsite.com" in mail.outbox[0].body) self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email) # Skip any 500 handler action (like sending more mail...) @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host(self): "Poisoned HTTP_HOST headers can't be used for reset emails" # This attack is based on the way browsers handle URLs. The colon # should be used to separate the port, but if the URL contains an @, # the colon is interpreted as part of a username for login purposes, # making 'evil.com' the request domain. Since HTTP_HOST is used to # produce a meaningful reset URL, we need to be certain that the # HTTP_HOST header isn't poisoned. This is done as a check when get_host() # is invoked, but we check here as a practical consequence. with self.assertRaises(SuspiciousOperation): self.client.post('/password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='www.example:dr.frankenstein@evil.tld' ) self.assertEqual(len(mail.outbox), 0) # Skip any 500 handler action (like sending more mail...) 
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host_admin_site(self): "Poisoned HTTP_HOST headers can't be used for reset emails on admin views" with self.assertRaises(SuspiciousOperation): self.client.post('/admin_password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='www.example:dr.frankenstein@evil.tld' ) self.assertEqual(len(mail.outbox), 0) def _test_confirm_start(self): # Start by creating the email response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) return self._read_signup_email(mail.outbox[0]) def _read_signup_email(self, email): urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body) self.assertTrue(urlmatch is not None, "No URL found in sent email") return urlmatch.group(), urlmatch.groups()[0] def test_confirm_valid(self): url, path = self._test_confirm_start() response = self.client.get(path) # redirect to a 'complete' page: self.assertEqual(response.status_code, 200) self.assertTrue("Please enter your new password" in response.content) def test_confirm_invalid(self): url, path = self._test_confirm_start() # Let's munge the token in the path, but keep the same length, # in case the URLconf will reject a different length. 
path = path[:-5] + ("0" * 4) + path[-1] response = self.client.get(path) self.assertEqual(response.status_code, 200) self.assertTrue("The password reset link was invalid" in response.content) def test_confirm_invalid_user(self): # Ensure that we get a 200 response for a non-existant user, not a 404 response = self.client.get('/reset/123456-1-1/') self.assertEqual(response.status_code, 200) self.assertTrue("The password reset link was invalid" in response.content) def test_confirm_overflow_user(self): # Ensure that we get a 200 response for a base36 user id that overflows int response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/') self.assertEqual(response.status_code, 200) self.assertTrue("The password reset link was invalid" in response.content) def test_confirm_invalid_post(self): # Same as test_confirm_invalid, but trying # to do a POST instead. url, path = self._test_confirm_start() path = path[:-5] + ("0" * 4) + path[-1] self.client.post(path, { 'new_password1': 'anewpassword', 'new_password2': ' anewpassword', }) # Check the password has not been changed u = User.objects.get(email='staffmember@example.com') self.assertTrue(not u.check_password("anewpassword")) def test_confirm_complete(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'}) # It redirects us to a 'complete' page: self.assertEqual(response.status_code, 302) # Check the password has been changed u = User.objects.get(email='staffmember@example.com') self.assertTrue(u.check_password("anewpassword")) # Check we can't use the link again response = self.client.get(path) self.assertEqual(response.status_code, 200) self.assertTrue("The password reset link was invalid" in response.content) def test_confirm_different_passwords(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'x'}) self.assertEqual(response.status_code, 200) 
self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch']) class ChangePasswordTest(AuthViewsTestCase): def fail_login(self, password='password'): response = self.client.post('/login/', { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 200) self.assertContainsEscaped(response, AuthenticationForm.error_messages['invalid_login']) def logout(self): response = self.client.get('/logout/') def test_password_change_fails_with_invalid_old_password(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'donuts', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertEqual(response.status_code, 200) self.assertContainsEscaped(response, PasswordChangeForm.error_messages['password_incorrect']) def test_password_change_fails_with_mismatched_passwords(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'donuts', }) self.assertEqual(response.status_code, 200) self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch']) def test_password_change_succeeds(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/password_change/done/')) self.fail_login() self.login(password='password1') def test_password_change_done_succeeds(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/password_change/done/')) def test_password_change_done_fails(self): with self.settings(LOGIN_URL='/login/'): response = self.client.get('/password_change/done/') 
self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/?next=/password_change/done/')) class LoginTest(AuthViewsTestCase): def test_current_site_in_context_after_login(self): response = self.client.get(reverse('django.contrib.auth.views.login')) self.assertEqual(response.status_code, 200) if Site._meta.installed: site = Site.objects.get_current() self.assertEqual(response.context['site'], site) self.assertEqual(response.context['site_name'], site.name) else: self.assertIsInstance(response.context['site'], RequestSite) self.assertTrue(isinstance(response.context['form'], AuthenticationForm), 'Login form is not an AuthenticationForm') def test_security_check(self, password='password'): login_url = reverse('django.contrib.auth.views.login') # Those URLs should not pass the security check for bad_url in ('http://example.com', 'https://example.com', 'ftp://exampel.com', '//example.com'): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': urllib.quote(bad_url), } response = self.client.post(nasty_url, { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertFalse(bad_url in response['Location'], "%s should be blocked" % bad_url) # These URLs *should* still pass the security check for good_url in ('/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://exampel.com', 'view/?param=//example.com', 'https:///', '//testserver/', '/url%20with%20spaces/'): # see ticket #12534 safe_url = '%(url)s?%(next)s=%(good_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'good_url': urllib.quote(good_url), } response = self.client.post(safe_url, { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertTrue(good_url in response['Location'], "%s should be allowed" % good_url) class LoginURLSettings(AuthViewsTestCase): def setUp(self): 
super(LoginURLSettings, self).setUp() self.old_LOGIN_URL = settings.LOGIN_URL def tearDown(self): super(LoginURLSettings, self).tearDown() settings.LOGIN_URL = self.old_LOGIN_URL def get_login_required_url(self, login_url): settings.LOGIN_URL = login_url response = self.client.get('/login_required/') self.assertEqual(response.status_code, 302) return response['Location'] def test_standard_login_url(self): login_url = '/login/' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = '/login_required/' self.assertEqual(login_required_url, 'http://testserver%s?%s' % (login_url, querystring.urlencode('/'))) def test_remote_login_url(self): login_url = 'http://remote.example.com/login' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) def test_https_login_url(self): login_url = 'https:///login/' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) def test_login_url_with_querystring(self): login_url = '/login/?pretty=1' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('pretty=1', mutable=True) querystring['next'] = '/login_required/' self.assertEqual(login_required_url, 'http://testserver/login/?%s' % querystring.urlencode('/')) def test_remote_login_url_with_next_querystring(self): login_url = 'http://remote.example.com/login/' login_required_url = self.get_login_required_url('%s?next=/default/' % login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, 
querystring.urlencode('/'))) class LogoutTest(AuthViewsTestCase): def confirm_logged_out(self): self.assertTrue(SESSION_KEY not in self.client.session) def test_logout_default(self): "Logout without next_page option renders the default template" self.login() response = self.client.get('/logout/') self.assertEqual(200, response.status_code) self.assertTrue('Logged out' in response.content) self.confirm_logged_out() def test_14377(self): # Bug 14377 self.login() response = self.client.get('/logout/') self.assertTrue('site' in response.context) def test_logout_with_overridden_redirect_url(self): # Bug 11223 self.login() response = self.client.get('/logout/next_page/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) response = self.client.get('/logout/next_page/?next=/login/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/')) self.confirm_logged_out() def test_logout_with_next_page_specified(self): "Logout with next_page option given redirects to specified resource" self.login() response = self.client.get('/logout/next_page/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) self.confirm_logged_out() def test_logout_with_redirect_argument(self): "Logout with query string redirects to specified resource" self.login() response = self.client.get('/logout/?next=/login/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/')) self.confirm_logged_out() def test_logout_with_custom_redirect_argument(self): "Logout with custom query string redirects to specified resource" self.login() response = self.client.get('/logout/custom_query/?follow=/somewhere/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) self.confirm_logged_out() def test_security_check(self, password='password'): logout_url = 
reverse('django.contrib.auth.views.logout') # Those URLs should not pass the security check for bad_url in ('http://example.com', 'https://example.com', 'ftp://exampel.com', '//example.com'): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': urllib.quote(bad_url), } self.login() response = self.client.get(nasty_url) self.assertEqual(response.status_code, 302) self.assertFalse(bad_url in response['Location'], "%s should be blocked" % bad_url) self.confirm_logged_out() # These URLs *should* still pass the security check for good_url in ('/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://exampel.com', 'view/?param=//example.com', 'https:///', '//testserver/', '/url%20with%20spaces/'): # see ticket #12534 safe_url = '%(url)s?%(next)s=%(good_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'good_url': urllib.quote(good_url), } self.login() response = self.client.get(safe_url) self.assertEqual(response.status_code, 302) self.assertTrue(good_url in response['Location'], "%s should be allowed" % good_url) self.confirm_logged_out()
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5790_0
crossvul-python_data_good_5788_0
import os import re from django.conf import global_settings, settings from django.contrib.sites.models import Site, RequestSite from django.contrib.auth.models import User from django.core import mail from django.core.exceptions import SuspiciousOperation from django.core.urlresolvers import reverse, NoReverseMatch from django.http import QueryDict, HttpRequest from django.utils.encoding import force_text from django.utils.html import escape from django.utils.http import urlquote from django.utils._os import upath from django.test import TestCase from django.test.utils import override_settings from django.middleware.csrf import CsrfViewMiddleware from django.contrib.sessions.middleware import SessionMiddleware from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm, SetPasswordForm, PasswordResetForm) from django.contrib.auth.tests.utils import skipIfCustomUser from django.contrib.auth.views import login as login_view @override_settings( LANGUAGES=( ('en', 'English'), ), LANGUAGE_CODE='en', TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS, TEMPLATE_DIRS=( os.path.join(os.path.dirname(upath(__file__)), 'templates'), ), USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',), ) class AuthViewsTestCase(TestCase): """ Helper base class for all the follow test cases. 
""" fixtures = ['authtestdata.json'] urls = 'django.contrib.auth.tests.urls' def login(self, password='password'): response = self.client.post('/login/', { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL)) self.assertTrue(SESSION_KEY in self.client.session) def assertContainsEscaped(self, response, text, **kwargs): return self.assertContains(response, escape(force_text(text)), **kwargs) @skipIfCustomUser class AuthViewNamedURLTests(AuthViewsTestCase): urls = 'django.contrib.auth.urls' def test_named_urls(self): "Named URLs should be reversible" expected_named_urls = [ ('login', [], {}), ('logout', [], {}), ('password_change', [], {}), ('password_change_done', [], {}), ('password_reset', [], {}), ('password_reset_done', [], {}), ('password_reset_confirm', [], { 'uidb36': 'aaaaaaa', 'token': '1111-aaaaa', }), ('password_reset_complete', [], {}), ] for name, args, kwargs in expected_named_urls: try: reverse(name, args=args, kwargs=kwargs) except NoReverseMatch: self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name) @skipIfCustomUser class PasswordResetTest(AuthViewsTestCase): def test_email_not_found(self): "Error is raised if the provided email address isn't currently registered" response = self.client.get('/password_reset/') self.assertEqual(response.status_code, 200) response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'}) self.assertContainsEscaped(response, PasswordResetForm.error_messages['unknown']) self.assertEqual(len(mail.outbox), 0) def test_email_found(self): "Email is sent if a valid email address is provided for password reset" response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertTrue("http://" in mail.outbox[0].body) 
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email) def test_email_found_custom_from(self): "Email is sent if a valid email address is provided for password reset when a custom from_email is provided." response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertEqual("staffmember@example.com", mail.outbox[0].from_email) @override_settings(ALLOWED_HOSTS=['adminsite.com']) def test_admin_reset(self): "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override." response = self.client.post('/admin_password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='adminsite.com' ) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertTrue("http://adminsite.com" in mail.outbox[0].body) self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email) # Skip any 500 handler action (like sending more mail...) @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host(self): "Poisoned HTTP_HOST headers can't be used for reset emails" # This attack is based on the way browsers handle URLs. The colon # should be used to separate the port, but if the URL contains an @, # the colon is interpreted as part of a username for login purposes, # making 'evil.com' the request domain. Since HTTP_HOST is used to # produce a meaningful reset URL, we need to be certain that the # HTTP_HOST header isn't poisoned. This is done as a check when get_host() # is invoked, but we check here as a practical consequence. with self.assertRaises(SuspiciousOperation): self.client.post('/password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='www.example:dr.frankenstein@evil.tld' ) self.assertEqual(len(mail.outbox), 0) # Skip any 500 handler action (like sending more mail...) 
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host_admin_site(self): "Poisoned HTTP_HOST headers can't be used for reset emails on admin views" with self.assertRaises(SuspiciousOperation): self.client.post('/admin_password_reset/', {'email': 'staffmember@example.com'}, HTTP_HOST='www.example:dr.frankenstein@evil.tld' ) self.assertEqual(len(mail.outbox), 0) def _test_confirm_start(self): # Start by creating the email response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) return self._read_signup_email(mail.outbox[0]) def _read_signup_email(self, email): urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body) self.assertTrue(urlmatch is not None, "No URL found in sent email") return urlmatch.group(), urlmatch.groups()[0] def test_confirm_valid(self): url, path = self._test_confirm_start() response = self.client.get(path) # redirect to a 'complete' page: self.assertContains(response, "Please enter your new password") def test_confirm_invalid(self): url, path = self._test_confirm_start() # Let's munge the token in the path, but keep the same length, # in case the URLconf will reject a different length. path = path[:-5] + ("0" * 4) + path[-1] response = self.client.get(path) self.assertContains(response, "The password reset link was invalid") def test_confirm_invalid_user(self): # Ensure that we get a 200 response for a non-existant user, not a 404 response = self.client.get('/reset/123456-1-1/') self.assertContains(response, "The password reset link was invalid") def test_confirm_overflow_user(self): # Ensure that we get a 200 response for a base36 user id that overflows int response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/') self.assertContains(response, "The password reset link was invalid") def test_confirm_invalid_post(self): # Same as test_confirm_invalid, but trying # to do a POST instead. 
url, path = self._test_confirm_start() path = path[:-5] + ("0" * 4) + path[-1] self.client.post(path, { 'new_password1': 'anewpassword', 'new_password2': ' anewpassword', }) # Check the password has not been changed u = User.objects.get(email='staffmember@example.com') self.assertTrue(not u.check_password("anewpassword")) def test_confirm_complete(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'}) # It redirects us to a 'complete' page: self.assertEqual(response.status_code, 302) # Check the password has been changed u = User.objects.get(email='staffmember@example.com') self.assertTrue(u.check_password("anewpassword")) # Check we can't use the link again response = self.client.get(path) self.assertContains(response, "The password reset link was invalid") def test_confirm_different_passwords(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'x'}) self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch']) @override_settings(AUTH_USER_MODEL='auth.CustomUser') class CustomUserPasswordResetTest(AuthViewsTestCase): fixtures = ['custom_user.json'] def _test_confirm_start(self): # Start by creating the email response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) return self._read_signup_email(mail.outbox[0]) def _read_signup_email(self, email): urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body) self.assertTrue(urlmatch is not None, "No URL found in sent email") return urlmatch.group(), urlmatch.groups()[0] def test_confirm_valid_custom_user(self): url, path = self._test_confirm_start() response = self.client.get(path) # redirect to a 'complete' page: self.assertContains(response, "Please enter your new password") @skipIfCustomUser class 
ChangePasswordTest(AuthViewsTestCase): def fail_login(self, password='password'): response = self.client.post('/login/', { 'username': 'testclient', 'password': password, }) self.assertContainsEscaped(response, AuthenticationForm.error_messages['invalid_login'] % { 'username': User._meta.get_field('username').verbose_name }) def logout(self): response = self.client.get('/logout/') def test_password_change_fails_with_invalid_old_password(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'donuts', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertContainsEscaped(response, PasswordChangeForm.error_messages['password_incorrect']) def test_password_change_fails_with_mismatched_passwords(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'donuts', }) self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch']) def test_password_change_succeeds(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/password_change/done/')) self.fail_login() self.login(password='password1') def test_password_change_done_succeeds(self): self.login() response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/password_change/done/')) def test_password_change_done_fails(self): with self.settings(LOGIN_URL='/login/'): response = self.client.get('/password_change/done/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/?next=/password_change/done/')) @skipIfCustomUser class 
LoginTest(AuthViewsTestCase): def test_current_site_in_context_after_login(self): response = self.client.get(reverse('django.contrib.auth.views.login')) self.assertEqual(response.status_code, 200) if Site._meta.installed: site = Site.objects.get_current() self.assertEqual(response.context['site'], site) self.assertEqual(response.context['site_name'], site.name) else: self.assertIsInstance(response.context['site'], RequestSite) self.assertTrue(isinstance(response.context['form'], AuthenticationForm), 'Login form is not an AuthenticationForm') def test_security_check(self, password='password'): login_url = reverse('django.contrib.auth.views.login') # Those URLs should not pass the security check for bad_url in ('http://example.com', 'https://example.com', 'ftp://exampel.com', '//example.com', 'javascript:alert("XSS")'): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': urlquote(bad_url), } response = self.client.post(nasty_url, { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertFalse(bad_url in response['Location'], "%s should be blocked" % bad_url) # These URLs *should* still pass the security check for good_url in ('/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://exampel.com', 'view/?param=//example.com', 'https:///', 'HTTPS:///', '//testserver/', '/url%20with%20spaces/'): # see ticket #12534 safe_url = '%(url)s?%(next)s=%(good_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'good_url': urlquote(good_url), } response = self.client.post(safe_url, { 'username': 'testclient', 'password': password, }) self.assertEqual(response.status_code, 302) self.assertTrue(good_url in response['Location'], "%s should be allowed" % good_url) def test_login_csrf_rotate(self, password='password'): """ Makes sure that a login rotates the currently-used CSRF token. 
""" # Do a GET to establish a CSRF token # TestClient isn't used here as we're testing middleware, essentially. req = HttpRequest() CsrfViewMiddleware().process_view(req, login_view, (), {}) req.META["CSRF_COOKIE_USED"] = True resp = login_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None) token1 = csrf_cookie.coded_value # Prepare the POST request req = HttpRequest() req.COOKIES[settings.CSRF_COOKIE_NAME] = token1 req.method = "POST" req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1} req.REQUEST = req.POST # Use POST request to log in SessionMiddleware().process_request(req) CsrfViewMiddleware().process_view(req, login_view, (), {}) req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view req.META["SERVER_PORT"] = 80 req.META["CSRF_COOKIE_USED"] = True resp = login_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None) token2 = csrf_cookie.coded_value # Check the CSRF token switched self.assertNotEqual(token1, token2) @skipIfCustomUser class LoginURLSettings(AuthViewsTestCase): def setUp(self): super(LoginURLSettings, self).setUp() self.old_LOGIN_URL = settings.LOGIN_URL def tearDown(self): super(LoginURLSettings, self).tearDown() settings.LOGIN_URL = self.old_LOGIN_URL def get_login_required_url(self, login_url): settings.LOGIN_URL = login_url response = self.client.get('/login_required/') self.assertEqual(response.status_code, 302) return response['Location'] def test_standard_login_url(self): login_url = '/login/' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = '/login_required/' self.assertEqual(login_required_url, 'http://testserver%s?%s' % (login_url, querystring.urlencode('/'))) def test_remote_login_url(self): login_url = 'http://remote.example.com/login' 
login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) def test_https_login_url(self): login_url = 'https:///login/' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) def test_login_url_with_querystring(self): login_url = '/login/?pretty=1' login_required_url = self.get_login_required_url(login_url) querystring = QueryDict('pretty=1', mutable=True) querystring['next'] = '/login_required/' self.assertEqual(login_required_url, 'http://testserver/login/?%s' % querystring.urlencode('/')) def test_remote_login_url_with_next_querystring(self): login_url = 'http://remote.example.com/login/' login_required_url = self.get_login_required_url('%s?next=/default/' % login_url) querystring = QueryDict('', mutable=True) querystring['next'] = 'http://testserver/login_required/' self.assertEqual(login_required_url, '%s?%s' % (login_url, querystring.urlencode('/'))) @skipIfCustomUser class LogoutTest(AuthViewsTestCase): def confirm_logged_out(self): self.assertTrue(SESSION_KEY not in self.client.session) def test_logout_default(self): "Logout without next_page option renders the default template" self.login() response = self.client.get('/logout/') self.assertContains(response, 'Logged out') self.confirm_logged_out() def test_14377(self): # Bug 14377 self.login() response = self.client.get('/logout/') self.assertTrue('site' in response.context) def test_logout_with_overridden_redirect_url(self): # Bug 11223 self.login() response = self.client.get('/logout/next_page/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) response = 
self.client.get('/logout/next_page/?next=/login/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/')) self.confirm_logged_out() def test_logout_with_next_page_specified(self): "Logout with next_page option given redirects to specified resource" self.login() response = self.client.get('/logout/next_page/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) self.confirm_logged_out() def test_logout_with_redirect_argument(self): "Logout with query string redirects to specified resource" self.login() response = self.client.get('/logout/?next=/login/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/login/')) self.confirm_logged_out() def test_logout_with_custom_redirect_argument(self): "Logout with custom query string redirects to specified resource" self.login() response = self.client.get('/logout/custom_query/?follow=/somewhere/') self.assertEqual(response.status_code, 302) self.assertTrue(response['Location'].endswith('/somewhere/')) self.confirm_logged_out() def test_security_check(self, password='password'): logout_url = reverse('django.contrib.auth.views.logout') # Those URLs should not pass the security check for bad_url in ('http://example.com', 'https://example.com', 'ftp://exampel.com', '//example.com', 'javascript:alert("XSS")'): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': urlquote(bad_url), } self.login() response = self.client.get(nasty_url) self.assertEqual(response.status_code, 302) self.assertFalse(bad_url in response['Location'], "%s should be blocked" % bad_url) self.confirm_logged_out() # These URLs *should* still pass the security check for good_url in ('/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://exampel.com', 'view/?param=//example.com', 'https:///', 'HTTPS:///', '//testserver/', 
'/url%20with%20spaces/'): # see ticket #12534 safe_url = '%(url)s?%(next)s=%(good_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'good_url': urlquote(good_url), } self.login() response = self.client.get(safe_url) self.assertEqual(response.status_code, 302) self.assertTrue(good_url in response['Location'], "%s should be allowed" % good_url) self.confirm_logged_out() @skipIfCustomUser class ChangelistTests(AuthViewsTestCase): urls = 'django.contrib.auth.tests.urls_admin' # #20078 - users shouldn't be allowed to guess password hashes via # repeated password__startswith queries. def test_changelist_disallows_password_lookups(self): # Make me a superuser before loging in. User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True) self.login() # A lookup that tries to filter on password isn't OK with self.assertRaises(SuspiciousOperation): response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
./CrossVul/dataset_final_sorted/CWE-79/py/good_5788_0
crossvul-python_data_bad_5190_1
from __future__ import unicode_literals import re import sys import types from django.conf import settings from django.core.urlresolvers import Resolver404, resolve from django.http import HttpResponse, HttpResponseNotFound from django.template import Context, Engine, TemplateDoesNotExist from django.template.defaultfilters import force_escape, pprint from django.utils import lru_cache, six, timezone from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_bytes, smart_text from django.utils.module_loading import import_string from django.utils.translation import ugettext as _ # Minimal Django templates engine to render the error templates # regardless of the project's TEMPLATES setting. DEBUG_ENGINE = Engine(debug=True) HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE') CLEANSED_SUBSTITUTE = '********************' class CallableSettingWrapper(object): """ Object to wrap callable appearing in settings * Not to call in the debug page (#21345). * Not to break the debug page if the callable forbidding to set attributes (#23070). """ def __init__(self, callable_setting): self._wrapped = callable_setting def __repr__(self): return repr(self._wrapped) def cleanse_setting(key, value): """Cleanse an individual setting key/value of sensitive content. If the value is a dictionary, recursively cleanse the keys in that dictionary. """ try: if HIDDEN_SETTINGS.search(key): cleansed = CLEANSED_SUBSTITUTE else: if isinstance(value, dict): cleansed = {k: cleanse_setting(k, v) for k, v in value.items()} else: cleansed = value except TypeError: # If the key isn't regex-able, just return as-is. cleansed = value if callable(cleansed): # For fixing #21345 and #23070 cleansed = CallableSettingWrapper(cleansed) return cleansed def get_safe_settings(): "Returns a dictionary of the settings module, with sensitive settings blurred out." 
settings_dict = {} for k in dir(settings): if k.isupper(): settings_dict[k] = cleanse_setting(k, getattr(settings, k)) return settings_dict def technical_500_response(request, exc_type, exc_value, tb, status_code=500): """ Create a technical server error response. The last three arguments are the values returned from sys.exc_info() and friends. """ reporter = ExceptionReporter(request, exc_type, exc_value, tb) if request.is_ajax(): text = reporter.get_traceback_text() return HttpResponse(text, status=status_code, content_type='text/plain') else: html = reporter.get_traceback_html() return HttpResponse(html, status=status_code, content_type='text/html') @lru_cache.lru_cache() def get_default_exception_reporter_filter(): # Instantiate the default filter for the first time and cache it. return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)() def get_exception_reporter_filter(request): default_filter = get_default_exception_reporter_filter() return getattr(request, 'exception_reporter_filter', default_filter) class ExceptionReporterFilter(object): """ Base for all exception reporter filter classes. All overridable hooks contain lenient default behaviors. """ def get_post_parameters(self, request): if request is None: return {} else: return request.POST def get_traceback_frame_variables(self, request, tb_frame): return list(tb_frame.f_locals.items()) class SafeExceptionReporterFilter(ExceptionReporterFilter): """ Use annotations made by the sensitive_post_parameters and sensitive_variables decorators to filter out sensitive information. """ def is_active(self, request): """ This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis. 
""" return settings.DEBUG is False def get_cleansed_multivaluedict(self, request, multivaluedict): """ Replaces the keys in a MultiValueDict marked as sensitive with stars. This mitigates leaking sensitive POST parameters if something like request.POST['nonexistent_key'] throws an exception (#21098). """ sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: multivaluedict = multivaluedict.copy() for param in sensitive_post_parameters: if param in multivaluedict: multivaluedict[param] = CLEANSED_SUBSTITUTE return multivaluedict def get_post_parameters(self, request): """ Replaces the values of POST parameters marked as sensitive with stars (*********). """ if request is None: return {} else: sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: cleansed = request.POST.copy() if sensitive_post_parameters == '__ALL__': # Cleanse all parameters. for k, v in cleansed.items(): cleansed[k] = CLEANSED_SUBSTITUTE return cleansed else: # Cleanse only the specified parameters. for param in sensitive_post_parameters: if param in cleansed: cleansed[param] = CLEANSED_SUBSTITUTE return cleansed else: return request.POST def cleanse_special_types(self, request, value): try: # If value is lazy or a complex object of another kind, this check # might raise an exception. isinstance checks that lazy # MultiValueDicts will have a return value. is_multivalue_dict = isinstance(value, MultiValueDict) except Exception as e: return '{!r} while evaluating {!r}'.format(e, value) if is_multivalue_dict: # Cleanse MultiValueDicts (request.POST is the one we usually care about) value = self.get_cleansed_multivaluedict(request, value) return value def get_traceback_frame_variables(self, request, tb_frame): """ Replaces the values of variables marked as sensitive with stars (*********). 
""" # Loop through the frame's callers to see if the sensitive_variables # decorator was used. current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in current_frame.f_locals): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. wrapper = current_frame.f_locals['sensitive_variables_wrapper'] sensitive_variables = getattr(wrapper, 'sensitive_variables', None) break current_frame = current_frame.f_back cleansed = {} if self.is_active(request) and sensitive_variables: if sensitive_variables == '__ALL__': # Cleanse all variables for name, value in tb_frame.f_locals.items(): cleansed[name] = CLEANSED_SUBSTITUTE else: # Cleanse specified variables for name, value in tb_frame.f_locals.items(): if name in sensitive_variables: value = CLEANSED_SUBSTITUTE else: value = self.cleanse_special_types(request, value) cleansed[name] = value else: # Potentially cleanse the request and any MultiValueDicts if they # are one of the frame variables. for name, value in tb_frame.f_locals.items(): cleansed[name] = self.cleanse_special_types(request, value) if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in tb_frame.f_locals): # For good measure, obfuscate the decorated function's arguments in # the sensitive_variables decorator's frame, in case the variables # associated with those arguments were meant to be obfuscated from # the decorated function's frame. cleansed['func_args'] = CLEANSED_SUBSTITUTE cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE return cleansed.items() class ExceptionReporter(object): """ A class to organize and coordinate reporting on exceptions. 
""" def __init__(self, request, exc_type, exc_value, tb, is_email=False): self.request = request self.filter = get_exception_reporter_filter(self.request) self.exc_type = exc_type self.exc_value = exc_value self.tb = tb self.is_email = is_email self.template_info = getattr(self.exc_value, 'template_debug', None) self.template_does_not_exist = False self.postmortem = None # Handle deprecated string exceptions if isinstance(self.exc_type, six.string_types): self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type) self.exc_type = type(self.exc_value) def get_traceback_data(self): """Return a dictionary containing traceback information.""" if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist): self.template_does_not_exist = True self.postmortem = self.exc_value.chain or [self.exc_value] frames = self.get_traceback_frames() for i, frame in enumerate(frames): if 'vars' in frame: frame_vars = [] for k, v in frame['vars']: v = pprint(v) # The force_escape filter assume unicode, make sure that works if isinstance(v, six.binary_type): v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input # Trim large blobs of data if len(v) > 4096: v = '%s... 
<trimmed %d bytes string>' % (v[0:4096], len(v)) frame_vars.append((k, force_escape(v))) frame['vars'] = frame_vars frames[i] = frame unicode_hint = '' if self.exc_type and issubclass(self.exc_type, UnicodeError): start = getattr(self.exc_value, 'start', None) end = getattr(self.exc_value, 'end', None) if start is not None and end is not None: unicode_str = self.exc_value.args[1] unicode_hint = smart_text( unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace' ) from django import get_version c = { 'is_email': self.is_email, 'unicode_hint': unicode_hint, 'frames': frames, 'request': self.request, 'filtered_POST': self.filter.get_post_parameters(self.request), 'settings': get_safe_settings(), 'sys_executable': sys.executable, 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3], 'server_time': timezone.now(), 'django_version_info': get_version(), 'sys_path': sys.path, 'template_info': self.template_info, 'template_does_not_exist': self.template_does_not_exist, 'postmortem': self.postmortem, } # Check whether exception info is available if self.exc_type: c['exception_type'] = self.exc_type.__name__ if self.exc_value: c['exception_value'] = smart_text(self.exc_value, errors='replace') if frames: c['lastframe'] = frames[-1] return c def get_traceback_html(self): "Return HTML version of debug 500 HTTP error page." t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE) c = Context(self.get_traceback_data(), use_l10n=False) return t.render(c) def get_traceback_text(self): "Return plain text version of debug 500 HTTP error page." t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE) c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False) return t.render(c) def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None): """ Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context). 
""" source = None if loader is not None and hasattr(loader, "get_source"): try: source = loader.get_source(module_name) except ImportError: pass if source is not None: source = source.splitlines() if source is None: try: with open(filename, 'rb') as fp: source = fp.read().splitlines() except (OSError, IOError): pass if source is None: return None, [], None, [] # If we just read the source from a file, or if the loader did not # apply tokenize.detect_encoding to decode the source into a Unicode # string, then we should do that ourselves. if isinstance(source[0], six.binary_type): encoding = 'ascii' for line in source[:2]: # File coding may be specified. Match pattern from PEP-263 # (http://www.python.org/dev/peps/pep-0263/) match = re.search(br'coding[:=]\s*([-\w.]+)', line) if match: encoding = match.group(1).decode('ascii') break source = [six.text_type(sline, encoding, 'replace') for sline in source] lower_bound = max(0, lineno - context_lines) upper_bound = lineno + context_lines pre_context = source[lower_bound:lineno] context_line = source[lineno] post_context = source[lineno + 1:upper_bound] return lower_bound, pre_context, context_line, post_context def get_traceback_frames(self): def explicit_or_implicit_cause(exc_value): explicit = getattr(exc_value, '__cause__', None) implicit = getattr(exc_value, '__context__', None) return explicit or implicit # Get the exception and all its causes exceptions = [] exc_value = self.exc_value while exc_value: exceptions.append(exc_value) exc_value = explicit_or_implicit_cause(exc_value) frames = [] # No exceptions were supplied to ExceptionReporter if not exceptions: return frames # In case there's just one exception (always in Python 2, # sometimes in Python 3), take the traceback from self.tb (Python 2 # doesn't have a __traceback__ attribute on Exception) exc_value = exceptions.pop() tb = self.tb if six.PY2 or not exceptions else exc_value.__traceback__ while tb is not None: # Support for __traceback_hide__ which is 
used by a few libraries # to hide internal frames. if tb.tb_frame.f_locals.get('__traceback_hide__'): tb = tb.tb_next continue filename = tb.tb_frame.f_code.co_filename function = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno - 1 loader = tb.tb_frame.f_globals.get('__loader__') module_name = tb.tb_frame.f_globals.get('__name__') or '' pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file( filename, lineno, 7, loader, module_name, ) if pre_context_lineno is not None: frames.append({ 'exc_cause': explicit_or_implicit_cause(exc_value), 'exc_cause_explicit': getattr(exc_value, '__cause__', True), 'tb': tb, 'type': 'django' if module_name.startswith('django.') else 'user', 'filename': filename, 'function': function, 'lineno': lineno + 1, 'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame), 'id': id(tb), 'pre_context': pre_context, 'context_line': context_line, 'post_context': post_context, 'pre_context_lineno': pre_context_lineno + 1, }) # If the traceback for current exception is consumed, try the # other exception. if six.PY2: tb = tb.tb_next elif not tb.tb_next and exceptions: exc_value = exceptions.pop() tb = exc_value.__traceback__ else: tb = tb.tb_next return frames def format_exception(self): """ Return the same data as from traceback.format_exception. """ import traceback frames = self.get_traceback_frames() tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames] list = ['Traceback (most recent call last):\n'] list += traceback.format_list(tb) list += traceback.format_exception_only(self.exc_type, self.exc_value) return list def technical_404_response(request, exception): "Create a technical 404 error response. The exception should be the Http404." 
try: error_url = exception.args[0]['path'] except (IndexError, TypeError, KeyError): error_url = request.path_info[1:] # Trim leading slash try: tried = exception.args[0]['tried'] except (IndexError, TypeError, KeyError): tried = [] else: if (not tried # empty URLconf or (request.path == '/' and len(tried) == 1 # default URLconf and len(tried[0]) == 1 and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')): return default_urlconf(request) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ caller = '' try: resolver_match = resolve(request.path) except Resolver404: pass else: obj = resolver_match.func if hasattr(obj, '__name__'): caller = obj.__name__ elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'): caller = obj.__class__.__name__ if hasattr(obj, '__module__'): module = obj.__module__ caller = '%s.%s' % (module, caller) t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE) c = Context({ 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'reason': force_bytes(exception, errors='replace'), 'request': request, 'settings': get_safe_settings(), 'raising_view_name': caller, }) return HttpResponseNotFound(t.render(c), content_type='text/html') def default_urlconf(request): "Create an empty URLconf 404 error response." t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE) c = Context({ "title": _("Welcome to Django"), "heading": _("It worked!"), "subheading": _("Congratulations on your first Django-powered page."), "instructions": _("Of course, you haven't actually done any work yet. " "Next, start your first app by running <code>python manage.py startapp [app_label]</code>."), "explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your " "Django settings file and you haven't configured any URLs. 
Get to work!"), }) return HttpResponse(t.render(c), content_type='text/html') # # Templates are embedded in the file so that we know the error handler will # always work even if the template loader is broken. # TECHNICAL_500_TEMPLATE = (""" <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"> <title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}""" """{% if request %} at {{ request.path_info|escape }}{% endif %}</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; } h2 { margin-bottom:.8em; } h2 span { font-size:80%; color:#666; font-weight:normal; } h3 { margin:1em 0 .5em 0; } h4 { margin:0 0 .5em 0; font-weight: normal; } code, pre { font-size: 100%; white-space: pre-wrap; } table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; } tbody td, tbody th { vertical-align:top; padding:2px 3px; } thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; } tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; } table.vars { margin:5px 0 2px 40px; } table.vars td, table.req td { font-family:monospace; } table td.code { width:100%; } table td.code pre { overflow:hidden; } table.source th { color:#666; } table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; } ul.traceback { list-style-type:none; color: #222; } ul.traceback li.frame { padding-bottom:1em; color:#666; } ul.traceback li.user { background-color:#e0e0e0; color:#000 } div.context { padding:10px 0; overflow:hidden; } div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; } div.context ol li { font-family:monospace; white-space:pre; color:#777; 
cursor:pointer; padding-left: 2px; } div.context ol li pre { display:inline; } div.context ol.context-line li { color:#505050; background-color:#dfdfdf; padding: 3px 2px; } div.context ol.context-line li span { position:absolute; right:32px; } .user div.context ol.context-line li { background-color:#bbb; color:#000; } .user div.context ol li { color:#666; } div.commands { margin-left: 40px; } div.commands a { color:#555; text-decoration:none; } .user div.commands a { color: black; } #summary { background: #ffc; } #summary h2 { font-weight: normal; color: #666; } #explanation { background:#eee; } #template, #template-not-exist { background:#f6f6f6; } #template-not-exist ul { margin: 0 0 10px 20px; } #template-not-exist .postmortem-section { margin-bottom: 3px; } #unicode-hint { background:#eee; } #traceback { background:#eee; } #requestinfo { background:#f6f6f6; padding-left:120px; } #summary table { border:none; background:transparent; } #requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; } #requestinfo h3 { margin-bottom:-1em; } .error { background: #ffc; } .specific { color:#cc3300; font-weight:bold; } h2 span.commands { font-size:.7em;} span.commands a:link {color:#5E5694;} pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; } .append-bottom { margin-bottom: 10px; } </style> {% if not is_email %} <script type="text/javascript"> //<!-- function getElementsByClassName(oElm, strTagName, strClassName){ // Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com var arrElements = (strTagName == "*" && document.all)? 
document.all : oElm.getElementsByTagName(strTagName); var arrReturnElements = new Array(); strClassName = strClassName.replace(/\-/g, "\\-"); var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)"); var oElement; for(var i=0; i<arrElements.length; i++){ oElement = arrElements[i]; if(oRegExp.test(oElement.className)){ arrReturnElements.push(oElement); } } return (arrReturnElements) } function hideAll(elems) { for (var e = 0; e < elems.length; e++) { elems[e].style.display = 'none'; } } window.onload = function() { hideAll(getElementsByClassName(document, 'table', 'vars')); hideAll(getElementsByClassName(document, 'ol', 'pre-context')); hideAll(getElementsByClassName(document, 'ol', 'post-context')); hideAll(getElementsByClassName(document, 'div', 'pastebin')); } function toggle() { for (var i = 0; i < arguments.length; i++) { var e = document.getElementById(arguments[i]); if (e) { e.style.display = e.style.display == 'none' ? 'block': 'none'; } } return false; } function varToggle(link, id) { toggle('v' + id); var s = link.getElementsByTagName('span')[0]; var uarr = String.fromCharCode(0x25b6); var darr = String.fromCharCode(0x25bc); s.innerHTML = s.innerHTML == uarr ? darr : uarr; return false; } function switchPastebinFriendly(link) { s1 = "Switch to copy-and-paste view"; s2 = "Switch back to interactive view"; link.innerHTML = link.innerHTML.trim() == s1 ? 
s2: s1; toggle('browserTraceback', 'pastebinTraceback'); return false; } //--> </script> {% endif %} </head> <body> <div id="summary"> <h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}""" """{% if request %} at {{ request.path_info|escape }}{% endif %}</h1> <pre class="exception_value">""" """{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}""" """</pre> <table class="meta"> {% if request %} <tr> <th>Request Method:</th> <td>{{ request.META.REQUEST_METHOD }}</td> </tr> <tr> <th>Request URL:</th> <td>{{ request.get_raw_uri|escape }}</td> </tr> {% endif %} <tr> <th>Django Version:</th> <td>{{ django_version_info }}</td> </tr> {% if exception_type %} <tr> <th>Exception Type:</th> <td>{{ exception_type }}</td> </tr> {% endif %} {% if exception_type and exception_value %} <tr> <th>Exception Value:</th> <td><pre>{{ exception_value|force_escape }}</pre></td> </tr> {% endif %} {% if lastframe %} <tr> <th>Exception Location:</th> <td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td> </tr> {% endif %} <tr> <th>Python Executable:</th> <td>{{ sys_executable|escape }}</td> </tr> <tr> <th>Python Version:</th> <td>{{ sys_version_info }}</td> </tr> <tr> <th>Python Path:</th> <td><pre>{{ sys_path|pprint }}</pre></td> </tr> <tr> <th>Server time:</th> <td>{{server_time|date:"r"}}</td> </tr> </table> </div> {% if unicode_hint %} <div id="unicode-hint"> <h2>Unicode error hint</h2> <p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p> </div> {% endif %} {% if template_does_not_exist %} <div id="template-not-exist"> <h2>Template-loader postmortem</h2> {% if postmortem %} <p class="append-bottom">Django tried loading these templates, in this order:</p> {% for entry in postmortem %} <p class="postmortem-section">Using engine <code>{{ entry.backend.name }}</code>:</p> <ul> {% if entry.tried %} {% for 
attempt in entry.tried %} <li><code>{{ attempt.0.loader_name }}</code>: {{ attempt.0.name }} ({{ attempt.1 }})</li> {% endfor %} </ul> {% else %} <li>This engine did not provide a list of tried templates.</li> {% endif %} </ul> {% endfor %} {% else %} <p>No templates were found because your 'TEMPLATES' setting is not configured.</p> {% endif %} </div> {% endif %} {% if template_info %} <div id="template"> <h2>Error during template rendering</h2> <p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p> <h3>{{ template_info.message }}</h3> <table class="source{% if template_info.top %} cut-top{% endif %} {% if template_info.bottom != template_info.total %} cut-bottom{% endif %}"> {% for source_line in template_info.source_lines %} {% if source_line.0 == template_info.line %} <tr class="error"><th>{{ source_line.0 }}</th> <td>{{ template_info.before }}""" """<span class="specific">{{ template_info.during }}</span>""" """{{ template_info.after }}</td> </tr> {% else %} <tr><th>{{ source_line.0 }}</th> <td>{{ source_line.1 }}</td></tr> {% endif %} {% endfor %} </table> </div> {% endif %} {% if frames %} <div id="traceback"> <h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);"> Switch to copy-and-paste view</a></span>{% endif %} </h2> {% autoescape off %} <div id="browserTraceback"> <ul class="traceback"> {% for frame in frames %} {% ifchanged frame.exc_cause %}{% if frame.exc_cause %} <li><h3> {% if frame.exc_cause_explicit %} The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception: {% else %} During handling of the above exception ({{ frame.exc_cause }}), another exception occurred: {% endif %} </h3></li> {% endif %}{% endifchanged %} <li class="frame {{ frame.type }}"> <code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code> {% if frame.context_line %} <div class="context" id="c{{ 
frame.id }}"> {% if frame.pre_context and not is_email %} <ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}"> {% for line in frame.pre_context %} <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li> {% endfor %} </ol> {% endif %} <ol start="{{ frame.lineno }}" class="context-line"> <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre> """ """{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol> {% if frame.post_context and not is_email %} <ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}"> {% for line in frame.post_context %} <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li> {% endfor %} </ol> {% endif %} </div> {% endif %} {% if frame.vars %} <div class="commands"> {% if is_email %} <h2>Local Vars</h2> {% else %} <a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>&#x25b6;</span> Local vars</a> {% endif %} </div> <table class="vars" id="v{{ frame.id }}"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in frame.vars|dictsort:"0" %} <tr> <td>{{ var.0|force_escape }}</td> <td class="code"><pre>{{ var.1 }}</pre></td> </tr> {% endfor %} </tbody> </table> {% endif %} </li> {% endfor %} </ul> </div> {% endautoescape %} <form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post"> {% if not is_email %} <div id="pastebinTraceback" class="pastebin"> <input type="hidden" name="language" value="PythonConsole"> <input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}"> <input type="hidden" name="source" value="Django Dpaste Agent"> <input type="hidden" name="poster" value="Django"> <textarea name="content" id="traceback_area" cols="140" rows="25"> Environment: {% if request %} Request Method: {{ request.META.REQUEST_METHOD 
}} Request URL: {{ request.get_raw_uri|escape }} {% endif %} Django Version: {{ django_version_info }} Python Version: {{ sys_version_info }} Installed Applications: {{ settings.INSTALLED_APPS|pprint }} Installed Middleware: {{ settings.MIDDLEWARE_CLASSES|pprint }} {% if template_does_not_exist %}Template loader postmortem {% if postmortem %}Django tried loading these templates, in this order: {% for entry in postmortem %} Using engine {{ entry.backend.name }}: {% if entry.tried %}{% for attempt in entry.tried %}""" """ * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }}) {% endfor %}{% else %} This engine did not provide a list of tried templates. {% endif %}{% endfor %} {% else %}No templates were found because your 'TEMPLATES' setting is not configured. {% endif %}{% endif %}{% if template_info %} Template error: In template {{ template_info.name }}, error at line {{ template_info.line }} {{ template_info.message }}""" "{% for source_line in template_info.source_lines %}" "{% if source_line.0 == template_info.line %}" " {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}" "{% else %}" " {{ source_line.0 }} : {{ source_line.1 }}" """{% endif %}{% endfor %}{% endif %} Traceback:{% for frame in frames %} {% ifchanged frame.exc_cause %}{% if frame.exc_cause %}{% if frame.exc_cause_explicit %} The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception: {% else %} During handling of the above exception ({{ frame.exc_cause }}), another exception occurred: {% endif %}{% endif %}{% endifchanged %} File "{{ frame.filename|escape }}" in {{ frame.function|escape }} {% if frame.context_line %} {{ frame.lineno }}. 
{{ frame.context_line|escape }}{% endif %}{% endfor %} Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %} Exception Value: {{ exception_value|force_escape }} </textarea> <br><br> <input type="submit" value="Share this traceback on a public website"> </div> </form> </div> {% endif %} {% endif %} <div id="requestinfo"> <h2>Request information</h2> {% if request %} <h3 id="get-info">GET</h3> {% if request.GET %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.GET.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No GET data</p> {% endif %} <h3 id="post-info">POST</h3> {% if filtered_POST %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in filtered_POST.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No POST data</p> {% endif %} <h3 id="files-info">FILES</h3> {% if request.FILES %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.FILES.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No FILES data</p> {% endif %} <h3 id="cookie-info">COOKIES</h3> {% if request.COOKIES %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.COOKIES.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No cookie data</p> {% endif %} <h3 id="meta-info">META</h3> <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.META.items|dictsort:"0" %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ 
var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>Request data not supplied</p> {% endif %} <h3 id="settings-info">Settings</h3> <h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4> <table class="req"> <thead> <tr> <th>Setting</th> <th>Value</th> </tr> </thead> <tbody> {% for var in settings.items|dictsort:"0" %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> </div> {% if not is_email %} <div id="explanation"> <p> You're seeing this error because you have <code>DEBUG = True</code> in your Django settings file. Change that to <code>False</code>, and Django will display a standard page generated by the handler for this status code. </p> </div> {% endif %} </body> </html> """) TECHNICAL_500_TEXT_TEMPLATE = ("""""" """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %} {% firstof exception_value 'No exception message supplied' %} {% if request %} Request Method: {{ request.META.REQUEST_METHOD }} Request URL: {{ request.get_raw_uri }}{% endif %} Django Version: {{ django_version_info }} Python Executable: {{ sys_executable }} Python Version: {{ sys_version_info }} Python Path: {{ sys_path }} Server time: {{server_time|date:"r"}} Installed Applications: {{ settings.INSTALLED_APPS|pprint }} Installed Middleware: {{ settings.MIDDLEWARE_CLASSES|pprint }} {% if template_does_not_exist %}Template loader postmortem {% if postmortem %}Django tried loading these templates, in this order: {% for entry in postmortem %} Using engine {{ entry.backend.name }}: {% if entry.tried %}{% for attempt in entry.tried %}""" """ * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }}) {% endfor %}{% else %} This engine did not provide a list of tried templates. {% endif %}{% endfor %} {% else %}No templates were found because your 'TEMPLATES' setting is not configured. 
{% endif %} {% endif %}{% if template_info %} Template error: In template {{ template_info.name }}, error at line {{ template_info.line }} {{ template_info.message }} {% for source_line in template_info.source_lines %}""" "{% if source_line.0 == template_info.line %}" " {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}" "{% else %}" " {{ source_line.0 }} : {{ source_line.1 }}" """{% endif %}{% endfor %}{% endif %}{% if frames %} Traceback:""" "{% for frame in frames %}" "{% ifchanged frame.exc_cause %}" " {% if frame.exc_cause %}" """ {% if frame.exc_cause_explicit %} The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception: {% else %} During handling of the above exception ({{ frame.exc_cause }}), another exception occurred: {% endif %} {% endif %} {% endifchanged %} File "{{ frame.filename }}" in {{ frame.function }} {% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %} {% endfor %} {% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %} {% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %} {% if request %}Request information: GET:{% for k, v in request.GET.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %} POST:{% for k, v in filtered_POST.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %} FILES:{% for k, v in request.FILES.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %} COOKIES:{% for k, v in request.COOKIES.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %} META:{% for k, v in request.META.items|dictsort:"0" %} {{ k }} = {{ v|stringformat:"r" }}{% endfor %} {% else %}Request data not supplied {% endif %} Settings: Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %} {{ k }} = {{ 
v|stringformat:"r" }}{% endfor %} {% if not is_email %} You're seeing this error because you have DEBUG = True in your Django settings file. Change that to False, and Django will display a standard page generated by the handler for this status code. {% endif %} """) TECHNICAL_404_TEMPLATE = """ <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <title>Page not found at {{ request.path_info|escape }}</title> <meta name="robots" content="NONE,NOARCHIVE"> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; background:#eee; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; margin-bottom:.4em; } h1 span { font-size:60%; color:#666; font-weight:normal; } table { border:none; border-collapse: collapse; width:100%; } td, th { vertical-align:top; padding:2px 3px; } th { width:12em; text-align:right; color:#666; padding-right:.5em; } #info { background:#f6f6f6; } #info ol { margin: 0.5em 4em; } #info ol li { font-family: monospace; } #summary { background: #ffc; } #explanation { background:#eee; border-bottom: 0px none; } </style> </head> <body> <div id="summary"> <h1>Page not found <span>(404)</span></h1> <table class="meta"> <tr> <th>Request Method:</th> <td>{{ request.META.REQUEST_METHOD }}</td> </tr> <tr> <th>Request URL:</th> <td>{{ request.build_absolute_uri|escape }}</td> </tr> {% if raising_view_name %} <tr> <th>Raised by:</th> <td>{{ raising_view_name }}</td> </tr> {% endif %} </table> </div> <div id="info"> {% if urlpatterns %} <p> Using the URLconf defined in <code>{{ urlconf }}</code>, Django tried these URL patterns, in this order: </p> <ol> {% for pattern in urlpatterns %} <li> {% for pat in pattern %} {{ pat.regex.pattern }} {% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %} {% endfor %} </li> {% endfor %} </ol> <p>The current URL, <code>{{ request_path|escape }}</code>, didn't 
match any of these.</p> {% else %} <p>{{ reason }}</p> {% endif %} </div> <div id="explanation"> <p> You're seeing this error because you have <code>DEBUG = True</code> in your Django settings file. Change that to <code>False</code>, and Django will display a standard 404 page. </p> </div> </body> </html> """ DEFAULT_URLCONF_TEMPLATE = """ <!DOCTYPE html> <html lang="en"><head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; } h2 { margin-bottom:.8em; } h2 span { font-size:80%; color:#666; font-weight:normal; } h3 { margin:1em 0 .5em 0; } h4 { margin:0 0 .5em 0; font-weight: normal; } table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; } tbody td, tbody th { vertical-align:top; padding:2px 3px; } thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; } tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; } #summary { background: #e0ebff; } #summary h2 { font-weight: normal; color: #666; } #explanation { background:#eee; } #instructions { background:#f6f6f6; } #summary table { border:none; background:transparent; } </style> </head> <body> <div id="summary"> <h1>{{ heading }}</h1> <h2>{{ subheading }}</h2> </div> <div id="instructions"> <p> {{ instructions|safe }} </p> </div> <div id="explanation"> <p> {{ explanation|safe }} </p> </div> </body></html> """
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5190_1
crossvul-python_data_bad_1735_1
from Products.CMFCore.URLTool import URLTool as BaseTool
from Products.CMFCore.utils import getToolByName
from AccessControl import ClassSecurityInfo
from App.class_init import InitializeClass
from Products.CMFPlone.PloneBaseTool import PloneBaseTool
from posixpath import normpath
from urlparse import urlparse, urljoin
import re


class URLTool(PloneBaseTool, BaseTool):

    meta_type = 'Plone URL Tool'
    security = ClassSecurityInfo()
    toolicon = 'skins/plone_images/link_icon.png'

    security.declarePublic('isURLInPortal')

    def isURLInPortal(self, url, context=None):
        """Check whether ``url`` points at the same host and portal path.

        Used to ensure that login forms only honour referrers that stay
        inside the portal.  Relative URLs are resolved against ``context``
        when one is passed; without a context they are assumed to be
        inside the portal (historical behaviour).  http://portal and
        https://portal are treated as the same portal.  Sites listed in
        the ``allow_external_login_sites`` property of
        ``site_properties`` are also considered internal to allow single
        sign-on.

        :param url: the URL (absolute or relative) to classify.
        :param context: optional content object used to resolve relative
            URLs; ``None`` keeps the old "relative means internal" rule.
        :return: True if the URL is considered inside the portal.
        """
        # Strip leading control characters and surrounding whitespace
        # that browsers silently ignore but would confuse urlparse.
        url = re.sub(r'^[\x00-\x20]+', '', url).strip()

        # Browsers treat backslashes in URLs as forward slashes, while
        # urlparse does not.  Without this normalisation a value such as
        # "\\evil.example" parses as a host-less relative path here but
        # navigates off-site in a browser (open-redirect bypass).
        url = url.replace('\\', '/')

        p_url = self()
        scheme, u_host, u_path, _, _, _ = urlparse(url)

        # Reject anything carrying a scheme other than plain http(s)
        # (javascript:, data:, mailto:, ...), and scheme-only URLs such
        # as "https:evil.example" that have no netloc: urlparse treats
        # the latter as a relative path, but a browser will leave the
        # portal, so they must never be classified as internal.
        if scheme and (scheme not in ('http', 'https') or not u_host):
            return False

        if not u_host and not u_path.startswith('/'):
            # A relative URL.
            if context is None:
                return True  # old behavior
            if not context.isPrincipiaFolderish:
                useurl = context.aq_parent.absolute_url()
            else:
                useurl = context.absolute_url()
        else:
            useurl = p_url  # when u_path.startswith('/')

        if not useurl.endswith('/'):
            useurl += '/'

        # urljoin to current url to get an absolute path.
        _, u_host, u_path, _, _, _ = urlparse(urljoin(useurl, url))

        # Normalise to end with a '/' so /foobar is not considered
        # within /foo.
        if not u_path:
            u_path = '/'
        else:
            u_path = normpath(u_path)
        if not u_path.endswith('/'):
            u_path += '/'

        _, host, path, _, _, _ = urlparse(p_url)
        if not path.endswith('/'):
            path += '/'
        if host == u_host and u_path.startswith(path):
            return True

        # External single-sign-on sites count as "in portal" too.
        props = getToolByName(self, 'portal_properties').site_properties
        for external_site in props.getProperty('allow_external_login_sites',
                                               []):
            _, host, path, _, _, _ = urlparse(external_site)
            if not path.endswith('/'):
                path += '/'
            if host == u_host and u_path.startswith(path):
                return True

        return False


URLTool.__doc__ = BaseTool.__doc__
InitializeClass(URLTool)
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1735_1
crossvul-python_data_bad_1644_2
"""Tornado handlers for frontend config storage.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import json import os import io import errno from tornado import web from IPython.utils.py3compat import PY3 from ...base.handlers import IPythonHandler, json_errors class ConfigHandler(IPythonHandler): SUPPORTED_METHODS = ('GET', 'PUT', 'PATCH') @web.authenticated @json_errors def get(self, section_name): self.set_header("Content-Type", 'application/json') self.finish(json.dumps(self.config_manager.get(section_name))) @web.authenticated @json_errors def put(self, section_name): data = self.get_json_body() # Will raise 400 if content is not valid JSON self.config_manager.set(section_name, data) self.set_status(204) @web.authenticated @json_errors def patch(self, section_name): new_data = self.get_json_body() section = self.config_manager.update(section_name, new_data) self.finish(json.dumps(section)) # URL to handler mappings section_name_regex = r"(?P<section_name>\w+)" default_handlers = [ (r"/api/config/%s" % section_name_regex, ConfigHandler), ]
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1644_2
crossvul-python_data_bad_1644_7
import json

from tornado import web

from ...base.handlers import IPythonHandler, json_errors


class NbconvertRootHandler(IPythonHandler):
    """GET /api/nbconvert -> {format_name: {"output_mimetype": ...}, ...}."""

    SUPPORTED_METHODS = ('GET',)

    @web.authenticated
    @json_errors
    def get(self):
        """List the available nbconvert export formats as JSON."""
        # Declare the payload as JSON explicitly: Tornado's default
        # Content-Type is text/html, which would allow a browser to
        # sniff and render the body as markup.
        self.set_header("Content-Type", "application/json")
        try:
            # Imported lazily so a broken nbconvert install surfaces as
            # a clean 500 instead of killing the whole server.
            from IPython.nbconvert.exporters.export import exporter_map
        except ImportError as e:
            raise web.HTTPError(500, "Could not import nbconvert: %s" % e)
        res = {}
        # 'fmt' instead of 'format' to avoid shadowing the builtin.
        for fmt, exporter in exporter_map.items():
            res[fmt] = {'output_mimetype': exporter.output_mimetype}
        self.finish(json.dumps(res))


default_handlers = [
    (r"/api/nbconvert", NbconvertRootHandler),
]
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1644_7
crossvul-python_data_bad_2186_0
# # djblets_js.py -- JavaScript-related template tags # # Copyright (c) 2007-2009 Christian Hammond # Copyright (c) 2007-2009 David Trowbridge # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from __future__ import unicode_literals import json from django import template from django.core.serializers import serialize from django.db.models.query import QuerySet from django.utils import six from django.utils.safestring import mark_safe from djblets.util.serializers import DjbletsJSONEncoder register = template.Library() @register.simple_tag def form_dialog_fields(form): """ Translates a Django Form object into a JavaScript list of fields. The resulting list of fields can be used to represent the form dynamically. 
""" s = '' for field in form: s += "{ name: '%s', " % field.name if field.is_hidden: s += "hidden: true, " else: s += "label: '%s', " % field.label_tag(field.label + ":") if field.field.required: s += "required: true, " if field.field.help_text: s += "help_text: '%s', " % field.field.help_text s += "widget: '%s' }," % six.text_type(field) # Chop off the last ',' return "[ %s ]" % s[:-1] @register.filter def json_dumps(value, indent=None): if isinstance(value, QuerySet): result = serialize('json', value, indent=indent) else: result = json.dumps(value, indent=indent, cls=DjbletsJSONEncoder) return mark_safe(result) @register.filter def json_dumps_items(d, append=''): """Dumps a list of keys/values from a dictionary, without braces. This works very much like ``json_dumps``, but doesn't output the surrounding braces. This allows it to be used within a JavaScript object definition alongside other custom keys. If the dictionary is not empty, and ``append`` is passed, it will be appended onto the results. This is most useful when you want to append a comma after all the dictionary items, in order to provide further keys in the template. """ if not d: return '' return mark_safe(json_dumps(d)[1:-1] + append)
./CrossVul/dataset_final_sorted/CWE-79/py/bad_2186_0
crossvul-python_data_good_40_0
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Backend-independent qute://* code. Module attributes: pyeval_output: The output of the last :pyeval command. _HANDLERS: The handlers registered via decorators. """ import html import json import os import time import textwrap import mimetypes import urllib import collections import pkg_resources import sip from PyQt5.QtCore import QUrlQuery, QUrl import qutebrowser from qutebrowser.config import config, configdata, configexc, configdiff from qutebrowser.utils import (version, utils, jinja, log, message, docutils, objreg, urlutils) from qutebrowser.misc import objects pyeval_output = ":pyeval was never called" spawn_output = ":spawn was never called" _HANDLERS = {} class NoHandlerFound(Exception): """Raised when no handler was found for the given URL.""" pass class QuteSchemeOSError(Exception): """Called when there was an OSError inside a handler.""" pass class QuteSchemeError(Exception): """Exception to signal that a handler should return an ErrorReply. Attributes correspond to the arguments in networkreply.ErrorNetworkReply. Attributes: errorstring: Error string to print. error: Numerical error value. 
""" def __init__(self, errorstring, error): self.errorstring = errorstring self.error = error super().__init__(errorstring) class Redirect(Exception): """Exception to signal a redirect should happen. Attributes: url: The URL to redirect to, as a QUrl. """ def __init__(self, url): super().__init__(url.toDisplayString()) self.url = url class add_handler: # noqa: N801,N806 pylint: disable=invalid-name """Decorator to register a qute://* URL handler. Attributes: _name: The 'foo' part of qute://foo backend: Limit which backends the handler can run with. """ def __init__(self, name, backend=None): self._name = name self._backend = backend self._function = None def __call__(self, function): self._function = function _HANDLERS[self._name] = self.wrapper return function def wrapper(self, *args, **kwargs): """Call the underlying function.""" if self._backend is not None and objects.backend != self._backend: return self.wrong_backend_handler(*args, **kwargs) else: return self._function(*args, **kwargs) def wrong_backend_handler(self, url): """Show an error page about using the invalid backend.""" html = jinja.render('error.html', title="Error while opening qute://url", url=url.toDisplayString(), error='{} is not available with this ' 'backend'.format(url.toDisplayString())) return 'text/html', html def data_for_url(url): """Get the data to show for the given URL. Args: url: The QUrl to show. Return: A (mimetype, data) tuple. """ norm_url = url.adjusted(QUrl.NormalizePathSegments | QUrl.StripTrailingSlash) if norm_url != url: raise Redirect(norm_url) path = url.path() host = url.host() query = urlutils.query_string(url) # A url like "qute:foo" is split as "scheme:path", not "scheme:host". log.misc.debug("url: {}, path: {}, host {}".format( url.toDisplayString(), path, host)) if not path or not host: new_url = QUrl() new_url.setScheme('qute') # When path is absent, e.g. qute://help (with no trailing slash) if host: new_url.setHost(host) # When host is absent, e.g. 
qute:help else: new_url.setHost(path) new_url.setPath('/') if query: new_url.setQuery(query) if new_url.host(): # path was a valid host raise Redirect(new_url) try: handler = _HANDLERS[host] except KeyError: raise NoHandlerFound(url) try: mimetype, data = handler(url) except OSError as e: # FIXME:qtwebengine how to handle this? raise QuteSchemeOSError(e) except QuteSchemeError as e: raise assert mimetype is not None, url if mimetype == 'text/html' and isinstance(data, str): # We let handlers return HTML as text data = data.encode('utf-8', errors='xmlcharrefreplace') return mimetype, data @add_handler('bookmarks') def qute_bookmarks(_url): """Handler for qute://bookmarks. Display all quickmarks / bookmarks.""" bookmarks = sorted(objreg.get('bookmark-manager').marks.items(), key=lambda x: x[1]) # Sort by title quickmarks = sorted(objreg.get('quickmark-manager').marks.items(), key=lambda x: x[0]) # Sort by name html = jinja.render('bookmarks.html', title='Bookmarks', bookmarks=bookmarks, quickmarks=quickmarks) return 'text/html', html @add_handler('tabs') def qute_tabs(_url): """Handler for qute://tabs. Display information about all open tabs.""" tabs = collections.defaultdict(list) for win_id, window in objreg.window_registry.items(): if sip.isdeleted(window): continue tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) for tab in tabbed_browser.widgets(): if tab.url() not in [QUrl("qute://tabs/"), QUrl("qute://tabs")]: urlstr = tab.url().toDisplayString() tabs[str(win_id)].append((tab.title(), urlstr)) html = jinja.render('tabs.html', title='Tabs', tab_list_by_window=tabs) return 'text/html', html def history_data(start_time, offset=None): """Return history data. Arguments: start_time: select history starting from this timestamp. 
offset: number of items to skip """ # history atimes are stored as ints, ensure start_time is not a float start_time = int(start_time) hist = objreg.get('web-history') if offset is not None: entries = hist.entries_before(start_time, limit=1000, offset=offset) else: # end is 24hrs earlier than start end_time = start_time - 24*60*60 entries = hist.entries_between(end_time, start_time) return [{"url": html.escape(e.url), "title": html.escape(e.title) or html.escape(e.url), "time": e.atime} for e in entries] @add_handler('history') def qute_history(url): """Handler for qute://history. Display and serve history.""" if url.path() == '/data': try: offset = QUrlQuery(url).queryItemValue("offset") offset = int(offset) if offset else None except ValueError as e: raise QuteSchemeError("Query parameter offset is invalid", e) # Use start_time in query or current time. try: start_time = QUrlQuery(url).queryItemValue("start_time") start_time = float(start_time) if start_time else time.time() except ValueError as e: raise QuteSchemeError("Query parameter start_time is invalid", e) return 'text/html', json.dumps(history_data(start_time, offset)) else: return 'text/html', jinja.render( 'history.html', title='History', gap_interval=config.val.history_gap_interval ) @add_handler('javascript') def qute_javascript(url): """Handler for qute://javascript. Return content of file given as query parameter. 
""" path = url.path() if path: path = "javascript" + os.sep.join(path.split('/')) return 'text/html', utils.read_file(path, binary=False) else: raise QuteSchemeError("No file specified", ValueError()) @add_handler('pyeval') def qute_pyeval(_url): """Handler for qute://pyeval.""" html = jinja.render('pre.html', title='pyeval', content=pyeval_output) return 'text/html', html @add_handler('spawn-output') def qute_spawn_output(_url): """Handler for qute://spawn-output.""" html = jinja.render('pre.html', title='spawn output', content=spawn_output) return 'text/html', html @add_handler('version') @add_handler('verizon') def qute_version(_url): """Handler for qute://version.""" html = jinja.render('version.html', title='Version info', version=version.version(), copyright=qutebrowser.__copyright__) return 'text/html', html @add_handler('plainlog') def qute_plainlog(url): """Handler for qute://plainlog. An optional query parameter specifies the minimum log level to print. For example, qute://log?level=warning prints warnings and errors. Level can be one of: vdebug, debug, info, warning, error, critical. """ if log.ram_handler is None: text = "Log output was disabled." else: level = QUrlQuery(url).queryItemValue('level') if not level: level = 'vdebug' text = log.ram_handler.dump_log(html=False, level=level) html = jinja.render('pre.html', title='log', content=text) return 'text/html', html @add_handler('log') def qute_log(url): """Handler for qute://log. An optional query parameter specifies the minimum log level to print. For example, qute://log?level=warning prints warnings and errors. Level can be one of: vdebug, debug, info, warning, error, critical. 
""" if log.ram_handler is None: html_log = None else: level = QUrlQuery(url).queryItemValue('level') if not level: level = 'vdebug' html_log = log.ram_handler.dump_log(html=True, level=level) html = jinja.render('log.html', title='log', content=html_log) return 'text/html', html @add_handler('gpl') def qute_gpl(_url): """Handler for qute://gpl. Return HTML content as string.""" return 'text/html', utils.read_file('html/license.html') @add_handler('help') def qute_help(url): """Handler for qute://help.""" urlpath = url.path() if not urlpath or urlpath == '/': urlpath = 'index.html' else: urlpath = urlpath.lstrip('/') if not docutils.docs_up_to_date(urlpath): message.error("Your documentation is outdated! Please re-run " "scripts/asciidoc2html.py.") path = 'html/doc/{}'.format(urlpath) if not urlpath.endswith('.html'): try: bdata = utils.read_file(path, binary=True) except OSError as e: raise QuteSchemeOSError(e) mimetype, _encoding = mimetypes.guess_type(urlpath) assert mimetype is not None, url return mimetype, bdata try: data = utils.read_file(path) except OSError: # No .html around, let's see if we find the asciidoc asciidoc_path = path.replace('.html', '.asciidoc') if asciidoc_path.startswith('html/doc/'): asciidoc_path = asciidoc_path.replace('html/doc/', '../doc/help/') try: asciidoc = utils.read_file(asciidoc_path) except OSError: asciidoc = None if asciidoc is None: raise preamble = textwrap.dedent(""" There was an error loading the documentation! This most likely means the documentation was not generated properly. If you are running qutebrowser from the git repository, please (re)run scripts/asciidoc2html.py and reload this page. If you're running a released version this is a bug, please use :report to report it. Falling back to the plaintext version. 
--------------------------------------------------------------- """) return 'text/plain', (preamble + asciidoc).encode('utf-8') else: return 'text/html', data @add_handler('backend-warning') def qute_backend_warning(_url): """Handler for qute://backend-warning.""" html = jinja.render('backend-warning.html', distribution=version.distribution(), Distribution=version.Distribution, version=pkg_resources.parse_version, title="Legacy backend warning") return 'text/html', html def _qute_settings_set(url): """Handler for qute://settings/set.""" query = QUrlQuery(url) option = query.queryItemValue('option', QUrl.FullyDecoded) value = query.queryItemValue('value', QUrl.FullyDecoded) # https://github.com/qutebrowser/qutebrowser/issues/727 if option == 'content.javascript.enabled' and value == 'false': msg = ("Refusing to disable javascript via qute://settings " "as it needs javascript support.") message.error(msg) return 'text/html', b'error: ' + msg.encode('utf-8') try: config.instance.set_str(option, value, save_yaml=True) return 'text/html', b'ok' except configexc.Error as e: message.error(str(e)) return 'text/html', b'error: ' + str(e).encode('utf-8') @add_handler('settings') def qute_settings(url): """Handler for qute://settings. View/change qute configuration.""" if url.path() == '/set': return _qute_settings_set(url) html = jinja.render('settings.html', title='settings', configdata=configdata, confget=config.instance.get_str) return 'text/html', html @add_handler('bindings') def qute_bindings(_url): """Handler for qute://bindings. 
View keybindings.""" bindings = {} defaults = config.val.bindings.default modes = set(defaults.keys()).union(config.val.bindings.commands) modes.remove('normal') modes = ['normal'] + sorted(list(modes)) for mode in modes: bindings[mode] = config.key_instance.get_bindings_for(mode) html = jinja.render('bindings.html', title='Bindings', bindings=bindings) return 'text/html', html @add_handler('back') def qute_back(url): """Handler for qute://back. Simple page to free ram / lazy load a site, goes back on focusing the tab. """ html = jinja.render( 'back.html', title='Suspended: ' + urllib.parse.unquote(url.fragment())) return 'text/html', html @add_handler('configdiff') def qute_configdiff(url): """Handler for qute://configdiff.""" if url.path() == '/old': try: return 'text/html', configdiff.get_diff() except OSError as e: error = (b'Failed to read old config: ' + str(e.strerror).encode('utf-8')) return 'text/plain', error else: data = config.instance.dump_userconfig().encode('utf-8') return 'text/plain', data @add_handler('pastebin-version') def qute_pastebin_version(_url): """Handler that pastebins the version string.""" version.pastebin_version() return 'text/plain', b'Paste called.'
./CrossVul/dataset_final_sorted/CWE-79/py/good_40_0
crossvul-python_data_good_5789_1
from __future__ import unicode_literals import base64 import calendar import datetime import re import sys try: from urllib import parse as urllib_parse except ImportError: # Python 2 import urllib as urllib_parse import urlparse urllib_parse.urlparse = urlparse.urlparse from binascii import Error as BinasciiError from email.utils import formatdate from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_str, force_text from django.utils.functional import allow_lazy from django.utils import six ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"') MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split() __D = r'(?P<day>\d{2})' __D2 = r'(?P<day>[ \d]\d)' __M = r'(?P<mon>\w{3})' __Y = r'(?P<year>\d{4})' __Y2 = r'(?P<year>\d{2})' __T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})' RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T)) RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T)) ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y)) def urlquote(url, safe='/'): """ A version of Python's urllib.quote() function that can operate on unicode strings. The url is first UTF-8 encoded before quoting. The returned string can safely be used as part of an argument to a subsequent iri_to_uri() call without double-quoting occurring. """ return force_text(urllib_parse.quote(force_str(url), force_str(safe))) urlquote = allow_lazy(urlquote, six.text_type) def urlquote_plus(url, safe=''): """ A version of Python's urllib.quote_plus() function that can operate on unicode strings. The url is first UTF-8 encoded before quoting. The returned string can safely be used as part of an argument to a subsequent iri_to_uri() call without double-quoting occurring. 
""" return force_text(urllib_parse.quote_plus(force_str(url), force_str(safe))) urlquote_plus = allow_lazy(urlquote_plus, six.text_type) def urlunquote(quoted_url): """ A wrapper for Python's urllib.unquote() function that can operate on the result of django.utils.http.urlquote(). """ return force_text(urllib_parse.unquote(force_str(quoted_url))) urlunquote = allow_lazy(urlunquote, six.text_type) def urlunquote_plus(quoted_url): """ A wrapper for Python's urllib.unquote_plus() function that can operate on the result of django.utils.http.urlquote_plus(). """ return force_text(urllib_parse.unquote_plus(force_str(quoted_url))) urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type) def urlencode(query, doseq=0): """ A version of Python's urllib.urlencode() function that can operate on unicode strings. The parameters are first cast to UTF-8 encoded strings and then encoded as per normal. """ if isinstance(query, MultiValueDict): query = query.lists() elif hasattr(query, 'items'): query = query.items() return urllib_parse.urlencode( [(force_str(k), [force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v)) for k, v in query], doseq) def cookie_date(epoch_seconds=None): """ Formats the time to ensure compatibility with Netscape's cookie standard. Accepts a floating point number expressed in seconds since the epoch, in UTC - such as that outputted by time.time(). If set to None, defaults to the current time. Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'. """ rfcdate = formatdate(epoch_seconds) return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25]) def http_date(epoch_seconds=None): """ Formats the time to match the RFC1123 date format as specified by HTTP RFC2616 section 3.3.1. Accepts a floating point number expressed in seconds since the epoch, in UTC - such as that outputted by time.time(). If set to None, defaults to the current time. Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'. 
""" return formatdate(epoch_seconds, usegmt=True) def parse_http_date(date): """ Parses a date format as specified by HTTP RFC2616 section 3.3.1. The three formats allowed by the RFC are accepted, even if only the first one is still in widespread use. Returns an integer expressed in seconds since the epoch, in UTC. """ # emails.Util.parsedate does the job for RFC1123 dates; unfortunately # RFC2616 makes it mandatory to support RFC850 dates too. So we roll # our own RFC-compliant parsing. for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE: m = regex.match(date) if m is not None: break else: raise ValueError("%r is not in a valid HTTP date format" % date) try: year = int(m.group('year')) if year < 100: if year < 70: year += 2000 else: year += 1900 month = MONTHS.index(m.group('mon').lower()) + 1 day = int(m.group('day')) hour = int(m.group('hour')) min = int(m.group('min')) sec = int(m.group('sec')) result = datetime.datetime(year, month, day, hour, min, sec) return calendar.timegm(result.utctimetuple()) except Exception: six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2]) def parse_http_date_safe(date): """ Same as parse_http_date, but returns None if the input is invalid. """ try: return parse_http_date(date) except Exception: pass # Base 36 functions: useful for generating compact URLs def base36_to_int(s): """ Converts a base 36 string to an ``int``. Raises ``ValueError` if the input won't fit into an int. """ # To prevent overconsumption of server resources, reject any # base36 string that is long than 13 base36 digits (13 digits # is sufficient to base36-encode any 64-bit integer) if len(s) > 13: raise ValueError("Base36 input too large") value = int(s, 36) # ... then do a final check that the value will fit into an int to avoid # returning a long (#15067). The long type was removed in Python 3. 
if not six.PY3 and value > sys.maxint: raise ValueError("Base36 input too large") return value def int_to_base36(i): """ Converts an integer to a base36 string """ digits = "0123456789abcdefghijklmnopqrstuvwxyz" factor = 0 if i < 0: raise ValueError("Negative base36 conversion input.") if not six.PY3: if not isinstance(i, six.integer_types): raise TypeError("Non-integer base36 conversion input.") if i > sys.maxint: raise ValueError("Base36 conversion input too large.") # Find starting factor while True: factor += 1 if i < 36 ** factor: factor -= 1 break base36 = [] # Construct base36 representation while factor >= 0: j = 36 ** factor base36.append(digits[i // j]) i = i % j factor -= 1 return ''.join(base36) def urlsafe_base64_encode(s): """ Encodes a bytestring in base64 for use in URLs, stripping any trailing equal signs. """ return base64.urlsafe_b64encode(s).rstrip(b'\n=') def urlsafe_base64_decode(s): """ Decodes a base64 encoded string, adding back any trailing equal signs that might have been stripped. """ s = s.encode('utf-8') # base64encode should only return ASCII. try: return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'=')) except (LookupError, BinasciiError) as e: raise ValueError(e) def parse_etags(etag_str): """ Parses a string with one or several etags passed in If-None-Match and If-Match headers by the rules in RFC 2616. Returns a list of etags without surrounding double quotes (") and unescaped from \<CHAR>. """ etags = ETAG_MATCH.findall(etag_str) if not etags: # etag_str has wrong format, treat it as an opaque string then return [etag_str] etags = [e.encode('ascii').decode('unicode_escape') for e in etags] return etags def quote_etag(etag): """ Wraps a string in double quotes escaping contents as necessary. 
""" return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"') def same_origin(url1, url2): """ Checks if two URLs are 'same-origin' """ p1, p2 = urllib_parse.urlparse(url1), urllib_parse.urlparse(url2) try: return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port) except ValueError: return False def is_safe_url(url, host=None): """ Return ``True`` if the url is a safe redirection (i.e. it doesn't point to a different host and uses a safe scheme). Always returns ``False`` on an empty url. """ if not url: return False url_info = urllib_parse.urlparse(url) return (not url_info.netloc or url_info.netloc == host) and \ (not url_info.scheme or url_info.scheme in ['http', 'https'])
./CrossVul/dataset_final_sorted/CWE-79/py/good_5789_1
crossvul-python_data_bad_4097_1
from collections import OrderedDict import django.forms from django.utils.translation import gettext_lazy as _ from wagtail.admin.forms import WagtailAdminPageForm from wagtail.contrib.forms.utils import get_field_clean_name class BaseForm(django.forms.Form): def __init__(self, *args, **kwargs): kwargs.setdefault('label_suffix', '') self.user = kwargs.pop('user', None) self.page = kwargs.pop('page', None) super().__init__(*args, **kwargs) class FormBuilder: def __init__(self, fields): self.fields = fields def create_singleline_field(self, field, options): # TODO: This is a default value - it may need to be changed options['max_length'] = 255 return django.forms.CharField(**options) def create_multiline_field(self, field, options): return django.forms.CharField(widget=django.forms.Textarea, **options) def create_date_field(self, field, options): return django.forms.DateField(**options) def create_datetime_field(self, field, options): return django.forms.DateTimeField(**options) def create_email_field(self, field, options): return django.forms.EmailField(**options) def create_url_field(self, field, options): return django.forms.URLField(**options) def create_number_field(self, field, options): return django.forms.DecimalField(**options) def create_dropdown_field(self, field, options): options['choices'] = map( lambda x: (x.strip(), x.strip()), field.choices.split(',') ) return django.forms.ChoiceField(**options) def create_multiselect_field(self, field, options): options['choices'] = map( lambda x: (x.strip(), x.strip()), field.choices.split(',') ) return django.forms.MultipleChoiceField(**options) def create_radio_field(self, field, options): options['choices'] = map( lambda x: (x.strip(), x.strip()), field.choices.split(',') ) return django.forms.ChoiceField(widget=django.forms.RadioSelect, **options) def create_checkboxes_field(self, field, options): options['choices'] = [(x.strip(), x.strip()) for x in field.choices.split(',')] options['initial'] = [x.strip() for 
x in field.default_value.split(',')] return django.forms.MultipleChoiceField( widget=django.forms.CheckboxSelectMultiple, **options ) def create_checkbox_field(self, field, options): return django.forms.BooleanField(**options) def create_hidden_field(self, field, options): return django.forms.CharField(widget=django.forms.HiddenInput, **options) def get_create_field_function(self, type): """ Takes string of field type and returns a Django Form Field Instance. Assumes form field creation functions are in the format: 'create_fieldtype_field' """ create_field_function = getattr(self, 'create_%s_field' % type, None) if create_field_function: return create_field_function else: import inspect method_list = [ f[0] for f in inspect.getmembers(self.__class__, inspect.isfunction) if f[0].startswith('create_') and f[0].endswith('_field') ] raise AttributeError( "Could not find function matching format \ create_<fieldname>_field for type: " + type, "Must be one of: " + ", ".join(method_list) ) @property def formfields(self): formfields = OrderedDict() for field in self.fields: options = self.get_field_options(field) create_field = self.get_create_field_function(field.field_type) formfields[field.clean_name] = create_field(field, options) return formfields def get_field_options(self, field): options = {} options['label'] = field.label options['help_text'] = field.help_text options['required'] = field.required options['initial'] = field.default_value return options def get_form_class(self): return type(str('WagtailForm'), (BaseForm,), self.formfields) class SelectDateForm(django.forms.Form): date_from = django.forms.DateTimeField( required=False, widget=django.forms.DateInput(attrs={'placeholder': _('Date from')}) ) date_to = django.forms.DateTimeField( required=False, widget=django.forms.DateInput(attrs={'placeholder': _('Date to')}) ) class WagtailAdminFormPageForm(WagtailAdminPageForm): def clean(self): super().clean() # Check for dupe form field labels - fixes #585 if 
'form_fields' in self.formsets: _forms = self.formsets['form_fields'].forms for f in _forms: f.is_valid() for i, form in enumerate(_forms): if 'label' in form.changed_data: label = form.cleaned_data.get('label') clean_name = get_field_clean_name(label) for idx, ff in enumerate(_forms): # Exclude self ff_clean_name = get_field_clean_name(ff.cleaned_data.get('label')) if idx != i and clean_name == ff_clean_name: form.add_error( 'label', django.forms.ValidationError(_('There is another field with the label %s, please change one of them.' % label)) )
./CrossVul/dataset_final_sorted/CWE-79/py/bad_4097_1
crossvul-python_data_good_1456_0
# # Copyright (c) 2008--2015 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # import os from types import ListType from spacewalk.common import rhnFlags from spacewalk.common.rhnLog import log_debug, log_error from spacewalk.common.rhnConfig import CFG from spacewalk.common.rhnException import rhnFault from spacewalk.common.rhnTranslate import _ from spacewalk.server import rhnSQL from rhnLib import parseRPMFilename # # Functions that deal with the database # # New client # Returns a package path, given a server_id, package filename and channel label def get_package_path(server_id, pkg_spec, channel): log_debug(3, server_id, pkg_spec, channel) if isinstance(pkg_spec, ListType): pkg = pkg_spec[:4] # Insert EPOCH pkg.insert(1, None) else: pkg = parseRPMFilename(pkg_spec) if pkg is None: log_debug(4, "Error", "Requested weird package", pkg_spec) raise rhnFault(17, _("Invalid RPM package %s requested") % pkg_spec) statement = """ select p.id, p.path path, pe.epoch epoch from rhnPackageArch pa, rhnChannelPackage cp, rhnPackage p, rhnPackageEVR pe, rhnServerChannel sc, rhnPackageName pn, rhnChannel c where 1=1 and c.label = :channel and pn.name = :name and sc.server_id = :server_id and pe.version = :ver and pe.release = :rel and c.id = sc.channel_id and c.id = cp.channel_id and pa.label = :arch and pn.id = p.name_id and p.id = cp.package_id and p.evr_id = pe.id and sc.channel_id = cp.channel_id and p.package_arch_id = pa.id """ h = 
rhnSQL.prepare(statement) pkg = map(str, pkg) h.execute(name=pkg[0], ver=pkg[2], rel=pkg[3], arch=pkg[4], channel=channel, server_id=server_id) rs = h.fetchall_dict() if not rs: log_debug(4, "Error", "Non-existant package requested", server_id, pkg_spec, channel) raise rhnFault(17, _("Invalid RPM package %s requested") % pkg_spec) # It is unlikely for this query to return more than one row, # but it is possible # (having two packages with the same n, v, r, a and different epoch in # the same channel is prohibited by the RPM naming scheme; but extra # care won't hurt) max_row = rs[0] for each in rs[1:]: # Compare the epoch as string if _none2emptyString(each['epoch']) > _none2emptyString(max_row['epoch']): max_row = each # Set the flag for the proxy download accelerator rhnFlags.set("Download-Accelerator-Path", max_row['path']) return check_package_file(max_row['path'], max_row['id'], pkg_spec), max_row['id'] def check_package_file(rel_path, logpkg, raisepkg): if rel_path is None: log_error("Package path null for package id", logpkg) raise rhnFault(17, _("Invalid RPM package %s requested") % raisepkg) filePath = "%s/%s" % (CFG.MOUNT_POINT, rel_path) if not os.access(filePath, os.R_OK): # Package not found on the filesystem log_error("Package not found", filePath) raise rhnFault(17, _("Package not found")) return filePath def unlink_package_file(path): try: os.unlink(path) except OSError: log_debug(1, "Error unlinking %s;" % path) dirname = os.path.dirname(path) base_dirs = (CFG.MOUNT_POINT + '/' + CFG.PREPENDED_DIR, CFG.MOUNT_POINT) while dirname not in base_dirs: try: os.rmdir(dirname) except OSError, e: if e.errno == 39: # OSError: [Errno 39] Directory not empty break else: raise dirname = os.path.dirname(dirname) def get_all_package_paths(server_id, pkg_spec, channel): """ return the remote path if available and localpath for the requested package with respect to package id """ log_debug(3, server_id, pkg_spec, channel) remotepath = None # get the path and 
package localpath, pkg_id = get_package_path(server_id, pkg_spec, channel) return remotepath, localpath # New client # Returns the path to a source rpm def get_source_package_path(server_id, pkgFilename, channel): log_debug(3, server_id, pkgFilename, channel) rs = __query_source_package_path_by_name(server_id, pkgFilename, channel) if rs is None: log_debug(4, "Error", "Non-existant package requested", server_id, pkgFilename, channel) raise rhnFault(17, _("Invalid RPM package %s requested") % pkgFilename) # Set the flag for the proxy download accelerator rhnFlags.set("Download-Accelerator-Path", rs['path']) return check_package_file(rs['path'], pkgFilename, pkgFilename) # 0 or 1: is this source in this channel? def package_source_in_channel(server_id, pkgFilename, channel): log_debug(3, server_id, pkgFilename, channel) rs = __query_source_package_path_by_name(server_id, pkgFilename, channel) if rs is None: return 0 return 1 # The query used both in get_source_package_path and package_source_in_channel def __query_source_package_path_by_name(server_id, pkgFilename, channel): statement = """ select unique ps.path from rhnSourceRPM sr, rhnPackageSource ps, rhnPackage p, rhnChannelPackage cp, rhnChannel c, rhnServerChannel sc where sc.server_id = :server_id and sc.channel_id = cp.channel_id and cp.channel_id = c.id and c.label = :channel and cp.package_id = p.id and p.source_rpm_id = sr.id and sr.name = :name and p.source_rpm_id = ps.source_rpm_id and ((p.org_id is null and ps.org_id is null) or p.org_id = ps.org_id) """ h = rhnSQL.prepare(statement) h.execute(name=pkgFilename, channel=channel, server_id=server_id) return h.fetchone_dict() def get_info_for_package(pkg, channel_id, org_id): log_debug(3, pkg) pkg = map(str, pkg) params = {'name': pkg[0], 'ver': pkg[1], 'rel': pkg[2], 'epoch': pkg[3], 'arch': pkg[4], 'channel_id': channel_id, 'org_id': org_id} # yum repo has epoch="0" not only when epoch is "0" but also if it's NULL if pkg[3] == '0' or pkg[3] == '' or 
pkg[3]==None: epochStatement = "(epoch is null or epoch = :epoch)" else: epochStatement = "epoch = :epoch" if params['org_id']: orgStatement = "org_id = :org_id" else: orgStatement = "org_id is null" statement = """ select p.path, cp.channel_id, cv.checksum_type, cv.checksum from rhnPackage p join rhnPackageName pn on p.name_id = pn.id join rhnPackageEVR pe on p.evr_id = pe.id join rhnPackageArch pa on p.package_arch_id = pa.id left join rhnChannelPackage cp on p.id = cp.package_id and cp.channel_id = :channel_id join rhnChecksumView cv on p.checksum_id = cv.id where pn.name = :name and pe.version = :ver and pe.release = :rel and %s and pa.label = :arch and %s order by cp.channel_id nulls last """ % (epochStatement, orgStatement) h = rhnSQL.prepare(statement) h.execute(**params) ret = h.fetchone_dict() if not ret: return {'path': None, 'channel_id': None, 'checksum_type': None, 'checksum': None, } return ret def _none2emptyString(foo): if foo is None: return "" return str(foo) if __name__ == '__main__': """Test code. """ from spacewalk.common.rhnLog import initLOG initLOG("stdout", 1) rhnSQL.initDB() print # new client print get_package_path(1000463284, 'kernel-2.4.2-2.i686.rpm', 'redhat-linux-i386-7.1') print get_source_package_path(1000463284, 'kernel-2.4.2-2.i686.rpm', 'redhat-linux-i386-7.1')
./CrossVul/dataset_final_sorted/CWE-79/py/good_1456_0
crossvul-python_data_good_5190_1
from __future__ import unicode_literals import re import sys import types from django.conf import settings from django.core.urlresolvers import Resolver404, resolve from django.http import HttpResponse, HttpResponseNotFound from django.template import Context, Engine, TemplateDoesNotExist from django.template.defaultfilters import force_escape, pprint from django.utils import lru_cache, six, timezone from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_bytes, smart_text from django.utils.module_loading import import_string from django.utils.translation import ugettext as _ # Minimal Django templates engine to render the error templates # regardless of the project's TEMPLATES setting. DEBUG_ENGINE = Engine(debug=True) HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE') CLEANSED_SUBSTITUTE = '********************' class CallableSettingWrapper(object): """ Object to wrap callable appearing in settings * Not to call in the debug page (#21345). * Not to break the debug page if the callable forbidding to set attributes (#23070). """ def __init__(self, callable_setting): self._wrapped = callable_setting def __repr__(self): return repr(self._wrapped) def cleanse_setting(key, value): """Cleanse an individual setting key/value of sensitive content. If the value is a dictionary, recursively cleanse the keys in that dictionary. """ try: if HIDDEN_SETTINGS.search(key): cleansed = CLEANSED_SUBSTITUTE else: if isinstance(value, dict): cleansed = {k: cleanse_setting(k, v) for k, v in value.items()} else: cleansed = value except TypeError: # If the key isn't regex-able, just return as-is. cleansed = value if callable(cleansed): # For fixing #21345 and #23070 cleansed = CallableSettingWrapper(cleansed) return cleansed def get_safe_settings(): "Returns a dictionary of the settings module, with sensitive settings blurred out." 
settings_dict = {} for k in dir(settings): if k.isupper(): settings_dict[k] = cleanse_setting(k, getattr(settings, k)) return settings_dict def technical_500_response(request, exc_type, exc_value, tb, status_code=500): """ Create a technical server error response. The last three arguments are the values returned from sys.exc_info() and friends. """ reporter = ExceptionReporter(request, exc_type, exc_value, tb) if request.is_ajax(): text = reporter.get_traceback_text() return HttpResponse(text, status=status_code, content_type='text/plain') else: html = reporter.get_traceback_html() return HttpResponse(html, status=status_code, content_type='text/html') @lru_cache.lru_cache() def get_default_exception_reporter_filter(): # Instantiate the default filter for the first time and cache it. return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)() def get_exception_reporter_filter(request): default_filter = get_default_exception_reporter_filter() return getattr(request, 'exception_reporter_filter', default_filter) class ExceptionReporterFilter(object): """ Base for all exception reporter filter classes. All overridable hooks contain lenient default behaviors. """ def get_post_parameters(self, request): if request is None: return {} else: return request.POST def get_traceback_frame_variables(self, request, tb_frame): return list(tb_frame.f_locals.items()) class SafeExceptionReporterFilter(ExceptionReporterFilter): """ Use annotations made by the sensitive_post_parameters and sensitive_variables decorators to filter out sensitive information. """ def is_active(self, request): """ This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis. 
""" return settings.DEBUG is False def get_cleansed_multivaluedict(self, request, multivaluedict): """ Replaces the keys in a MultiValueDict marked as sensitive with stars. This mitigates leaking sensitive POST parameters if something like request.POST['nonexistent_key'] throws an exception (#21098). """ sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: multivaluedict = multivaluedict.copy() for param in sensitive_post_parameters: if param in multivaluedict: multivaluedict[param] = CLEANSED_SUBSTITUTE return multivaluedict def get_post_parameters(self, request): """ Replaces the values of POST parameters marked as sensitive with stars (*********). """ if request is None: return {} else: sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: cleansed = request.POST.copy() if sensitive_post_parameters == '__ALL__': # Cleanse all parameters. for k, v in cleansed.items(): cleansed[k] = CLEANSED_SUBSTITUTE return cleansed else: # Cleanse only the specified parameters. for param in sensitive_post_parameters: if param in cleansed: cleansed[param] = CLEANSED_SUBSTITUTE return cleansed else: return request.POST def cleanse_special_types(self, request, value): try: # If value is lazy or a complex object of another kind, this check # might raise an exception. isinstance checks that lazy # MultiValueDicts will have a return value. is_multivalue_dict = isinstance(value, MultiValueDict) except Exception as e: return '{!r} while evaluating {!r}'.format(e, value) if is_multivalue_dict: # Cleanse MultiValueDicts (request.POST is the one we usually care about) value = self.get_cleansed_multivaluedict(request, value) return value def get_traceback_frame_variables(self, request, tb_frame): """ Replaces the values of variables marked as sensitive with stars (*********). 
""" # Loop through the frame's callers to see if the sensitive_variables # decorator was used. current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in current_frame.f_locals): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. wrapper = current_frame.f_locals['sensitive_variables_wrapper'] sensitive_variables = getattr(wrapper, 'sensitive_variables', None) break current_frame = current_frame.f_back cleansed = {} if self.is_active(request) and sensitive_variables: if sensitive_variables == '__ALL__': # Cleanse all variables for name, value in tb_frame.f_locals.items(): cleansed[name] = CLEANSED_SUBSTITUTE else: # Cleanse specified variables for name, value in tb_frame.f_locals.items(): if name in sensitive_variables: value = CLEANSED_SUBSTITUTE else: value = self.cleanse_special_types(request, value) cleansed[name] = value else: # Potentially cleanse the request and any MultiValueDicts if they # are one of the frame variables. for name, value in tb_frame.f_locals.items(): cleansed[name] = self.cleanse_special_types(request, value) if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in tb_frame.f_locals): # For good measure, obfuscate the decorated function's arguments in # the sensitive_variables decorator's frame, in case the variables # associated with those arguments were meant to be obfuscated from # the decorated function's frame. cleansed['func_args'] = CLEANSED_SUBSTITUTE cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE return cleansed.items() class ExceptionReporter(object): """ A class to organize and coordinate reporting on exceptions. 
""" def __init__(self, request, exc_type, exc_value, tb, is_email=False): self.request = request self.filter = get_exception_reporter_filter(self.request) self.exc_type = exc_type self.exc_value = exc_value self.tb = tb self.is_email = is_email self.template_info = getattr(self.exc_value, 'template_debug', None) self.template_does_not_exist = False self.postmortem = None # Handle deprecated string exceptions if isinstance(self.exc_type, six.string_types): self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type) self.exc_type = type(self.exc_value) def get_traceback_data(self): """Return a dictionary containing traceback information.""" if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist): self.template_does_not_exist = True self.postmortem = self.exc_value.chain or [self.exc_value] frames = self.get_traceback_frames() for i, frame in enumerate(frames): if 'vars' in frame: frame_vars = [] for k, v in frame['vars']: v = pprint(v) # The force_escape filter assume unicode, make sure that works if isinstance(v, six.binary_type): v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input # Trim large blobs of data if len(v) > 4096: v = '%s... 
<trimmed %d bytes string>' % (v[0:4096], len(v)) frame_vars.append((k, force_escape(v))) frame['vars'] = frame_vars frames[i] = frame unicode_hint = '' if self.exc_type and issubclass(self.exc_type, UnicodeError): start = getattr(self.exc_value, 'start', None) end = getattr(self.exc_value, 'end', None) if start is not None and end is not None: unicode_str = self.exc_value.args[1] unicode_hint = smart_text( unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace' ) from django import get_version c = { 'is_email': self.is_email, 'unicode_hint': unicode_hint, 'frames': frames, 'request': self.request, 'filtered_POST': self.filter.get_post_parameters(self.request), 'settings': get_safe_settings(), 'sys_executable': sys.executable, 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3], 'server_time': timezone.now(), 'django_version_info': get_version(), 'sys_path': sys.path, 'template_info': self.template_info, 'template_does_not_exist': self.template_does_not_exist, 'postmortem': self.postmortem, } # Check whether exception info is available if self.exc_type: c['exception_type'] = self.exc_type.__name__ if self.exc_value: c['exception_value'] = smart_text(self.exc_value, errors='replace') if frames: c['lastframe'] = frames[-1] return c def get_traceback_html(self): "Return HTML version of debug 500 HTTP error page." t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE) c = Context(self.get_traceback_data(), use_l10n=False) return t.render(c) def get_traceback_text(self): "Return plain text version of debug 500 HTTP error page." t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE) c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False) return t.render(c) def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None): """ Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context). 
""" source = None if loader is not None and hasattr(loader, "get_source"): try: source = loader.get_source(module_name) except ImportError: pass if source is not None: source = source.splitlines() if source is None: try: with open(filename, 'rb') as fp: source = fp.read().splitlines() except (OSError, IOError): pass if source is None: return None, [], None, [] # If we just read the source from a file, or if the loader did not # apply tokenize.detect_encoding to decode the source into a Unicode # string, then we should do that ourselves. if isinstance(source[0], six.binary_type): encoding = 'ascii' for line in source[:2]: # File coding may be specified. Match pattern from PEP-263 # (http://www.python.org/dev/peps/pep-0263/) match = re.search(br'coding[:=]\s*([-\w.]+)', line) if match: encoding = match.group(1).decode('ascii') break source = [six.text_type(sline, encoding, 'replace') for sline in source] lower_bound = max(0, lineno - context_lines) upper_bound = lineno + context_lines pre_context = source[lower_bound:lineno] context_line = source[lineno] post_context = source[lineno + 1:upper_bound] return lower_bound, pre_context, context_line, post_context def get_traceback_frames(self): def explicit_or_implicit_cause(exc_value): explicit = getattr(exc_value, '__cause__', None) implicit = getattr(exc_value, '__context__', None) return explicit or implicit # Get the exception and all its causes exceptions = [] exc_value = self.exc_value while exc_value: exceptions.append(exc_value) exc_value = explicit_or_implicit_cause(exc_value) frames = [] # No exceptions were supplied to ExceptionReporter if not exceptions: return frames # In case there's just one exception (always in Python 2, # sometimes in Python 3), take the traceback from self.tb (Python 2 # doesn't have a __traceback__ attribute on Exception) exc_value = exceptions.pop() tb = self.tb if six.PY2 or not exceptions else exc_value.__traceback__ while tb is not None: # Support for __traceback_hide__ which is 
used by a few libraries # to hide internal frames. if tb.tb_frame.f_locals.get('__traceback_hide__'): tb = tb.tb_next continue filename = tb.tb_frame.f_code.co_filename function = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno - 1 loader = tb.tb_frame.f_globals.get('__loader__') module_name = tb.tb_frame.f_globals.get('__name__') or '' pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file( filename, lineno, 7, loader, module_name, ) if pre_context_lineno is not None: frames.append({ 'exc_cause': explicit_or_implicit_cause(exc_value), 'exc_cause_explicit': getattr(exc_value, '__cause__', True), 'tb': tb, 'type': 'django' if module_name.startswith('django.') else 'user', 'filename': filename, 'function': function, 'lineno': lineno + 1, 'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame), 'id': id(tb), 'pre_context': pre_context, 'context_line': context_line, 'post_context': post_context, 'pre_context_lineno': pre_context_lineno + 1, }) # If the traceback for current exception is consumed, try the # other exception. if six.PY2: tb = tb.tb_next elif not tb.tb_next and exceptions: exc_value = exceptions.pop() tb = exc_value.__traceback__ else: tb = tb.tb_next return frames def format_exception(self): """ Return the same data as from traceback.format_exception. """ import traceback frames = self.get_traceback_frames() tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames] list = ['Traceback (most recent call last):\n'] list += traceback.format_list(tb) list += traceback.format_exception_only(self.exc_type, self.exc_value) return list def technical_404_response(request, exception): "Create a technical 404 error response. The exception should be the Http404." 
try: error_url = exception.args[0]['path'] except (IndexError, TypeError, KeyError): error_url = request.path_info[1:] # Trim leading slash try: tried = exception.args[0]['tried'] except (IndexError, TypeError, KeyError): tried = [] else: if (not tried # empty URLconf or (request.path == '/' and len(tried) == 1 # default URLconf and len(tried[0]) == 1 and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')): return default_urlconf(request) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ caller = '' try: resolver_match = resolve(request.path) except Resolver404: pass else: obj = resolver_match.func if hasattr(obj, '__name__'): caller = obj.__name__ elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'): caller = obj.__class__.__name__ if hasattr(obj, '__module__'): module = obj.__module__ caller = '%s.%s' % (module, caller) t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE) c = Context({ 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'reason': force_bytes(exception, errors='replace'), 'request': request, 'settings': get_safe_settings(), 'raising_view_name': caller, }) return HttpResponseNotFound(t.render(c), content_type='text/html') def default_urlconf(request): "Create an empty URLconf 404 error response." t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE) c = Context({ "title": _("Welcome to Django"), "heading": _("It worked!"), "subheading": _("Congratulations on your first Django-powered page."), "instructions": _("Of course, you haven't actually done any work yet. " "Next, start your first app by running <code>python manage.py startapp [app_label]</code>."), "explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your " "Django settings file and you haven't configured any URLs. 
Get to work!"), }) return HttpResponse(t.render(c), content_type='text/html') # # Templates are embedded in the file so that we know the error handler will # always work even if the template loader is broken. # TECHNICAL_500_TEMPLATE = (""" <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"> <title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}""" """{% if request %} at {{ request.path_info|escape }}{% endif %}</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; } h2 { margin-bottom:.8em; } h2 span { font-size:80%; color:#666; font-weight:normal; } h3 { margin:1em 0 .5em 0; } h4 { margin:0 0 .5em 0; font-weight: normal; } code, pre { font-size: 100%; white-space: pre-wrap; } table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; } tbody td, tbody th { vertical-align:top; padding:2px 3px; } thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; } tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; } table.vars { margin:5px 0 2px 40px; } table.vars td, table.req td { font-family:monospace; } table td.code { width:100%; } table td.code pre { overflow:hidden; } table.source th { color:#666; } table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; } ul.traceback { list-style-type:none; color: #222; } ul.traceback li.frame { padding-bottom:1em; color:#666; } ul.traceback li.user { background-color:#e0e0e0; color:#000 } div.context { padding:10px 0; overflow:hidden; } div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; } div.context ol li { font-family:monospace; white-space:pre; color:#777; 
cursor:pointer; padding-left: 2px; } div.context ol li pre { display:inline; } div.context ol.context-line li { color:#505050; background-color:#dfdfdf; padding: 3px 2px; } div.context ol.context-line li span { position:absolute; right:32px; } .user div.context ol.context-line li { background-color:#bbb; color:#000; } .user div.context ol li { color:#666; } div.commands { margin-left: 40px; } div.commands a { color:#555; text-decoration:none; } .user div.commands a { color: black; } #summary { background: #ffc; } #summary h2 { font-weight: normal; color: #666; } #explanation { background:#eee; } #template, #template-not-exist { background:#f6f6f6; } #template-not-exist ul { margin: 0 0 10px 20px; } #template-not-exist .postmortem-section { margin-bottom: 3px; } #unicode-hint { background:#eee; } #traceback { background:#eee; } #requestinfo { background:#f6f6f6; padding-left:120px; } #summary table { border:none; background:transparent; } #requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; } #requestinfo h3 { margin-bottom:-1em; } .error { background: #ffc; } .specific { color:#cc3300; font-weight:bold; } h2 span.commands { font-size:.7em;} span.commands a:link {color:#5E5694;} pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; } .append-bottom { margin-bottom: 10px; } </style> {% if not is_email %} <script type="text/javascript"> //<!-- function getElementsByClassName(oElm, strTagName, strClassName){ // Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com var arrElements = (strTagName == "*" && document.all)? 
document.all : oElm.getElementsByTagName(strTagName); var arrReturnElements = new Array(); strClassName = strClassName.replace(/\-/g, "\\-"); var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)"); var oElement; for(var i=0; i<arrElements.length; i++){ oElement = arrElements[i]; if(oRegExp.test(oElement.className)){ arrReturnElements.push(oElement); } } return (arrReturnElements) } function hideAll(elems) { for (var e = 0; e < elems.length; e++) { elems[e].style.display = 'none'; } } window.onload = function() { hideAll(getElementsByClassName(document, 'table', 'vars')); hideAll(getElementsByClassName(document, 'ol', 'pre-context')); hideAll(getElementsByClassName(document, 'ol', 'post-context')); hideAll(getElementsByClassName(document, 'div', 'pastebin')); } function toggle() { for (var i = 0; i < arguments.length; i++) { var e = document.getElementById(arguments[i]); if (e) { e.style.display = e.style.display == 'none' ? 'block': 'none'; } } return false; } function varToggle(link, id) { toggle('v' + id); var s = link.getElementsByTagName('span')[0]; var uarr = String.fromCharCode(0x25b6); var darr = String.fromCharCode(0x25bc); s.textContent = s.textContent == uarr ? darr : uarr; return false; } function switchPastebinFriendly(link) { s1 = "Switch to copy-and-paste view"; s2 = "Switch back to interactive view"; link.textContent = link.textContent.trim() == s1 ? 
s2: s1; toggle('browserTraceback', 'pastebinTraceback'); return false; } //--> </script> {% endif %} </head> <body> <div id="summary"> <h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}""" """{% if request %} at {{ request.path_info|escape }}{% endif %}</h1> <pre class="exception_value">""" """{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}""" """</pre> <table class="meta"> {% if request %} <tr> <th>Request Method:</th> <td>{{ request.META.REQUEST_METHOD }}</td> </tr> <tr> <th>Request URL:</th> <td>{{ request.get_raw_uri|escape }}</td> </tr> {% endif %} <tr> <th>Django Version:</th> <td>{{ django_version_info }}</td> </tr> {% if exception_type %} <tr> <th>Exception Type:</th> <td>{{ exception_type }}</td> </tr> {% endif %} {% if exception_type and exception_value %} <tr> <th>Exception Value:</th> <td><pre>{{ exception_value|force_escape }}</pre></td> </tr> {% endif %} {% if lastframe %} <tr> <th>Exception Location:</th> <td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td> </tr> {% endif %} <tr> <th>Python Executable:</th> <td>{{ sys_executable|escape }}</td> </tr> <tr> <th>Python Version:</th> <td>{{ sys_version_info }}</td> </tr> <tr> <th>Python Path:</th> <td><pre>{{ sys_path|pprint }}</pre></td> </tr> <tr> <th>Server time:</th> <td>{{server_time|date:"r"}}</td> </tr> </table> </div> {% if unicode_hint %} <div id="unicode-hint"> <h2>Unicode error hint</h2> <p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p> </div> {% endif %} {% if template_does_not_exist %} <div id="template-not-exist"> <h2>Template-loader postmortem</h2> {% if postmortem %} <p class="append-bottom">Django tried loading these templates, in this order:</p> {% for entry in postmortem %} <p class="postmortem-section">Using engine <code>{{ entry.backend.name }}</code>:</p> <ul> {% if entry.tried %} {% for 
attempt in entry.tried %} <li><code>{{ attempt.0.loader_name }}</code>: {{ attempt.0.name }} ({{ attempt.1 }})</li> {% endfor %} </ul> {% else %} <li>This engine did not provide a list of tried templates.</li> {% endif %} </ul> {% endfor %} {% else %} <p>No templates were found because your 'TEMPLATES' setting is not configured.</p> {% endif %} </div> {% endif %} {% if template_info %} <div id="template"> <h2>Error during template rendering</h2> <p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p> <h3>{{ template_info.message }}</h3> <table class="source{% if template_info.top %} cut-top{% endif %} {% if template_info.bottom != template_info.total %} cut-bottom{% endif %}"> {% for source_line in template_info.source_lines %} {% if source_line.0 == template_info.line %} <tr class="error"><th>{{ source_line.0 }}</th> <td>{{ template_info.before }}""" """<span class="specific">{{ template_info.during }}</span>""" """{{ template_info.after }}</td> </tr> {% else %} <tr><th>{{ source_line.0 }}</th> <td>{{ source_line.1 }}</td></tr> {% endif %} {% endfor %} </table> </div> {% endif %} {% if frames %} <div id="traceback"> <h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);"> Switch to copy-and-paste view</a></span>{% endif %} </h2> {% autoescape off %} <div id="browserTraceback"> <ul class="traceback"> {% for frame in frames %} {% ifchanged frame.exc_cause %}{% if frame.exc_cause %} <li><h3> {% if frame.exc_cause_explicit %} The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception: {% else %} During handling of the above exception ({{ frame.exc_cause }}), another exception occurred: {% endif %} </h3></li> {% endif %}{% endifchanged %} <li class="frame {{ frame.type }}"> <code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code> {% if frame.context_line %} <div class="context" id="c{{ 
frame.id }}"> {% if frame.pre_context and not is_email %} <ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}"> {% for line in frame.pre_context %} <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li> {% endfor %} </ol> {% endif %} <ol start="{{ frame.lineno }}" class="context-line"> <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre> """ """{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol> {% if frame.post_context and not is_email %} <ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}"> {% for line in frame.post_context %} <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li> {% endfor %} </ol> {% endif %} </div> {% endif %} {% if frame.vars %} <div class="commands"> {% if is_email %} <h2>Local Vars</h2> {% else %} <a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>&#x25b6;</span> Local vars</a> {% endif %} </div> <table class="vars" id="v{{ frame.id }}"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in frame.vars|dictsort:"0" %} <tr> <td>{{ var.0|force_escape }}</td> <td class="code"><pre>{{ var.1 }}</pre></td> </tr> {% endfor %} </tbody> </table> {% endif %} </li> {% endfor %} </ul> </div> {% endautoescape %} <form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post"> {% if not is_email %} <div id="pastebinTraceback" class="pastebin"> <input type="hidden" name="language" value="PythonConsole"> <input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}"> <input type="hidden" name="source" value="Django Dpaste Agent"> <input type="hidden" name="poster" value="Django"> <textarea name="content" id="traceback_area" cols="140" rows="25"> Environment: {% if request %} Request Method: {{ request.META.REQUEST_METHOD 
}} Request URL: {{ request.get_raw_uri|escape }} {% endif %} Django Version: {{ django_version_info }} Python Version: {{ sys_version_info }} Installed Applications: {{ settings.INSTALLED_APPS|pprint }} Installed Middleware: {{ settings.MIDDLEWARE_CLASSES|pprint }} {% if template_does_not_exist %}Template loader postmortem {% if postmortem %}Django tried loading these templates, in this order: {% for entry in postmortem %} Using engine {{ entry.backend.name }}: {% if entry.tried %}{% for attempt in entry.tried %}""" """ * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }}) {% endfor %}{% else %} This engine did not provide a list of tried templates. {% endif %}{% endfor %} {% else %}No templates were found because your 'TEMPLATES' setting is not configured. {% endif %}{% endif %}{% if template_info %} Template error: In template {{ template_info.name }}, error at line {{ template_info.line }} {{ template_info.message }}""" "{% for source_line in template_info.source_lines %}" "{% if source_line.0 == template_info.line %}" " {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}" "{% else %}" " {{ source_line.0 }} : {{ source_line.1 }}" """{% endif %}{% endfor %}{% endif %} Traceback:{% for frame in frames %} {% ifchanged frame.exc_cause %}{% if frame.exc_cause %}{% if frame.exc_cause_explicit %} The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception: {% else %} During handling of the above exception ({{ frame.exc_cause }}), another exception occurred: {% endif %}{% endif %}{% endifchanged %} File "{{ frame.filename|escape }}" in {{ frame.function|escape }} {% if frame.context_line %} {{ frame.lineno }}. 
{{ frame.context_line|escape }}{% endif %}{% endfor %} Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %} Exception Value: {{ exception_value|force_escape }} </textarea> <br><br> <input type="submit" value="Share this traceback on a public website"> </div> </form> </div> {% endif %} {% endif %} <div id="requestinfo"> <h2>Request information</h2> {% if request %} <h3 id="get-info">GET</h3> {% if request.GET %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.GET.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No GET data</p> {% endif %} <h3 id="post-info">POST</h3> {% if filtered_POST %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in filtered_POST.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No POST data</p> {% endif %} <h3 id="files-info">FILES</h3> {% if request.FILES %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.FILES.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No FILES data</p> {% endif %} <h3 id="cookie-info">COOKIES</h3> {% if request.COOKIES %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.COOKIES.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No cookie data</p> {% endif %} <h3 id="meta-info">META</h3> <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.META.items|dictsort:"0" %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ 
var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>Request data not supplied</p> {% endif %} <h3 id="settings-info">Settings</h3> <h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4> <table class="req"> <thead> <tr> <th>Setting</th> <th>Value</th> </tr> </thead> <tbody> {% for var in settings.items|dictsort:"0" %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> </div> {% if not is_email %} <div id="explanation"> <p> You're seeing this error because you have <code>DEBUG = True</code> in your Django settings file. Change that to <code>False</code>, and Django will display a standard page generated by the handler for this status code. </p> </div> {% endif %} </body> </html> """) TECHNICAL_500_TEXT_TEMPLATE = ("""""" """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %} {% firstof exception_value 'No exception message supplied' %} {% if request %} Request Method: {{ request.META.REQUEST_METHOD }} Request URL: {{ request.get_raw_uri }}{% endif %} Django Version: {{ django_version_info }} Python Executable: {{ sys_executable }} Python Version: {{ sys_version_info }} Python Path: {{ sys_path }} Server time: {{server_time|date:"r"}} Installed Applications: {{ settings.INSTALLED_APPS|pprint }} Installed Middleware: {{ settings.MIDDLEWARE_CLASSES|pprint }} {% if template_does_not_exist %}Template loader postmortem {% if postmortem %}Django tried loading these templates, in this order: {% for entry in postmortem %} Using engine {{ entry.backend.name }}: {% if entry.tried %}{% for attempt in entry.tried %}""" """ * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }}) {% endfor %}{% else %} This engine did not provide a list of tried templates. {% endif %}{% endfor %} {% else %}No templates were found because your 'TEMPLATES' setting is not configured. 
{% endif %} {% endif %}{% if template_info %} Template error: In template {{ template_info.name }}, error at line {{ template_info.line }} {{ template_info.message }} {% for source_line in template_info.source_lines %}""" "{% if source_line.0 == template_info.line %}" " {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}" "{% else %}" " {{ source_line.0 }} : {{ source_line.1 }}" """{% endif %}{% endfor %}{% endif %}{% if frames %} Traceback:""" "{% for frame in frames %}" "{% ifchanged frame.exc_cause %}" " {% if frame.exc_cause %}" """ {% if frame.exc_cause_explicit %} The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception: {% else %} During handling of the above exception ({{ frame.exc_cause }}), another exception occurred: {% endif %} {% endif %} {% endifchanged %} File "{{ frame.filename }}" in {{ frame.function }} {% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %} {% endfor %} {% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %} {% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %} {% if request %}Request information: GET:{% for k, v in request.GET.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %} POST:{% for k, v in filtered_POST.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %} FILES:{% for k, v in request.FILES.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %} COOKIES:{% for k, v in request.COOKIES.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %} META:{% for k, v in request.META.items|dictsort:"0" %} {{ k }} = {{ v|stringformat:"r" }}{% endfor %} {% else %}Request data not supplied {% endif %} Settings: Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %} {{ k }} = {{ 
v|stringformat:"r" }}{% endfor %} {% if not is_email %} You're seeing this error because you have DEBUG = True in your Django settings file. Change that to False, and Django will display a standard page generated by the handler for this status code. {% endif %} """) TECHNICAL_404_TEMPLATE = """ <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <title>Page not found at {{ request.path_info|escape }}</title> <meta name="robots" content="NONE,NOARCHIVE"> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; background:#eee; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; margin-bottom:.4em; } h1 span { font-size:60%; color:#666; font-weight:normal; } table { border:none; border-collapse: collapse; width:100%; } td, th { vertical-align:top; padding:2px 3px; } th { width:12em; text-align:right; color:#666; padding-right:.5em; } #info { background:#f6f6f6; } #info ol { margin: 0.5em 4em; } #info ol li { font-family: monospace; } #summary { background: #ffc; } #explanation { background:#eee; border-bottom: 0px none; } </style> </head> <body> <div id="summary"> <h1>Page not found <span>(404)</span></h1> <table class="meta"> <tr> <th>Request Method:</th> <td>{{ request.META.REQUEST_METHOD }}</td> </tr> <tr> <th>Request URL:</th> <td>{{ request.build_absolute_uri|escape }}</td> </tr> {% if raising_view_name %} <tr> <th>Raised by:</th> <td>{{ raising_view_name }}</td> </tr> {% endif %} </table> </div> <div id="info"> {% if urlpatterns %} <p> Using the URLconf defined in <code>{{ urlconf }}</code>, Django tried these URL patterns, in this order: </p> <ol> {% for pattern in urlpatterns %} <li> {% for pat in pattern %} {{ pat.regex.pattern }} {% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %} {% endfor %} </li> {% endfor %} </ol> <p>The current URL, <code>{{ request_path|escape }}</code>, didn't 
match any of these.</p> {% else %} <p>{{ reason }}</p> {% endif %} </div> <div id="explanation"> <p> You're seeing this error because you have <code>DEBUG = True</code> in your Django settings file. Change that to <code>False</code>, and Django will display a standard 404 page. </p> </div> </body> </html> """ DEFAULT_URLCONF_TEMPLATE = """ <!DOCTYPE html> <html lang="en"><head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; } h2 { margin-bottom:.8em; } h2 span { font-size:80%; color:#666; font-weight:normal; } h3 { margin:1em 0 .5em 0; } h4 { margin:0 0 .5em 0; font-weight: normal; } table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; } tbody td, tbody th { vertical-align:top; padding:2px 3px; } thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; } tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; } #summary { background: #e0ebff; } #summary h2 { font-weight: normal; color: #666; } #explanation { background:#eee; } #instructions { background:#f6f6f6; } #summary table { border:none; background:transparent; } </style> </head> <body> <div id="summary"> <h1>{{ heading }}</h1> <h2>{{ subheading }}</h2> </div> <div id="instructions"> <p> {{ instructions|safe }} </p> </div> <div id="explanation"> <p> {{ explanation|safe }} </p> </div> </body></html> """
./CrossVul/dataset_final_sorted/CWE-79/py/good_5190_1
crossvul-python_data_good_3149_0
# -*-python-*- # # Copyright (C) 1999-2016 The ViewCVS Group. All Rights Reserved. # # By using this file, you agree to the terms and conditions set forth in # the LICENSE.html file which can be found at the top level of the ViewVC # distribution or at http://viewvc.org/license-1.html. # # For more information, visit http://viewvc.org/ # # ----------------------------------------------------------------------- # # viewvc: View CVS/SVN repositories via a web browser # # ----------------------------------------------------------------------- __version__ = '1.1.26-dev' # this comes from our library; measure the startup time import debug debug.t_start('startup') debug.t_start('imports') # standard modules that we know are in the path or builtin import sys import os import fnmatch import gzip import mimetypes import re import rfc822 import stat import string import struct import tempfile import time import types import urllib # These modules come from our library (the stub has set up the path) import accept import compat import config import ezt import popen import sapi import vcauth import vclib import vclib.ccvs import vclib.svn try: import idiff except (SyntaxError, ImportError): idiff = None debug.t_end('imports') ######################################################################### checkout_magic_path = '*checkout*' # According to RFC 1738 the '~' character is unsafe in URLs. # But for compatibility with URLs bookmarked with old releases of ViewCVS: oldstyle_checkout_magic_path = '~checkout~' docroot_magic_path = '*docroot*' viewcvs_mime_type = 'text/vnd.viewcvs-markup' alt_mime_type = 'text/x-cvsweb-markup' view_roots_magic = '*viewroots*' # Put here the variables we need in order to hold our state - they # will be added (with their current value) to (almost) any link/query # string you construct. 
_sticky_vars = [ 'hideattic', 'sortby', 'sortdir', 'logsort', 'diff_format', 'search', 'limit_changes', ] # for reading/writing between a couple descriptors CHUNK_SIZE = 8192 # for rcsdiff processing of header _RCSDIFF_IS_BINARY = 'binary-diff' _RCSDIFF_ERROR = 'error' # special characters that don't need to be URL encoded _URL_SAFE_CHARS = "/*~" class Request: def __init__(self, server, cfg): self.server = server self.cfg = cfg self.script_name = _normalize_path(server.getenv('SCRIPT_NAME', '')) self.browser = server.getenv('HTTP_USER_AGENT', 'unknown') # process the Accept-Language: header, and load the key/value # files, given the selected language hal = server.getenv('HTTP_ACCEPT_LANGUAGE','') try: self.lang_selector = accept.language(hal) except accept.AcceptLanguageParseError: self.lang_selector = accept.language('en') self.language = self.lang_selector.select_from(cfg.general.languages) self.kv = cfg.load_kv_files(self.language) # check for an authenticated username self.username = server.getenv('REMOTE_USER') # if we allow compressed output, see if the client does too self.gzip_compress_level = 0 if cfg.options.allow_compress: http_accept_encoding = os.environ.get("HTTP_ACCEPT_ENCODING", "") if "gzip" in filter(None, map(lambda x: string.strip(x), string.split(http_accept_encoding, ","))): self.gzip_compress_level = 9 # make this configurable? def run_viewvc(self): cfg = self.cfg # This function first parses the query string and sets the following # variables. Then it executes the request. 
self.view_func = None # function to call to process the request self.repos = None # object representing current repository self.rootname = None # name of current root (as used in viewvc.conf) self.roottype = None # current root type ('svn' or 'cvs') self.rootpath = None # physical path to current root self.pathtype = None # type of path, either vclib.FILE or vclib.DIR self.where = None # path to file or directory in current root self.query_dict = {} # validated and cleaned up query options self.path_parts = None # for convenience, equals where.split('/') self.pathrev = None # current path revision or tag self.auth = None # authorizer module in use # redirect if we're loading from a valid but irregular URL # These redirects aren't neccessary to make ViewVC work, it functions # just fine without them, but they make it easier for server admins to # implement access restrictions based on URL needs_redirect = 0 # Process the query params for name, values in self.server.params().items(): # we only care about the first value value = values[0] # patch up old queries that use 'cvsroot' to look like they used 'root' if name == 'cvsroot': name = 'root' needs_redirect = 1 # same for 'only_with_tag' and 'pathrev' if name == 'only_with_tag': name = 'pathrev' needs_redirect = 1 # redirect view=rev to view=revision, too if name == 'view' and value == 'rev': value = 'revision' needs_redirect = 1 # validate the parameter _validate_param(name, value) # if we're here, then the parameter is okay self.query_dict[name] = value # Resolve the view parameter into a handler function. self.view_func = _views.get(self.query_dict.get('view', None), self.view_func) # Process PATH_INFO component of query string path_info = self.server.getenv('PATH_INFO', '') # clean it up. this removes duplicate '/' characters and any that may # exist at the front or end of the path. 
### we might want to redirect to the cleaned up URL path_parts = _path_parts(path_info) if path_parts: # handle magic path prefixes if path_parts[0] == docroot_magic_path: # if this is just a simple hunk of doc, then serve it up self.where = _path_join(path_parts[1:]) return view_doc(self) elif path_parts[0] in (checkout_magic_path, oldstyle_checkout_magic_path): path_parts.pop(0) self.view_func = view_checkout if not cfg.options.checkout_magic: needs_redirect = 1 # handle tarball magic suffixes if self.view_func is download_tarball: if (self.query_dict.get('parent')): del path_parts[-1] elif path_parts[-1][-7:] == ".tar.gz": path_parts[-1] = path_parts[-1][:-7] # Figure out root name self.rootname = self.query_dict.get('root') if self.rootname == view_roots_magic: del self.query_dict['root'] self.rootname = "" needs_redirect = 1 elif self.rootname is None: if cfg.options.root_as_url_component: if path_parts: self.rootname = path_parts.pop(0) else: self.rootname = "" elif self.view_func != view_roots: self.rootname = cfg.general.default_root elif cfg.options.root_as_url_component: needs_redirect = 1 # Take care of old-new roots mapping for old_root, new_root in cfg.general.renamed_roots.items(): if self.rootname == old_root: self.rootname = new_root needs_redirect = 1 self.where = _path_join(path_parts) self.path_parts = path_parts if self.rootname: roottype, rootpath = locate_root(cfg, self.rootname) if roottype: # Overlay root-specific options. 
cfg.overlay_root_options(self.rootname) # Setup an Authorizer for this rootname and username debug.t_start('setup-authorizer') self.auth = setup_authorizer(cfg, self.username) debug.t_end('setup-authorizer') # Create the repository object debug.t_start('select-repos') try: if roottype == 'cvs': self.rootpath = vclib.ccvs.canonicalize_rootpath(rootpath) self.repos = vclib.ccvs.CVSRepository(self.rootname, self.rootpath, self.auth, cfg.utilities, cfg.options.use_rcsparse) # required so that spawned rcs programs correctly expand # $CVSHeader$ os.environ['CVSROOT'] = self.rootpath elif roottype == 'svn': self.rootpath = vclib.svn.canonicalize_rootpath(rootpath) self.repos = vclib.svn.SubversionRepository(self.rootname, self.rootpath, self.auth, cfg.utilities, cfg.options.svn_config_dir) else: raise vclib.ReposNotFound() except vclib.ReposNotFound: pass debug.t_end('select-repos') if self.repos is None: raise debug.ViewVCException( 'The root "%s" is unknown. If you believe the value is ' 'correct, then please double-check your configuration.' % self.rootname, "404 Not Found") if self.repos: debug.t_start('select-repos') self.repos.open() debug.t_end('select-repos') type = self.repos.roottype() if type == vclib.SVN: self.roottype = 'svn' elif type == vclib.CVS: self.roottype = 'cvs' else: raise debug.ViewVCException( 'The root "%s" has an unknown type ("%s"). Expected "cvs" or "svn".' % (self.rootname, type), "500 Internal Server Error") # If this is using an old-style 'rev' parameter, redirect to new hotness. # Subversion URLs will now use 'pathrev'; CVS ones use 'revision'. if self.repos and self.query_dict.has_key('rev'): if self.roottype == 'svn' \ and not self.query_dict.has_key('pathrev') \ and not self.view_func == view_revision: self.query_dict['pathrev'] = self.query_dict['rev'] del self.query_dict['rev'] else: # elif not self.query_dict.has_key('revision'): ? 
self.query_dict['revision'] = self.query_dict['rev'] del self.query_dict['rev'] needs_redirect = 1 if self.repos and self.view_func is not redirect_pathrev: # If this is an intended-to-be-hidden CVSROOT path, complain. if cfg.options.hide_cvsroot \ and is_cvsroot_path(self.roottype, path_parts): raise debug.ViewVCException("Unknown location: /%s" % self.where, "404 Not Found") # Make sure path exists self.pathrev = pathrev = self.query_dict.get('pathrev') self.pathtype = _repos_pathtype(self.repos, path_parts, pathrev) if self.pathtype is None: # Path doesn't exist, see if it could be an old-style ViewVC URL # with a fake suffix. result = _strip_suffix('.diff', path_parts, pathrev, vclib.FILE, \ self.repos, view_diff) or \ _strip_suffix('.tar.gz', path_parts, pathrev, vclib.DIR, \ self.repos, download_tarball) or \ _strip_suffix('root.tar.gz', path_parts, pathrev, vclib.DIR,\ self.repos, download_tarball) or \ _strip_suffix(self.rootname + '-root.tar.gz', \ path_parts, pathrev, vclib.DIR, \ self.repos, download_tarball) or \ _strip_suffix('root', \ path_parts, pathrev, vclib.DIR, \ self.repos, download_tarball) or \ _strip_suffix(self.rootname + '-root', \ path_parts, pathrev, vclib.DIR, \ self.repos, download_tarball) if result: self.path_parts, self.pathtype, self.view_func = result self.where = _path_join(self.path_parts) needs_redirect = 1 else: raise debug.ViewVCException("Unknown location: /%s" % self.where, "404 Not Found") # If we have an old ViewCVS Attic URL which is still valid, redirect if self.roottype == 'cvs': attic_parts = None if (self.pathtype == vclib.FILE and len(self.path_parts) > 1 and self.path_parts[-2] == 'Attic'): attic_parts = self.path_parts[:-2] + self.path_parts[-1:] elif (self.pathtype == vclib.DIR and len(self.path_parts) > 0 and self.path_parts[-1] == 'Attic'): attic_parts = self.path_parts[:-1] if attic_parts: self.path_parts = attic_parts self.where = _path_join(attic_parts) needs_redirect = 1 if self.view_func is None: # view 
parameter is not set, try looking at pathtype and the # other parameters if not self.rootname: self.view_func = view_roots elif self.pathtype == vclib.DIR: # ViewCVS 0.9.2 used to put ?tarball=1 at the end of tarball urls if self.query_dict.has_key('tarball'): self.view_func = download_tarball else: self.view_func = view_directory elif self.pathtype == vclib.FILE: if self.query_dict.has_key('r1') and self.query_dict.has_key('r2'): self.view_func = view_diff elif self.query_dict.has_key('annotate'): self.view_func = view_annotate elif self.query_dict.has_key('graph'): if not self.query_dict.has_key('makeimage'): self.view_func = view_cvsgraph else: self.view_func = view_cvsgraph_image elif self.query_dict.has_key('revision') \ or cfg.options.default_file_view != "log": if cfg.options.default_file_view == "markup" \ or self.query_dict.get('content-type', None) \ in (viewcvs_mime_type, alt_mime_type): self.view_func = view_markup else: self.view_func = view_checkout else: self.view_func = view_log # If we've chosen the roots or revision view, our effective # location is not really "inside" the repository, so we have no # path and therefore no path parts or type, either. if self.view_func is view_revision or self.view_func is view_roots: self.where = '' self.path_parts = [] self.pathtype = None # if we have a directory and the request didn't end in "/", then redirect # so that it does. if (self.pathtype == vclib.DIR and path_info[-1:] != '/' and self.view_func is not download_tarball and self.view_func is not redirect_pathrev): needs_redirect = 1 # startup is done now. debug.t_end('startup') # If we need to redirect, do so. Otherwise, handle our requested view. 
    # Either redirect to the canonicalized URL, or dispatch to the view
    # handler chosen above.
    if needs_redirect:
      self.server.redirect(self.get_url())
    else:
      debug.t_start('view-func')
      self.view_func(self)
      debug.t_end('view-func')

  def get_url(self, escape=0, partial=0, prefix=0, **args):
    """Constructs a link to another ViewVC page just like the get_link
    function except that it returns a single URL instead of a URL
    split into components.

    If PREFIX is set, include the protocol and server name portions of
    the URL.

    If PARTIAL is set, the returned URL ends with '?' or '&' so the
    caller can append additional query arguments.

    If ESCAPE is set, the result is HTML-escaped for embedding in a
    page."""

    url, params = apply(self.get_link, (), args)
    qs = compat.urlencode(params)
    if qs:
      result = urllib.quote(url, _URL_SAFE_CHARS) + '?' + qs
    else:
      result = urllib.quote(url, _URL_SAFE_CHARS)

    if partial:
      # leave a dangling separator so more query args can be appended
      result = result + (qs and '&' or '?')
    if escape:
      result = self.server.escape(result)
    if prefix:
      result = '%s://%s%s' % \
               (self.server.getenv("HTTPS") == "on" and "https" or "http",
                self.server.getenv("HTTP_HOST"),
                result)
    return result

  def get_form(self, **args):
    """Constructs a link to another ViewVC page just like the get_link
    function except that it returns a base URL suitable for use as an
    HTML form action, and an iterable object with .name and .value
    attributes representing stuff that should be in <input
    type=hidden> tags with the link parameters."""

    url, params = apply(self.get_link, (), args)
    # HTML-escape both the action URL and the hidden name/value pairs,
    # since all of them land directly in the generated page.
    action = self.server.escape(urllib.quote(url, _URL_SAFE_CHARS))
    hidden_values = []
    for name, value in params.items():
      hidden_values.append(_item(name=self.server.escape(name),
                                 value=self.server.escape(value)))
    return action, hidden_values

  def get_link(self, view_func=None, where=None, pathtype=None, params=None):
    """Constructs a link pointing to another ViewVC page. All arguments
    correspond to members of the Request object. If they are set to
    None they take values from the current page. Return value is a base
    URL and a dictionary of parameters"""

    cfg = self.cfg

    if view_func is None:
      view_func = self.view_func

    # copy the parameter dict so callers' dicts (or our own query_dict)
    # are never mutated below
    if params is None:
      params = self.query_dict.copy()
    else:
      params = params.copy()

    # must specify both where and pathtype or neither
    assert (where is None) == (pathtype is None)

    # if we are asking for the revision info view, we don't need any
    # path information
    if (view_func is view_revision or view_func is view_roots
        or view_func is redirect_pathrev):
      where = pathtype = None
    elif where is None:
      where = self.where
      pathtype = self.pathtype

    # no need to add sticky variables for views with no links
    sticky_vars = not (view_func is view_checkout
                       or view_func is download_tarball)

    # The logic used to construct the URL is an inverse of the
    # logic used to interpret URLs in Request.run_viewvc

    url = self.script_name

    # add checkout magic if neccessary
    if view_func is view_checkout and cfg.options.checkout_magic:
      url = url + '/' + checkout_magic_path

    # add root to url
    rootname = None
    if view_func is not view_roots:
      if cfg.options.root_as_url_component:
        # remove root from parameter list if present
        try:
          rootname = params['root']
        except KeyError:
          rootname = self.rootname
        else:
          del params['root']

        # add root path component
        if rootname is not None:
          url = url + '/' + rootname

      else:
        # add root to parameter list
        try:
          rootname = params['root']
        except KeyError:
          rootname = params['root'] = self.rootname

        # no need to specify default root
        if rootname == cfg.general.default_root:
          del params['root']

    # add 'pathrev' value to parameter list, but only when linking
    # within the same root (a pathrev is meaningless in another root)
    if (self.pathrev is not None
        and not params.has_key('pathrev')
        and view_func is not view_revision
        and rootname == self.rootname):
      params['pathrev'] = self.pathrev

    # add path
    if where:
      url = url + '/' + where

    # add trailing slash for a directory
    if pathtype == vclib.DIR:
      url = url + '/'

    # normalize top level URLs for use in Location headers and A tags
    elif not url:
      url = '/'

    # The blocks below drop the explicit 'view' parameter whenever the
    # target view is implied by other parts of the URL, keeping links
    # as short as possible.

    # no need to explicitly specify directory view for a directory
    if view_func is view_directory and pathtype == vclib.DIR:
      view_func = None

    # no need to explicitly specify roots view when in root_as_url
    # mode or there's no default root
    if view_func is view_roots and (cfg.options.root_as_url_component
                                    or not cfg.general.default_root):
      view_func = None

    # no need to explicitly specify annotate view when
    # there's an annotate parameter
    if view_func is view_annotate and params.get('annotate') is not None:
      view_func = None

    # no need to explicitly specify diff view when
    # there's r1 and r2 parameters
    if (view_func is view_diff and params.get('r1') is not None
        and params.get('r2') is not None):
      view_func = None

    # no need to explicitly specify checkout view when it's the default
    # view or when checkout_magic is enabled
    if view_func is view_checkout:
      if ((cfg.options.default_file_view == "co" and pathtype == vclib.FILE)
          or cfg.options.checkout_magic):
        view_func = None

    # no need to explicitly specify markup view when it's the default view
    if view_func is view_markup:
      if (cfg.options.default_file_view == "markup" \
          and pathtype == vclib.FILE):
        view_func = None

    # set the view parameter
    view_code = _view_codes.get(view_func)
    if view_code and not (params.has_key('view') and params['view'] is None):
      params['view'] = view_code

    # add sticky values to parameter list
    if sticky_vars:
      for name in _sticky_vars:
        value = self.query_dict.get(name)
        if value is not None and not params.has_key(name):
          params[name] = value

    # remove null values from parameter list
    for name, value in params.items():
      if value is None:
        del params[name]

    return url, params


def _path_parts(path):
  """Split up a repository path into a list of path components"""
  # clean it up. this removes duplicate '/' characters and any that may
  # exist at the front or end of the path.
return filter(None, string.split(path, '/')) def _normalize_path(path): """Collapse leading slashes in the script name You only get multiple slashes in the script name when users accidentally type urls like http://abc.com//viewvc.cgi/, but we correct for it because we output the script name in links and web browsers interpret //viewvc.cgi/ as http://viewvc.cgi/ """ i = 0 for c in path: if c != '/': break i = i + 1 if i: return path[i-1:] return path def _validate_param(name, value): """Validate whether the given value is acceptable for the param name. If the value is not allowed, then an error response is generated, and this function throws an exception. Otherwise, it simply returns None. """ # First things first -- check that we have a legal parameter name. try: validator = _legal_params[name] except KeyError: raise debug.ViewVCException( 'An illegal parameter name was provided.', '400 Bad Request') # Is there a validator? Is it a regex or a function? Validate if # we can, returning without incident on valid input. if validator is None: return elif hasattr(validator, 'match'): if validator.match(value): return else: if validator(value): return # If we get here, the input value isn't valid. raise debug.ViewVCException( 'An illegal value was provided for the "%s" parameter.' % (name), '400 Bad Request') def _validate_regex(value): ### we need to watch the flow of these parameters through the system ### to ensure they don't hit the page unescaped. otherwise, these ### parameters could constitute a CSS attack. try: re.compile(value) return True except: return None def _validate_view(value): # Return true iff VALUE is one of our allowed views. return _views.has_key(value) def _validate_mimetype(value): # For security purposes, we only allow mimetypes from a predefined set # thereof. return value in (viewcvs_mime_type, alt_mime_type, 'text/plain') # obvious things here. note that we don't need uppercase for alpha. 
_re_validate_alpha = re.compile('^[a-z]+$')
_re_validate_number = re.compile('^[0-9]+$')
_re_validate_boolint = re.compile('^[01]$')

# when comparing two revs, we sometimes construct REV:SYMBOL, so ':' is needed
_re_validate_revnum = re.compile('^[-_.a-zA-Z0-9:~\\[\\]/]*$')

# date time values
_re_validate_datetime = re.compile(r'^(\d\d\d\d-\d\d-\d\d(\s+\d\d:\d\d'
                                   '(:\d\d)?)?)?$')

# the legal query parameters and their validation functions
# (a value of None means any value at all is accepted for that parameter;
# see _validate_param above)
_legal_params = {
  'root'          : None,
  'view'          : _validate_view,
  'search'        : _validate_regex,
  'p1'            : None,
  'p2'            : None,

  'hideattic'     : _re_validate_boolint,
  'limit_changes' : _re_validate_number,
  'sortby'        : _re_validate_alpha,
  'sortdir'       : _re_validate_alpha,
  'logsort'       : _re_validate_alpha,
  'diff_format'   : _re_validate_alpha,
  'pathrev'       : _re_validate_revnum,
  'dir_pagestart' : _re_validate_number,
  'log_pagestart' : _re_validate_number,
  'annotate'      : _re_validate_revnum,
  'graph'         : _re_validate_revnum,
  'makeimage'     : _re_validate_boolint,
  'r1'            : _re_validate_revnum,
  'tr1'           : _re_validate_revnum,
  'r2'            : _re_validate_revnum,
  'tr2'           : _re_validate_revnum,
  'revision'      : _re_validate_revnum,
  'content-type'  : _validate_mimetype,

  # for query
  'file_match'    : _re_validate_alpha,
  'branch_match'  : _re_validate_alpha,
  'who_match'     : _re_validate_alpha,
  'comment_match' : _re_validate_alpha,
  'dir'           : None,
  'file'          : None,
  'branch'        : None,
  'who'           : None,
  'comment'       : None,
  'querysort'     : _re_validate_alpha,
  'date'          : _re_validate_alpha,
  'hours'         : _re_validate_number,
  'mindate'       : _re_validate_datetime,
  'maxdate'       : _re_validate_datetime,
  'format'        : _re_validate_alpha,

  # for redirect_pathrev
  'orig_path'     : None,
  'orig_pathtype' : None,
  'orig_pathrev'  : None,
  'orig_view'     : None,

  # deprecated
  'parent'        : _re_validate_boolint,
  'rev'           : _re_validate_revnum,
  'tarball'       : _re_validate_boolint,
  'hidecvsroot'   : _re_validate_boolint,
  }

def _path_join(path_parts):
  # Inverse of _path_parts(): join components with '/'.
  return string.join(path_parts, '/')

def _strip_suffix(suffix, path_parts, rev, pathtype, repos, view_func):
  """strip the suffix from a repository path if the resulting path is
  of the specified type, otherwise return None"""
  if not path_parts:
    return None
  l = len(suffix)
  if path_parts[-1][-l:] == suffix:
    # work on a copy so the caller's list isn't modified
    path_parts = path_parts[:]
    if len(path_parts[-1]) == l:
      del path_parts[-1]
    else:
      path_parts[-1] = path_parts[-1][:-l]
    t = _repos_pathtype(repos, path_parts, rev)
    if pathtype == t:
      return path_parts, t, view_func
  return None

def _repos_pathtype(repos, path_parts, rev):
  """Return the type of a repository path, or None if the path doesn't
  exist"""
  try:
    return repos.itemtype(path_parts, rev)
  except vclib.ItemNotFound:
    return None

def _orig_path(request, rev_param='revision', path_param=None):
  "Get original path of requested file at old revision before copies or moves"
  # The 'pathrev' variable is interpreted by nearly all ViewVC views to
  # provide a browsable snapshot of a repository at some point in its history.
  # 'pathrev' is a tag name for CVS repositories and a revision number for
  # Subversion repositories.  It's automatically propagated between pages by
  # logic in the Request.get_link() function which adds it to links like a
  # sticky variable.  When 'pathrev' is set, directory listings only include
  # entries that exist in the specified revision or tag.  Similarly, log pages
  # will only show revisions preceding the point in history specified by
  # 'pathrev.'  Markup, checkout, and annotate pages show the 'pathrev'
  # revision of files by default when no other revision is specified.
  #
  # In Subversion repositories, paths are always considered to refer to the
  # pathrev revision.  For example, if there is a "circle.jpg" in revision 3,
  # which is renamed and modified as "square.jpg" in revision 4, the original
  # circle image is visible at the following URLs:
  #
  #     *checkout*/circle.jpg?pathrev=3
  #     *checkout*/square.jpg?revision=3
  #     *checkout*/square.jpg?revision=3&pathrev=4
  #
  # Note that the following:
  #
  #     *checkout*/circle.jpg?rev=3
  #
  # now gets redirected to one of the following URLs:
  #
  #     *checkout*/circle.jpg?pathrev=3 (for Subversion)
  #     *checkout*/circle.jpg?revision=3 (for CVS)
  #
  rev = request.query_dict.get(rev_param, request.pathrev)
  path = request.query_dict.get(path_param, request.where)

  # Only Subversion repositories can trace a path back through renames
  # (the private _getrev/get_location interface); for CVS the path is
  # returned unchanged.
  if rev is not None and hasattr(request.repos, '_getrev'):
    try:
      pathrev = request.repos._getrev(request.pathrev)
      rev = request.repos._getrev(rev)
    except vclib.InvalidRevision:
      raise debug.ViewVCException('Invalid revision', '404 Not Found')
    return _path_parts(request.repos.get_location(path, pathrev, rev)), rev
  return _path_parts(path), rev


def setup_authorizer(cfg, username, rootname=None):
  """Setup the authorizer.  If ROOTNAME is provided, assume that
  per-root options have not been overlayed.  Otherwise, assume they
  have (and fetch the authorizer for the configured root)."""

  if rootname is None:
    authorizer = cfg.options.authorizer
    params = cfg.get_authorizer_params()
  else:
    authorizer, params = cfg.get_authorizer_and_params_hack(rootname)

  # No configured authorizer?  No problem.
  if not authorizer:
    return None

  # First, try to load a module with the configured name.
  import imp
  fp = None
  try:
    try:
      fp, path, desc = imp.find_module("%s" % (authorizer), vcauth.__path__)
      my_auth = imp.load_module('viewvc', fp, path, desc)
    except ImportError:
      raise debug.ViewVCException(
        'Invalid authorizer (%s) specified for root "%s"' \
        % (authorizer, rootname),
        '500 Internal Server Error')
  finally:
    # imp.find_module() opens the module file; make sure it gets closed.
    if fp:
      fp.close()

  # Add a rootname mapping callback function to the parameters.
def _root_lookup_func(cb_rootname): return locate_root(cfg, cb_rootname) # Finally, instantiate our Authorizer. return my_auth.ViewVCAuthorizer(_root_lookup_func, username, params) def check_freshness(request, mtime=None, etag=None, weak=0): cfg = request.cfg # See if we are supposed to disable etags (for debugging, usually) if not cfg.options.generate_etags: return 0 request_etag = request_mtime = None if etag is not None: if weak: etag = 'W/"%s"' % etag else: etag = '"%s"' % etag request_etag = request.server.getenv('HTTP_IF_NONE_MATCH') if mtime is not None: try: request_mtime = request.server.getenv('HTTP_IF_MODIFIED_SINCE') request_mtime = rfc822.mktime_tz(rfc822.parsedate_tz(request_mtime)) except: request_mtime = None # if we have an etag, use that for freshness checking. # if not available, then we use the last-modified time. # if not available, then the document isn't fresh. if etag is not None: isfresh = (request_etag == etag) elif mtime is not None: isfresh = (request_mtime >= mtime) else: isfresh = 0 # require revalidation after the configured amount of time if cfg and cfg.options.http_expiration_time >= 0: expiration = compat.formatdate(time.time() + cfg.options.http_expiration_time) request.server.addheader('Expires', expiration) request.server.addheader('Cache-Control', 'max-age=%d' % cfg.options.http_expiration_time) if isfresh: request.server.header(status='304 Not Modified') else: if etag is not None: request.server.addheader('ETag', etag) if mtime is not None: request.server.addheader('Last-Modified', compat.formatdate(mtime)) return isfresh def get_view_template(cfg, view_name, language="en"): # See if the configuration specifies a template for this view. If # not, use the default template path for this view. tname = vars(cfg.templates).get(view_name) or view_name + ".ezt" # Template paths are relative to the configurated template_dir (if # any, "templates" otherwise), so build the template path as such. 
tname = os.path.join(cfg.options.template_dir or "templates", tname) # Allow per-language template selection. tname = string.replace(tname, '%lang%', language) # Finally, construct the whole template path. tname = cfg.path(tname) debug.t_start('ezt-parse') template = ezt.Template(tname) debug.t_end('ezt-parse') return template def get_writeready_server_file(request, content_type=None, encoding=None, content_length=None, allow_compress=True): """Return a file handle to a response body stream, after outputting any queued special headers (on REQUEST.server) and (optionally) a 'Content-Type' header whose value is CONTENT_TYPE and character set is ENCODING. If CONTENT_LENGTH is provided and compression is not in use, also generate a 'Content-Length' header for this response. Callers my use ALLOW_COMPRESS to disable compression where it would otherwise be allowed. (Such as when transmitting an already-compressed response.) After this function is called, it is too late to add new headers to the response.""" if allow_compress and request.gzip_compress_level: request.server.addheader('Content-Encoding', 'gzip') elif content_length is not None: request.server.addheader('Content-Length', content_length) if content_type and encoding: request.server.header("%s; charset=%s" % (content_type, encoding)) elif content_type: request.server.header(content_type) else: request.server.header() if allow_compress and request.gzip_compress_level: fp = gzip.GzipFile('', 'wb', request.gzip_compress_level, request.server.file()) else: fp = request.server.file() return fp def generate_page(request, view_name, data, content_type=None): server_fp = get_writeready_server_file(request, content_type) template = get_view_template(request.cfg, view_name, request.language) template.generate(server_fp, data) def nav_path(request): """Return current path as list of items with "name" and "href" members The href members are view_directory links for directories and view_log links for files, but are set to 
None when the link would point to the current view""" if not request.repos: return [] is_dir = request.pathtype == vclib.DIR # add root item items = [] root_item = _item(name=request.server.escape(request.repos.name), href=None) if request.path_parts or request.view_func is not view_directory: root_item.href = request.get_url(view_func=view_directory, where='', pathtype=vclib.DIR, params={}, escape=1) items.append(root_item) # add path part items path_parts = [] for part in request.path_parts: path_parts.append(part) is_last = len(path_parts) == len(request.path_parts) item = _item(name=request.server.escape(part), href=None) if not is_last or (is_dir and request.view_func is not view_directory): item.href = request.get_url(view_func=view_directory, where=_path_join(path_parts), pathtype=vclib.DIR, params={}, escape=1) elif not is_dir and request.view_func is not view_log: item.href = request.get_url(view_func=view_log, where=_path_join(path_parts), pathtype=vclib.FILE, params={}, escape=1) items.append(item) return items def prep_tags(request, tags): url, params = request.get_link(params={'pathrev': None}) params = compat.urlencode(params) if params: url = urllib.quote(url, _URL_SAFE_CHARS) + '?' 
+ params + '&pathrev='
  else:
    url = urllib.quote(url, _URL_SAFE_CHARS) + '?pathrev='
  url = request.server.escape(url)

  # One link per tag; the tag name is appended to the shared
  # '...pathrev=' base URL built above.
  links = [ ]
  for tag in tags:
    links.append(_item(name=tag.name, href=url+tag.name))
  links.sort(lambda a, b: cmp(a.name, b.name))
  return links


def guess_mime(filename):
  # Guess a MIME type from the filename extension; None if unknown.
  return mimetypes.guess_type(filename)[0]


def is_viewable_image(mime_type):
  return mime_type and mime_type in ('image/gif', 'image/jpeg', 'image/png')


def is_text(mime_type):
  # An unset MIME type is treated as text.
  return not mime_type or mime_type[:5] == 'text/'


def is_cvsroot_path(roottype, path_parts):
  return roottype == 'cvs' and path_parts and path_parts[0] == 'CVSROOT'


def is_plain_text(mime_type):
  return not mime_type or mime_type == 'text/plain'


def default_view(mime_type, cfg):
  "Determine whether file should be viewed through markup page or sent raw"
  # If the mime type is text/anything or a supported image format we view
  # through the markup page. If the mime type is something else, we send
  # it directly to the browser. That way users can see things like flash
  # animations, pdfs, word documents, multimedia, etc, which wouldn't be
  # very useful marked up. If the mime type is totally unknown (happens when
  # we encounter an unrecognized file extension) we also view it through
  # the markup page since that's better than sending it text/plain.
  if ('markup' in cfg.options.allowed_views and
      (is_viewable_image(mime_type) or is_text(mime_type))):
    return view_markup
  return view_checkout


def is_binary_file_mime_type(mime_type, cfg):
  """Return True iff MIME_TYPE is set and matches one of the binary
  file mime type patterns in CFG."""
  if mime_type:
    for pattern in cfg.options.binary_mime_types:
      if fnmatch.fnmatch(mime_type, pattern):
        return True
  return False


def get_file_view_info(request, where, rev=None, mime_type=None, pathrev=-1):
  """Return an object holding common hrefs and a viewability flag used
  for various views of FILENAME at revision REV whose MIME type is
  MIME_TYPE.

  The object's members include:
     view_href
     download_href
     download_text_href
     annotate_href
     revision_href
     prefer_markup

  Each href is None when the corresponding view is disallowed by the
  configuration or inapplicable to this file."""

  rev = rev and str(rev) or None
  mime_type = mime_type or guess_mime(where)
  if pathrev == -1: # cheesy default value, since we need to preserve None
    pathrev = request.pathrev

  view_href = None
  download_href = None
  download_text_href = None
  annotate_href = None
  revision_href = None

  if 'markup' in request.cfg.options.allowed_views:
    view_href = request.get_url(view_func=view_markup,
                                where=where,
                                pathtype=vclib.FILE,
                                params={'revision': rev,
                                        'pathrev': pathrev},
                                escape=1)
  if 'co' in request.cfg.options.allowed_views:
    download_href = request.get_url(view_func=view_checkout,
                                    where=where,
                                    pathtype=vclib.FILE,
                                    params={'revision': rev,
                                            'pathrev': pathrev},
                                    escape=1)
    if not is_plain_text(mime_type):
      download_text_href = request.get_url(view_func=view_checkout,
                                           where=where,
                                           pathtype=vclib.FILE,
                                           params={'content-type': 'text/plain',
                                                   'revision': rev,
                                                   'pathrev': pathrev},
                                           escape=1)
  if 'annotate' in request.cfg.options.allowed_views:
    annotate_href = request.get_url(view_func=view_annotate,
                                    where=where,
                                    pathtype=vclib.FILE,
                                    params={'annotate': rev,
                                            'pathrev': pathrev},
                                    escape=1)
  if request.roottype == 'svn':
    revision_href = request.get_url(view_func=view_revision,
                                    params={'revision': rev},
                                    escape=1)

  # Binary files get no markup/annotate/text-download links at all.
  is_binary_file = is_binary_file_mime_type(mime_type, request.cfg)
  if is_binary_file:
    download_text_href = annotate_href = view_href = None
    prefer_markup = False
  else:
    prefer_markup = default_view(mime_type, request.cfg) == view_markup

  return _item(view_href=view_href,
               download_href=download_href,
               download_text_href=download_text_href,
               annotate_href=annotate_href,
               revision_href=revision_href,
               prefer_markup=ezt.boolean(prefer_markup))


# Matches URLs
_re_rewrite_url = re.compile('((http|https|ftp|file|svn|svn\+ssh)'
                             '(://[-a-zA-Z0-9%.~:_/]+)((\?|\&)'
                             '([-a-zA-Z0-9%.~:_]+)=([-a-zA-Z0-9%.~:_])+)*'
                             '(#([-a-zA-Z0-9%.~:_]+)?)?)')

# Matches email addresses
_re_rewrite_email = re.compile('([-a-zA-Z0-9_.\+]+)@'
                               '(([-a-zA-Z0-9]+\.)+[A-Za-z]{2,4})')

# Matches revision references
_re_rewrite_svnrevref = re.compile(r'\b(r|rev #?|revision #?)([0-9]+)\b')


class ViewVCHtmlFormatterTokens:
  # Wraps the token list produced by ViewVCHtmlFormatter.tokenize_text()
  # so that the (possibly truncated) formatted output can be produced on
  # demand.
  def __init__(self, tokens):
    self.tokens = tokens

  def get_result(self, maxlen=0):
    """Format the tokens per the registered set of formatters, and
    limited to MAXLEN visible characters (or unlimited if MAXLEN is
    0).  Return a 3-tuple containing the formatted result string, the
    number of visible characters in the result string, and a boolean
    flag indicating whether or not S was truncated."""
    out = ''
    out_len = 0
    for token in self.tokens:
      chunk, chunk_len = token.converter(token.match, token.userdata, maxlen)
      out = out + chunk
      out_len = out_len + chunk_len
      if maxlen:
        maxlen = maxlen - chunk_len
        if maxlen <= 0:
          return out, out_len, 1
    return out, out_len, 0


class ViewVCHtmlFormatter:
  """Format a string as HTML-encoded output with customizable markup
  rules, for example turning strings that look like URLs into anchor links.

  NOTE:  While there might appear to be some unused portions of this
  interface, there is a good chance that there are consumers outside
  of ViewVC itself that make use of these things.
  """

  def __init__(self):
    # list of [regexp, converter-function, userdata] triples; see
    # add_formatter()
    self._formatters = []

  def format_url(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as
           linkified URL, with no more than MAXLEN characters in the
           non-HTML-tag bits.  If MAXLEN is 0, there is no maximum.
         - the number of non-HTML-tag characters returned.
    """
    s = mobj.group(0)
    trunc_s = maxlen and s[:maxlen] or s
    return '<a href="%s">%s</a>' % (sapi.escape(s),
                                    sapi.escape(trunc_s)), \
           len(trunc_s)

  def format_email(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as
           linkified email address, with no more than MAXLEN characters
           in the non-HTML-tag bits.  If MAXLEN is 0, there is no
           maximum.
         - the number of non-HTML-tag characters returned.
    """
    s = mobj.group(0)
    trunc_s = maxlen and s[:maxlen] or s
    return '<a href="mailto:%s">%s</a>' % (urllib.quote(s),
                                           self._entity_encode(trunc_s)), \
           len(trunc_s)

  def format_email_obfuscated(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as an
           entity-encoded email address, with no more than MAXLEN
           characters in the non-HTML-tag bits.  If MAXLEN is 0, there
           is no maximum.
         - the number of non-HTML-tag characters returned.
    """
    s = mobj.group(0)
    trunc_s = maxlen and s[:maxlen] or s
    return self._entity_encode(trunc_s), len(trunc_s)

  def format_email_truncated(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as an
           HTML-escaped truncated email address of no more than MAXLEN
           characters.  If MAXLEN is 0, there is no maximum.
         - the number of characters returned.
    """
    # group(1) is the local part (everything before the '@').
    s = mobj.group(1)
    s_len = len(s)
    if (maxlen == 0) or (s_len < (maxlen - 1)):
      return self._entity_encode(s) + '&#64;&hellip;', s_len + 2
    elif s_len < maxlen:
      return self._entity_encode(s) + '&#64;', s_len + 1
    else:
      trunc_s = mobj.group(1)[:maxlen]
      return self._entity_encode(trunc_s), len(trunc_s)

  def format_svnrevref(self, mobj, userdata, maxlen=0):
    """Return a 2-tuple containing:
         - the text represented by MatchObject MOBJ, formatted as an
           linkified URL to a ViewVC Subversion revision view, with no
           more than MAXLEN characters in the non-HTML-tag portions.
           If MAXLEN is 0, there is no maximum.
         - the number of characters returned.

       USERDATA is a function that accepts a revision reference
       and returns a URL to that revision.
""" s = mobj.group(0) revref = mobj.group(2) trunc_s = maxlen and s[:maxlen] or s revref_url = userdata(revref) return '<a href="%s">%s</a>' % (sapi.escape(revref_url), sapi.escape(trunc_s)), \ len(trunc_s) def format_custom_url(self, mobj, userdata, maxlen=0): """Return a 2-tuple containing: - the text represented by MatchObject MOBJ, formatted as an linkified URL created by substituting match groups 0-9 into USERDATA (which is a format string that uses \N to represent the substitution locations) and with no more than MAXLEN characters in the non-HTML-tag portions. If MAXLEN is 0, there is no maximum. - the number of characters returned. """ format = userdata text = mobj.group(0) url = format for i in range(9): try: repl = mobj.group(i) except: repl = '' url = url.replace('\%d' % (i), repl) trunc_s = maxlen and text[:maxlen] or text return '<a href="%s">%s</a>' % (sapi.escape(url), sapi.escape(trunc_s)), \ len(trunc_s) def format_text(self, s, unused, maxlen=0): """Return a 2-tuple containing: - the text S, HTML-escaped, containing no more than MAXLEN characters. If MAXLEN is 0, there is no maximum. - the number of characters returned. """ trunc_s = maxlen and s[:maxlen] or s return sapi.escape(trunc_s), len(trunc_s) def add_formatter(self, regexp, conv, userdata=None): """Register a formatter which finds instances of strings matching REGEXP, and using the function CONV and USERDATA to format them. CONV is a function which accepts three parameters: - the MatchObject which holds the string portion to be formatted, - the USERDATA object, - the maximum number of characters from that string to use for human-readable output (or 0 to indicate no maximum). """ if type(regexp) == type(''): regexp = re.compile(regexp) self._formatters.append([regexp, conv, userdata]) def get_result(self, s, maxlen=0): """Format S per the set of formatters registered with this object, and limited to MAXLEN visible characters (or unlimited if MAXLEN is 0). 
Return a 3-tuple containing the formatted result string, the number of visible characters in the result string, and a boolean flag indicating whether or not S was truncated. """ return self.tokenize_text(s).get_result(maxlen) def tokenize_text(self, s): """Return a ViewVCHtmlFormatterTokens object containing the tokens created when parsing the string S. Callers can use that object's get_result() function to retrieve HTML-formatted text. """ tokens = [] # We could just have a "while s:" here instead of "for line: while # line:", but for really large log messages with heavy # tokenization, the cost in both performance and memory # consumption of the approach taken was atrocious. for line in string.split(string.replace(s, '\r\n', '\n'), '\n'): line = line + '\n' while line: best_match = best_conv = best_userdata = None for test in self._formatters: match = test[0].search(line) # If we find and match and (a) its our first one, or (b) it # matches text earlier than our previous best match, or (c) it # matches text at the same location as our previous best match # but extends to cover more text than that match, then this is # our new best match. # # Implied here is that when multiple formatters match exactly # the same text, the first formatter in the registration list wins. if match \ and ((best_match is None) \ or (match.start() < best_match.start()) or ((match.start() == best_match.start()) \ and (match.end() > best_match.end()))): best_match = match best_conv = test[1] best_userdata = test[2] # If we found a match... if best_match: # ... add any non-matching stuff first, then the matching bit. start = best_match.start() end = best_match.end() if start > 0: tokens.append(_item(match=line[:start], converter=self.format_text, userdata=None)) tokens.append(_item(match=best_match, converter=best_conv, userdata=best_userdata)) line = line[end:] else: # Otherwise, just add the rest of the string. 
tokens.append(_item(match=line, converter=self.format_text, userdata=None)) line = '' return ViewVCHtmlFormatterTokens(tokens) def _entity_encode(self, s): return string.join(map(lambda x: '&#%d;' % (ord(x)), s), '') class LogFormatter: def __init__(self, request, log): self.request = request self.log = log or '' self.tokens = None self.cache = {} # (maxlen, htmlize) => resulting_log def get(self, maxlen=0, htmlize=1): cfg = self.request.cfg # Prefer the cache. if self.cache.has_key((maxlen, htmlize)): return self.cache[(maxlen, htmlize)] # If we are HTML-izing... if htmlize: # ...and we don't yet have ViewVCHtmlFormatter() object tokens... if not self.tokens: # ... then get them. lf = ViewVCHtmlFormatter() # Rewrite URLs. lf.add_formatter(_re_rewrite_url, lf.format_url) # Rewrite Subversion revision references. if self.request.roottype == 'svn': def revision_to_url(rev): return self.request.get_url(view_func=view_revision, params={'revision': rev}, escape=0) lf.add_formatter(_re_rewrite_svnrevref, lf.format_svnrevref, revision_to_url) # Rewrite email addresses. if cfg.options.mangle_email_addresses == 2: lf.add_formatter(_re_rewrite_email, lf.format_email_truncated) elif cfg.options.mangle_email_addresses == 1: lf.add_formatter(_re_rewrite_email, lf.format_email_obfuscated) else: lf.add_formatter(_re_rewrite_email, lf.format_email) # Add custom rewrite handling per configuration. for rule in cfg.options.custom_log_formatting: rule = rule.replace('\\:', '\x01') regexp, format = map(lambda x: x.strip(), rule.split(':', 1)) regexp = regexp.replace('\x01', ':') format = format.replace('\x01', ':') lf.add_formatter(re.compile(regexp), lf.format_custom_url, format) # Tokenize the log message. self.tokens = lf.tokenize_text(self.log) # Use our formatter to ... you know ... format. log, log_len, truncated = self.tokens.get_result(maxlen) result_log = log + (truncated and '&hellip;' or '') # But if we're not HTML-izing... 
else: # ...then do much more simplistic transformations as necessary. log = self.log if cfg.options.mangle_email_addresses == 2: log = re.sub(_re_rewrite_email, r'\1@...', log) result_log = maxlen and log[:maxlen] or log # In either case, populate the cache and return the results. self.cache[(maxlen, htmlize)] = result_log return result_log _time_desc = { 1 : 'second', 60 : 'minute', 3600 : 'hour', 86400 : 'day', 604800 : 'week', 2628000 : 'month', 31536000 : 'year', } def get_time_text(request, interval, num): "Get some time text, possibly internationalized." ### some languages have even harder pluralization rules. we'll have to ### deal with those on demand if num == 0: return '' text = _time_desc[interval] if num == 1: attr = text + '_singular' fmt = '%d ' + text else: attr = text + '_plural' fmt = '%d ' + text + 's' try: fmt = getattr(request.kv.i18n.time, attr) except AttributeError: pass return fmt % num def little_time(request): try: return request.kv.i18n.time.little_time except AttributeError: return 'very little time' def html_time(request, secs, extended=0): secs = long(time.time()) - secs if secs < 2: return little_time(request) breaks = _time_desc.keys() breaks.sort() i = 0 while i < len(breaks): if secs < 2 * breaks[i]: break i = i + 1 value = breaks[i - 1] s = get_time_text(request, value, secs / value) if extended and i > 1: secs = secs % value value = breaks[i - 2] ext = get_time_text(request, value, secs / value) if ext: ### this is not i18n compatible. 
pass on it for now s = s + ', ' + ext return s def common_template_data(request, revision=None, mime_type=None): """Return a ezt.TemplateData instance with data dictionary items common to most ViewVC views.""" cfg = request.cfg # Initialize data dictionary members (sorted alphanumerically) data = ezt.TemplateData({ 'annotate_href' : None, 'cfg' : cfg, 'docroot' : cfg.options.docroot is None \ and request.script_name + '/' + docroot_magic_path \ or cfg.options.docroot, 'download_href' : None, 'download_text_href' : None, 'graph_href': None, 'kv' : request.kv, 'lockinfo' : None, 'log_href' : None, 'nav_path' : nav_path(request), 'pathtype' : None, 'prefer_markup' : ezt.boolean(0), 'queryform_href' : None, 'rev' : None, 'revision_href' : None, 'rootname' : request.rootname \ and request.server.escape(request.rootname) or None, 'rootpath' : request.rootpath, 'roots_href' : None, 'roottype' : request.roottype, 'rss_href' : None, 'tarball_href' : None, 'up_href' : None, 'username' : request.username, 'view' : _view_codes[request.view_func], 'view_href' : None, 'vsn' : __version__, 'where' : request.server.escape(request.where), }) rev = revision if not rev: rev = request.query_dict.get('annotate') if not rev: rev = request.query_dict.get('revision') if not rev and request.roottype == 'svn': rev = request.query_dict.get('pathrev') try: data['rev'] = hasattr(request.repos, '_getrev') \ and request.repos._getrev(rev) or rev except vclib.InvalidRevision: raise debug.ViewVCException('Invalid revision', '404 Not Found') if request.pathtype == vclib.DIR: data['pathtype'] = 'dir' elif request.pathtype == vclib.FILE: data['pathtype'] = 'file' if request.path_parts: dir = _path_join(request.path_parts[:-1]) data['up_href'] = request.get_url(view_func=view_directory, where=dir, pathtype=vclib.DIR, params={}, escape=1) if 'roots' in cfg.options.allowed_views: data['roots_href'] = request.get_url(view_func=view_roots, escape=1, params={}) if request.pathtype == vclib.FILE: fvi = 
get_file_view_info(request, request.where, data['rev'], mime_type) data['view_href'] = fvi.view_href data['download_href'] = fvi.download_href data['download_text_href'] = fvi.download_text_href data['annotate_href'] = fvi.annotate_href data['revision_href'] = fvi.revision_href data['prefer_markup'] = fvi.prefer_markup data['log_href'] = request.get_url(view_func=view_log, params={}, escape=1) if request.roottype == 'cvs' and cfg.options.use_cvsgraph: data['graph_href'] = request.get_url(view_func=view_cvsgraph, params={}, escape=1) file_data = request.repos.listdir(request.path_parts[:-1], request.pathrev, {}) def _only_this_file(item): return item.name == request.path_parts[-1] entries = filter(_only_this_file, file_data) if len(entries) == 1: request.repos.dirlogs(request.path_parts[:-1], request.pathrev, entries, {}) data['lockinfo'] = entries[0].lockinfo elif request.pathtype == vclib.DIR: data['view_href'] = request.get_url(view_func=view_directory, params={}, escape=1) if 'tar' in cfg.options.allowed_views: data['tarball_href'] = request.get_url(view_func=download_tarball, params={}, escape=1) if request.roottype == 'svn': data['revision_href'] = request.get_url(view_func=view_revision, params={'revision': data['rev']}, escape=1) data['log_href'] = request.get_url(view_func=view_log, params={}, escape=1) if is_querydb_nonempty_for_root(request): if request.pathtype == vclib.DIR: params = {} if request.roottype == 'cvs' and request.pathrev: params['branch'] = request.pathrev data['queryform_href'] = request.get_url(view_func=view_queryform, params=params, escape=1) data['rss_href'] = request.get_url(view_func=view_query, params={'date': 'month', 'format': 'rss'}, escape=1) elif request.pathtype == vclib.FILE: parts = _path_parts(request.where) where = _path_join(parts[:-1]) data['rss_href'] = request.get_url(view_func=view_query, where=where, pathtype=request.pathtype, params={'date': 'month', 'format': 'rss', 'file': parts[-1], 'file_match': 'exact'}, 
escape=1) return data def retry_read(src, reqlen=CHUNK_SIZE): while 1: chunk = src.read(CHUNK_SIZE) if not chunk: # need to check for eof methods because the cStringIO file objects # returned by ccvs don't provide them if hasattr(src, 'eof') and src.eof() is None: time.sleep(1) continue return chunk def copy_stream(src, dst, htmlize=0): while 1: chunk = retry_read(src) if not chunk: break if htmlize: chunk = sapi.escape(chunk) dst.write(chunk) class MarkupPipeWrapper: """An EZT callback that outputs a filepointer, plus some optional pre- and post- text.""" def __init__(self, fp, pretext=None, posttext=None, htmlize=0): self.fp = fp self.pretext = pretext self.posttext = posttext self.htmlize = htmlize def __call__(self, ctx): if self.pretext: ctx.fp.write(self.pretext) copy_stream(self.fp, ctx.fp, self.htmlize) self.fp.close() if self.posttext: ctx.fp.write(self.posttext) _re_rewrite_escaped_url = re.compile('((http|https|ftp|file|svn|svn\+ssh)' '(://[-a-zA-Z0-9%.~:_/]+)' '((\?|\&amp;amp;|\&amp;|\&)' '([-a-zA-Z0-9%.~:_]+)=([-a-zA-Z0-9%.~:_])+)*' '(#([-a-zA-Z0-9%.~:_]+)?)?)') def markup_escaped_urls(s): # Return a copy of S with all URL references -- which are expected # to be already HTML-escaped -- wrapped in <a href=""></a>. def _url_repl(match_obj): url = match_obj.group(0) unescaped_url = string.replace(url, "&amp;amp;", "&amp;") return "<a href=\"%s\">%s</a>" % (unescaped_url, url) return re.sub(_re_rewrite_escaped_url, _url_repl, s) def detect_encoding(text_block): """Return the encoding used by TEXT_BLOCK as detected by the chardet Python module. (Currently, this is used only when syntax highlighting is not enabled/available; otherwise, Pygments does this work for us.)""" # Does the TEXT_BLOCK start with a BOM? 
for bom, encoding in [('\xef\xbb\xbf', 'utf-8'), ('\xff\xfe', 'utf-16'), ('\xfe\xff', 'utf-16be'), ('\xff\xfe\0\0', 'utf-32'), ('\0\0\xfe\xff', 'utf-32be'), ]: if text_block[:len(bom)] == bom: return encoding # If no recognized BOM, see if chardet can help us. try: import chardet # If chardet can confidently claimed a match, we'll use its # findings. (And if that match is 'ascii' -- which is a subset of # utf-8 -- we'll just call it 'utf-8' and score a zero transform.) resp = chardet.detect(text_block) if resp.get('confidence') == 1.0: encoding = resp.get('encoding') if encoding is "ascii": encoding = "utf-8" return encoding except: pass # By default ... we have no idea. return None def transcode_text(text, encoding=None): """If ENCODING is provided and not 'utf-8', transcode TEXT from ENCODING to UTF-8.""" if not encoding or encoding == 'utf-8': return text try: return unicode(text, encoding, 'replace').encode('utf-8', 'replace') except: pass return text def markup_stream(request, cfg, blame_data, file_lines, filename, mime_type, encoding, colorize): """Return the contents of a versioned file as a list of vclib.Annotation objects, each representing one line of the file's contents. Use BLAME_DATA as the annotation information for the file if provided. Use FILE_LINES as the lines of file content text themselves. MIME_TYPE is the MIME content type of the file; ENCODING is its character encoding. If COLORIZE is true, attempt to apply syntax coloration to the file contents, and use the HTML-marked-up results as the text in the return vclib.Annotation objects.""" # Nothing to mark up? So be it. if not file_lines: return [] # Determine if we should (and can) use Pygments to highlight our # output. Reasons not to include a) being told not to by the # configuration, b) not being able to import the Pygments modules, # and c) Pygments not having a lexer for our file's format. 
pygments_lexer = None if colorize: from pygments import highlight from pygments.formatters import HtmlFormatter from pygments.lexers import ClassNotFound, \ get_lexer_by_name, \ get_lexer_for_mimetype, \ get_lexer_for_filename, \ guess_lexer if not encoding: encoding = 'guess' if cfg.options.detect_encoding: try: import chardet encoding = 'chardet' except (SyntaxError, ImportError): pass # First, see if there's a Pygments lexer associated with MIME_TYPE. if mime_type: try: pygments_lexer = get_lexer_for_mimetype(mime_type, encoding=encoding, tabsize=cfg.options.tabsize, stripnl=False) except ClassNotFound: pygments_lexer = None # If we've no lexer thus far, try to find one based on the FILENAME. if not pygments_lexer: try: pygments_lexer = get_lexer_for_filename(filename, encoding=encoding, tabsize=cfg.options.tabsize, stripnl=False) except ClassNotFound: pygments_lexer = None # Still no lexer? If we've reason to believe this is a text # file, try to guess the lexer based on the file's content. if not pygments_lexer and is_text(mime_type) and file_lines: try: pygments_lexer = guess_lexer(file_lines[0], encoding=encoding, tabsize=cfg.options.tabsize, stripnl=False) except ClassNotFound: pygments_lexer = None # If we aren't highlighting, just return an amalgamation of the # BLAME_DATA (if any) and the FILE_LINES. if not pygments_lexer: # If allowed by configuration, try to detect the source encoding # for this file. We'll assemble a block of data from the file # contents to do so... 1024 bytes should be enough. if not encoding and cfg.options.detect_encoding: block_size = 0 text_block = '' for i in range(len(file_lines)): text_block = text_block + file_lines[i] if len(text_block) >= 1024: break encoding = detect_encoding(text_block) # Built output data comprised of marked-up and possibly-transcoded # source text lines wrapped in (possibly dummy) vclib.Annotation # objects. 
lines = [] file_lines = transcode_text(string.join(file_lines, ''), encoding) if file_lines[-1] == '\n': file_lines = file_lines[:-1] file_lines = string.split(file_lines, '\n') for i in range(len(file_lines)): line = file_lines[i] if cfg.options.tabsize > 0: line = string.expandtabs(line, cfg.options.tabsize) line = markup_escaped_urls(sapi.escape(line)) if blame_data: blame_item = blame_data[i] blame_item.text = line else: blame_item = vclib.Annotation(line, i + 1, None, None, None, None) blame_item.diff_href = None lines.append(blame_item) return lines # If we get here, we're highlighting something. class PygmentsSink: def __init__(self, blame_data): if blame_data: self.has_blame_data = 1 self.blame_data = blame_data else: self.has_blame_data = 0 self.blame_data = [] self.line_no = 0 def write(self, buf): ### FIXME: Don't bank on write() being called once per line buf = markup_escaped_urls(string.rstrip(buf, '\n\r')) if self.has_blame_data: self.blame_data[self.line_no].text = buf else: item = vclib.Annotation(buf, self.line_no + 1, None, None, None, None) item.diff_href = None self.blame_data.append(item) self.line_no = self.line_no + 1 ps = PygmentsSink(blame_data) highlight(string.join(file_lines, ''), pygments_lexer, HtmlFormatter(nowrap=True, classprefix="pygments-", encoding='utf-8'), ps) return ps.blame_data def make_time_string(date, cfg): """Returns formatted date string in either local time or UTC. The passed in 'date' variable is seconds since epoch. 
""" if date is None: return None if cfg.options.use_localtime: tm = time.localtime(date) else: tm = time.gmtime(date) if cfg.options.iso8601_timestamps: if cfg.options.use_localtime: if tm[8] and time.daylight: tz = -time.altzone else: tz = -time.timezone tz = float(tz) / 3600.0 tz = string.replace(str.format('{0:+06.2f}', tz), '.', ':') else: tz = 'Z' return time.strftime('%Y-%m-%dT%H:%M:%S', tm) + tz else: return time.asctime(tm) + ' ' + \ (cfg.options.use_localtime and time.tzname[tm[8]] or 'UTC') def make_rss_time_string(date, cfg): """Returns formatted date string in UTC, formatted for RSS. The passed in 'date' variable is seconds since epoch. """ if date is None: return None return time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime(date)) + ' UTC' def make_comma_sep_list_string(items): return string.join(map(lambda x: x.name, items), ', ') def get_itemprops(request, path_parts, rev): itemprops = request.repos.itemprops(path_parts, rev) propnames = itemprops.keys() propnames.sort() props = [] for name in propnames: lf = LogFormatter(request, itemprops[name]) value = lf.get(maxlen=0, htmlize=1) undisplayable = ezt.boolean(0) # skip non-utf8 property names try: unicode(name, 'utf8') except: continue # note non-utf8 property values try: unicode(value, 'utf8') except: value = None undisplayable = ezt.boolean(1) props.append(_item(name=name, value=value, undisplayable=undisplayable)) return props def parse_mime_type(mime_type): mime_parts = map(lambda x: x.strip(), string.split(mime_type, ';')) type_subtype = mime_parts[0].lower() parameters = {} for part in mime_parts[1:]: name, value = string.split(part, '=', 1) parameters[name] = value return type_subtype, parameters def calculate_mime_type(request, path_parts, rev): """Return a 2-tuple carrying the MIME content type and character encoding for the file represented by PATH_PARTS in REV. 
Use REQUEST for repository access as necessary.""" if not path_parts: return None, None mime_type = encoding = None if request.roottype == 'svn' \ and (not request.cfg.options.svn_ignore_mimetype): try: itemprops = request.repos.itemprops(path_parts, rev) mime_type = itemprops.get('svn:mime-type') if mime_type: mime_type, parameters = parse_mime_type(mime_type) return mime_type, parameters.get('charset') except: pass return guess_mime(path_parts[-1]), None def assert_viewable_filesize(cfg, filesize): if cfg.options.max_filesize_kbytes \ and filesize != -1 \ and filesize > (1024 * cfg.options.max_filesize_kbytes): raise debug.ViewVCException('Display of files larger than %d KB ' 'disallowed by configuration' % (cfg.options.max_filesize_kbytes), '403 Forbidden') def markup_or_annotate(request, is_annotate): cfg = request.cfg path, rev = _orig_path(request, is_annotate and 'annotate' or 'revision') lines = fp = image_src_href = None annotation = 'none' revision = None mime_type, encoding = calculate_mime_type(request, path, rev) # Is this display blocked by 'binary_mime_types' configuration? if is_binary_file_mime_type(mime_type, cfg): raise debug.ViewVCException('Display of binary file content disabled ' 'by configuration', '403 Forbidden') # Is this a viewable image type? if is_viewable_image(mime_type) \ and 'co' in cfg.options.allowed_views: fp, revision = request.repos.openfile(path, rev, {}) fp.close() if check_freshness(request, None, revision, weak=1): return if is_annotate: annotation = 'binary' image_src_href = request.get_url(view_func=view_checkout, params={'revision': rev}, escape=1) # Not a viewable image. else: filesize = request.repos.filesize(path, rev) # If configuration disallows display of large files, try to honor # that request. assert_viewable_filesize(cfg, filesize) # If this was an annotation request, try to annotate this file. # If something goes wrong, that's okay -- we'll gracefully revert # to a plain markup display. 
blame_data = None if is_annotate: try: blame_source, revision = request.repos.annotate(path, rev, False) if check_freshness(request, None, revision, weak=1): return # Create BLAME_DATA list from BLAME_SOURCE, adding diff_href # items to each relevant "line". blame_data = [] for item in blame_source: item.diff_href = None if item.prev_rev: item.diff_href = request.get_url(view_func=view_diff, params={'r1': item.prev_rev, 'r2': item.rev}, escape=1, partial=1) blame_data.append(item) annotation = 'annotated' except vclib.NonTextualFileContents: annotation = 'binary' except: annotation = 'error' # Grab the file contents. fp, revision = request.repos.openfile(path, rev, {'cvs_oldkeywords' : 1}) if check_freshness(request, None, revision, weak=1): fp.close() return # If we're limiting by filesize but couldn't pull off the cheap # check above, we'll try to do so line by line here (while # building our file_lines array). if cfg.options.max_filesize_kbytes and filesize == -1: file_lines = [] filesize = 0 while 1: line = fp.readline() if not line: break filesize = filesize + len(line) assert_viewable_filesize(cfg, filesize) file_lines.append(line) else: file_lines = fp.readlines() fp.close() # Do we have a differing number of file content lines and # annotation items? That's no good. Call it an error and don't # bother attempting the annotation display. if blame_data and (len(file_lines) != len(blame_data)): annotation = 'error' blame_data = None # Try to markup the file contents/annotation. If we get an error # and we were colorizing the stream, try once more without the # colorization enabled. 
colorize = cfg.options.enable_syntax_coloration try: lines = markup_stream(request, cfg, blame_data, file_lines, path[-1], mime_type, encoding, colorize) except: if colorize: lines = markup_stream(request, cfg, blame_data, file_lines, path[-1], mime_type, encoding, False) else: raise debug.ViewVCException('Error displaying file contents', '500 Internal Server Error') data = common_template_data(request, revision, mime_type) data.merge(ezt.TemplateData({ 'mime_type' : mime_type, 'log' : None, 'date' : None, 'ago' : None, 'author' : None, 'branches' : None, 'tags' : None, 'branch_points' : None, 'changed' : None, 'size' : None, 'state' : None, 'vendor_branch' : None, 'prev' : None, 'orig_path' : None, 'orig_href' : None, 'image_src_href' : image_src_href, 'lines' : lines, 'properties' : get_itemprops(request, path, rev), 'annotation' : annotation, })) if cfg.options.show_log_in_markup: options = { 'svn_latest_log': 1, ### FIXME: Use of this magical value is uncool. 'svn_cross_copies': 1, } revs = request.repos.itemlog(path, revision, vclib.SORTBY_REV, 0, 1, options) entry = revs[-1] lf = LogFormatter(request, entry.log) data['date'] = make_time_string(entry.date, cfg) data['author'] = entry.author data['changed'] = entry.changed data['log'] = lf.get(maxlen=0, htmlize=1) data['size'] = entry.size if entry.date is not None: data['ago'] = html_time(request, entry.date, 1) if request.roottype == 'cvs': branch = entry.branch_number prev = entry.prev or entry.parent data['state'] = entry.dead and 'dead' data['prev'] = prev and prev.string data['vendor_branch'] = ezt.boolean(branch and branch[2] % 2 == 1) ### TODO: Should this be using prep_tags() instead? 
data['branches'] = make_comma_sep_list_string(entry.branches) data['tags'] = make_comma_sep_list_string(entry.tags) data['branch_points']= make_comma_sep_list_string(entry.branch_points) if path != request.path_parts: orig_path = _path_join(path) data['orig_path'] = orig_path data['orig_href'] = request.get_url(view_func=view_log, where=orig_path, pathtype=vclib.FILE, params={'pathrev': revision}, escape=1) generate_page(request, "file", data) def view_markup(request): if 'markup' not in request.cfg.options.allowed_views: raise debug.ViewVCException('Markup view is disabled', '403 Forbidden') if request.pathtype != vclib.FILE: raise debug.ViewVCException('Unsupported feature: markup view on ' 'directory', '400 Bad Request') markup_or_annotate(request, 0) def view_annotate(request): if 'annotate' not in request.cfg.options.allowed_views: raise debug.ViewVCException('Annotation view is disabled', '403 Forbidden') if request.pathtype != vclib.FILE: raise debug.ViewVCException('Unsupported feature: annotate view on ' 'directory', '400 Bad Request') markup_or_annotate(request, 1) def revcmp(rev1, rev2): rev1 = map(int, string.split(rev1, '.')) rev2 = map(int, string.split(rev2, '.')) return cmp(rev1, rev2) def sort_file_data(file_data, roottype, sortdir, sortby, group_dirs): # convert sortdir into a sign bit s = sortdir == "down" and -1 or 1 # in cvs, revision numbers can't be compared meaningfully between # files, so try to do the right thing and compare dates instead if roottype == "cvs" and sortby == "rev": sortby = "date" def file_sort_sortby(file1, file2, sortby): # sort according to sortby if sortby == 'rev': return s * revcmp(file1.rev, file2.rev) elif sortby == 'date': return s * cmp(file2.date, file1.date) # latest date is first elif sortby == 'log': return s * cmp(file1.log, file2.log) elif sortby == 'author': return s * cmp(file1.author, file2.author) return s * cmp(file1.name, file2.name) def file_sort_cmp(file1, file2, sortby=sortby, group_dirs=group_dirs, 
s=s): # if we're grouping directories together, sorting is pretty # simple. a directory sorts "higher" than a non-directory, and # two directories are sorted as normal. if group_dirs: if file1.kind == vclib.DIR: if file2.kind == vclib.DIR: # two directories, no special handling. return file_sort_sortby(file1, file2, sortby) else: # file1 is a directory, it sorts first. return -1 elif file2.kind == vclib.DIR: # file2 is a directory, it sorts first. return 1 # we should have data on these. if not, then it is because we requested # a specific tag and that tag is not present on the file. if file1.rev is not None and file2.rev is not None: return file_sort_sortby(file1, file2, sortby) elif file1.rev is not None: return -1 elif file2.rev is not None: return 1 # sort by file name return s * cmp(file1.name, file2.name) file_data.sort(file_sort_cmp) def icmp(x, y): """case insensitive comparison""" return cmp(string.lower(x), string.lower(y)) def view_roots(request): if 'roots' not in request.cfg.options.allowed_views: raise debug.ViewVCException('Root listing view is disabled', '403 Forbidden') # add in the roots for the selection roots = [] expand_root_parents(request.cfg) allroots = list_roots(request) if len(allroots): rootnames = allroots.keys() rootnames.sort(icmp) for rootname in rootnames: root_path, root_type, lastmod = allroots[rootname] href = request.get_url(view_func=view_directory, where='', pathtype=vclib.DIR, params={'root': rootname}, escape=1) if root_type == vclib.SVN: log_href = request.get_url(view_func=view_log, where='', pathtype=vclib.DIR, params={'root': rootname}, escape=1) else: log_href = None roots.append(_item(name=request.server.escape(rootname), type=root_type, path=root_path, author=lastmod and lastmod.author or None, ago=lastmod and lastmod.ago or None, date=lastmod and lastmod.date or None, log=lastmod and lastmod.log or None, short_log=lastmod and lastmod.short_log or None, rev=lastmod and lastmod.rev or None, href=href, 
log_href=log_href)) data = common_template_data(request) data.merge(ezt.TemplateData({ 'roots' : roots, })) generate_page(request, "roots", data) def view_directory(request): cfg = request.cfg # For Subversion repositories, the revision acts as a weak validator for # the directory listing (to take into account template changes or # revision property changes). if request.roottype == 'svn': try: rev = request.repos._getrev(request.pathrev) except vclib.InvalidRevision: raise debug.ViewVCException('Invalid revision', '404 Not Found') tree_rev = request.repos.created_rev(request.where, rev) if check_freshness(request, None, str(tree_rev), weak=1): return # List current directory options = {} if request.roottype == 'cvs': hideattic = int(request.query_dict.get('hideattic', cfg.options.hide_attic)) options["cvs_subdirs"] = (cfg.options.show_subdir_lastmod and cfg.options.show_logs) file_data = request.repos.listdir(request.path_parts, request.pathrev, options) # sort with directories first, and using the "sortby" criteria sortby = request.query_dict.get('sortby', cfg.options.sort_by) or 'file' sortdir = request.query_dict.get('sortdir', 'up') # when paging and sorting by filename, we can greatly improve # performance by "cheating" -- first, we sort (we already have the # names), then we just fetch dirlogs for the needed entries. # however, when sorting by other properties or not paging, we've no # choice but to fetch dirlogs for everything. 
debug.t_start("dirlogs") if cfg.options.dir_pagesize and sortby == 'file': dirlogs_first = int(request.query_dict.get('dir_pagestart', 0)) if dirlogs_first > len(file_data): dirlogs_first = 0 dirlogs_last = dirlogs_first + cfg.options.dir_pagesize for file in file_data: file.rev = None file.date = None file.log = None file.author = None file.size = None file.lockinfo = None file.dead = None sort_file_data(file_data, request.roottype, sortdir, sortby, cfg.options.sort_group_dirs) # request dirlogs only for the slice of files in "this page" request.repos.dirlogs(request.path_parts, request.pathrev, file_data[dirlogs_first:dirlogs_last], options) else: request.repos.dirlogs(request.path_parts, request.pathrev, file_data, options) sort_file_data(file_data, request.roottype, sortdir, sortby, cfg.options.sort_group_dirs) debug.t_end("dirlogs") # If a regex is specified, build a compiled form thereof for filtering searchstr = None search_re = request.query_dict.get('search', '') if cfg.options.use_re_search and search_re: searchstr = re.compile(search_re) # loop through entries creating rows and changing these values rows = [ ] num_displayed = 0 num_dead = 0 # set some values to be used inside loop where = request.where where_prefix = where and where + '/' for file in file_data: row = _item(author=None, log=None, short_log=None, state=None, size=None, log_file=None, log_rev=None, graph_href=None, mime_type=None, date=None, ago=None, view_href=None, log_href=None, revision_href=None, annotate_href=None, download_href=None, download_text_href=None, prefer_markup=ezt.boolean(0)) if request.roottype == 'cvs' and file.absent: continue if cfg.options.hide_errorful_entries and file.errors: continue row.rev = file.rev row.author = file.author row.state = (request.roottype == 'cvs' and file.dead) and 'dead' or '' if file.date is not None: row.date = make_time_string(file.date, cfg) row.ago = html_time(request, file.date) if cfg.options.show_logs: debug.t_start("dirview_logformat") 
lf = LogFormatter(request, file.log) row.log = lf.get(maxlen=0, htmlize=1) row.short_log = lf.get(maxlen=cfg.options.short_log_len, htmlize=1) debug.t_end("dirview_logformat") row.lockinfo = file.lockinfo row.anchor = request.server.escape(file.name) row.name = request.server.escape(file.name) row.pathtype = (file.kind == vclib.FILE and 'file') or \ (file.kind == vclib.DIR and 'dir') row.errors = file.errors if file.kind == vclib.DIR: if cfg.options.hide_cvsroot \ and is_cvsroot_path(request.roottype, request.path_parts + [file.name]): continue row.view_href = request.get_url(view_func=view_directory, where=where_prefix+file.name, pathtype=vclib.DIR, params={}, escape=1) if request.roottype == 'svn': row.revision_href = request.get_url(view_func=view_revision, params={'revision': file.rev}, escape=1) if request.roottype == 'cvs' and file.rev is not None: row.rev = None if cfg.options.show_logs: row.log_file = file.newest_file row.log_rev = file.rev if request.roottype == 'svn': row.log_href = request.get_url(view_func=view_log, where=where_prefix + file.name, pathtype=vclib.DIR, params={}, escape=1) elif file.kind == vclib.FILE: if searchstr is not None: if request.roottype == 'cvs' and (file.errors or file.dead): continue if not search_file(request.repos, request.path_parts + [file.name], request.pathrev, searchstr): continue if request.roottype == 'cvs' and file.dead: num_dead = num_dead + 1 if hideattic: continue num_displayed = num_displayed + 1 file_where = where_prefix + file.name if request.roottype == 'svn': row.size = file.size row.mime_type, encoding = calculate_mime_type(request, _path_parts(file_where), file.rev) fvi = get_file_view_info(request, file_where, file.rev, row.mime_type) row.view_href = fvi.view_href row.download_href = fvi.download_href row.download_text_href = fvi.download_text_href row.annotate_href = fvi.annotate_href row.revision_href = fvi.revision_href row.prefer_markup = fvi.prefer_markup row.log_href = 
request.get_url(view_func=view_log, where=file_where, pathtype=vclib.FILE, params={}, escape=1) if cfg.options.use_cvsgraph and request.roottype == 'cvs': row.graph_href = request.get_url(view_func=view_cvsgraph, where=file_where, pathtype=vclib.FILE, params={}, escape=1) rows.append(row) # Prepare the data that will be passed to the template, based on the # common template data. data = common_template_data(request) data.merge(ezt.TemplateData({ 'entries' : rows, 'sortby' : sortby, 'sortdir' : sortdir, 'search_re' : request.server.escape(search_re), 'dir_pagestart' : None, 'sortby_file_href' : request.get_url(params={'sortby': 'file', 'sortdir': None}, escape=1), 'sortby_rev_href' : request.get_url(params={'sortby': 'rev', 'sortdir': None}, escape=1), 'sortby_date_href' : request.get_url(params={'sortby': 'date', 'sortdir': None}, escape=1), 'sortby_author_href' : request.get_url(params={'sortby': 'author', 'sortdir': None}, escape=1), 'sortby_log_href' : request.get_url(params={'sortby': 'log', 'sortdir': None}, escape=1), 'files_shown' : num_displayed, 'num_dead' : num_dead, 'youngest_rev' : None, 'youngest_rev_href' : None, 'selection_form' : None, 'attic_showing' : None, 'show_attic_href' : None, 'hide_attic_href' : None, 'branch_tags': None, 'plain_tags': None, 'properties': get_itemprops(request, request.path_parts, request.pathrev), 'tree_rev' : None, 'tree_rev_href' : None, 'dir_paging_action' : None, 'dir_paging_hidden_values' : [], 'search_re_action' : None, 'search_re_hidden_values' : [], # Populated by paging()/paging_sws() 'picklist' : [], 'picklist_len' : 0, # Populated by pathrev_form() 'pathrev_action' : None, 'pathrev_hidden_values' : [], 'pathrev_clear_action' : None, 'pathrev_clear_hidden_values' : [], 'pathrev' : None, 'lastrev' : None, })) # clicking on sort column reverses sort order if sortdir == 'down': revsortdir = None # 'up' else: revsortdir = 'down' if sortby in ['file', 'rev', 'date', 'log', 'author']: data['sortby_%s_href' % sortby] = 
  # NOTE(review): the expression below completes the
  # data['sortby_%s_href' % sortby] assignment begun on the previous
  # line -- clicking the current sort column reverses the sort order.
  request.get_url(params={'sortdir': revsortdir},
                  escape=1)

  # CVS doesn't support sorting by rev
  if request.roottype == "cvs":
    data['sortby_rev_href'] = None

  # set cvs-specific fields
  if request.roottype == 'cvs':
    plain_tags = options['cvs_tags']
    plain_tags.sort(icmp)
    plain_tags.reverse()
    data['plain_tags'] = plain_tags

    branch_tags = options['cvs_branches']
    branch_tags.sort(icmp)
    branch_tags.reverse()
    data['branch_tags'] = branch_tags

    data['attic_showing'] = ezt.boolean(not hideattic)
    data['show_attic_href'] = request.get_url(params={'hideattic': 0},
                                              escape=1)
    data['hide_attic_href'] = request.get_url(params={'hideattic': 1},
                                              escape=1)

  # set svn-specific fields
  elif request.roottype == 'svn':
    data['tree_rev'] = tree_rev
    data['tree_rev_href'] = request.get_url(view_func=view_revision,
                                            params={'revision': tree_rev},
                                            escape=1)
    data['youngest_rev'] = request.repos.get_youngest_revision()
    data['youngest_rev_href'] = request.get_url(view_func=view_revision,
                                                params={},
                                                escape=1)

  if cfg.options.dir_pagesize:
    data['dir_paging_action'], data['dir_paging_hidden_values'] = \
      request.get_form(params={'dir_pagestart': None})

  pathrev_form(request, data)

  if cfg.options.use_re_search:
    data['search_re_action'], data['search_re_hidden_values'] = \
      request.get_form(params={'search': None})

  # Slice the entry list down to the single requested page.
  if cfg.options.dir_pagesize:
    data['dir_pagestart'] = int(request.query_dict.get('dir_pagestart',0))
    data['entries'] = paging(data, 'entries', data['dir_pagestart'], 'name',
                             cfg.options.dir_pagesize)

  generate_page(request, "directory", data)


def paging(data, key, pagestart, local_name, pagesize):
  """Slice DATA[KEY] down to one page of at most PAGESIZE items,
  starting at index PAGESTART.

  Side effect: populates data['picklist'] (one _item per page, with
  .start/.end taken from each boundary entry's LOCAL_NAME attribute)
  and data['picklist_len'].
  """
  # Implement paging
  # Create the picklist
  picklist = data['picklist'] = []
  for i in range(0, len(data[key]), pagesize):
    pick = _item(start=None, end=None, count=None, more=ezt.boolean(0))
    pick.start = getattr(data[key][i], local_name)
    pick.count = i
    # integer division (Python 2): 1-based page number
    pick.page = (i / pagesize) + 1
    try:
      pick.end = getattr(data[key][i+pagesize-1], local_name)
    except IndexError:
      # last (partial) page: label with the final entry
      pick.end = getattr(data[key][-1], local_name)
    picklist.append(pick)
  data['picklist_len'] = len(picklist)
  # Need to fix
  # pagestart can be greater than the length of data[key] if you
  # select a tag or search while on a page other than the first.
  # Should reset to the first page, this test won't do that every
  # time that it is needed.
  # Problem might go away if we don't hide non-matching files when
  # selecting for tags or searching.
  if pagestart > len(data[key]):
    pagestart = 0
  pageend = pagestart + pagesize
  # Slice
  return data[key][pagestart:pageend]


def paging_sws(data, key, pagestart, local_name, pagesize,
               extra_pages, offset):
  """Implement sliding window-style paging."""
  # Create the picklist
  last_requested = pagestart + (extra_pages * pagesize)
  picklist = data['picklist'] = []
  has_more = ezt.boolean(0)
  for i in range(0, len(data[key]), pagesize):
    pick = _item(start=None, end=None, count=None, more=ezt.boolean(0))
    pick.start = getattr(data[key][i], local_name)
    # counts are global (offset by the window's position), not
    # window-relative
    pick.count = offset + i
    pick.page = (pick.count / pagesize) + 1
    try:
      pick.end = getattr(data[key][i+pagesize-1], local_name)
    except IndexError:
      pick.end = getattr(data[key][-1], local_name)
    picklist.append(pick)
    # stop generating picks past the window; flag that more pages exist
    if pick.count >= last_requested:
      pick.more = ezt.boolean(1)
      break
  data['picklist_len'] = len(picklist)
  first = pagestart - offset
  # FIXME: first can be greater than the length of data[key] if
  # you select a tag or search while on a page other than the first.
  # Should reset to the first page, but this test won't do that every
  # time that it is needed.  Problem might go away if we don't hide
  # non-matching files when selecting for tags or searching.
  if first > len(data[key]):
    pagestart = 0
  pageend = first + pagesize
  # Slice
  return data[key][first:pageend]


def pathrev_form(request, data):
  """Populate DATA with the 'sticky revision' form fields
  (pathrev_action / pathrev_hidden_values / pathrev_clear_* /
  pathrev / lastrev) and return the last-changed revision (or None).
  """
  lastrev = None

  if request.roottype == 'svn':
    # Subversion roots submit through redirect_pathrev so the path can
    # be adjusted for the target revision.
    data['pathrev_action'], data['pathrev_hidden_values'] = \
      request.get_form(view_func=redirect_pathrev,
                       params={'pathrev': None,
                               'orig_path': request.where,
                               'orig_pathtype': request.pathtype,
                               'orig_pathrev': request.pathrev,
                               'orig_view': _view_codes.get(request.view_func)})

    if request.pathrev:
      youngest = request.repos.get_youngest_revision()
      lastrev = request.repos.last_rev(request.where, request.pathrev,
                                       youngest)[0]
      # a lastrev equal to HEAD carries no extra information
      if lastrev == youngest:
        lastrev = None

  data['pathrev'] = request.pathrev
  data['lastrev'] = lastrev

  action, hidden_values = request.get_form(params={'pathrev': lastrev})
  if request.roottype != 'svn':
    data['pathrev_action'] = action
    data['pathrev_hidden_values'] = hidden_values
  data['pathrev_clear_action'] = action
  data['pathrev_clear_hidden_values'] = hidden_values

  return lastrev


def redirect_pathrev(request):
  """Redirect a sticky-revision form submission (SVN only) back to the
  originating view, translating the path if it moved between revisions.
  """
  assert request.roottype == 'svn'
  new_pathrev = request.query_dict.get('pathrev') or None
  path = request.query_dict.get('orig_path', '')
  pathtype = request.query_dict.get('orig_pathtype')
  pathrev = request.query_dict.get('orig_pathrev')
  view = _views.get(request.query_dict.get('orig_view'))

  youngest = request.repos.get_youngest_revision()

  # go out of the way to allow revision numbers higher than youngest
  try:
    new_pathrev = int(new_pathrev)
  except ValueError:
    # non-numeric input: fall back to HEAD
    new_pathrev = youngest
  except TypeError:
    # new_pathrev is None (cleared): leave it alone
    pass
  else:
    if new_pathrev > youngest:
      new_pathrev = youngest

  if _repos_pathtype(request.repos, _path_parts(path), new_pathrev):
    pathrev = new_pathrev
  else:
    # path doesn't exist at the requested revision; find where it last did
    pathrev, path = request.repos.last_rev(path, pathrev, new_pathrev)

  # allow clearing sticky revision by submitting empty string
  if new_pathrev is None and pathrev == youngest:
    pathrev = None

  request.server.redirect(request.get_url(view_func=view,
                                          where=path,
                                          pathtype=pathtype,
                                          params={'pathrev': pathrev}))
def view_log(request):
  """Render the revision-log view for a file or (SVN-only) directory."""
  cfg = request.cfg
  diff_format = request.query_dict.get('diff_format', cfg.options.diff_format)
  pathtype = request.pathtype

  if pathtype is vclib.DIR:
    # CVS has no meaningful directory-level log
    if request.roottype == 'cvs':
      raise debug.ViewVCException('Unsupported feature: log view on CVS '
                                  'directory', '400 Bad Request')
    mime_type = encoding = None
  else:
    mime_type, encoding = calculate_mime_type(request,
                                              request.path_parts,
                                              request.pathrev)

  options = {}
  options['svn_show_all_dir_logs'] = 1 ### someday make this optional?
  options['svn_cross_copies'] = cfg.options.cross_copies

  logsort = request.query_dict.get('logsort', cfg.options.log_sort)
  if request.roottype == "svn":
    # SVN logs are always in native (revision) order
    sortby = vclib.SORTBY_DEFAULT
    logsort = None
  else:
    if logsort == 'date':
      sortby = vclib.SORTBY_DATE
    elif logsort == 'rev':
      sortby = vclib.SORTBY_REV
    else:
      sortby = vclib.SORTBY_DEFAULT

  # When paging, fetch a sliding window of revisions around the
  # requested page rather than the whole history.
  first = last = 0
  log_pagestart = None
  if cfg.options.log_pagesize:
    log_pagestart = int(request.query_dict.get('log_pagestart', 0))
    total = cfg.options.log_pagesextra * cfg.options.log_pagesize
    first = log_pagestart - min(log_pagestart, total)
    last = log_pagestart + (total + cfg.options.log_pagesize) + 1
  show_revs = request.repos.itemlog(request.path_parts, request.pathrev,
                                    sortby, first, last - first, options)

  # selected revision
  selected_rev = request.query_dict.get('r1')

  entries = [ ]
  name_printed = { }
  cvs = request.roottype == 'cvs'
  for rev in show_revs:
    entry = _item()
    entry.rev = rev.string
    entry.state = (cvs and rev.dead and 'dead')
    entry.author = rev.author
    entry.changed = rev.changed
    entry.date = make_time_string(rev.date, cfg)
    entry.ago = None
    if rev.date is not None:
      entry.ago = html_time(request, rev.date, 1)
    entry.size = rev.size
    entry.lockinfo = rev.lockinfo
    entry.branch_point = None
    entry.next_main = None
    entry.orig_path = None
    entry.copy_path = None

    lf = LogFormatter(request, rev.log or '')
    entry.log = lf.get(maxlen=0, htmlize=1)

    entry.view_href = None
    entry.download_href = None
    entry.download_text_href = None
    entry.annotate_href = None
    entry.revision_href = None
    entry.sel_for_diff_href = None
    entry.diff_to_sel_href = None
    entry.diff_to_prev_href = None
    entry.diff_to_branch_href = None
    entry.diff_to_main_href = None

    if request.roottype == 'cvs':
      prev = rev.prev or rev.parent
      entry.prev = prev and prev.string

      # CVS vendor branches have odd branch numbers (e.g. 1.1.1)
      branch = rev.branch_number
      entry.vendor_branch = ezt.boolean(branch and branch[2] % 2 == 1)

      entry.branches = prep_tags(request, rev.branches)
      entry.tags = prep_tags(request, rev.tags)
      entry.branch_points = prep_tags(request, rev.branch_points)

      # only print branch names the first time a branch is seen
      entry.tag_names = map(lambda x: x.name, rev.tags)
      if branch and not name_printed.has_key(branch):
        entry.branch_names = map(lambda x: x.name, rev.branches)
        name_printed[branch] = 1
      else:
        entry.branch_names = [ ]

      if rev.parent and rev.parent is not prev and not entry.vendor_branch:
        entry.branch_point = rev.parent.string

      # if it's the last revision on a branch then diff against the
      # last revision on the higher branch (e.g. change is committed and
      # brought over to -stable)
      if not rev.next and rev.parent and rev.parent.next:
        r = rev.parent.next
        while r.next:
          r = r.next
        entry.next_main = r.string

    elif request.roottype == 'svn':
      entry.prev = rev.prev and rev.prev.string
      entry.branches = entry.tags = entry.branch_points = [ ]
      entry.tag_names = entry.branch_names = [ ]
      entry.vendor_branch = None
      if rev.filename != request.where:
        entry.orig_path = rev.filename
      entry.copy_path = rev.copy_path
      entry.copy_rev = rev.copy_rev

      if entry.orig_path:
        entry.orig_href = request.get_url(view_func=view_log,
                                          where=entry.orig_path,
                                          pathtype=vclib.FILE,
                                          params={'pathrev': rev.string},
                                          escape=1)

      if rev.copy_path:
        entry.copy_href = request.get_url(view_func=view_log,
                                          where=rev.copy_path,
                                          pathtype=vclib.FILE,
                                          params={'pathrev': rev.copy_rev},
                                          escape=1)

    # view/download links
    if pathtype is vclib.FILE:
      fvi = get_file_view_info(request, request.where, rev.string, mime_type)
      entry.view_href = fvi.view_href
      entry.download_href = fvi.download_href
      entry.download_text_href = fvi.download_text_href
      entry.annotate_href = fvi.annotate_href
      entry.revision_href = fvi.revision_href
      entry.prefer_markup = fvi.prefer_markup
    else:
      entry.revision_href = request.get_url(view_func=view_revision,
                                            params={'revision': rev.string},
                                            escape=1)
      entry.view_href = request.get_url(view_func=view_directory,
                                        where=rev.filename,
                                        pathtype=vclib.DIR,
                                        params={'pathrev': rev.string},
                                        escape=1)

    # calculate diff links
    if selected_rev != entry.rev:
      entry.sel_for_diff_href = \
        request.get_url(view_func=view_log,
                        params={'r1': entry.rev,
                                'log_pagestart': log_pagestart},
                        escape=1)
    if entry.prev is not None:
      entry.diff_to_prev_href = \
        request.get_url(view_func=view_diff,
                        params={'r1': entry.prev,
                                'r2': entry.rev,
                                'diff_format': None},
                        escape=1)
    # only offer "diff to selected" when the selection isn't already
    # covered by one of the standard diff links
    if selected_rev and \
           selected_rev != str(entry.rev) and \
           selected_rev != str(entry.prev) and \
           selected_rev != str(entry.branch_point) and \
           selected_rev != str(entry.next_main):
      entry.diff_to_sel_href = \
        request.get_url(view_func=view_diff,
                        params={'r1': selected_rev,
                                'r2': entry.rev,
                                'diff_format': None},
                        escape=1)

    if entry.next_main:
      entry.diff_to_main_href = \
        request.get_url(view_func=view_diff,
                        params={'r1': entry.next_main,
                                'r2': entry.rev,
                                'diff_format': None},
                        escape=1)
    if entry.branch_point:
      entry.diff_to_branch_href = \
        request.get_url(view_func=view_diff,
                        params={'r1': entry.branch_point,
                                'r2': entry.rev,
                                'diff_format': None},
                        escape=1)

    # Save our escaping until the end so stuff above works
    if entry.orig_path:
      entry.orig_path = request.server.escape(entry.orig_path)
    if entry.copy_path:
      entry.copy_path = request.server.escape(entry.copy_path)
    entries.append(entry)

  diff_select_action, diff_select_hidden_values = \
    request.get_form(view_func=view_diff,
                     params={'r1': None, 'r2': None, 'tr1': None,
                             'tr2': None, 'diff_format': None})
  logsort_action, logsort_hidden_values = \
    request.get_form(params={'logsort': None})

  data = common_template_data(request)
  data.merge(ezt.TemplateData({
    'default_branch' : None,
    'mime_type' : mime_type,
    'rev_selected' : selected_rev,
    'diff_format' : diff_format,
    'logsort' : logsort,
    'human_readable' : ezt.boolean(diff_format in ('f', 'h', 'l')),
    'log_pagestart' : None,
    'log_paging_action' : None,
    'log_paging_hidden_values' : [],
    'entries': entries,
    'head_prefer_markup' : ezt.boolean(0),
    'head_view_href' : None,
    'head_download_href': None,
    'head_download_text_href': None,
    'head_annotate_href': None,
    'tag_prefer_markup' : ezt.boolean(0),
    'tag_view_href' : None,
    'tag_download_href': None,
    'tag_download_text_href': None,
    'tag_annotate_href': None,
    'diff_select_action' : diff_select_action,
    'diff_select_hidden_values' : diff_select_hidden_values,
    'logsort_action' : logsort_action,
    'logsort_hidden_values' : logsort_hidden_values,
    'tags' : [],
    'branch_tags' : [],
    'plain_tags' : [],

    # Populated by paging()/paging_sws()
    'picklist' : [],
    'picklist_len' : 0,

    # Populated by pathrev_form()
    'pathrev_action' : None,
    'pathrev_hidden_values' : [],
    'pathrev_clear_action' : None,
    'pathrev_clear_hidden_values' : [],
    'pathrev' : None,
    'lastrev' : None,
  }))

  lastrev = pathrev_form(request, data)

  if pathtype is vclib.FILE:
    if not request.pathrev or lastrev is None:
      fvi = get_file_view_info(request, request.where, None, mime_type, None)
      data['head_view_href']= fvi.view_href
      data['head_download_href']= fvi.download_href
      data['head_download_text_href']= fvi.download_text_href
      data['head_annotate_href']= fvi.annotate_href
      data['head_prefer_markup']= fvi.prefer_markup

    if request.pathrev and request.roottype == 'cvs':
      fvi = get_file_view_info(request, request.where, None, mime_type)
      data['tag_view_href']= fvi.view_href
      data['tag_download_href']= fvi.download_href
      data['tag_download_text_href']= fvi.download_text_href
      data['tag_annotate_href']= fvi.annotate_href
      data['tag_prefer_markup']= fvi.prefer_markup
  else:
    data['head_view_href'] = request.get_url(view_func=view_directory,
                                             params={}, escape=1)

  # tag data is only present for CVS (filled in by itemlog via options)
  taginfo = options.get('cvs_tags', {})
  tagitems = taginfo.items()
  tagitems.sort()
  tagitems.reverse()

  main = taginfo.get('MAIN')
  if main:
    # Default branch may have multiple names so we list them
    branches = []
    for branch in main.aliases:
      # Don't list MAIN
      if branch is not main:
        branches.append(branch)
    data['default_branch'] = prep_tags(request, branches)

  for tag, rev in tagitems:
    if rev.co_rev:
      data['tags'].append(_item(rev=rev.co_rev.string, name=tag))
    if rev.is_branch:
      data['branch_tags'].append(tag)
    else:
      data['plain_tags'].append(tag)

  if cfg.options.log_pagesize:
    data['log_paging_action'], data['log_paging_hidden_values'] = \
      request.get_form(params={'log_pagestart': None,
                               'r1': selected_rev,
                               })
    data['log_pagestart'] = int(request.query_dict.get('log_pagestart',0))
    data['entries'] = paging_sws(data, 'entries', data['log_pagestart'],
                                 'rev', cfg.options.log_pagesize,
                                 cfg.options.log_pagesextra, first)

  generate_page(request, "log", data)


def view_checkout(request):
  """Stream the raw contents of a file ('co' view) to the client."""
  cfg = request.cfg

  if 'co' not in cfg.options.allowed_views:
    raise debug.ViewVCException('Checkout view is disabled',
                                '403 Forbidden')
  if request.pathtype != vclib.FILE:
    raise debug.ViewVCException('Unsupported feature: checkout view on '
                                'directory', '400 Bad Request')

  path, rev = _orig_path(request)
  fp, revision = request.repos.openfile(path, rev, {})

  # The revision number acts as a strong validator.
  if not check_freshness(request, None, revision):
    mime_type, encoding = calculate_mime_type(request, path, rev)
    # explicit ?content-type= wins; fall back to guessed type, then plain
    mime_type = request.query_dict.get('content-type') \
                or mime_type \
                or 'text/plain'
    server_fp = get_writeready_server_file(request, mime_type, encoding)
    copy_stream(fp, server_fp)
  fp.close()


def view_cvsgraph_image(request):
  "output the image rendered by cvsgraph"

  # this function is derived from cgi/cvsgraphmkimg.cgi
  cfg = request.cfg

  if not cfg.options.use_cvsgraph:
    raise debug.ViewVCException('Graph view is disabled', '403 Forbidden')

  # If cvsgraph can't find its supporting libraries, uncomment and set
  # accordingly.  Do the same in view_cvsgraph().
  #os.environ['LD_LIBRARY_PATH'] = '/usr/lib:/usr/local/lib:/path/to/cvsgraph'

  rcsfile = request.repos.rcsfile(request.path_parts)
  fp = popen.popen(cfg.utilities.cvsgraph or 'cvsgraph',
                   ("-c", cfg.path(cfg.options.cvsgraph_conf),
                    "-r", request.repos.rootpath,
                    rcsfile), 'rb', 0)
  copy_stream(fp, get_writeready_server_file(request, 'image/png'))
  fp.close()


def view_cvsgraph(request):
  "output a page containing an image rendered by cvsgraph"

  cfg = request.cfg

  if not cfg.options.use_cvsgraph:
    raise debug.ViewVCException('Graph view is disabled', '403 Forbidden')

  # If cvsgraph can't find its supporting libraries, uncomment and set
  # accordingly.  Do the same in view_cvsgraph_image().
  #os.environ['LD_LIBRARY_PATH'] = '/usr/lib:/usr/local/lib:/path/to/cvsgraph'

  imagesrc = request.get_url(view_func=view_cvsgraph_image, escape=1)
  mime_type = guess_mime(request.where)
  view = default_view(mime_type, cfg)
  up_where = _path_join(request.path_parts[:-1])

  # Create an image map: cvsgraph's -3/-4/-5/-6 options supply the URL
  # templates used for the clickable regions.
  rcsfile = request.repos.rcsfile(request.path_parts)
  fp = popen.popen(cfg.utilities.cvsgraph or 'cvsgraph',
                   ("-i",
                    "-c", cfg.path(cfg.options.cvsgraph_conf),
                    "-r", request.repos.rootpath,
                    "-x", "x",
                    "-3", request.get_url(view_func=view_log, params={},
                                          escape=1),
                    "-4", request.get_url(view_func=view,
                                          params={'revision': None},
                                          escape=1, partial=1),
                    "-5", request.get_url(view_func=view_diff,
                                          params={'r1': None, 'r2': None},
                                          escape=1, partial=1),
                    "-6", request.get_url(view_func=view_directory,
                                          where=up_where,
                                          pathtype=vclib.DIR,
                                          params={'pathrev': None},
                                          escape=1, partial=1),
                    rcsfile), 'rb', 0)

  data = common_template_data(request)
  data.merge(ezt.TemplateData({
    'imagemap' : fp,
    'imagesrc' : imagesrc,
    }))
  generate_page(request, "graph", data)


def search_file(repos, path_parts, rev, search_re):
  """Return 1 iff the contents of the file at PATH_PARTS in REPOS as
  of revision REV matches regular expression SEARCH_RE."""

  # Read in each line of a checked-out file, and then use re.search to
  # search line.  (continuation of search_file's checked-out-file scan)
  fp = repos.openfile(path_parts, rev, {})[0]
  matches = 0
  while 1:
    line = fp.readline()
    if not line:
      break
    if search_re.search(line):
      matches = 1
      fp.close()
      break
  # NOTE(review): fp is only closed when a match is found; on EOF
  # without a match the file object is never closed -- consider
  # try/finally here.
  return matches


def view_doc(request):
  """Serve ViewVC static content locally.

  Using this avoids the need for modifying the setup of the web server.
  """
  cfg = request.cfg
  document = request.where
  # NOTE(review): 'document' comes straight from the request path;
  # this relies on upstream path sanitization keeping '..' segments
  # from reaching os.path.join -- verify against the request parser.
  filename = cfg.path(os.path.join(cfg.options.template_dir,
                                   "docroot", document))

  # Stat the file to get content length and last-modified date.
  try:
    info = os.stat(filename)
  except OSError, v:
    raise debug.ViewVCException('Static file "%s" not available (%s)'
                                % (document, str(v)), '404 Not Found')
  content_length = str(info[stat.ST_SIZE])
  last_modified = info[stat.ST_MTIME]

  # content_length + mtime makes a pretty good etag.
  if check_freshness(request, last_modified,
                     "%s-%s" % (content_length, last_modified)):
    return

  try:
    fp = open(filename, "rb")
  except IOError, v:
    raise debug.ViewVCException('Static file "%s" not available (%s)'
                                % (document, str(v)), '404 Not Found')

  # crude extension-based content-type selection
  if document[-3:] == 'png':
    mime_type = 'image/png'
  elif document[-3:] == 'jpg':
    mime_type = 'image/jpeg'
  elif document[-3:] == 'gif':
    mime_type = 'image/gif'
  elif document[-3:] == 'css':
    mime_type = 'text/css'
  else: # assume HTML:
    mime_type = None
  copy_stream(fp, get_writeready_server_file(request, mime_type,
                                             content_length=content_length))
  fp.close()


def rcsdiff_date_reformat(date_str, cfg):
  """Reformat an rcsdiff header date into the configured display form;
  return DATE_STR unchanged if it doesn't parse, None for None."""
  if date_str is None:
    return None
  try:
    date = compat.cvs_strptime(date_str)
  except ValueError:
    return date_str
  return make_time_string(compat.timegm(date), cfg)


# matches a context/unified diff file header line and captures the
# date (group 1) and revision number (group 2)
_re_extract_rev = re.compile(r'^[-+*]{3} [^\t]+\t([^\t]+)\t((\d+\.)*\d+)$')
# matches a unified-diff hunk header, capturing the left line number,
# right line number, and any trailing function context
_re_extract_info = re.compile(r'@@ \-([0-9]+).*\+([0-9]+).*@@(.*)')


class DiffSource:
  """Adapt a raw diff stream into a sequence of template-ready row
  items, accessed via the (old-style) __getitem__ iteration protocol.
  """

  def __init__(self, fp, cfg):
    self.fp = fp
    self.cfg = cfg
    self.save_line = None
    self.line_number = None
    self.prev_line_number = None

    # keep track of where we are during an iteration
    self.idx = -1
    self.last = None

    # these will be set once we start reading
    self.state = 'no-changes'
    self.left_col = [ ]
    self.right_col = [ ]

  def __getitem__(self, idx):
    # sequential access only: the template engine walks items in order
    if idx == self.idx:
      return self.last
    if idx != self.idx + 1:
      raise DiffSequencingError()

    # keep calling _get_row until it gives us something. sometimes, it
    # doesn't return a row immediately because it is accumulating changes.
    # when it is out of data, _get_row will raise IndexError.
    while 1:
      item = self._get_row()
      if item:
        self.idx = idx
        self.last = item
        return item

  def _format_text(self, text):
    """HTML-escape one diff line, handling tabs and optional
    'breakable' long-line markup."""
    text = string.rstrip(text, '\r\n')
    if self.cfg.options.tabsize > 0:
      text = string.expandtabs(text, self.cfg.options.tabsize)
    hr_breakable = self.cfg.options.hr_breakable

    # in the code below, "\x01" will be our stand-in for "&". We don't want
    # to insert "&" because it would get escaped by sapi.escape().  Similarly,
    # we use "\x02" as a stand-in for "<br>"

    if hr_breakable > 1 and len(text) > hr_breakable:
      text = re.sub('(' + ('.' * hr_breakable) + ')', '\\1\x02', text)
    if hr_breakable:
      # make every other space "breakable"
      text = string.replace(text, '  ', ' \x01nbsp;')
    else:
      text = string.replace(text, ' ', '\x01nbsp;')
    text = sapi.escape(text)
    text = string.replace(text, '\x01', '&')
    text = string.replace(text, '\x02',
                          '<span style="color:red">\</span><br />')
    return text

  def _get_row(self):
    # finish any pending flush of accumulated change lines first
    if self.state[:5] == 'flush':
      item = self._flush_row()
      if item:
        return item
      self.state = 'dump'

    if self.save_line:
      line = self.save_line
      self.save_line = None
    else:
      line = self.fp.readline()

    if not line:
      if self.state == 'no-changes':
        self.state = 'done'
        return _item(type='no-changes')

      # see if there are lines to flush
      if self.left_col or self.right_col:
        # move into the flushing state
        self.state = 'flush-' + self.state
        return None

      # nothing more to return
      raise IndexError

    if line[:2] == '@@':
      # hunk header: reset column accumulators and line counters
      self.state = 'dump'
      self.left_col = [ ]
      self.right_col = [ ]

      match = _re_extract_info.match(line)
      self.line_number = int(match.group(2)) - 1
      self.prev_line_number = int(match.group(1)) - 1
      return _item(type='header',
                   line_info_left=match.group(1),
                   line_info_right=match.group(2),
                   line_info_extra=self._format_text(match.group(3)))

    if line[0] == '\\':
      # \ No newline at end of file

      # move into the flushing state. note: it doesn't matter if we really
      # have data to flush or not; that will be figured out later
      self.state = 'flush-' + self.state
      return None

    diff_code = line[0]
    output = self._format_text(line[1:])

    if diff_code == '+':
      if self.state == 'dump':
        self.line_number = self.line_number + 1
        return _item(type='add', right=output, line_number=self.line_number)

      self.state = 'pre-change-add'
      self.right_col.append(output)
      return None

    if diff_code == '-':
      self.state = 'pre-change-remove'
      self.left_col.append(output)
      return None  # early exit to avoid line in

    if self.left_col or self.right_col:
      # save the line for processing again later, and move into the
      # flushing state
      self.save_line = line
      self.state = 'flush-' + self.state
      return None

    self.line_number = self.line_number + 1
    self.prev_line_number = self.prev_line_number + 1
    return _item(type='context', left=output, right=output,
                 line_number=self.line_number)

  def _flush_row(self):
    # drain the accumulated left/right columns one row at a time
    if not self.left_col and not self.right_col:
      # nothing more to flush
      return None

    if self.state == 'flush-pre-change-remove':
      self.prev_line_number = self.prev_line_number + 1
      return _item(type='remove', left=self.left_col.pop(0),
                   line_number=self.prev_line_number)

    # state == flush-pre-change-add
    item = _item(type='change',
                 have_left=ezt.boolean(0),
                 have_right=ezt.boolean(0))
    if self.left_col:
      self.prev_line_number = self.prev_line_number + 1
      item.have_left = ezt.boolean(1)
      item.left = self.left_col.pop(0)
      item.line_number = self.prev_line_number
    if self.right_col:
      self.line_number = self.line_number + 1
      item.have_right = ezt.boolean(1)
      item.right = self.right_col.pop(0)
      item.line_number = self.line_number
    return item


class DiffSequencingError(Exception):
  # raised by DiffSource when items are requested out of order
  pass
def diff_parse_headers(fp, diff_type, path1, path2, rev1, rev2,
                       sym1=None, sym2=None):
  """Consume the header lines of the diff stream FP, rewriting the
  file-name lines to use PATH1/PATH2 (with optional tag names
  SYM1/SYM2).

  Returns (date1, date2, flag, joined_header_text) where flag is one
  of None, _RCSDIFF_IS_BINARY or _RCSDIFF_ERROR.  Raises
  ViewVCException if the revisions found in the headers don't match
  REV1/REV2.
  """
  date1 = date2 = log_rev1 = log_rev2 = flag = None
  header_lines = []

  if diff_type == vclib.UNIFIED:
    f1 = '--- '
    f2 = '+++ '
  elif diff_type == vclib.CONTEXT:
    f1 = '*** '
    f2 = '--- '
  else:
    # side-by-side and friends carry no parsable headers
    f1 = f2 = None

  # If we're parsing headers, then parse and tweak the diff headers,
  # collecting them in an array until we've read and handled them all.
  if f1 and f2:
    parsing = 1
    len_f1 = len(f1)
    len_f2 = len(f2)
    while parsing:
      line = fp.readline()
      if not line:
        break

      if line[:len(f1)] == f1:
        match = _re_extract_rev.match(line)
        if match:
          date1 = match.group(1)
          log_rev1 = match.group(2)
        line = '%s%s\t%s\t%s%s\n' % (f1, path1, date1, log_rev1,
                                     sym1 and ' ' + sym1 or '')
      elif line[:len(f2)] == f2:
        match = _re_extract_rev.match(line)
        if match:
          date2 = match.group(1)
          log_rev2 = match.group(2)
        line = '%s%s\t%s\t%s%s\n' % (f2, path2, date2, log_rev2,
                                     sym2 and ' ' + sym2 or '')
        # the second file header is the last header line
        parsing = 0
      elif line[:3] == 'Bin':
        flag = _RCSDIFF_IS_BINARY
        parsing = 0
      elif (string.find(line, 'not found') != -1 or
            string.find(line, 'illegal option') != -1):
        flag = _RCSDIFF_ERROR
        parsing = 0
      header_lines.append(line)

  if (log_rev1 and log_rev1 != rev1):
    raise debug.ViewVCException('rcsdiff found revision %s, but expected '
                                'revision %s' % (log_rev1, rev1),
                                '500 Internal Server Error')
  if (log_rev2 and log_rev2 != rev2):
    raise debug.ViewVCException('rcsdiff found revision %s, but expected '
                                'revision %s' % (log_rev2, rev2),
                                '500 Internal Server Error')

  return date1, date2, flag, string.join(header_lines, '')


def _get_diff_path_parts(request, query_key, rev, base_rev):
  """Resolve the path-parts for one side of a diff: an explicit
  QUERY_KEY parameter wins; otherwise (SVN) trace the path's location
  from BASE_REV to REV; otherwise use the request path."""
  repos = request.repos
  if request.query_dict.has_key(query_key):
    parts = _path_parts(request.query_dict[query_key])
  elif request.roottype == 'svn':
    try:
      parts = _path_parts(repos.get_location(request.where,
                                             repos._getrev(base_rev),
                                             repos._getrev(rev)))
    except vclib.InvalidRevision:
      raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
                                  'to diff', '400 Bad Request')
    except vclib.ItemNotFound:
      raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
                                  'to diff', '400 Bad Request')
  else:
    parts = request.path_parts
  return parts


def setup_diff(request):
  """Parse and normalize the diff parameters from the request.

  Returns (p1, p2, rev1, rev2, sym1, sym2), with revisions ordered so
  rev1 <= rev2 (sides are swapped if necessary).
  """
  query_dict = request.query_dict

  rev1 = r1 = query_dict['r1']
  rev2 = r2 = query_dict['r2']
  sym1 = sym2 = None

  # hack on the diff revisions: 'text' means the revision was typed
  # into the form's text field; 'REV:TAG' carries a symbolic name
  if r1 == 'text':
    rev1 = query_dict.get('tr1', None)
    if not rev1:
      raise debug.ViewVCException('Missing revision from the diff '
                                  'form text field', '400 Bad Request')
  else:
    idx = string.find(r1, ':')
    if idx == -1:
      rev1 = r1
    else:
      rev1 = r1[:idx]
      sym1 = r1[idx+1:]

  if r2 == 'text':
    rev2 = query_dict.get('tr2', None)
    if not rev2:
      raise debug.ViewVCException('Missing revision from the diff '
                                  'form text field', '400 Bad Request')
    sym2 = ''
  else:
    idx = string.find(r2, ':')
    if idx == -1:
      rev2 = r2
    else:
      rev2 = r2[:idx]
      sym2 = r2[idx+1:]

  if request.roottype == 'svn':
    try:
      rev1 = str(request.repos._getrev(rev1))
      rev2 = str(request.repos._getrev(rev2))
    except vclib.InvalidRevision:
      raise debug.ViewVCException('Invalid revision(s) passed to diff',
                                  '400 Bad Request')

  p1 = _get_diff_path_parts(request, 'p1', rev1, request.pathrev)
  p2 = _get_diff_path_parts(request, 'p2', rev2, request.pathrev)

  try:
    # canonicalize: always diff older -> newer
    if revcmp(rev1, rev2) > 0:
      rev1, rev2 = rev2, rev1
      sym1, sym2 = sym2, sym1
      p1, p2 = p2, p1
  except ValueError:
    raise debug.ViewVCException('Invalid revision(s) passed to diff',
                                '400 Bad Request')

  return p1, p2, rev1, rev2, sym1, sym2


def view_patch(request):
  """Stream a plain-text patch (context or unified) between two
  revisions."""
  if 'diff' not in request.cfg.options.allowed_views:
    raise debug.ViewVCException('Diff generation is disabled',
                                '403 Forbidden')

  cfg = request.cfg
  query_dict = request.query_dict

  p1, p2, rev1, rev2, sym1, sym2 = setup_diff(request)

  mime_type1, encoding1 = calculate_mime_type(request, p1, rev1)
  mime_type2, encoding2 = calculate_mime_type(request, p2, rev2)
  if is_binary_file_mime_type(mime_type1, cfg) or \
     is_binary_file_mime_type(mime_type2, cfg):
    raise debug.ViewVCException('Display of binary file content disabled '
                                'by configuration',
                                '403 Forbidden')

  # In the absence of a format dictation in the CGI params, we'll let
  # use the configured diff format, allowing 'c' to mean 'c' and
  # anything else to mean 'u'.
  format = query_dict.get('diff_format',
                          cfg.options.diff_format == 'c' and 'c' or 'u')
  if format == 'c':
    diff_type = vclib.CONTEXT
  elif format == 'u':
    diff_type = vclib.UNIFIED
  else:
    raise debug.ViewVCException('Diff format %s not understood'
                                % format, '400 Bad Request')

  try:
    fp = request.repos.rawdiff(p1, rev1, p2, rev2, diff_type)
  except vclib.InvalidRevision:
    raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
                                'to diff', '400 Bad Request')
  path_left = _path_join(p1)
  path_right = _path_join(p2)
  date1, date2, flag, headers = diff_parse_headers(fp, diff_type,
                                                   path_left, path_right,
                                                   rev1, rev2, sym1, sym2)

  server_fp = get_writeready_server_file(request, 'text/plain')
  server_fp.write(headers)
  copy_stream(fp, server_fp)
  fp.close()


def view_diff(request):
  """Render the HTML diff view between two revisions, in the format
  requested via ?diff_format= (context/side-by-side/unified and the
  human-readable variants)."""
  if 'diff' not in request.cfg.options.allowed_views:
    raise debug.ViewVCException('Diff generation is disabled',
                                '403 Forbidden')

  cfg = request.cfg
  query_dict = request.query_dict

  p1, p2, rev1, rev2, sym1, sym2 = setup_diff(request)

  mime_type1, encoding1 = calculate_mime_type(request, p1, rev1)
  mime_type2, encoding2 = calculate_mime_type(request, p2, rev2)
  if is_binary_file_mime_type(mime_type1, cfg) or \
     is_binary_file_mime_type(mime_type2, cfg):
    raise debug.ViewVCException('Display of binary file content disabled '
                                'by configuration', '403 Forbidden')

  # since templates are in use and subversion allows changes to the dates,
  # we can't provide a strong etag
  if check_freshness(request, None, '%s-%s' % (rev1, rev2), weak=1):
    return

  # TODO: Is the slice necessary, or is limit enough?
  log_entry1 = request.repos.itemlog(p1, rev1, vclib.SORTBY_REV,
                                     0, 1, {})[-1]
  log_entry2 = request.repos.itemlog(p2, rev2, vclib.SORTBY_REV,
                                     0, 1, {})[-1]
  ago1 = log_entry1.date is not None \
         and html_time(request, log_entry1.date, 1) or None
  ago2 = log_entry2.date is not None \
         and html_time(request, log_entry2.date, 2) or None

  diff_type = None
  diff_options = {}
  human_readable = 0

  format = query_dict.get('diff_format', cfg.options.diff_format)
  if format == 'c':
    diff_type = vclib.CONTEXT
  elif format == 's':
    diff_type = vclib.SIDE_BY_SIDE
  elif format == 'l':
    # 'long' human-readable: unified with a wide context
    diff_type = vclib.UNIFIED
    diff_options['context'] = 15
    human_readable = 1
  elif format == 'f':
    # 'full' human-readable: unified with unlimited context
    diff_type = vclib.UNIFIED
    diff_options['context'] = None
    human_readable = 1
  elif format == 'h':
    diff_type = vclib.UNIFIED
    human_readable = 1
  elif format == 'u':
    diff_type = vclib.UNIFIED
  else:
    raise debug.ViewVCException('Diff format %s not understood'
                                % format, '400 Bad Request')

  if human_readable or format == 'u':
    diff_options['funout'] = cfg.options.hr_funout
  if human_readable:
    diff_options['ignore_white'] = cfg.options.hr_ignore_white
    diff_options['ignore_keyword_subst'] = cfg.options.hr_ignore_keyword_subst
  try:
    fp = sidebyside = unified = None
    # prefer the Python intraline differ (idiff) when configured and
    # applicable; otherwise fall back to the repository's raw diff
    if (cfg.options.hr_intraline and idiff
        and ((human_readable and idiff.sidebyside)
             or (not human_readable and diff_type == vclib.UNIFIED))):
      f1 = request.repos.openfile(p1, rev1, {})[0]
      try:
        lines_left = f1.readlines()
      finally:
        f1.close()
      f2 = request.repos.openfile(p2, rev2, {})[0]
      try:
        lines_right = f2.readlines()
      finally:
        f2.close()
      if human_readable:
        sidebyside = idiff.sidebyside(lines_left, lines_right,
                                      diff_options.get("context", 5))
      else:
        unified = idiff.unified(lines_left, lines_right,
                                diff_options.get("context", 2))
    else:
      fp = request.repos.rawdiff(p1, rev1, p2, rev2, diff_type, diff_options)
  except vclib.InvalidRevision:
    raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
                                'to diff', '400 Bad Request')

  path_left = _path_join(p1)
  path_right = _path_join(p2)

  date1 = date2 = raw_diff_fp = None
  changes = []
  if fp:
    date1, date2, flag, headers = diff_parse_headers(fp, diff_type,
                                                     path_left, path_right,
                                                     rev1, rev2, sym1, sym2)
    if human_readable:
      if flag is not None:
        changes = [ _item(type=flag) ]
      else:
        changes = DiffSource(fp, cfg)
    else:
      raw_diff_fp = MarkupPipeWrapper(fp, request.server.escape(headers),
                                      None, 1)

  no_format_params = request.query_dict.copy()
  no_format_params['diff_format'] = None
  diff_format_action, diff_format_hidden_values = \
    request.get_form(params=no_format_params)

  fvi = get_file_view_info(request, path_left, rev1)
  left = _item(date=make_time_string(log_entry1.date, cfg),
               author=log_entry1.author,
               log=LogFormatter(request,
                                log_entry1.log).get(maxlen=0, htmlize=1),
               size=log_entry1.size,
               ago=ago1,
               path=path_left,
               rev=rev1,
               tag=sym1,
               view_href=fvi.view_href,
               download_href=fvi.download_href,
               download_text_href=fvi.download_text_href,
               annotate_href=fvi.annotate_href,
               revision_href=fvi.revision_href,
               prefer_markup=fvi.prefer_markup)

  fvi = get_file_view_info(request, path_right, rev2)
  right = _item(date=make_time_string(log_entry2.date, cfg),
                author=log_entry2.author,
                log=LogFormatter(request,
                                 log_entry2.log).get(maxlen=0, htmlize=1),
                size=log_entry2.size,
                ago=ago2,
                path=path_right,
                rev=rev2,
                tag=sym2,
                view_href=fvi.view_href,
                download_href=fvi.download_href,
                download_text_href=fvi.download_text_href,
                annotate_href=fvi.annotate_href,
                revision_href=fvi.revision_href,
                prefer_markup=fvi.prefer_markup)

  data = common_template_data(request)
  data.merge(ezt.TemplateData({
    'left' : left,
    'right' : right,
    'raw_diff' : raw_diff_fp,
    'changes' : changes,
    'sidebyside': sidebyside,
    'unified': unified,
    'diff_format' : request.query_dict.get('diff_format',
                                           cfg.options.diff_format),
    'patch_href' : request.get_url(view_func=view_patch,
                                   params=no_format_params,
                                   escape=1),
    'diff_format_action' : diff_format_action,
    'diff_format_hidden_values' : diff_format_hidden_values,
    }))
  generate_page(request, "diff", data)
generate_tarball_header(out, name, size=0, mode=None, mtime=0, uid=0, gid=0, typeflag=None, linkname='', uname='viewvc', gname='viewvc', devmajor=1, devminor=0, prefix=None, magic='ustar', version='00', chksum=None): if not mode: if name[-1:] == '/': mode = 0755 else: mode = 0644 if not typeflag: if linkname: typeflag = '2' # symbolic link elif name[-1:] == '/': typeflag = '5' # directory else: typeflag = '0' # regular file if not prefix: prefix = '' # generate a GNU tar extension header for a long name. if len(name) >= 100: generate_tarball_header(out, '././@LongLink', len(name), 0, 0, 0, 0, 'L') out.write(name) out.write('\0' * (511 - ((len(name) + 511) % 512))) # generate a GNU tar extension header for a long symlink name. if len(linkname) >= 100: generate_tarball_header(out, '././@LongLink', len(linkname), 0, 0, 0, 0, 'K') out.write(linkname) out.write('\0' * (511 - ((len(linkname) + 511) % 512))) block1 = struct.pack('100s 8s 8s 8s 12s 12s', name, '%07o' % mode, '%07o' % uid, '%07o' % gid, '%011o' % size, '%011o' % mtime) block2 = struct.pack('c 100s 6s 2s 32s 32s 8s 8s 155s', typeflag, linkname, magic, version, uname, gname, '%07o' % devmajor, '%07o' % devminor, prefix) if not chksum: dummy_chksum = ' ' block = block1 + dummy_chksum + block2 chksum = 0 for i in range(len(block)): chksum = chksum + ord(block[i]) block = block1 + struct.pack('8s', '%07o' % chksum) + block2 block = block + '\0' * (512 - len(block)) out.write(block) def generate_tarball(out, request, reldir, stack, dir_mtime=None): # get directory info from repository rep_path = request.path_parts + reldir entries = request.repos.listdir(rep_path, request.pathrev, {}) request.repos.dirlogs(rep_path, request.pathrev, entries, {}) entries.sort(lambda a, b: cmp(a.name, b.name)) # figure out corresponding path in tar file. 
everything gets put underneath # a single top level directory named after the repository directory being # tarred if request.path_parts: tar_dir = request.path_parts[-1] + '/' else: tar_dir = request.rootname + '/' if reldir: tar_dir = tar_dir + _path_join(reldir) + '/' cvs = request.roottype == 'cvs' # If our caller doesn't dictate a datestamp to use for the current # directory, its datestamps will be the youngest of the datestamps # of versioned items in that subdirectory. We'll be ignoring dead # or busted items and, in CVS, subdirs. if dir_mtime is None: dir_mtime = 0 for file in entries: if cvs and (file.kind != vclib.FILE or file.rev is None or file.dead): continue if (file.date is not None) and (file.date > dir_mtime): dir_mtime = file.date # Push current directory onto the stack. stack.append(tar_dir) # If this is Subversion, we generate a header for this directory # regardless of its contents. For CVS it will only get into the # tarball if it has files underneath it, which we determine later. if not cvs: generate_tarball_header(out, tar_dir, mtime=dir_mtime) # Run through the files in this directory, skipping busted and # unauthorized ones. for file in entries: if file.kind != vclib.FILE: continue if cvs and (file.rev is None or file.dead): continue # If we get here, we've seen at least one valid file in the # current directory. For CVS, we need to make sure there are # directory parents to contain it, so we flush the stack. if cvs: for dir in stack: generate_tarball_header(out, dir, mtime=dir_mtime) del stack[:] # Calculate the mode for the file. Sure, we could look directly # at the ,v file in CVS, but that's a layering violation we'd like # to avoid as much as possible. if request.repos.isexecutable(rep_path + [file.name], request.pathrev): mode = 0755 else: mode = 0644 # Is this thing a symlink? # ### FIXME: A better solution would be to have vclib returning ### symlinks with a new vclib.SYMLINK path type. 
    symlink_target = None
    if hasattr(request.repos, 'get_symlink_target'):
      symlink_target = request.repos.get_symlink_target(
        rep_path + [file.name], request.pathrev)

    # If the object is a symlink, generate the appropriate header.
    # Otherwise, we're dealing with a regular file.
    if symlink_target:
      generate_tarball_header(out, tar_dir + file.name, 0, mode,
                              file.date is not None and file.date or 0,
                              typeflag='2', linkname=symlink_target)
    else:
      filesize = request.repos.filesize(rep_path + [file.name],
                                        request.pathrev)

      if filesize == -1:
        # Bummer.  We have to calculate the filesize manually.
        fp = request.repos.openfile(rep_path + [file.name],
                                    request.pathrev, {})[0]
        filesize = 0
        while 1:
          chunk = retry_read(fp)
          if not chunk:
            break
          filesize = filesize + len(chunk)
        fp.close()

      # Write the tarball header...
      generate_tarball_header(out, tar_dir + file.name, filesize, mode,
                              file.date is not None and file.date or 0)

      # ...the file's contents ...
      fp = request.repos.openfile(rep_path + [file.name],
                                  request.pathrev, {})[0]
      while 1:
        chunk = retry_read(fp)
        if not chunk:
          break
        out.write(chunk)
      fp.close()

      # ... and then add the block padding.
      out.write('\0' * (511 - (filesize + 511) % 512))

  # Recurse into subdirectories, skipping busted and unauthorized (or
  # configured-to-be-hidden) ones.
  for file in entries:
    if file.errors or file.kind != vclib.DIR:
      continue
    if request.cfg.options.hide_cvsroot \
       and is_cvsroot_path(request.roottype, rep_path + [file.name]):
      continue
    mtime = request.roottype == 'svn' and file.date or None
    generate_tarball(out, request, reldir + [file.name], stack, mtime)

  # Pop the current directory from the stack.
  del stack[-1:]

def download_tarball(request):
  """Stream a gzipped tarball of the requested directory tree, or (when
  debug.TARFILE_PATH is set) write it to that path instead."""
  cfg = request.cfg

  if 'tar' not in request.cfg.options.allowed_views:
    raise debug.ViewVCException('Tarball generation is disabled',
                                '403 Forbidden')

  # If debugging, we just need to open up the specified tar path for
  # writing.  Otherwise, we get a writeable server output stream --
  # disabling any default compression thereupon -- and wrap that in
  # our own gzip stream wrapper.
  if debug.TARFILE_PATH:
    fp = open(debug.TARFILE_PATH, 'w')
  else:
    tarfile = request.rootname
    if request.path_parts:
      tarfile = "%s-%s" % (tarfile, request.path_parts[-1])
    request.server.addheader('Content-Disposition',
                             'attachment; filename="%s.tar.gz"' % (tarfile))
    server_fp = get_writeready_server_file(request, 'application/x-gzip',
                                           allow_compress=False)
    request.server.flush()
    fp = gzip.GzipFile('', 'wb', 9, server_fp)

  ### FIXME: For Subversion repositories, we can get the real mtime of the
  ### top-level directory here.
  generate_tarball(fp, request, [], [])

  # Two zero-filled blocks mark the tar end-of-archive.
  fp.write('\0' * 1024)
  fp.close()

  if debug.TARFILE_PATH:
    request.server.header('')
    print """
<html>
<body>
<p>Tarball '%s' successfully generated!</p>
</body>
</html>""" % (debug.TARFILE_PATH)

def view_revision(request):
  """Render the Subversion revision view (log message, properties, and
  changed-path list) for the 'revision' query parameter."""
  if request.roottype != "svn":
    raise debug.ViewVCException("Revision view not supported for CVS "
                                "repositories at this time.",
                                "400 Bad Request")

  cfg = request.cfg
  query_dict = request.query_dict
  try:
    rev = request.repos._getrev(query_dict.get('revision'))
  except vclib.InvalidRevision:
    raise debug.ViewVCException('Invalid revision', '404 Not Found')
  youngest_rev = request.repos.get_youngest_revision()

  # The revision number acts as a weak validator (but we tell browsers
  # not to cache the youngest revision).
  if rev != youngest_rev and check_freshness(request, None, str(rev), weak=1):
    return

  # Fetch the revision information.
  date, author, msg, revprops, changes = request.repos.revinfo(rev)
  date_str = make_time_string(date, cfg)

  # Fix up the revprops list (rather like get_itemprops()).
  propnames = revprops.keys()
  propnames.sort()
  props = []
  for name in propnames:
    lf = LogFormatter(request, revprops[name])
    value = lf.get(maxlen=0, htmlize=1)
    undisplayable = ezt.boolean(0)
    # skip non-utf8 property names
    try:
      unicode(name, 'utf8')
    except:
      continue
    # note non-utf8 property values
    try:
      unicode(value, 'utf8')
    except:
      value = None
      undisplayable = ezt.boolean(1)
    props.append(_item(name=name, value=value, undisplayable=undisplayable))

  # Sort the changes list by path.
  def changes_sort_by_path(a, b):
    return cmp(a.path_parts, b.path_parts)
  changes.sort(changes_sort_by_path)

  # Handle limit_changes parameter
  cfg_limit_changes = cfg.options.limit_changes
  limit_changes = int(query_dict.get('limit_changes', cfg_limit_changes))
  more_changes = None
  more_changes_href = None
  first_changes = None
  first_changes_href = None
  num_changes = len(changes)
  # A user-supplied limit truncates the list and links to the full set;
  # otherwise a configured limit merely links to an unlimited view.
  if limit_changes and len(changes) > limit_changes:
    more_changes = len(changes) - limit_changes
    params = query_dict.copy()
    params['limit_changes'] = 0
    more_changes_href = request.get_url(params=params, escape=1)
    changes = changes[:limit_changes]
  elif cfg_limit_changes and len(changes) > cfg_limit_changes:
    first_changes = cfg_limit_changes
    params = query_dict.copy()
    params['limit_changes'] = None
    first_changes_href = request.get_url(params=params, escape=1)

  # Add the hrefs, types, and prev info
  for change in changes:
    change.view_href = change.diff_href = change.type = change.log_href = None

    # If the path is newly added, don't claim text or property
    # modifications.
    if (change.action == vclib.ADDED or change.action == vclib.REPLACED) \
       and not change.copied:
      change.text_changed = 0
      change.props_changed = 0

    # Calculate the view link URLs (for which we must have a pathtype).
    if change.pathtype:
      view_func = None
      if change.pathtype is vclib.FILE \
         and 'markup' in cfg.options.allowed_views:
        view_func = view_markup
      elif change.pathtype is vclib.DIR:
        view_func = view_directory

      path = _path_join(change.path_parts)
      base_path = _path_join(change.base_path_parts)
      # Deleted paths no longer exist in REV, so link to the base
      # (pre-deletion) location and revision instead.
      if change.action == vclib.DELETED:
        link_rev = str(change.base_rev)
        link_where = base_path
      else:
        link_rev = str(rev)
        link_where = path

      change.view_href = request.get_url(view_func=view_func,
                                         where=link_where,
                                         pathtype=change.pathtype,
                                         params={'pathrev' : link_rev},
                                         escape=1)
      change.log_href = request.get_url(view_func=view_log,
                                        where=link_where,
                                        pathtype=change.pathtype,
                                        params={'pathrev' : link_rev},
                                        escape=1)

      if change.pathtype is vclib.FILE and change.text_changed:
        change.diff_href = request.get_url(view_func=view_diff,
                                           where=path,
                                           pathtype=change.pathtype,
                                           params={'pathrev' : str(rev),
                                                   'r1' : str(rev),
                                                   'r2' : str(change.base_rev),
                                                   },
                                           escape=1)

    # use same variable names as the log template
    change.path = _path_join(change.path_parts)
    change.copy_path = _path_join(change.base_path_parts)
    change.copy_rev = change.base_rev
    change.text_mods = ezt.boolean(change.text_changed)
    change.prop_mods = ezt.boolean(change.props_changed)
    change.is_copy = ezt.boolean(change.copied)
    change.pathtype = (change.pathtype == vclib.FILE and 'file') \
                      or (change.pathtype == vclib.DIR and 'dir') \
                      or None
    del change.path_parts
    del change.base_path_parts
    del change.base_rev
    del change.text_changed
    del change.props_changed
    del change.copied

  prev_rev_href = next_rev_href = None
  if rev > 0:
    prev_rev_href = request.get_url(view_func=view_revision,
                                    where=None,
                                    pathtype=None,
                                    params={'revision': str(rev - 1)},
                                    escape=1)
  if rev < request.repos.get_youngest_revision():
    next_rev_href = request.get_url(view_func=view_revision,
                                    where=None,
                                    pathtype=None,
                                    params={'revision': str(rev + 1)},
                                    escape=1)
  jump_rev_action, jump_rev_hidden_values = \
    request.get_form(params={'revision': None})

  lf = LogFormatter(request, msg)
  data = common_template_data(request)
  data.merge(ezt.TemplateData({
    'rev' : str(rev),
    'author' : author,
    'date' : date_str,
    'log' : lf.get(maxlen=0, htmlize=1),
    'properties' : props,
    'ago' : date is not None and html_time(request, date, 1) or None,
    'changes' : changes,
    'prev_href' : prev_rev_href,
    'next_href' : next_rev_href,
    'num_changes' : num_changes,
    'limit_changes': limit_changes,
    'more_changes': more_changes,
    'more_changes_href': more_changes_href,
    'first_changes': first_changes,
    'first_changes_href': first_changes_href,
    'jump_rev_action' : jump_rev_action,
    'jump_rev_hidden_values' : jump_rev_hidden_values,
    'revision_href' : request.get_url(view_func=view_revision,
                                      where=None,
                                      pathtype=None,
                                      params={'revision': str(rev)},
                                      escape=1),
    }))
  # The youngest revision can still change, so forbid caching it.
  if rev == youngest_rev:
    request.server.addheader("Cache-control", "no-store")
  generate_page(request, "revision", data)

def is_query_supported(request):
  """Returns true if querying is supported for the given path."""
  return request.cfg.cvsdb.enabled \
         and request.pathtype == vclib.DIR \
         and request.roottype in ['cvs', 'svn']

def is_querydb_nonempty_for_root(request):
  """Return 1 iff commits database integration is supported *and* the
  current root is found in that database.  Only does this check if
  check_database is set to 1."""
  if request.cfg.cvsdb.enabled and request.roottype in ['cvs', 'svn']:
    if request.cfg.cvsdb.check_database_for_root:
      global cvsdb
      import cvsdb
      db = cvsdb.ConnectDatabaseReadOnly(request.cfg)
      repos_root, repos_dir = cvsdb.FindRepository(db, request.rootpath)
      if repos_root:
        return 1
    else:
      return 1
  return 0

def validate_query_args(request):
  # Do some additional input validation of query form arguments beyond
  # what is offered by the CGI param validation loop in Request.run_viewvc().
  # Rejects bad XXX_match values and uncompilable user regexes with a
  # 400 rather than letting them reach the commits-database layer.
  for arg_base in ['branch', 'file', 'comment', 'who']:
    # First, make sure the XXX_match args have valid values:
    arg_match = arg_base + '_match'
    arg_match_value = request.query_dict.get(arg_match, 'exact')
    if not arg_match_value in ('exact', 'like', 'glob', 'regex', 'notregex'):
      raise debug.ViewVCException(
        'An illegal value was provided for the "%s" parameter.'
        % (arg_match),
        '400 Bad Request')

    # Now, for those args which are supposed to be regular expressions (per
    # their corresponding XXX_match values), make sure they are.
    if arg_match_value == 'regex' or arg_match_value == 'notregex':
      arg_base_value = request.query_dict.get(arg_base)
      if arg_base_value:
        try:
          re.compile(arg_base_value)
        except:
          raise debug.ViewVCException(
            'An illegal value was provided for the "%s" parameter.'
            % (arg_base),
            '400 Bad Request')

def view_queryform(request):
  """Render the commits-database query form, pre-populated (and
  HTML-escaped) from any existing query parameters."""
  if not is_query_supported(request):
    raise debug.ViewVCException('Can not query project root "%s" at "%s".'
                                % (request.rootname, request.where),
                                '403 Forbidden')

  # Do some more precise input validation.
  validate_query_args(request)

  query_action, query_hidden_values = \
    request.get_form(view_func=view_query, params={'limit_changes': None})
  limit_changes = \
    int(request.query_dict.get('limit_changes',
                               request.cfg.options.limit_changes))

  # Helper: fetch a query param (or default) already HTML-escaped for
  # safe re-display in the form.
  def escaped_query_dict_get(itemname, itemdefault=''):
    return request.server.escape(request.query_dict.get(itemname,
                                                        itemdefault))

  data = common_template_data(request)
  data.merge(ezt.TemplateData({
    'branch' : escaped_query_dict_get('branch', ''),
    'branch_match' : escaped_query_dict_get('branch_match', 'exact'),
    'dir' : escaped_query_dict_get('dir', ''),
    'file' : escaped_query_dict_get('file', ''),
    'file_match' : escaped_query_dict_get('file_match', 'exact'),
    'who' : escaped_query_dict_get('who', ''),
    'who_match' : escaped_query_dict_get('who_match', 'exact'),
    'comment' : escaped_query_dict_get('comment', ''),
    'comment_match' : escaped_query_dict_get('comment_match', 'exact'),
    'querysort' : escaped_query_dict_get('querysort', 'date'),
    'date' : escaped_query_dict_get('date', 'hours'),
    'hours' : escaped_query_dict_get('hours', '2'),
    'mindate' : escaped_query_dict_get('mindate', ''),
    'maxdate' : escaped_query_dict_get('maxdate', ''),
    'query_action' : query_action,
    'query_hidden_values' : query_hidden_values,
    'limit_changes' : limit_changes,
    'dir_href' : request.get_url(view_func=view_directory, params={},
                                 escape=1),
    }))
  generate_page(request, "query_form", data)

def parse_date(datestr):
  """Parse a date string from the query form.

  Accepts 'YYYY-MM-DD' with an optional ' HH:MM' or ' HH:MM:SS' suffix
  and returns a seconds-since-epoch value (UTC assumed); returns None
  when DATESTR does not match that shape.
  """
  match = re.match(r'^(\d\d\d\d)-(\d\d)-(\d\d)(?:\ +'
                   '(\d\d):(\d\d)(?::(\d\d))?)?$', datestr)
  if match:
    year = int(match.group(1))
    month = int(match.group(2))
    day = int(match.group(3))
    hour = match.group(4)
    if hour is not None:
      hour = int(hour)
    else:
      hour = 0
    minute = match.group(5)
    if minute is not None:
      minute = int(minute)
    else:
      minute = 0
    second = match.group(6)
    if second is not None:
      second = int(second)
    else:
      second = 0
    # return a "seconds since epoch" value assuming date given in UTC
    tm = (year, month, day, hour, minute, second, 0, 0, 0)
    return compat.timegm(tm)
  else:
    return None

def english_query(request):
  """Generate a sentence describing the query."""
  cfg = request.cfg
  ret = [ 'Checkins ' ]
  dir = request.query_dict.get('dir', '')
  if dir:
    ret.append('to ')
    if ',' in dir:
      ret.append('subdirectories')
    else:
      ret.append('subdirectory')
    ret.append(' <em>%s</em> ' % request.server.escape(dir))
  file = request.query_dict.get('file', '')
  if file:
    if len(ret) != 1: ret.append('and ')
    ret.append('to file <em>%s</em> ' % request.server.escape(file))
  who = request.query_dict.get('who', '')
  branch = request.query_dict.get('branch', '')
  if branch:
    ret.append('on branch <em>%s</em> ' % request.server.escape(branch))
  else:
    ret.append('on all branches ')
  comment = request.query_dict.get('comment', '')
  if comment:
    ret.append('with comment <i>%s</i> ' % request.server.escape(comment))
  if who:
    ret.append('by <em>%s</em> ' % request.server.escape(who))
  date = request.query_dict.get('date', 'hours')
  if date == 'hours':
    ret.append('in the last %s hours' \
               % request.server.escape(request.query_dict.get('hours', '2')))
  elif date == 'day':
    ret.append('in the last day')
  elif date == 'week':
    ret.append('in the last week')
  elif date == 'month':
    ret.append('in the last month')
  elif date == 'all':
    ret.append('since the beginning of time')
  elif date == 'explicit':
    mindate = request.query_dict.get('mindate', '')
    maxdate = request.query_dict.get('maxdate', '')
    if mindate and maxdate:
      w1, w2 = 'between', 'and'
    else:
      w1, w2 = 'since', 'before'
    if mindate:
      mindate = make_time_string(parse_date(mindate), cfg)
      ret.append('%s <em>%s</em> ' % (w1, mindate))
    if maxdate:
      maxdate = make_time_string(parse_date(maxdate), cfg)
      ret.append('%s <em>%s</em> ' % (w2, maxdate))
  return string.join(ret, '')

def prev_rev(rev):
  """Returns a string representing the previous revision of the argument."""
  r = string.split(rev, '.')
  # decrement final revision component
  r[-1] = str(int(r[-1]) - 1)
  # prune if we pass the beginning of the branch
  if len(r) > 2 and r[-1] == '0':
    r = r[:-2]
  return string.join(r, '.')

def build_commit(request, files, max_files, dir_strip, format):
  """Return a commit object built from the information in FILES, or None
  if no allowed files are present in the set.  DIR_STRIP is the path
  prefix to remove from the commit object's set of files.  If MAX_FILES
  is non-zero, it is used to limit the number of files returned in the
  commit object.  FORMAT is the requested output format of the query
  request."""
  cfg = request.cfg
  author = files[0].GetAuthor()
  date = files[0].GetTime()
  desc = files[0].GetDescription()
  commit_rev = files[0].GetRevision()
  len_strip = len(dir_strip)
  commit_files = []
  num_allowed = 0
  plus_count = 0
  minus_count = 0
  found_unreadable = 0

  for f in files:
    dirname = f.GetDirectory()
    filename = f.GetFile()
    if dir_strip:
      assert dirname[:len_strip] == dir_strip
      assert len(dirname) == len_strip or dirname[len(dir_strip)] == '/'
      dirname = dirname[len_strip+1:]
    where = dirname and ("%s/%s" % (dirname, filename)) or filename
    rev = f.GetRevision()
    rev_prev = prev_rev(rev)
    commit_time = f.GetTime()
    if commit_time:
      commit_time = make_time_string(commit_time, cfg)
    change_type = f.GetTypeString()

    # In CVS, we can actually look at deleted revisions; in Subversion
    # we can't -- we'll look at the previous revision instead.
    exam_rev = rev
    if request.roottype == 'svn' and change_type == 'Remove':
      exam_rev = rev_prev

    # Check path access (since the commits database logic bypasses the
    # vclib layer and, thus, the vcauth stuff that layer uses).
    path_parts = _path_parts(where)
    if path_parts:
      # Skip files in CVSROOT if asked to hide such.
      if cfg.options.hide_cvsroot \
         and is_cvsroot_path(request.roottype, path_parts):
        found_unreadable = 1
        continue

      # We have to do a rare authz check here because this data comes
      # from the CVSdb, not from the vclib providers.
      #
      # WARNING: The Subversion CVSdb integration logic is weak, weak,
      # weak.  It has no ability to track copies, so complex
      # situations like a copied directory with a deleted subfile (all
      # in the same revision) are very ... difficult.  We've no choice
      # but to omit as unauthorized paths the authorization logic
      # can't find.
      try:
        readable = vclib.check_path_access(request.repos, path_parts,
                                           None, exam_rev)
      except vclib.ItemNotFound:
        readable = 0
      if not readable:
        found_unreadable = 1
        continue

    if request.roottype == 'svn':
      params = { 'pathrev': exam_rev }
    else:
      params = { 'revision': exam_rev,
                 'pathrev': f.GetBranch() or None }

    dir_href = request.get_url(view_func=view_directory,
                               where=dirname, pathtype=vclib.DIR,
                               params=params, escape=1)
    log_href = request.get_url(view_func=view_log,
                               where=where, pathtype=vclib.FILE,
                               params=params, escape=1)
    diff_href = view_href = download_href = None
    if 'markup' in cfg.options.allowed_views:
      view_href = request.get_url(view_func=view_markup,
                                  where=where, pathtype=vclib.FILE,
                                  params=params, escape=1)
    if 'co' in cfg.options.allowed_views:
      download_href = request.get_url(view_func=view_checkout,
                                      where=where, pathtype=vclib.FILE,
                                      params=params, escape=1)
    if change_type == 'Change':
      diff_href_params = params.copy()
      diff_href_params.update({
        'r1': rev_prev,
        'r2': rev,
        'diff_format': None
        })
      diff_href = request.get_url(view_func=view_diff,
                                  where=where, pathtype=vclib.FILE,
                                  params=diff_href_params, escape=1)
    mime_type, encoding = calculate_mime_type(request, path_parts, exam_rev)
    prefer_markup = ezt.boolean(default_view(mime_type, cfg) == view_markup)

    # Update plus/minus line change count.
    plus = int(f.GetPlusCount())
    minus = int(f.GetMinusCount())
    plus_count = plus_count + plus
    minus_count = minus_count + minus

    # Totals above count every allowed file; the file list itself is
    # capped at max_files entries.
    num_allowed = num_allowed + 1
    if max_files and num_allowed > max_files:
      continue

    commit_files.append(_item(date=commit_time,
                              dir=request.server.escape(dirname),
                              file=request.server.escape(filename),
                              author=request.server.escape(f.GetAuthor()),
                              rev=rev,
                              branch=f.GetBranch(),
                              plus=plus,
                              minus=minus,
                              type=change_type,
                              dir_href=dir_href,
                              log_href=log_href,
                              view_href=view_href,
                              download_href=download_href,
                              prefer_markup=prefer_markup,
                              diff_href=diff_href))

  # No files survived authz checks?  Let's just pretend this
  # little commit didn't happen, shall we?
  if not len(commit_files):
    return None

  commit = _item(num_files=len(commit_files), files=commit_files,
                 plus=plus_count, minus=minus_count)
  commit.limited_files = ezt.boolean(num_allowed > len(commit_files))

  # We'll mask log messages in commits which contain unreadable paths,
  # but even that is kinda iffy.  If a person searches for
  # '/some/hidden/path' across log messages, then gets a response set
  # that shows commits lacking log message, said person can reasonably
  # assume that the log messages contained the hidden path, and that
  # this is likely because they are referencing a real path in the
  # repository -- a path the user isn't supposed to even know about.
  if found_unreadable:
    commit.log = None
    commit.short_log = None
  else:
    lf = LogFormatter(request, desc)
    # RSS output carries the raw log message; all other formats get it
    # htmlized.
    htmlize = (format != 'rss')
    commit.log = lf.get(maxlen=0, htmlize=htmlize)
    commit.short_log = lf.get(maxlen=cfg.options.short_log_len,
                              htmlize=htmlize)
  commit.author = request.server.escape(author)
  commit.rss_date = make_rss_time_string(date, request.cfg)
  if request.roottype == 'svn':
    commit.rev = commit_rev
    commit.rss_url = '%s://%s%s' % \
      (request.server.getenv("HTTPS") == "on" and "https" or "http",
       request.server.getenv("HTTP_HOST"),
       request.get_url(view_func=view_revision,
                       params={'revision': commit.rev},
                       escape=1))
  else:
    commit.rev = None
    commit.rss_url = None
  return commit

def query_backout(request, commits):
  """Emit a plain-text shell script which backs out the changes in
  COMMITS via 'cvs update -j' or 'svn merge', as appropriate."""
  server_fp = get_writeready_server_file(request, 'text/plain')
  if not commits:
    server_fp.write("""\
# No changes were selected by the query.
# There is nothing to back out.
""")
    return
  server_fp.write("""\
# This page can be saved as a shell script and executed.
# It should be run at the top of your work area.  It will update
# your working copy to back out the changes selected by the
# query.
""")
  for commit in commits:
    for fileinfo in commit.files:
      if request.roottype == 'cvs':
        server_fp.write('cvs update -j %s -j %s %s/%s\n'
                        % (fileinfo.rev, prev_rev(fileinfo.rev),
                           fileinfo.dir, fileinfo.file))
      elif request.roottype == 'svn':
        server_fp.write('svn merge -r %s:%s %s/%s\n'
                        % (fileinfo.rev, prev_rev(fileinfo.rev),
                           fileinfo.dir, fileinfo.file))

def view_query(request):
  """Run a commits-database query built from the request parameters and
  render the results (HTML, RSS, or backout-script output)."""
  if not is_query_supported(request):
    raise debug.ViewVCException('Can not query project root "%s" at "%s".'
                                % (request.rootname, request.where),
                                '403 Forbidden')

  cfg = request.cfg

  # Do some more precise input validation.
  validate_query_args(request)

  # get form data
  branch = request.query_dict.get('branch', '')
  branch_match = request.query_dict.get('branch_match', 'exact')
  dir = request.query_dict.get('dir', '')
  file = request.query_dict.get('file', '')
  file_match = request.query_dict.get('file_match', 'exact')
  who = request.query_dict.get('who', '')
  who_match = request.query_dict.get('who_match', 'exact')
  comment = request.query_dict.get('comment', '')
  comment_match = request.query_dict.get('comment_match', 'exact')
  querysort = request.query_dict.get('querysort', 'date')
  date = request.query_dict.get('date', 'hours')
  hours = request.query_dict.get('hours', '2')
  mindate = request.query_dict.get('mindate', '')
  maxdate = request.query_dict.get('maxdate', '')
  format = request.query_dict.get('format')
  limit_changes = int(request.query_dict.get('limit_changes',
                                             cfg.options.limit_changes))

  match_types = { 'exact':1, 'like':1, 'glob':1, 'regex':1, 'notregex':1 }
  sort_types = { 'date':1, 'author':1, 'file':1 }
  date_types = { 'hours':1, 'day':1, 'week':1, 'month':1,
                 'all':1, 'explicit':1 }

  # parse various fields, validating or converting them
  if not match_types.has_key(branch_match): branch_match = 'exact'
  if not match_types.has_key(file_match): file_match = 'exact'
  if not match_types.has_key(who_match): who_match = 'exact'
  if not match_types.has_key(comment_match): comment_match = 'exact'
  if not sort_types.has_key(querysort): querysort = 'date'
  if not date_types.has_key(date): date = 'hours'
  mindate = parse_date(mindate)
  maxdate = parse_date(maxdate)

  global cvsdb
  import cvsdb

  db = cvsdb.ConnectDatabaseReadOnly(cfg)
  repos_root, repos_dir = cvsdb.FindRepository(db, request.rootpath)
  if not repos_root:
    raise debug.ViewVCException(
      "The root '%s' was not found in the commit database "
      % request.rootname)

  # create the database query from the form data
  query = cvsdb.CreateCheckinQuery()
  query.SetRepository(repos_root)
  # treat "HEAD" specially ...
  if branch_match == 'exact' and branch == 'HEAD':
    query.SetBranch('')
  elif branch:
    query.SetBranch(branch, branch_match)
  if dir:
    # Comma-separated directories each match exactly plus anything
    # beneath them (via a SQL LIKE pattern).
    for subdir in string.split(dir, ','):
      path = (_path_join(repos_dir + request.path_parts
                         + _path_parts(string.strip(subdir))))
      query.SetDirectory(path, 'exact')
      query.SetDirectory('%s/%%' % cvsdb.EscapeLike(path), 'like')
  else:
    where = _path_join(repos_dir + request.path_parts)
    if where: # if we are in a subdirectory ...
      query.SetDirectory(where, 'exact')
      query.SetDirectory('%s/%%' % cvsdb.EscapeLike(where), 'like')
  if file:
    query.SetFile(file, file_match)
  if who:
    query.SetAuthor(who, who_match)
  if comment:
    query.SetComment(comment, comment_match)
  query.SetSortMethod(querysort)
  if date == 'hours':
    query.SetFromDateHoursAgo(int(hours))
  elif date == 'day':
    query.SetFromDateDaysAgo(1)
  elif date == 'week':
    query.SetFromDateDaysAgo(7)
  elif date == 'month':
    query.SetFromDateDaysAgo(31)
  elif date == 'all':
    pass
  elif date == 'explicit':
    if mindate is not None:
      query.SetFromDateObject(mindate)
    if maxdate is not None:
      query.SetToDateObject(maxdate)

  # Set the admin-defined (via configuration) row limits.  This is to avoid
  # slamming the database server with a monster query.
  if format == 'rss':
    query.SetLimit(cfg.cvsdb.rss_row_limit)
  else:
    query.SetLimit(cfg.cvsdb.row_limit)

  # run the query
  db.RunQuery(query)
  commit_list = query.GetCommitList()
  row_limit_reached = query.GetLimitReached()

  # gather commits
  commits = []
  plus_count = 0
  minus_count = 0
  mod_time = -1
  if commit_list:
    files = []
    limited_files = 0
    current_desc = commit_list[0].GetDescriptionID()
    current_rev = commit_list[0].GetRevision()
    dir_strip = _path_join(repos_dir)

    for commit in commit_list:
      commit_desc = commit.GetDescriptionID()
      commit_rev = commit.GetRevision()

      # base modification time on the newest commit
      if commit.GetTime() > mod_time:
        mod_time = commit.GetTime()

      # For CVS, group commits with the same commit message.
      # For Subversion, group them only if they have the same revision number
      if request.roottype == 'cvs':
        if current_desc == commit_desc:
          files.append(commit)
          continue
      else:
        if current_rev == commit_rev:
          files.append(commit)
          continue

      # append this grouping
      commit_item = build_commit(request, files, limit_changes,
                                 dir_strip, format)
      if commit_item:
        # update running plus/minus totals
        plus_count = plus_count + commit_item.plus
        minus_count = minus_count + commit_item.minus
        commits.append(commit_item)

      files = [ commit ]
      limited_files = 0
      current_desc = commit_desc
      current_rev = commit_rev

    # we need to tack on our last commit grouping, if any
    commit_item = build_commit(request, files, limit_changes,
                               dir_strip, format)
    if commit_item:
      # update running plus/minus totals
      plus_count = plus_count + commit_item.plus
      minus_count = minus_count + commit_item.minus
      commits.append(commit_item)

  # only show the branch column if we are querying all branches
  # or doing a non-exact branch match on a CVS repository.
  show_branch = ezt.boolean(request.roottype == 'cvs'
                            and (branch == '' or branch_match != 'exact'))

  # backout link
  params = request.query_dict.copy()
  params['format'] = 'backout'
  backout_href = request.get_url(params=params, escape=1)

  # link to zero limit_changes value
  params = request.query_dict.copy()
  params['limit_changes'] = 0
  limit_changes_href = request.get_url(params=params, escape=1)

  # if we got any results, use the newest commit as the modification time
  if mod_time >= 0:
    if check_freshness(request, mod_time):
      return

  if format == 'backout':
    query_backout(request, commits)
    return

  data = common_template_data(request)
  data.merge(ezt.TemplateData({
    'sql': request.server.escape(db.CreateSQLQueryString(query)),
    'english_query': english_query(request),
    'queryform_href': request.get_url(view_func=view_queryform, escape=1),
    'backout_href': backout_href,
    'plus_count': plus_count,
    'minus_count': minus_count,
    'show_branch': show_branch,
    'querysort': querysort,
    'commits': commits,
    'row_limit_reached' : ezt.boolean(row_limit_reached),
    'limit_changes': limit_changes,
    'limit_changes_href': limit_changes_href,
    'rss_link_href': request.get_url(view_func=view_query,
                                     params={'date': 'month'},
                                     escape=1, prefix=1),
    }))
  if format == 'rss':
    generate_page(request, "rss", data, "application/rss+xml")
  else:
    generate_page(request, "query_results", data)

# Map the 'view' CGI parameter value onto its handler function.
_views = {
  'annotate':  view_annotate,
  'co':        view_checkout,
  'diff':      view_diff,
  'dir':       view_directory,
  'graph':     view_cvsgraph,
  'graphimg':  view_cvsgraph_image,
  'log':       view_log,
  'markup':    view_markup,
  'patch':     view_patch,
  'query':     view_query,
  'queryform': view_queryform,
  'revision':  view_revision,
  'roots':     view_roots,
  'tar':       download_tarball,
  'redirect_pathrev': redirect_pathrev,
  }

# Reverse mapping: handler function -> view code.
_view_codes = {}
for code, view in _views.items():
  _view_codes[view] = code

def list_roots(request):
  """Return a map of viewable root name -> [path, type, lastmod], where
  lastmod is populated (Subversion only) when show_roots_lastmod is on.
  Roots that cannot be opened are silently omitted."""
  cfg = request.cfg
  allroots = { }

  # Add the viewable Subversion roots
  for root in cfg.general.svn_roots.keys():
    auth = setup_authorizer(cfg, request.username, root)
    try:
      repos = vclib.svn.SubversionRepository(root, cfg.general.svn_roots[root],
                                             auth, cfg.utilities,
                                             cfg.options.svn_config_dir)
      lastmod = None
      if cfg.options.show_roots_lastmod:
        try:
          repos.open()
          youngest_rev = repos.youngest
          date, author, msg, revprops, changes = repos.revinfo(youngest_rev)
          date_str = make_time_string(date, cfg)
          ago = html_time(request, date)
          lf = LogFormatter(request, msg)
          log = lf.get(maxlen=0, htmlize=1)
          short_log = lf.get(maxlen=cfg.options.short_log_len, htmlize=1)
          lastmod = _item(ago=ago, author=author, date=date_str,
                          log=log, short_log=short_log,
                          rev=str(youngest_rev))
        except:
          # Best-effort: any failure just means "no lastmod info".
          lastmod = None
    except vclib.ReposNotFound:
      continue
    allroots[root] = [cfg.general.svn_roots[root], 'svn', lastmod]

  # Add the viewable CVS roots
  for root in cfg.general.cvs_roots.keys():
    auth = setup_authorizer(cfg, request.username, root)
    try:
      vclib.ccvs.CVSRepository(root, cfg.general.cvs_roots[root],
                               auth, cfg.utilities,
                               cfg.options.use_rcsparse)
    except vclib.ReposNotFound:
      continue
    allroots[root] = [cfg.general.cvs_roots[root], 'cvs', None]

  return allroots

def expand_root_parents(cfg):
  """Expand the configured root parents into individual roots."""
  # Each item in root_parents is a "directory : repo_type" string.
  for pp in cfg.general.root_parents:
    pos = string.rfind(pp, ':')
    if pos < 0:
      raise debug.ViewVCException(
        'The path "%s" in "root_parents" does not include a '
        'repository type.  Expected "cvs" or "svn".' % (pp))

    repo_type = string.strip(pp[pos+1:])
    pp = os.path.normpath(string.strip(pp[:pos]))

    if repo_type == 'cvs':
      roots = vclib.ccvs.expand_root_parent(pp)
      if cfg.options.hide_cvsroot and roots.has_key('CVSROOT'):
        del roots['CVSROOT']
      cfg.general.cvs_roots.update(roots)
    elif repo_type == 'svn':
      roots = vclib.svn.expand_root_parent(pp)
      cfg.general.svn_roots.update(roots)
    else:
      raise debug.ViewVCException(
        'The path "%s" in "root_parents" has an unrecognized '
        'repository type ("%s").  Expected "cvs" or "svn".'
        % (pp, repo_type))

def find_root_in_parents(cfg, rootname, roottype):
  """Return the rootpath for configured ROOTNAME of ROOTTYPE."""
  # Easy out:  caller wants rootname "CVSROOT", and we're hiding those.
  if rootname == 'CVSROOT' and cfg.options.hide_cvsroot:
    return None
  for pp in cfg.general.root_parents:
    pos = string.rfind(pp, ':')
    if pos < 0:
      continue
    repo_type = string.strip(pp[pos+1:])
    if repo_type != roottype:
      continue
    pp = os.path.normpath(string.strip(pp[:pos]))
    rootpath = None
    if roottype == 'cvs':
      rootpath = vclib.ccvs.find_root_in_parent(pp, rootname)
    elif roottype == 'svn':
      rootpath = vclib.svn.find_root_in_parent(pp, rootname)
    if rootpath is not None:
      return rootpath
  return None

def locate_root(cfg, rootname):
  """Return a 2-tuple ROOTTYPE, ROOTPATH for configured ROOTNAME.

  Checks explicitly-configured CVS roots, then CVS root parents, then
  Subversion roots and root parents; parent hits are cached back into
  the configuration.  Returns (None, None) when ROOTNAME is unknown.
  """
  if cfg.general.cvs_roots.has_key(rootname):
    return 'cvs', cfg.general.cvs_roots[rootname]
  path_in_parent = find_root_in_parents(cfg, rootname, 'cvs')
  if path_in_parent:
    cfg.general.cvs_roots[rootname] = path_in_parent
    return 'cvs', path_in_parent
  if cfg.general.svn_roots.has_key(rootname):
    return 'svn', cfg.general.svn_roots[rootname]
  path_in_parent = find_root_in_parents(cfg, rootname, 'svn')
  if path_in_parent:
    cfg.general.svn_roots[rootname] = path_in_parent
    return 'svn', path_in_parent
  return None, None

def load_config(pathname=None, server=None):
  """Load the ViewVC configuration file.  SERVER is the server object
  that will be using this configuration.

  Consult the environment for the variable VIEWVC_CONF_PATHNAME and
  VIEWCVS_CONF_PATHNAME (its legacy name) and, if set, use its value as
  the path of the configuration file; otherwise, use PATHNAME (if
  provided).  Failing all else, use a hardcoded default configuration
  path."""
  debug.t_start('load-config')

  # See if the environment contains overrides to the configuration
  # path.  If we have a SERVER object, consult its environment; use
  # the OS environment otherwise.
  # Prefer the SERVER object's environment when one was given; fall back
  # to the OS environment otherwise.
  env_get = server and server.getenv or os.environ.get
  env_pathname = (env_get("VIEWVC_CONF_PATHNAME")
                  or env_get("VIEWCVS_CONF_PATHNAME"))

  # Try to find the configuration pathname by searching these ordered
  # locations: the environment, the passed-in PATHNAME, the hard-coded
  # default.
  pathname = (env_pathname
              or pathname
              or os.path.join(os.path.dirname(os.path.dirname(__file__)),
                              "viewvc.conf"))

  # Load the configuration!
  cfg = config.Config()
  cfg.set_defaults()
  cfg.load_config(pathname, env_get("HTTP_HOST"))

  # Load mime types file(s), but reverse the order -- our
  # configuration uses a most-to-least preferred approach, but the
  # 'mimetypes' package wants things the other way around.
  if cfg.general.mime_types_files:
    files = cfg.general.mime_types_files[:]
    files.reverse()
    files = map(lambda x, y=pathname: os.path.join(os.path.dirname(y), x),
                files)
    mimetypes.init(files)

  debug.t_end('load-config')
  return cfg


def view_error(server, cfg):
  """Render an error page for the exception currently being handled.

  The exception message and stack trace are HTML-escaped via
  server.escape() before being handed to the template (XSS defense).
  If no configuration is available, or templating itself raises, fall
  back to the plain debug exception printer."""
  exc_dict = debug.GetExceptionData()
  status = exc_dict['status']
  if exc_dict['msg']:
    exc_dict['msg'] = server.escape(exc_dict['msg'])
  if exc_dict['stacktrace']:
    exc_dict['stacktrace'] = server.escape(exc_dict['stacktrace'])
  handled = 0

  # use the configured error template if possible
  try:
    if cfg and not server.headerSent:
      server.header(status=status)
      template = get_view_template(cfg, "error")
      template.generate(server.file(), exc_dict)
      handled = 1
  except:
    # deliberately broad: any failure here routes to the fallback below
    pass

  # but fallback to the old exception printer if no configuration is
  # available, or if something went wrong
  if not handled:
    debug.PrintException(server, exc_dict)


def main(server, cfg):
  """Top-level request entry point.

  Builds a Request from SERVER and CFG, runs the view, and routes any
  error (other than SystemExit) through view_error().  Timing/debug
  information is always dumped, even on failure."""
  try:
    debug.t_start('main')
    try:
      # build a Request object, which contains info about the HTTP request
      request = Request(server, cfg)
      request.run_viewvc()
    except SystemExit, e:
      # a view may raise SystemExit to end the request early; not an error
      return
    except:
      view_error(server, cfg)

  finally:
    debug.t_end('main')
    debug.t_dump(server.file())
    debug.DumpChildren(server)


class _item:
  # Minimal record type: _item(a=1, b=2) yields an object with .a and .b.
  def __init__(self, **kw):
    vars(self).update(kw)
./CrossVul/dataset_final_sorted/CWE-79/py/good_3149_0
crossvul-python_data_bad_2188_0
#
# gravatars.py -- Decorational template tags
#
# Copyright (c) 2008-2009 Christian Hammond
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from __future__ import unicode_literals

from django import template
from django.utils.html import escape

from djblets.gravatars import (get_gravatar_url,
                               get_gravatar_url_for_email)
from djblets.util.decorators import basictag


register = template.Library()


@register.tag
@basictag(takes_context=True)
def gravatar(context, user, size=None):
    """
    Outputs the HTML for displaying a user's gravatar.

    This can take an optional size of the image (defaults to 80 if not
    specified).

    This is also influenced by the following settings:

        GRAVATAR_SIZE    - Default size for gravatars
        GRAVATAR_RATING  - Maximum allowed rating (g, pg, r, x)
        GRAVATAR_DEFAULT - Default image set to show if the user hasn't
                           specified a gravatar (identicon, monsterid, wavatar)

    See http://www.gravatar.com/ for more information.
    """
    url = get_gravatar_url(context['request'], user, size)

    if url:
        # Security fix (XSS): the URL and the user's name come from data
        # that may be attacker-influenced, and this tag's raw HTML output
        # bypasses Django's template autoescaping -- escape both before
        # interpolating into the attribute values.
        return ('<img src="%s" width="%s" height="%s" alt="%s" '
                ' class="gravatar"/>'
                % (escape(url), size, size,
                   escape(user.get_full_name() or user.username)))
    else:
        return ''


@register.tag
@basictag(takes_context=True)
def gravatar_url(context, email, size=None):
    """
    Outputs the URL for a gravatar for the given email address.

    This can take an optional size of the image (defaults to 80 if not
    specified).

    This is also influenced by the following settings:

        GRAVATAR_SIZE    - Default size for gravatars
        GRAVATAR_RATING  - Maximum allowed rating (g, pg, r, x)
        GRAVATAR_DEFAULT - Default image set to show if the user hasn't
                           specified a gravatar (identicon, monsterid, wavatar)

    See http://www.gravatar.com/ for more information.
    """
    return get_gravatar_url_for_email(context['request'], email, size)
./CrossVul/dataset_final_sorted/CWE-79/py/bad_2188_0
crossvul-python_data_good_1644_2
"""Tornado handlers for frontend config storage.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import json import os import io import errno from tornado import web from IPython.utils.py3compat import PY3 from ...base.handlers import APIHandler, json_errors class ConfigHandler(APIHandler): SUPPORTED_METHODS = ('GET', 'PUT', 'PATCH') @web.authenticated @json_errors def get(self, section_name): self.set_header("Content-Type", 'application/json') self.finish(json.dumps(self.config_manager.get(section_name))) @web.authenticated @json_errors def put(self, section_name): data = self.get_json_body() # Will raise 400 if content is not valid JSON self.config_manager.set(section_name, data) self.set_status(204) @web.authenticated @json_errors def patch(self, section_name): new_data = self.get_json_body() section = self.config_manager.update(section_name, new_data) self.finish(json.dumps(section)) # URL to handler mappings section_name_regex = r"(?P<section_name>\w+)" default_handlers = [ (r"/api/config/%s" % section_name_regex, ConfigHandler), ]
./CrossVul/dataset_final_sorted/CWE-79/py/good_1644_2
crossvul-python_data_good_1728_0
# coding: utf-8 """A tornado based Jupyter notebook server.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import absolute_import, print_function import base64 import datetime import errno import importlib import io import json import logging import os import random import re import select import signal import socket import ssl import sys import threading import webbrowser from jinja2 import Environment, FileSystemLoader # Install the pyzmq ioloop. This has to be done before anything else from # tornado is imported. from zmq.eventloop import ioloop ioloop.install() # check for tornado 3.1.0 msg = "The Jupyter Notebook requires tornado >= 4.0" try: import tornado except ImportError: raise ImportError(msg) try: version_info = tornado.version_info except AttributeError: raise ImportError(msg + ", but you have < 1.1.0") if version_info < (4,0): raise ImportError(msg + ", but you have %s" % tornado.version) from tornado import httpserver from tornado import web from tornado.log import LogFormatter, app_log, access_log, gen_log from notebook import ( DEFAULT_STATIC_FILES_PATH, DEFAULT_TEMPLATE_PATH_LIST, __version__, ) from .base.handlers import Template404 from .log import log_request from .services.kernels.kernelmanager import MappingKernelManager from .services.config import ConfigManager from .services.contents.manager import ContentsManager from .services.contents.filemanager import FileContentsManager from .services.sessions.sessionmanager import SessionManager from .auth.login import LoginHandler from .auth.logout import LogoutHandler from .base.handlers import FileFindHandler, IPythonHandler from traitlets.config import Config from traitlets.config.application import catch_config_error, boolean_flag from jupyter_core.application import ( JupyterApp, base_flags, base_aliases, ) from jupyter_client import KernelManager from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel, 
NATIVE_KERNEL_NAME from jupyter_client.session import Session from nbformat.sign import NotebookNotary from traitlets import ( Dict, Unicode, Integer, List, Bool, Bytes, Instance, TraitError, Type, ) from ipython_genutils import py3compat from IPython.paths import get_ipython_dir from jupyter_core.paths import jupyter_runtime_dir, jupyter_path from notebook._sysinfo import get_sys_info from .utils import url_path_join, check_pid #----------------------------------------------------------------------------- # Module globals #----------------------------------------------------------------------------- _examples = """ jupyter notebook # start the notebook jupyter notebook --certfile=mycert.pem # use SSL/TLS certificate """ #----------------------------------------------------------------------------- # Helper functions #----------------------------------------------------------------------------- def random_ports(port, n): """Generate a list of n random ports near the given port. The first 5 ports will be sequential, and the remaining n-5 will be randomly selected in the range [port-2*n, port+2*n]. """ for i in range(min(5, n)): yield port + i for i in range(n-5): yield max(1, port + random.randint(-2*n, 2*n)) def load_handlers(name): """Load the (URL pattern, handler) tuples for each component.""" name = 'notebook.' + name mod = __import__(name, fromlist=['default_handlers']) return mod.default_handlers class DeprecationHandler(IPythonHandler): def get(self, url_path): self.set_header("Content-Type", 'text/javascript') self.finish(""" console.warn('`/static/widgets/js` is deprecated. 
Use `/nbextensions/widgets/widgets/js` instead.'); define(['%s'], function(x) { return x; }); """ % url_path_join('nbextensions', 'widgets', 'widgets', url_path.rstrip('.js'))) self.log.warn('Deprecated widget Javascript path /static/widgets/js/*.js was used') #----------------------------------------------------------------------------- # The Tornado web application #----------------------------------------------------------------------------- class NotebookWebApplication(web.Application): def __init__(self, ipython_app, kernel_manager, contents_manager, session_manager, kernel_spec_manager, config_manager, log, base_url, default_url, settings_overrides, jinja_env_options): settings = self.init_settings( ipython_app, kernel_manager, contents_manager, session_manager, kernel_spec_manager, config_manager, log, base_url, default_url, settings_overrides, jinja_env_options) handlers = self.init_handlers(settings) super(NotebookWebApplication, self).__init__(handlers, **settings) def init_settings(self, ipython_app, kernel_manager, contents_manager, session_manager, kernel_spec_manager, config_manager, log, base_url, default_url, settings_overrides, jinja_env_options=None): _template_path = settings_overrides.get( "template_path", ipython_app.template_file_path, ) if isinstance(_template_path, py3compat.string_types): _template_path = (_template_path,) template_path = [os.path.expanduser(path) for path in _template_path] jenv_opt = {"autoescape": True} jenv_opt.update(jinja_env_options if jinja_env_options else {}) env = Environment(loader=FileSystemLoader(template_path), **jenv_opt) sys_info = get_sys_info() if sys_info['commit_source'] == 'repository': # don't cache (rely on 304) when working from master version_hash = '' else: # reset the cache on server restart version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S") settings = dict( # basics log_function=log_request, base_url=base_url, default_url=default_url, template_path=template_path, 
static_path=ipython_app.static_file_path, static_custom_path=ipython_app.static_custom_path, static_handler_class = FileFindHandler, static_url_prefix = url_path_join(base_url,'/static/'), static_handler_args = { # don't cache custom.js 'no_cache_paths': [url_path_join(base_url, 'static', 'custom')], }, version_hash=version_hash, ignore_minified_js=ipython_app.ignore_minified_js, # authentication cookie_secret=ipython_app.cookie_secret, login_url=url_path_join(base_url,'/login'), login_handler_class=ipython_app.login_handler_class, logout_handler_class=ipython_app.logout_handler_class, password=ipython_app.password, # managers kernel_manager=kernel_manager, contents_manager=contents_manager, session_manager=session_manager, kernel_spec_manager=kernel_spec_manager, config_manager=config_manager, # IPython stuff jinja_template_vars=ipython_app.jinja_template_vars, nbextensions_path=ipython_app.nbextensions_path, websocket_url=ipython_app.websocket_url, mathjax_url=ipython_app.mathjax_url, config=ipython_app.config, config_dir=ipython_app.config_dir, jinja2_env=env, terminals_available=False, # Set later if terminals are available ) # allow custom overrides for the tornado web app. settings.update(settings_overrides) return settings def init_handlers(self, settings): """Load the (URL pattern, handler) tuples for each component.""" # Order matters. The first handler to match the URL will handle the request. 
handlers = [] handlers.append((r'/deprecatedwidgets/(.*)', DeprecationHandler)) handlers.extend(load_handlers('tree.handlers')) handlers.extend([(r"/login", settings['login_handler_class'])]) handlers.extend([(r"/logout", settings['logout_handler_class'])]) handlers.extend(load_handlers('files.handlers')) handlers.extend(load_handlers('notebook.handlers')) handlers.extend(load_handlers('nbconvert.handlers')) handlers.extend(load_handlers('kernelspecs.handlers')) handlers.extend(load_handlers('edit.handlers')) handlers.extend(load_handlers('services.api.handlers')) handlers.extend(load_handlers('services.config.handlers')) handlers.extend(load_handlers('services.kernels.handlers')) handlers.extend(load_handlers('services.contents.handlers')) handlers.extend(load_handlers('services.sessions.handlers')) handlers.extend(load_handlers('services.nbconvert.handlers')) handlers.extend(load_handlers('services.kernelspecs.handlers')) handlers.extend(load_handlers('services.security.handlers')) # BEGIN HARDCODED WIDGETS HACK try: import ipywidgets handlers.append( (r"/nbextensions/widgets/(.*)", FileFindHandler, { 'path': ipywidgets.find_static_assets(), 'no_cache_paths': ['/'], # don't cache anything in nbextensions }), ) except: app_log.warn('ipywidgets package not installed. 
Widgets are unavailable.') # END HARDCODED WIDGETS HACK handlers.append( (r"/nbextensions/(.*)", FileFindHandler, { 'path': settings['nbextensions_path'], 'no_cache_paths': ['/'], # don't cache anything in nbextensions }), ) handlers.append( (r"/custom/(.*)", FileFindHandler, { 'path': settings['static_custom_path'], 'no_cache_paths': ['/'], # don't cache anything in custom }) ) # register base handlers last handlers.extend(load_handlers('base.handlers')) # set the URL that will be redirected from `/` handlers.append( (r'/?', web.RedirectHandler, { 'url' : settings['default_url'], 'permanent': False, # want 302, not 301 }) ) # prepend base_url onto the patterns that we match new_handlers = [] for handler in handlers: pattern = url_path_join(settings['base_url'], handler[0]) new_handler = tuple([pattern] + list(handler[1:])) new_handlers.append(new_handler) # add 404 on the end, which will catch everything that falls through new_handlers.append((r'(.*)', Template404)) return new_handlers class NbserverListApp(JupyterApp): version = __version__ description="List currently running notebook servers in this profile." flags = dict( json=({'NbserverListApp': {'json': True}}, "Produce machine-readable JSON output."), ) json = Bool(False, config=True, help="If True, each line of output will be a JSON object with the " "details from the server info file.") def start(self): if not self.json: print("Currently running servers:") for serverinfo in list_running_servers(self.runtime_dir): if self.json: print(json.dumps(serverinfo)) else: print(serverinfo['url'], "::", serverinfo['notebook_dir']) #----------------------------------------------------------------------------- # Aliases and Flags #----------------------------------------------------------------------------- flags = dict(base_flags) flags['no-browser']=( {'NotebookApp' : {'open_browser' : False}}, "Don't open the notebook in a browser after startup." 
) flags['pylab']=( {'NotebookApp' : {'pylab' : 'warn'}}, "DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib." ) flags['no-mathjax']=( {'NotebookApp' : {'enable_mathjax' : False}}, """Disable MathJax MathJax is the javascript library Jupyter uses to render math/LaTeX. It is very large, so you may want to disable it if you have a slow internet connection, or for offline use of the notebook. When disabled, equations etc. will appear as their untransformed TeX source. """ ) # Add notebook manager flags flags.update(boolean_flag('script', 'FileContentsManager.save_script', 'DEPRECATED, IGNORED', 'DEPRECATED, IGNORED')) aliases = dict(base_aliases) aliases.update({ 'ip': 'NotebookApp.ip', 'port': 'NotebookApp.port', 'port-retries': 'NotebookApp.port_retries', 'transport': 'KernelManager.transport', 'keyfile': 'NotebookApp.keyfile', 'certfile': 'NotebookApp.certfile', 'notebook-dir': 'NotebookApp.notebook_dir', 'browser': 'NotebookApp.browser', 'pylab': 'NotebookApp.pylab', }) #----------------------------------------------------------------------------- # NotebookApp #----------------------------------------------------------------------------- class NotebookApp(JupyterApp): name = 'jupyter-notebook' version = __version__ description = """ The Jupyter HTML Notebook. This launches a Tornado based HTML Notebook Server that serves up an HTML5/Javascript Notebook client. 
""" examples = _examples aliases = aliases flags = flags classes = [ KernelManager, Session, MappingKernelManager, ContentsManager, FileContentsManager, NotebookNotary, KernelSpecManager, ] flags = Dict(flags) aliases = Dict(aliases) subcommands = dict( list=(NbserverListApp, NbserverListApp.description.splitlines()[0]), ) _log_formatter_cls = LogFormatter def _log_level_default(self): return logging.INFO def _log_datefmt_default(self): """Exclude date from default date format""" return "%H:%M:%S" def _log_format_default(self): """override default log format to include time""" return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s" # create requested profiles by default, if they don't exist: auto_create = Bool(True) ignore_minified_js = Bool(False, config=True, help='Use minified JS file or not, mainly use during dev to avoid JS recompilation', ) # file to be opened in the notebook server file_to_run = Unicode('', config=True) # Network related information allow_origin = Unicode('', config=True, help="""Set the Access-Control-Allow-Origin header Use '*' to allow any origin to access your server. Takes precedence over allow_origin_pat. """ ) allow_origin_pat = Unicode('', config=True, help="""Use a regular expression for the Access-Control-Allow-Origin header Requests from an origin matching the expression will get replies with: Access-Control-Allow-Origin: origin where `origin` is the origin of the request. Ignored if allow_origin is set. """ ) allow_credentials = Bool(False, config=True, help="Set the Access-Control-Allow-Credentials: true header" ) default_url = Unicode('/tree', config=True, help="The default URL to redirect to from `/`" ) ip = Unicode('localhost', config=True, help="The IP address the notebook server will listen on." ) def _ip_default(self): """Return localhost if available, 127.0.0.1 otherwise. On some (horribly broken) systems, localhost cannot be bound. 
""" s = socket.socket() try: s.bind(('localhost', 0)) except socket.error as e: self.log.warn("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s", e) return '127.0.0.1' else: s.close() return 'localhost' def _ip_changed(self, name, old, new): if new == u'*': self.ip = u'' port = Integer(8888, config=True, help="The port the notebook server will listen on." ) port_retries = Integer(50, config=True, help="The number of additional ports to try if the specified port is not available." ) certfile = Unicode(u'', config=True, help="""The full path to an SSL/TLS certificate file.""" ) keyfile = Unicode(u'', config=True, help="""The full path to a private key file for usage with SSL/TLS.""" ) cookie_secret_file = Unicode(config=True, help="""The file where the cookie secret is stored.""" ) def _cookie_secret_file_default(self): return os.path.join(self.runtime_dir, 'notebook_cookie_secret') cookie_secret = Bytes(b'', config=True, help="""The random bytes used to secure cookies. By default this is a new random number every time you start the Notebook. Set it to a value in a config file to enable logins to persist across server sessions. Note: Cookie secrets should be kept private, do not share config files with cookie_secret stored in plaintext (you can read the value from a file). 
""" ) def _cookie_secret_default(self): if os.path.exists(self.cookie_secret_file): with io.open(self.cookie_secret_file, 'rb') as f: return f.read() else: secret = base64.encodestring(os.urandom(1024)) self._write_cookie_secret_file(secret) return secret def _write_cookie_secret_file(self, secret): """write my secret to my secret_file""" self.log.info("Writing notebook server cookie secret to %s", self.cookie_secret_file) with io.open(self.cookie_secret_file, 'wb') as f: f.write(secret) try: os.chmod(self.cookie_secret_file, 0o600) except OSError: self.log.warn( "Could not set permissions on %s", self.cookie_secret_file ) password = Unicode(u'', config=True, help="""Hashed password to use for web authentication. To generate, type in a python/IPython shell: from notebook.auth import passwd; passwd() The string should be of the form type:salt:hashed-password. """ ) open_browser = Bool(True, config=True, help="""Whether to open in a browser after starting. The specific browser used is platform dependent and determined by the python standard library `webbrowser` module, unless it is overridden using the --browser (NotebookApp.browser) configuration option. """) browser = Unicode(u'', config=True, help="""Specify what command to use to invoke a web browser when opening the notebook. If not specified, the default browser will be determined by the `webbrowser` standard library module, which allows setting of the BROWSER environment variable to override it. """) webapp_settings = Dict(config=True, help="DEPRECATED, use tornado_settings" ) def _webapp_settings_changed(self, name, old, new): self.log.warn("\n webapp_settings is deprecated, use tornado_settings.\n") self.tornado_settings = new tornado_settings = Dict(config=True, help="Supply overrides for the tornado.web.Application that the " "Jupyter notebook uses.") ssl_options = Dict(config=True, help="""Supply SSL options for the tornado HTTPServer. 
See the tornado docs for details.""") jinja_environment_options = Dict(config=True, help="Supply extra arguments that will be passed to Jinja environment.") jinja_template_vars = Dict( config=True, help="Extra variables to supply to jinja templates when rendering.", ) enable_mathjax = Bool(True, config=True, help="""Whether to enable MathJax for typesetting math/TeX MathJax is the javascript library Jupyter uses to render math/LaTeX. It is very large, so you may want to disable it if you have a slow internet connection, or for offline use of the notebook. When disabled, equations etc. will appear as their untransformed TeX source. """ ) def _enable_mathjax_changed(self, name, old, new): """set mathjax url to empty if mathjax is disabled""" if not new: self.mathjax_url = u'' base_url = Unicode('/', config=True, help='''The base URL for the notebook server. Leading and trailing slashes can be omitted, and will automatically be added. ''') def _base_url_changed(self, name, old, new): if not new.startswith('/'): self.base_url = '/'+new elif not new.endswith('/'): self.base_url = new+'/' base_project_url = Unicode('/', config=True, help="""DEPRECATED use base_url""") def _base_project_url_changed(self, name, old, new): self.log.warn("base_project_url is deprecated, use base_url") self.base_url = new extra_static_paths = List(Unicode(), config=True, help="""Extra paths to search for serving static files. 
This allows adding javascript/css to be available from the notebook server machine, or overriding individual files in the IPython""" ) @property def static_file_path(self): """return extra paths + the default location""" return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH] static_custom_path = List(Unicode(), help="""Path to search for custom.js, css""" ) def _static_custom_path_default(self): return [ os.path.join(d, 'custom') for d in ( self.config_dir, # FIXME: serve IPython profile while we don't have `jupyter migrate` os.path.join(get_ipython_dir(), 'profile_default', 'static'), DEFAULT_STATIC_FILES_PATH) ] extra_template_paths = List(Unicode(), config=True, help="""Extra paths to search for serving jinja templates. Can be used to override templates from notebook.templates.""" ) @property def template_file_path(self): """return extra paths + the default locations""" return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST extra_nbextensions_path = List(Unicode(), config=True, help="""extra paths to look for Javascript notebook extensions""" ) @property def nbextensions_path(self): """The path to look for Javascript notebook extensions""" path = self.extra_nbextensions_path + jupyter_path('nbextensions') # FIXME: remove IPython nbextensions path once migration is setup path.append(os.path.join(get_ipython_dir(), 'nbextensions')) return path websocket_url = Unicode("", config=True, help="""The base URL for websockets, if it differs from the HTTP server (hint: it almost certainly doesn't). 
Should be in the form of an HTTP origin: ws[s]://hostname[:port] """ ) mathjax_url = Unicode("", config=True, help="""The url for MathJax.js.""" ) def _mathjax_url_default(self): if not self.enable_mathjax: return u'' static_url_prefix = self.tornado_settings.get("static_url_prefix", url_path_join(self.base_url, "static") ) return url_path_join(static_url_prefix, 'components', 'MathJax', 'MathJax.js') def _mathjax_url_changed(self, name, old, new): if new and not self.enable_mathjax: # enable_mathjax=False overrides mathjax_url self.mathjax_url = u'' else: self.log.info("Using MathJax: %s", new) contents_manager_class = Type( default_value=FileContentsManager, klass=ContentsManager, config=True, help='The notebook manager class to use.' ) kernel_manager_class = Type( default_value=MappingKernelManager, config=True, help='The kernel manager class to use.' ) session_manager_class = Type( default_value=SessionManager, config=True, help='The session manager class to use.' ) config_manager_class = Type( default_value=ConfigManager, config = True, help='The config manager class to use' ) kernel_spec_manager = Instance(KernelSpecManager, allow_none=True) kernel_spec_manager_class = Type( default_value=KernelSpecManager, config=True, help=""" The kernel spec manager class to use. Should be a subclass of `jupyter_client.kernelspec.KernelSpecManager`. The Api of KernelSpecManager is provisional and might change without warning between this version of Jupyter and the next stable one. """ ) login_handler_class = Type( default_value=LoginHandler, klass=web.RequestHandler, config=True, help='The login handler class to use.', ) logout_handler_class = Type( default_value=LogoutHandler, klass=web.RequestHandler, config=True, help='The logout handler class to use.', ) trust_xheaders = Bool(False, config=True, help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers" "sent by the upstream reverse proxy. 
Necessary if the proxy handles SSL") ) info_file = Unicode() def _info_file_default(self): info_file = "nbserver-%s.json" % os.getpid() return os.path.join(self.runtime_dir, info_file) pylab = Unicode('disabled', config=True, help=""" DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib. """ ) def _pylab_changed(self, name, old, new): """when --pylab is specified, display a warning and exit""" if new != 'warn': backend = ' %s' % new else: backend = '' self.log.error("Support for specifying --pylab on the command line has been removed.") self.log.error( "Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.".format(backend) ) self.exit(1) notebook_dir = Unicode(config=True, help="The directory to use for notebooks and kernels." ) def _notebook_dir_default(self): if self.file_to_run: return os.path.dirname(os.path.abspath(self.file_to_run)) else: return py3compat.getcwd() def _notebook_dir_changed(self, name, old, new): """Do a bit of validation of the notebook dir.""" if not os.path.isabs(new): # If we receive a non-absolute path, make it absolute. self.notebook_dir = os.path.abspath(new) return if not os.path.isdir(new): raise TraitError("No such notebook dir: %r" % new) # setting App.notebook_dir implies setting notebook and kernel dirs as well self.config.FileContentsManager.root_dir = new self.config.MappingKernelManager.root_dir = new server_extensions = List(Unicode(), config=True, help=("Python modules to load as notebook server extensions. 
" "This is an experimental API, and may change in future releases.") ) reraise_server_extension_failures = Bool( False, config=True, help="Reraise exceptions encountered loading server extensions?", ) def parse_command_line(self, argv=None): super(NotebookApp, self).parse_command_line(argv) if self.extra_args: arg0 = self.extra_args[0] f = os.path.abspath(arg0) self.argv.remove(arg0) if not os.path.exists(f): self.log.critical("No such file or directory: %s", f) self.exit(1) # Use config here, to ensure that it takes higher priority than # anything that comes from the profile. c = Config() if os.path.isdir(f): c.NotebookApp.notebook_dir = f elif os.path.isfile(f): c.NotebookApp.file_to_run = f self.update_config(c) def init_configurables(self): self.kernel_spec_manager = self.kernel_spec_manager_class( parent=self, ) self.kernel_manager = self.kernel_manager_class( parent=self, log=self.log, connection_dir=self.runtime_dir, kernel_spec_manager=self.kernel_spec_manager, ) self.contents_manager = self.contents_manager_class( parent=self, log=self.log, ) self.session_manager = self.session_manager_class( parent=self, log=self.log, kernel_manager=self.kernel_manager, contents_manager=self.contents_manager, ) self.config_manager = self.config_manager_class( parent=self, log=self.log, config_dir=os.path.join(self.config_dir, 'nbconfig'), ) def init_logging(self): # This prevents double log messages because tornado use a root logger that # self.log is a child of. The logging module dipatches log messages to a log # and all of its ancenstors until propagate is set to False. self.log.propagate = False for log in app_log, access_log, gen_log: # consistent log output name (NotebookApp instead of tornado.access, etc.) 
log.name = self.log.name # hook up tornado 3's loggers to our app handlers logger = logging.getLogger('tornado') logger.propagate = True logger.parent = self.log logger.setLevel(self.log.level) def init_webapp(self): """initialize tornado webapp and httpserver""" self.tornado_settings['allow_origin'] = self.allow_origin if self.allow_origin_pat: self.tornado_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat) self.tornado_settings['allow_credentials'] = self.allow_credentials # ensure default_url starts with base_url if not self.default_url.startswith(self.base_url): self.default_url = url_path_join(self.base_url, self.default_url) self.web_app = NotebookWebApplication( self, self.kernel_manager, self.contents_manager, self.session_manager, self.kernel_spec_manager, self.config_manager, self.log, self.base_url, self.default_url, self.tornado_settings, self.jinja_environment_options ) ssl_options = self.ssl_options if self.certfile: ssl_options['certfile'] = self.certfile if self.keyfile: ssl_options['keyfile'] = self.keyfile if not ssl_options: # None indicates no SSL config ssl_options = None else: # Disable SSLv3, since its use is discouraged. ssl_options['ssl_version']=ssl.PROTOCOL_TLSv1 self.login_handler_class.validate_security(self, ssl_options=ssl_options) self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options, xheaders=self.trust_xheaders) success = None for port in random_ports(self.port, self.port_retries+1): try: self.http_server.listen(port, self.ip) except socket.error as e: if e.errno == errno.EADDRINUSE: self.log.info('The port %i is already in use, trying another random port.' 
% port) continue elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)): self.log.warn("Permission to listen on port %i denied" % port) continue else: raise else: self.port = port success = True break if not success: self.log.critical('ERROR: the notebook server could not be started because ' 'no available port could be found.') self.exit(1) @property def display_url(self): ip = self.ip if self.ip else '[all ip addresses on your system]' return self._url(ip) @property def connection_url(self): ip = self.ip if self.ip else 'localhost' return self._url(ip) def _url(self, ip): proto = 'https' if self.certfile else 'http' return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url) def init_terminals(self): try: from .terminal import initialize initialize(self.web_app, self.notebook_dir, self.connection_url) self.web_app.settings['terminals_available'] = True except ImportError as e: log = self.log.debug if sys.platform == 'win32' else self.log.warn log("Terminals not available (error was %s)", e) def init_signal(self): if not sys.platform.startswith('win') and sys.stdin.isatty(): signal.signal(signal.SIGINT, self._handle_sigint) signal.signal(signal.SIGTERM, self._signal_stop) if hasattr(signal, 'SIGUSR1'): # Windows doesn't support SIGUSR1 signal.signal(signal.SIGUSR1, self._signal_info) if hasattr(signal, 'SIGINFO'): # only on BSD-based systems signal.signal(signal.SIGINFO, self._signal_info) def _handle_sigint(self, sig, frame): """SIGINT handler spawns confirmation dialog""" # register more forceful signal handler for ^C^C case signal.signal(signal.SIGINT, self._signal_stop) # request confirmation dialog in bg thread, to avoid # blocking the App thread = threading.Thread(target=self._confirm_exit) thread.daemon = True thread.start() def _restore_sigint_handler(self): """callback for restoring original SIGINT handler""" signal.signal(signal.SIGINT, self._handle_sigint) def _confirm_exit(self): """confirm shutdown on ^C A second ^C, or answering 
'y' within 5s will cause shutdown, otherwise original SIGINT handler will be restored. This doesn't work on Windows. """ info = self.log.info info('interrupted') print(self.notebook_info()) sys.stdout.write("Shutdown this notebook server (y/[n])? ") sys.stdout.flush() r,w,x = select.select([sys.stdin], [], [], 5) if r: line = sys.stdin.readline() if line.lower().startswith('y') and 'n' not in line.lower(): self.log.critical("Shutdown confirmed") ioloop.IOLoop.current().stop() return else: print("No answer for 5s:", end=' ') print("resuming operation...") # no answer, or answer is no: # set it back to original SIGINT handler # use IOLoop.add_callback because signal.signal must be called # from main thread ioloop.IOLoop.current().add_callback(self._restore_sigint_handler) def _signal_stop(self, sig, frame): self.log.critical("received signal %s, stopping", sig) ioloop.IOLoop.current().stop() def _signal_info(self, sig, frame): print(self.notebook_info()) def init_components(self): """Check the components submodule, and warn if it's unclean""" # TODO: this should still check, but now we use bower, not git submodule pass def init_server_extensions(self): """Load any extensions specified by config. Import the module, then call the load_jupyter_server_extension function, if one exists. The extension API is experimental, and may change in future releases. 
""" for modulename in self.server_extensions: try: mod = importlib.import_module(modulename) func = getattr(mod, 'load_jupyter_server_extension', None) if func is not None: func(self) except Exception: if self.reraise_server_extension_failures: raise self.log.warn("Error loading server extension %s", modulename, exc_info=True) @catch_config_error def initialize(self, argv=None): super(NotebookApp, self).initialize(argv) self.init_logging() if self._dispatching: return self.init_configurables() self.init_components() self.init_webapp() self.init_terminals() self.init_signal() self.init_server_extensions() def cleanup_kernels(self): """Shutdown all kernels. The kernels will shutdown themselves when this process no longer exists, but explicit shutdown allows the KernelManagers to cleanup the connection files. """ self.log.info('Shutting down kernels') self.kernel_manager.shutdown_all() def notebook_info(self): "Return the current working directory and the server url information" info = self.contents_manager.info_string() + "\n" info += "%d active kernels \n" % len(self.kernel_manager._kernels) return info + "The Jupyter Notebook is running at: %s" % self.display_url def server_info(self): """Return a JSONable dict of information about this server.""" return {'url': self.connection_url, 'hostname': self.ip if self.ip else 'localhost', 'port': self.port, 'secure': bool(self.certfile), 'base_url': self.base_url, 'notebook_dir': os.path.abspath(self.notebook_dir), 'pid': os.getpid() } def write_server_info_file(self): """Write the result of server_info() to the JSON file info_file.""" with open(self.info_file, 'w') as f: json.dump(self.server_info(), f, indent=2) def remove_server_info_file(self): """Remove the nbserver-<pid>.json file created for this server. Ignores the error raised when the file has already been removed. 
""" try: os.unlink(self.info_file) except OSError as e: if e.errno != errno.ENOENT: raise def start(self): """ Start the Notebook server app, after initialization This method takes no arguments so all configuration and initialization must be done prior to calling this method.""" super(NotebookApp, self).start() info = self.log.info for line in self.notebook_info().split("\n"): info(line) info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).") self.write_server_info_file() if self.open_browser or self.file_to_run: try: browser = webbrowser.get(self.browser or None) except webbrowser.Error as e: self.log.warn('No web browser found: %s.' % e) browser = None if self.file_to_run: if not os.path.exists(self.file_to_run): self.log.critical("%s does not exist" % self.file_to_run) self.exit(1) relpath = os.path.relpath(self.file_to_run, self.notebook_dir) uri = url_path_join('notebooks', *relpath.split(os.sep)) else: uri = 'tree' if browser: b = lambda : browser.open(url_path_join(self.connection_url, uri), new=2) threading.Thread(target=b).start() self.io_loop = ioloop.IOLoop.current() if sys.platform.startswith('win'): # add no-op to wake every 5s # to handle signals that may be ignored by the inner loop pc = ioloop.PeriodicCallback(lambda : None, 5000) pc.start() try: self.io_loop.start() except KeyboardInterrupt: info("Interrupted...") finally: self.cleanup_kernels() self.remove_server_info_file() def stop(self): def _stop(): self.http_server.stop() self.io_loop.stop() self.io_loop.add_callback(_stop) def list_running_servers(runtime_dir=None): """Iterate over the server info files of running notebook servers. Given a profile name, find nbserver-* files in the security directory of that profile, and yield dicts of their information, each one pertaining to a currently running notebook server instance. 
""" if runtime_dir is None: runtime_dir = jupyter_runtime_dir() # The runtime dir might not exist if not os.path.isdir(runtime_dir): return for file in os.listdir(runtime_dir): if file.startswith('nbserver-'): with io.open(os.path.join(runtime_dir, file), encoding='utf-8') as f: info = json.load(f) # Simple check whether that process is really still running # Also remove leftover files from IPython 2.x without a pid field if ('pid' in info) and check_pid(info['pid']): yield info else: # If the process has died, try to delete its info file try: os.unlink(file) except OSError: pass # TODO: This should warn or log or something #----------------------------------------------------------------------------- # Main entry point #----------------------------------------------------------------------------- main = launch_new_instance = NotebookApp.launch_instance
./CrossVul/dataset_final_sorted/CWE-79/py/good_1728_0
crossvul-python_data_bad_5191_4
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime import os import tempfile import uuid from django.contrib.auth.models import User from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.core.files.storage import FileSystemStorage from django.db import models from django.utils.encoding import python_2_unicode_compatible class Section(models.Model): """ A simple section that links to articles, to test linking to related items in admin views. """ name = models.CharField(max_length=100) @property def name_property(self): """ A property that simply returns the name. Used to test #24461 """ return self.name @python_2_unicode_compatible class Article(models.Model): """ A simple article to test admin views. Test backwards compatibility. """ title = models.CharField(max_length=100) content = models.TextField() date = models.DateTimeField() section = models.ForeignKey(Section, null=True, blank=True) sub_section = models.ForeignKey(Section, null=True, blank=True, on_delete=models.SET_NULL, related_name='+') def __str__(self): return self.title def model_year(self): return self.date.year model_year.admin_order_field = 'date' model_year.short_description = '' def model_year_reversed(self): return self.date.year model_year_reversed.admin_order_field = '-date' model_year_reversed.short_description = '' @python_2_unicode_compatible class Book(models.Model): """ A simple book that has chapters. 
""" name = models.CharField(max_length=100, verbose_name='¿Name?') def __str__(self): return self.name @python_2_unicode_compatible class Promo(models.Model): name = models.CharField(max_length=100, verbose_name='¿Name?') book = models.ForeignKey(Book) def __str__(self): return self.name @python_2_unicode_compatible class Chapter(models.Model): title = models.CharField(max_length=100, verbose_name='¿Title?') content = models.TextField() book = models.ForeignKey(Book) def __str__(self): return self.title class Meta: # Use a utf-8 bytestring to ensure it works (see #11710) verbose_name = '¿Chapter?' @python_2_unicode_compatible class ChapterXtra1(models.Model): chap = models.OneToOneField(Chapter, verbose_name='¿Chap?') xtra = models.CharField(max_length=100, verbose_name='¿Xtra?') def __str__(self): return '¿Xtra1: %s' % self.xtra @python_2_unicode_compatible class ChapterXtra2(models.Model): chap = models.OneToOneField(Chapter, verbose_name='¿Chap?') xtra = models.CharField(max_length=100, verbose_name='¿Xtra?') def __str__(self): return '¿Xtra2: %s' % self.xtra class RowLevelChangePermissionModel(models.Model): name = models.CharField(max_length=100, blank=True) class CustomArticle(models.Model): content = models.TextField() date = models.DateTimeField() @python_2_unicode_compatible class ModelWithStringPrimaryKey(models.Model): string_pk = models.CharField(max_length=255, primary_key=True) def __str__(self): return self.string_pk def get_absolute_url(self): return '/dummy/%s/' % self.string_pk @python_2_unicode_compatible class Color(models.Model): value = models.CharField(max_length=10) warm = models.BooleanField(default=False) def __str__(self): return self.value # we replicate Color to register with another ModelAdmin class Color2(Color): class Meta: proxy = True @python_2_unicode_compatible class Thing(models.Model): title = models.CharField(max_length=20) color = models.ForeignKey(Color, limit_choices_to={'warm': True}) pub_date = 
models.DateField(blank=True, null=True) def __str__(self): return self.title @python_2_unicode_compatible class Actor(models.Model): name = models.CharField(max_length=50) age = models.IntegerField() title = models.CharField(max_length=50, null=True, blank=True) def __str__(self): return self.name @python_2_unicode_compatible class Inquisition(models.Model): expected = models.BooleanField(default=False) leader = models.ForeignKey(Actor) country = models.CharField(max_length=20) def __str__(self): return "by %s from %s" % (self.leader, self.country) @python_2_unicode_compatible class Sketch(models.Model): title = models.CharField(max_length=100) inquisition = models.ForeignKey(Inquisition, limit_choices_to={'leader__name': 'Palin', 'leader__age': 27, 'expected': False, }) defendant0 = models.ForeignKey(Actor, limit_choices_to={'title__isnull': False}, related_name='as_defendant0') defendant1 = models.ForeignKey(Actor, limit_choices_to={'title__isnull': True}, related_name='as_defendant1') def __str__(self): return self.title def today_callable_dict(): return {"last_action__gte": datetime.datetime.today()} def today_callable_q(): return models.Q(last_action__gte=datetime.datetime.today()) @python_2_unicode_compatible class Character(models.Model): username = models.CharField(max_length=100) last_action = models.DateTimeField() def __str__(self): return self.username @python_2_unicode_compatible class StumpJoke(models.Model): variation = models.CharField(max_length=100) most_recently_fooled = models.ForeignKey(Character, limit_choices_to=today_callable_dict, related_name="+") has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+") def __str__(self): return self.variation class Fabric(models.Model): NG_CHOICES = ( ('Textured', ( ('x', 'Horizontal'), ('y', 'Vertical'), )), ('plain', 'Smooth'), ) surface = models.CharField(max_length=20, choices=NG_CHOICES) @python_2_unicode_compatible class Person(models.Model): 
GENDER_CHOICES = ( (1, "Male"), (2, "Female"), ) name = models.CharField(max_length=100) gender = models.IntegerField(choices=GENDER_CHOICES) age = models.IntegerField(default=21) alive = models.BooleanField(default=True) def __str__(self): return self.name @python_2_unicode_compatible class Persona(models.Model): """ A simple persona associated with accounts, to test inlining of related accounts which inherit from a common accounts class. """ name = models.CharField(blank=False, max_length=80) def __str__(self): return self.name @python_2_unicode_compatible class Account(models.Model): """ A simple, generic account encapsulating the information shared by all types of accounts. """ username = models.CharField(blank=False, max_length=80) persona = models.ForeignKey(Persona, related_name="accounts") servicename = 'generic service' def __str__(self): return "%s: %s" % (self.servicename, self.username) class FooAccount(Account): """A service-specific account of type Foo.""" servicename = 'foo' class BarAccount(Account): """A service-specific account of type Bar.""" servicename = 'bar' @python_2_unicode_compatible class Subscriber(models.Model): name = models.CharField(blank=False, max_length=80) email = models.EmailField(blank=False, max_length=175) def __str__(self): return "%s (%s)" % (self.name, self.email) class ExternalSubscriber(Subscriber): pass class OldSubscriber(Subscriber): pass class Media(models.Model): name = models.CharField(max_length=60) class Podcast(Media): release_date = models.DateField() class Meta: ordering = ('release_date',) # overridden in PodcastAdmin class Vodcast(Media): media = models.OneToOneField(Media, primary_key=True, parent_link=True) released = models.BooleanField(default=False) class Parent(models.Model): name = models.CharField(max_length=128) class Child(models.Model): parent = models.ForeignKey(Parent, editable=False) name = models.CharField(max_length=30, blank=True) @python_2_unicode_compatible class EmptyModel(models.Model): 
def __str__(self): return "Primary key = %s" % self.id temp_storage = FileSystemStorage(tempfile.mkdtemp()) UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload') class Gallery(models.Model): name = models.CharField(max_length=100) class Picture(models.Model): name = models.CharField(max_length=100) image = models.FileField(storage=temp_storage, upload_to='test_upload') gallery = models.ForeignKey(Gallery, related_name="pictures") class Language(models.Model): iso = models.CharField(max_length=5, primary_key=True) name = models.CharField(max_length=50) english_name = models.CharField(max_length=50) shortlist = models.BooleanField(default=False) class Meta: ordering = ('iso',) # a base class for Recommender and Recommendation class Title(models.Model): pass class TitleTranslation(models.Model): title = models.ForeignKey(Title) text = models.CharField(max_length=100) class Recommender(Title): pass class Recommendation(Title): recommender = models.ForeignKey(Recommender) class Collector(models.Model): name = models.CharField(max_length=100) class Widget(models.Model): owner = models.ForeignKey(Collector) name = models.CharField(max_length=100) class DooHickey(models.Model): code = models.CharField(max_length=10, primary_key=True) owner = models.ForeignKey(Collector) name = models.CharField(max_length=100) class Grommet(models.Model): code = models.AutoField(primary_key=True) owner = models.ForeignKey(Collector) name = models.CharField(max_length=100) class Whatsit(models.Model): index = models.IntegerField(primary_key=True) owner = models.ForeignKey(Collector) name = models.CharField(max_length=100) class Doodad(models.Model): name = models.CharField(max_length=100) class FancyDoodad(Doodad): owner = models.ForeignKey(Collector) expensive = models.BooleanField(default=True) @python_2_unicode_compatible class Category(models.Model): collector = models.ForeignKey(Collector) order = models.PositiveIntegerField() class Meta: ordering = ('order',) def 
__str__(self): return '%s:o%s' % (self.id, self.order) class Link(models.Model): posted = models.DateField( default=lambda: datetime.date.today() - datetime.timedelta(days=7) ) url = models.URLField() post = models.ForeignKey("Post") readonly_link_content = models.TextField() class PrePopulatedPost(models.Model): title = models.CharField(max_length=100) published = models.BooleanField(default=False) slug = models.SlugField() class PrePopulatedSubPost(models.Model): post = models.ForeignKey(PrePopulatedPost) subtitle = models.CharField(max_length=100) subslug = models.SlugField() class Post(models.Model): title = models.CharField(max_length=100, help_text="Some help text for the title (with unicode ŠĐĆŽćžšđ)") content = models.TextField(help_text="Some help text for the content (with unicode ŠĐĆŽćžšđ)") readonly_content = models.TextField() posted = models.DateField( default=datetime.date.today, help_text="Some help text for the date (with unicode ŠĐĆŽćžšđ)" ) public = models.NullBooleanField() def awesomeness_level(self): return "Very awesome." # Proxy model to test overridden fields attrs on Post model so as not to # interfere with other tests. class FieldOverridePost(Post): class Meta: proxy = True @python_2_unicode_compatible class Gadget(models.Model): name = models.CharField(max_length=100) def __str__(self): return self.name @python_2_unicode_compatible class Villain(models.Model): name = models.CharField(max_length=100) def __str__(self): return self.name class SuperVillain(Villain): pass @python_2_unicode_compatible class FunkyTag(models.Model): "Because we all know there's only one real use case for GFKs." 
name = models.CharField(max_length=25) content_type = models.ForeignKey(ContentType) object_id = models.PositiveIntegerField() content_object = GenericForeignKey('content_type', 'object_id') def __str__(self): return self.name @python_2_unicode_compatible class Plot(models.Model): name = models.CharField(max_length=100) team_leader = models.ForeignKey(Villain, related_name='lead_plots') contact = models.ForeignKey(Villain, related_name='contact_plots') tags = GenericRelation(FunkyTag) def __str__(self): return self.name @python_2_unicode_compatible class PlotDetails(models.Model): details = models.CharField(max_length=100) plot = models.OneToOneField(Plot, null=True, blank=True) def __str__(self): return self.details class PlotProxy(Plot): class Meta: proxy = True @python_2_unicode_compatible class SecretHideout(models.Model): """ Secret! Not registered with the admin! """ location = models.CharField(max_length=100) villain = models.ForeignKey(Villain) def __str__(self): return self.location @python_2_unicode_compatible class SuperSecretHideout(models.Model): """ Secret! Not registered with the admin! 
""" location = models.CharField(max_length=100) supervillain = models.ForeignKey(SuperVillain) def __str__(self): return self.location @python_2_unicode_compatible class CyclicOne(models.Model): name = models.CharField(max_length=25) two = models.ForeignKey('CyclicTwo') def __str__(self): return self.name @python_2_unicode_compatible class CyclicTwo(models.Model): name = models.CharField(max_length=25) one = models.ForeignKey(CyclicOne) def __str__(self): return self.name class Topping(models.Model): name = models.CharField(max_length=20) class Pizza(models.Model): name = models.CharField(max_length=20) toppings = models.ManyToManyField('Topping', related_name='pizzas') class Album(models.Model): owner = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL) title = models.CharField(max_length=30) class Employee(Person): code = models.CharField(max_length=20) class WorkHour(models.Model): datum = models.DateField() employee = models.ForeignKey(Employee) class Question(models.Model): question = models.CharField(max_length=20) @python_2_unicode_compatible class Answer(models.Model): question = models.ForeignKey(Question, on_delete=models.PROTECT) answer = models.CharField(max_length=20) def __str__(self): return self.answer class Reservation(models.Model): start_date = models.DateTimeField() price = models.IntegerField() DRIVER_CHOICES = ( ('bill', 'Bill G'), ('steve', 'Steve J'), ) RESTAURANT_CHOICES = ( ('indian', 'A Taste of India'), ('thai', 'Thai Pography'), ('pizza', 'Pizza Mama'), ) class FoodDelivery(models.Model): reference = models.CharField(max_length=100) driver = models.CharField(max_length=100, choices=DRIVER_CHOICES, blank=True) restaurant = models.CharField(max_length=100, choices=RESTAURANT_CHOICES, blank=True) class Meta: unique_together = (("driver", "restaurant"),) @python_2_unicode_compatible class CoverLetter(models.Model): author = models.CharField(max_length=30) date_written = models.DateField(null=True, blank=True) def 
__str__(self): return self.author class Paper(models.Model): title = models.CharField(max_length=30) author = models.CharField(max_length=30, blank=True, null=True) class ShortMessage(models.Model): content = models.CharField(max_length=140) timestamp = models.DateTimeField(null=True, blank=True) @python_2_unicode_compatible class Telegram(models.Model): title = models.CharField(max_length=30) date_sent = models.DateField(null=True, blank=True) def __str__(self): return self.title class Story(models.Model): title = models.CharField(max_length=100) content = models.TextField() class OtherStory(models.Model): title = models.CharField(max_length=100) content = models.TextField() class ComplexSortedPerson(models.Model): name = models.CharField(max_length=100) age = models.PositiveIntegerField() is_employee = models.NullBooleanField() class PluggableSearchPerson(models.Model): name = models.CharField(max_length=100) age = models.PositiveIntegerField() class PrePopulatedPostLargeSlug(models.Model): """ Regression test for #15938: a large max_length for the slugfield must not be localized in prepopulated_fields_js.html or it might end up breaking the javascript (ie, using THOUSAND_SEPARATOR ends up with maxLength=1,000) """ title = models.CharField(max_length=100) published = models.BooleanField(default=False) # `db_index=False` because MySQL cannot index large CharField (#21196). 
slug = models.SlugField(max_length=1000, db_index=False) class AdminOrderedField(models.Model): order = models.IntegerField() stuff = models.CharField(max_length=200) class AdminOrderedModelMethod(models.Model): order = models.IntegerField() stuff = models.CharField(max_length=200) def some_order(self): return self.order some_order.admin_order_field = 'order' class AdminOrderedAdminMethod(models.Model): order = models.IntegerField() stuff = models.CharField(max_length=200) class AdminOrderedCallable(models.Model): order = models.IntegerField() stuff = models.CharField(max_length=200) @python_2_unicode_compatible class Report(models.Model): title = models.CharField(max_length=100) def __str__(self): return self.title class MainPrepopulated(models.Model): name = models.CharField(max_length=100) pubdate = models.DateField() status = models.CharField( max_length=20, choices=(('option one', 'Option One'), ('option two', 'Option Two'))) slug1 = models.SlugField(blank=True) slug2 = models.SlugField(blank=True) class RelatedPrepopulated(models.Model): parent = models.ForeignKey(MainPrepopulated) name = models.CharField(max_length=75) pubdate = models.DateField() status = models.CharField( max_length=20, choices=(('option one', 'Option One'), ('option two', 'Option Two'))) slug1 = models.SlugField(max_length=50) slug2 = models.SlugField(max_length=60) class UnorderedObject(models.Model): """ Model without any defined `Meta.ordering`. Refs #16819. """ name = models.CharField(max_length=255) bool = models.BooleanField(default=True) class UndeletableObject(models.Model): """ Model whose show_delete in admin change_view has been disabled Refs #10057. """ name = models.CharField(max_length=255) class UnchangeableObject(models.Model): """ Model whose change_view is disabled in admin Refs #20640. 
""" class UserMessenger(models.Model): """ Dummy class for testing message_user functions on ModelAdmin """ class Simple(models.Model): """ Simple model with nothing on it for use in testing """ class Choice(models.Model): choice = models.IntegerField(blank=True, null=True, choices=((1, 'Yes'), (0, 'No'), (None, 'No opinion'))) class ParentWithDependentChildren(models.Model): """ Issue #20522 Model where the validation of child foreign-key relationships depends on validation of the parent """ some_required_info = models.PositiveIntegerField() family_name = models.CharField(max_length=255, blank=False) class DependentChild(models.Model): """ Issue #20522 Model that depends on validation of the parent class for one of its fields to validate during clean """ parent = models.ForeignKey(ParentWithDependentChildren) family_name = models.CharField(max_length=255) class _Manager(models.Manager): def get_queryset(self): return super(_Manager, self).get_queryset().filter(pk__gt=1) class FilteredManager(models.Model): def __str__(self): return "PK=%d" % self.pk pk_gt_1 = _Manager() objects = models.Manager() class EmptyModelVisible(models.Model): """ See ticket #11277. """ class EmptyModelHidden(models.Model): """ See ticket #11277. """ class EmptyModelMixin(models.Model): """ See ticket #11277. 
""" class State(models.Model): name = models.CharField(max_length=100) class City(models.Model): state = models.ForeignKey(State) name = models.CharField(max_length=100) def get_absolute_url(self): return '/dummy/%s/' % self.pk class Restaurant(models.Model): city = models.ForeignKey(City) name = models.CharField(max_length=100) def get_absolute_url(self): return '/dummy/%s/' % self.pk class Worker(models.Model): work_at = models.ForeignKey(Restaurant) name = models.CharField(max_length=50) surname = models.CharField(max_length=50) # Models for #23329 class ReferencedByParent(models.Model): name = models.CharField(max_length=20, unique=True) class ParentWithFK(models.Model): fk = models.ForeignKey( ReferencedByParent, to_field='name', related_name='hidden+', ) class ChildOfReferer(ParentWithFK): pass # Models for #23431 class ReferencedByInline(models.Model): name = models.CharField(max_length=20, unique=True) class InlineReference(models.Model): fk = models.ForeignKey( ReferencedByInline, to_field='name', related_name='hidden+', ) class InlineReferer(models.Model): refs = models.ManyToManyField(InlineReference) # Models for #23604 and #23915 class Recipe(models.Model): rname = models.CharField(max_length=20, unique=True) class Ingredient(models.Model): iname = models.CharField(max_length=20, unique=True) recipes = models.ManyToManyField(Recipe, through='RecipeIngredient') class RecipeIngredient(models.Model): ingredient = models.ForeignKey(Ingredient, to_field='iname') recipe = models.ForeignKey(Recipe, to_field='rname') # Model for #23839 class NotReferenced(models.Model): # Don't point any FK at this model. 
pass # Models for #23934 class ExplicitlyProvidedPK(models.Model): name = models.IntegerField(primary_key=True) class ImplicitlyGeneratedPK(models.Model): name = models.IntegerField(unique=True) # Models for #25622 class ReferencedByGenRel(models.Model): content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey('content_type', 'object_id') class GenRelReference(models.Model): references = GenericRelation(ReferencedByGenRel) class ParentWithUUIDPK(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) title = models.CharField(max_length=100) def __str__(self): return str(self.id) class RelatedWithUUIDPKModel(models.Model): parent = models.ForeignKey(ParentWithUUIDPK, on_delete=models.CASCADE)
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5191_4
crossvul-python_data_good_1644_0
"""Base Tornado handlers for the notebook server.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import functools import json import logging import os import re import sys import traceback try: # py3 from http.client import responses except ImportError: from httplib import responses from jinja2 import TemplateNotFound from tornado import web from tornado import gen from tornado.log import app_log import IPython from IPython.utils.sysinfo import get_sys_info from IPython.config import Application from IPython.utils.path import filefind from IPython.utils.py3compat import string_types from IPython.html.utils import is_hidden, url_path_join, url_escape from IPython.html.services.security import csp_report_uri #----------------------------------------------------------------------------- # Top-level handlers #----------------------------------------------------------------------------- non_alphanum = re.compile(r'[^A-Za-z0-9]') sys_info = json.dumps(get_sys_info()) class AuthenticatedHandler(web.RequestHandler): """A RequestHandler with an authenticated user.""" @property def content_security_policy(self): """The default Content-Security-Policy header Can be overridden by defining Content-Security-Policy in settings['headers'] """ return '; '.join([ "frame-ancestors 'self'", # Make sure the report-uri is relative to the base_url "report-uri " + url_path_join(self.base_url, csp_report_uri), ]) def set_default_headers(self): headers = self.settings.get('headers', {}) if "Content-Security-Policy" not in headers: headers["Content-Security-Policy"] = self.content_security_policy # Allow for overriding headers for header_name,value in headers.items() : try: self.set_header(header_name, value) except Exception as e: # tornado raise Exception (not a subclass) # if method is unsupported (websocket and Access-Control-Allow-Origin # for example, so just ignore) self.log.debug(e) def clear_login_cookie(self): 
self.clear_cookie(self.cookie_name) def get_current_user(self): if self.login_handler is None: return 'anonymous' return self.login_handler.get_user(self) @property def cookie_name(self): default_cookie_name = non_alphanum.sub('-', 'username-{}'.format( self.request.host )) return self.settings.get('cookie_name', default_cookie_name) @property def logged_in(self): """Is a user currently logged in?""" user = self.get_current_user() return (user and not user == 'anonymous') @property def login_handler(self): """Return the login handler for this application, if any.""" return self.settings.get('login_handler_class', None) @property def login_available(self): """May a user proceed to log in? This returns True if login capability is available, irrespective of whether the user is already logged in or not. """ if self.login_handler is None: return False return bool(self.login_handler.login_available(self.settings)) class IPythonHandler(AuthenticatedHandler): """IPython-specific extensions to authenticated handling Mostly property shortcuts to IPython-specific settings. 
""" @property def config(self): return self.settings.get('config', None) @property def log(self): """use the IPython log by default, falling back on tornado's logger""" if Application.initialized(): return Application.instance().log else: return app_log @property def jinja_template_vars(self): """User-supplied values to supply to jinja templates.""" return self.settings.get('jinja_template_vars', {}) #--------------------------------------------------------------- # URLs #--------------------------------------------------------------- @property def version_hash(self): """The version hash to use for cache hints for static files""" return self.settings.get('version_hash', '') @property def mathjax_url(self): return self.settings.get('mathjax_url', '') @property def base_url(self): return self.settings.get('base_url', '/') @property def default_url(self): return self.settings.get('default_url', '') @property def ws_url(self): return self.settings.get('websocket_url', '') @property def contents_js_source(self): self.log.debug("Using contents: %s", self.settings.get('contents_js_source', 'services/contents')) return self.settings.get('contents_js_source', 'services/contents') #--------------------------------------------------------------- # Manager objects #--------------------------------------------------------------- @property def kernel_manager(self): return self.settings['kernel_manager'] @property def contents_manager(self): return self.settings['contents_manager'] @property def cluster_manager(self): return self.settings['cluster_manager'] @property def session_manager(self): return self.settings['session_manager'] @property def terminal_manager(self): return self.settings['terminal_manager'] @property def kernel_spec_manager(self): return self.settings['kernel_spec_manager'] @property def config_manager(self): return self.settings['config_manager'] #--------------------------------------------------------------- # CORS 
#--------------------------------------------------------------- @property def allow_origin(self): """Normal Access-Control-Allow-Origin""" return self.settings.get('allow_origin', '') @property def allow_origin_pat(self): """Regular expression version of allow_origin""" return self.settings.get('allow_origin_pat', None) @property def allow_credentials(self): """Whether to set Access-Control-Allow-Credentials""" return self.settings.get('allow_credentials', False) def set_default_headers(self): """Add CORS headers, if defined""" super(IPythonHandler, self).set_default_headers() if self.allow_origin: self.set_header("Access-Control-Allow-Origin", self.allow_origin) elif self.allow_origin_pat: origin = self.get_origin() if origin and self.allow_origin_pat.match(origin): self.set_header("Access-Control-Allow-Origin", origin) if self.allow_credentials: self.set_header("Access-Control-Allow-Credentials", 'true') def get_origin(self): # Handle WebSocket Origin naming convention differences # The difference between version 8 and 13 is that in 8 the # client sends a "Sec-Websocket-Origin" header and in 13 it's # simply "Origin". 
if "Origin" in self.request.headers: origin = self.request.headers.get("Origin") else: origin = self.request.headers.get("Sec-Websocket-Origin", None) return origin #--------------------------------------------------------------- # template rendering #--------------------------------------------------------------- def get_template(self, name): """Return the jinja template object for a given name""" return self.settings['jinja2_env'].get_template(name) def render_template(self, name, **ns): ns.update(self.template_namespace) template = self.get_template(name) return template.render(**ns) @property def template_namespace(self): return dict( base_url=self.base_url, default_url=self.default_url, ws_url=self.ws_url, logged_in=self.logged_in, login_available=self.login_available, static_url=self.static_url, sys_info=sys_info, contents_js_source=self.contents_js_source, version_hash=self.version_hash, **self.jinja_template_vars ) def get_json_body(self): """Return the body of the request as JSON data.""" if not self.request.body: return None # Do we need to call body.decode('utf-8') here? 
body = self.request.body.strip().decode(u'utf-8') try: model = json.loads(body) except Exception: self.log.debug("Bad JSON: %r", body) self.log.error("Couldn't parse JSON", exc_info=True) raise web.HTTPError(400, u'Invalid JSON in body of request') return model def write_error(self, status_code, **kwargs): """render custom error pages""" exc_info = kwargs.get('exc_info') message = '' status_message = responses.get(status_code, 'Unknown HTTP Error') if exc_info: exception = exc_info[1] # get the custom message, if defined try: message = exception.log_message % exception.args except Exception: pass # construct the custom reason, if defined reason = getattr(exception, 'reason', '') if reason: status_message = reason # build template namespace ns = dict( status_code=status_code, status_message=status_message, message=message, exception=exception, ) self.set_header('Content-Type', 'text/html') # render the template try: html = self.render_template('%s.html' % status_code, **ns) except TemplateNotFound: self.log.debug("No template for %d", status_code) html = self.render_template('error.html', **ns) self.write(html) class APIHandler(IPythonHandler): """Base class for API handlers""" @property def content_security_policy(self): csp = '; '.join([ super(APIHandler, self).content_security_policy, "default-src 'none'", ]) return csp def finish(self, *args, **kwargs): self.set_header('Content-Type', 'application/json') return super(APIHandler, self).finish(*args, **kwargs) class Template404(IPythonHandler): """Render our 404 template""" def prepare(self): raise web.HTTPError(404) class AuthenticatedFileHandler(IPythonHandler, web.StaticFileHandler): """static files should only be accessible when logged in""" @web.authenticated def get(self, path): if os.path.splitext(path)[1] == '.ipynb': name = path.rsplit('/', 1)[-1] self.set_header('Content-Type', 'application/json') self.set_header('Content-Disposition','attachment; filename="%s"' % name) return 
web.StaticFileHandler.get(self, path) def set_headers(self): super(AuthenticatedFileHandler, self).set_headers() # disable browser caching, rely on 304 replies for savings if "v" not in self.request.arguments: self.add_header("Cache-Control", "no-cache") def compute_etag(self): return None def validate_absolute_path(self, root, absolute_path): """Validate and return the absolute path. Requires tornado 3.1 Adding to tornado's own handling, forbids the serving of hidden files. """ abs_path = super(AuthenticatedFileHandler, self).validate_absolute_path(root, absolute_path) abs_root = os.path.abspath(root) if is_hidden(abs_path, abs_root): self.log.info("Refusing to serve hidden file, via 404 Error") raise web.HTTPError(404) return abs_path def json_errors(method): """Decorate methods with this to return GitHub style JSON errors. This should be used on any JSON API on any handler method that can raise HTTPErrors. This will grab the latest HTTPError exception using sys.exc_info and then: 1. Set the HTTP status code based on the HTTPError 2. Create and return a JSON body with a message field describing the error in a human readable form. 
""" @functools.wraps(method) @gen.coroutine def wrapper(self, *args, **kwargs): try: result = yield gen.maybe_future(method(self, *args, **kwargs)) except web.HTTPError as e: self.set_header('Content-Type', 'application/json') status = e.status_code message = e.log_message self.log.warn(message) self.set_status(e.status_code) reply = dict(message=message, reason=e.reason) self.finish(json.dumps(reply)) except Exception: self.set_header('Content-Type', 'application/json') self.log.error("Unhandled error in API request", exc_info=True) status = 500 message = "Unknown server error" t, value, tb = sys.exc_info() self.set_status(status) tb_text = ''.join(traceback.format_exception(t, value, tb)) reply = dict(message=message, reason=None, traceback=tb_text) self.finish(json.dumps(reply)) else: # FIXME: can use regular return in generators in py3 raise gen.Return(result) return wrapper #----------------------------------------------------------------------------- # File handler #----------------------------------------------------------------------------- # to minimize subclass changes: HTTPError = web.HTTPError class FileFindHandler(IPythonHandler, web.StaticFileHandler): """subclass of StaticFileHandler for serving files from a search path""" # cache search results, don't search for files more than once _static_paths = {} def set_headers(self): super(FileFindHandler, self).set_headers() # disable browser caching, rely on 304 replies for savings if "v" not in self.request.arguments or \ any(self.request.path.startswith(path) for path in self.no_cache_paths): self.set_header("Cache-Control", "no-cache") def initialize(self, path, default_filename=None, no_cache_paths=None): self.no_cache_paths = no_cache_paths or [] if isinstance(path, string_types): path = [path] self.root = tuple( os.path.abspath(os.path.expanduser(p)) + os.sep for p in path ) self.default_filename = default_filename def compute_etag(self): return None @classmethod def get_absolute_path(cls, roots, 
path): """locate a file to serve on our static file search path""" with cls._lock: if path in cls._static_paths: return cls._static_paths[path] try: abspath = os.path.abspath(filefind(path, roots)) except IOError: # IOError means not found return '' cls._static_paths[path] = abspath return abspath def validate_absolute_path(self, root, absolute_path): """check if the file should be served (raises 404, 403, etc.)""" if absolute_path == '': raise web.HTTPError(404) for root in self.root: if (absolute_path + os.sep).startswith(root): break return super(FileFindHandler, self).validate_absolute_path(root, absolute_path) class APIVersionHandler(APIHandler): @json_errors def get(self): # not authenticated, so give as few info as possible self.finish(json.dumps({"version":IPython.__version__})) class TrailingSlashHandler(web.RequestHandler): """Simple redirect handler that strips trailing slashes This should be the first, highest priority handler. """ def get(self): self.redirect(self.request.uri.rstrip('/')) post = put = get class FilesRedirectHandler(IPythonHandler): """Handler for redirecting relative URLs to the /files/ handler""" @staticmethod def redirect_to_files(self, path): """make redirect logic a reusable static method so it can be called from other handlers. 
""" cm = self.contents_manager if cm.dir_exists(path): # it's a *directory*, redirect to /tree url = url_path_join(self.base_url, 'tree', path) else: orig_path = path # otherwise, redirect to /files parts = path.split('/') if not cm.file_exists(path=path) and 'files' in parts: # redirect without files/ iff it would 404 # this preserves pre-2.0-style 'files/' links self.log.warn("Deprecated files/ URL: %s", orig_path) parts.remove('files') path = '/'.join(parts) if not cm.file_exists(path=path): raise web.HTTPError(404) url = url_path_join(self.base_url, 'files', path) url = url_escape(url) self.log.debug("Redirecting %s to %s", self.request.path, url) self.redirect(url) def get(self, path=''): return self.redirect_to_files(self, path) #----------------------------------------------------------------------------- # URL pattern fragments for re-use #----------------------------------------------------------------------------- # path matches any number of `/foo[/bar...]` or just `/` or '' path_regex = r"(?P<path>(?:(?:/[^/]+)+|/?))" #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- default_handlers = [ (r".*/", TrailingSlashHandler), (r"api", APIVersionHandler) ]
./CrossVul/dataset_final_sorted/CWE-79/py/good_1644_0
crossvul-python_data_good_2188_0
#
# gravatars.py -- Decorational template tags
#
# Copyright (c) 2008-2009 Christian Hammond
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from __future__ import unicode_literals

from django import template
from django.utils.html import format_html

from djblets.gravatars import (get_gravatar_url,
                               get_gravatar_url_for_email)
from djblets.util.decorators import basictag


register = template.Library()


@register.tag
@basictag(takes_context=True)
def gravatar(context, user, size=None):
    """Output the HTML for displaying a user's gravatar.

    This can take an optional size of the image (defaults to 80 if not
    specified). It is also influenced by the following settings:

        GRAVATAR_SIZE    - Default size for gravatars
        GRAVATAR_RATING  - Maximum allowed rating (g, pg, r, x)
        GRAVATAR_DEFAULT - Default image set to show if the user hasn't
                           specified a gravatar (identicon, monsterid,
                           wavatar)

    See http://www.gravatar.com/ for more information.
    """
    img_url = get_gravatar_url(context['request'], user, size)

    if not img_url:
        return ''

    # format_html escapes the interpolated values, keeping user-controlled
    # names from injecting markup.
    return format_html(
        '<img src="{0}" width="{1}" height="{1}" alt="{2}" '
        'class="gravatar"/>',
        img_url, size, user.get_full_name() or user.username)


@register.tag
@basictag(takes_context=True)
def gravatar_url(context, email, size=None):
    """Output the URL for a gravatar for the given email address.

    This can take an optional size of the image (defaults to 80 if not
    specified). It is also influenced by the following settings:

        GRAVATAR_SIZE    - Default size for gravatars
        GRAVATAR_RATING  - Maximum allowed rating (g, pg, r, x)
        GRAVATAR_DEFAULT - Default image set to show if the user hasn't
                           specified a gravatar (identicon, monsterid,
                           wavatar)

    See http://www.gravatar.com/ for more information.
    """
    return get_gravatar_url_for_email(context['request'], email, size)
./CrossVul/dataset_final_sorted/CWE-79/py/good_2188_0
crossvul-python_data_bad_1644_3
"""Tornado handlers for the contents web service.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import json from tornado import gen, web from IPython.html.utils import url_path_join, url_escape from IPython.utils.jsonutil import date_default from IPython.html.base.handlers import ( IPythonHandler, json_errors, path_regex, ) def sort_key(model): """key function for case-insensitive sort by name and type""" iname = model['name'].lower() type_key = { 'directory' : '0', 'notebook' : '1', 'file' : '2', }.get(model['type'], '9') return u'%s%s' % (type_key, iname) def validate_model(model, expect_content): """ Validate a model returned by a ContentsManager method. If expect_content is True, then we expect non-null entries for 'content' and 'format'. """ required_keys = { "name", "path", "type", "writable", "created", "last_modified", "mimetype", "content", "format", } missing = required_keys - set(model.keys()) if missing: raise web.HTTPError( 500, u"Missing Model Keys: {missing}".format(missing=missing), ) maybe_none_keys = ['content', 'format'] if model['type'] == 'file': # mimetype should be populated only for file models maybe_none_keys.append('mimetype') if expect_content: errors = [key for key in maybe_none_keys if model[key] is None] if errors: raise web.HTTPError( 500, u"Keys unexpectedly None: {keys}".format(keys=errors), ) else: errors = { key: model[key] for key in maybe_none_keys if model[key] is not None } if errors: raise web.HTTPError( 500, u"Keys unexpectedly not None: {keys}".format(keys=errors), ) class ContentsHandler(IPythonHandler): SUPPORTED_METHODS = (u'GET', u'PUT', u'PATCH', u'POST', u'DELETE') def location_url(self, path): """Return the full URL location of a file. Parameters ---------- path : unicode The API path of the file, such as "foo/bar.txt". 
""" return url_escape(url_path_join( self.base_url, 'api', 'contents', path )) def _finish_model(self, model, location=True): """Finish a JSON request with a model, setting relevant headers, etc.""" if location: location = self.location_url(model['path']) self.set_header('Location', location) self.set_header('Last-Modified', model['last_modified']) self.set_header('Content-Type', 'application/json') self.finish(json.dumps(model, default=date_default)) @web.authenticated @json_errors @gen.coroutine def get(self, path=''): """Return a model for a file or directory. A directory model contains a list of models (without content) of the files and directories it contains. """ path = path or '' type = self.get_query_argument('type', default=None) if type not in {None, 'directory', 'file', 'notebook'}: raise web.HTTPError(400, u'Type %r is invalid' % type) format = self.get_query_argument('format', default=None) if format not in {None, 'text', 'base64'}: raise web.HTTPError(400, u'Format %r is invalid' % format) content = self.get_query_argument('content', default='1') if content not in {'0', '1'}: raise web.HTTPError(400, u'Content %r is invalid' % content) content = int(content) model = yield gen.maybe_future(self.contents_manager.get( path=path, type=type, format=format, content=content, )) if model['type'] == 'directory' and content: # group listing by type, then by name (case-insensitive) # FIXME: sorting should be done in the frontends model['content'].sort(key=sort_key) validate_model(model, expect_content=content) self._finish_model(model, location=False) @web.authenticated @json_errors @gen.coroutine def patch(self, path=''): """PATCH renames a file or directory without re-uploading content.""" cm = self.contents_manager model = self.get_json_body() if model is None: raise web.HTTPError(400, u'JSON body missing') model = yield gen.maybe_future(cm.update(model, path)) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def 
_copy(self, copy_from, copy_to=None): """Copy a file, optionally specifying a target directory.""" self.log.info(u"Copying {copy_from} to {copy_to}".format( copy_from=copy_from, copy_to=copy_to or '', )) model = yield gen.maybe_future(self.contents_manager.copy(copy_from, copy_to)) self.set_status(201) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def _upload(self, model, path): """Handle upload of a new file to path""" self.log.info(u"Uploading file to %s", path) model = yield gen.maybe_future(self.contents_manager.new(model, path)) self.set_status(201) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def _new_untitled(self, path, type='', ext=''): """Create a new, empty untitled entity""" self.log.info(u"Creating new %s in %s", type or 'file', path) model = yield gen.maybe_future(self.contents_manager.new_untitled(path=path, type=type, ext=ext)) self.set_status(201) validate_model(model, expect_content=False) self._finish_model(model) @gen.coroutine def _save(self, model, path): """Save an existing file.""" self.log.info(u"Saving file at %s", path) model = yield gen.maybe_future(self.contents_manager.save(model, path)) validate_model(model, expect_content=False) self._finish_model(model) @web.authenticated @json_errors @gen.coroutine def post(self, path=''): """Create a new file in the specified path. POST creates new files. The server always decides on the name. POST /api/contents/path New untitled, empty file or directory. 
POST /api/contents/path with body {"copy_from" : "/path/to/OtherNotebook.ipynb"} New copy of OtherNotebook in path """ cm = self.contents_manager if cm.file_exists(path): raise web.HTTPError(400, "Cannot POST to files, use PUT instead.") if not cm.dir_exists(path): raise web.HTTPError(404, "No such directory: %s" % path) model = self.get_json_body() if model is not None: copy_from = model.get('copy_from') ext = model.get('ext', '') type = model.get('type', '') if copy_from: yield self._copy(copy_from, path) else: yield self._new_untitled(path, type=type, ext=ext) else: yield self._new_untitled(path) @web.authenticated @json_errors @gen.coroutine def put(self, path=''): """Saves the file in the location specified by name and path. PUT is very similar to POST, but the requester specifies the name, whereas with POST, the server picks the name. PUT /api/contents/path/Name.ipynb Save notebook at ``path/Name.ipynb``. Notebook structure is specified in `content` key of JSON request body. If content is not specified, create a new empty notebook. 
""" model = self.get_json_body() if model: if model.get('copy_from'): raise web.HTTPError(400, "Cannot copy with PUT, only POST") exists = yield gen.maybe_future(self.contents_manager.file_exists(path)) if exists: yield gen.maybe_future(self._save(model, path)) else: yield gen.maybe_future(self._upload(model, path)) else: yield gen.maybe_future(self._new_untitled(path)) @web.authenticated @json_errors @gen.coroutine def delete(self, path=''): """delete a file in the given path""" cm = self.contents_manager self.log.warn('delete %s', path) yield gen.maybe_future(cm.delete(path)) self.set_status(204) self.finish() class CheckpointsHandler(IPythonHandler): SUPPORTED_METHODS = ('GET', 'POST') @web.authenticated @json_errors @gen.coroutine def get(self, path=''): """get lists checkpoints for a file""" cm = self.contents_manager checkpoints = yield gen.maybe_future(cm.list_checkpoints(path)) data = json.dumps(checkpoints, default=date_default) self.finish(data) @web.authenticated @json_errors @gen.coroutine def post(self, path=''): """post creates a new checkpoint""" cm = self.contents_manager checkpoint = yield gen.maybe_future(cm.create_checkpoint(path)) data = json.dumps(checkpoint, default=date_default) location = url_path_join(self.base_url, 'api/contents', path, 'checkpoints', checkpoint['id']) self.set_header('Location', url_escape(location)) self.set_status(201) self.finish(data) class ModifyCheckpointsHandler(IPythonHandler): SUPPORTED_METHODS = ('POST', 'DELETE') @web.authenticated @json_errors @gen.coroutine def post(self, path, checkpoint_id): """post restores a file from a checkpoint""" cm = self.contents_manager yield gen.maybe_future(cm.restore_checkpoint(checkpoint_id, path)) self.set_status(204) self.finish() @web.authenticated @json_errors @gen.coroutine def delete(self, path, checkpoint_id): """delete clears a checkpoint for a given file""" cm = self.contents_manager yield gen.maybe_future(cm.delete_checkpoint(checkpoint_id, path)) self.set_status(204) 
self.finish() class NotebooksRedirectHandler(IPythonHandler): """Redirect /api/notebooks to /api/contents""" SUPPORTED_METHODS = ('GET', 'PUT', 'PATCH', 'POST', 'DELETE') def get(self, path): self.log.warn("/api/notebooks is deprecated, use /api/contents") self.redirect(url_path_join( self.base_url, 'api/contents', path )) put = patch = post = delete = get #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- _checkpoint_id_regex = r"(?P<checkpoint_id>[\w-]+)" default_handlers = [ (r"/api/contents%s/checkpoints" % path_regex, CheckpointsHandler), (r"/api/contents%s/checkpoints/%s" % (path_regex, _checkpoint_id_regex), ModifyCheckpointsHandler), (r"/api/contents%s" % path_regex, ContentsHandler), (r"/api/notebooks/?(.*)", NotebooksRedirectHandler), ]
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1644_3
crossvul-python_data_bad_5789_1
from __future__ import unicode_literals

import base64
import calendar
import datetime
import re
import sys

try:
    from urllib import parse as urllib_parse
except ImportError:     # Python 2
    import urllib as urllib_parse
    import urlparse
    urllib_parse.urlparse = urlparse.urlparse

from binascii import Error as BinasciiError
from email.utils import formatdate

from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six

# Matches one (possibly weak, W/-prefixed) double-quoted ETag and captures
# its unquoted, still-escaped contents.
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')

MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))


def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned
    string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_text(urllib_parse.quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)


def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_text(urllib_parse.quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)


def urlunquote(quoted_url):
    """
    A wrapper for Python's urllib.unquote() function that can operate on
    the result of django.utils.http.urlquote().
    """
    return force_text(urllib_parse.unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)


def urlunquote_plus(quoted_url):
    """
    A wrapper for Python's urllib.unquote_plus() function that can operate on
    the result of django.utils.http.urlquote_plus().
    """
    return force_text(urllib_parse.unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)


def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings
    and then encoded as per normal.
    """
    if isinstance(query, MultiValueDict):
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    return urllib_parse.urlencode(
        [(force_str(k),
          [force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
         for k, v in query],
        doseq)


def cookie_date(epoch_seconds=None):
    """
    Formats the time to ensure compatibility with Netscape's cookie standard.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    rfcdate = formatdate(epoch_seconds)
    return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])


def http_date(epoch_seconds=None):
    """
    Formats the time to match the RFC1123 date format as specified by HTTP
    RFC2616 section 3.3.1.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    return formatdate(epoch_seconds, usegmt=True)


def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC2616 section 3.3.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Returns an integer expressed in seconds since the epoch, in UTC.
    """
    # emails.Util.parsedate does the job for RFC1123 dates; unfortunately
    # RFC2616 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # Two-digit years (RFC850): 00-69 -> 2000s, 70-99 -> 1900s.
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        min = int(m.group('min'))
        sec = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, min, sec)
        return calendar.timegm(result.utctimetuple())
    except Exception:
        six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])


def parse_http_date_safe(date):
    """
    Same as parse_http_date, but returns None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        pass


# Base 36 functions: useful for generating compact URLs

def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is long than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value will fit into an int to avoid
    # returning a long (#15067). The long type was removed in Python 3.
    if not six.PY3 and value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value


def int_to_base36(i):
    """
    Converts an integer to a base36 string
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    factor = 0
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if not six.PY3:
        if not isinstance(i, six.integer_types):
            raise TypeError("Non-integer base36 conversion input.")
        if i > sys.maxint:
            raise ValueError("Base36 conversion input too large.")
    # Find starting factor
    while True:
        factor += 1
        if i < 36 ** factor:
            factor -= 1
            break
    base36 = []
    # Construct base36 representation
    while factor >= 0:
        j = 36 ** factor
        base36.append(digits[i // j])
        i = i % j
        factor -= 1
    return ''.join(base36)


def urlsafe_base64_encode(s):
    """
    Encodes a bytestring in base64 for use in URLs, stripping any trailing
    equal signs.
    """
    return base64.urlsafe_b64encode(s).rstrip(b'\n=')


def urlsafe_base64_decode(s):
    """
    Decodes a base64 encoded string, adding back any trailing equal signs that
    might have been stripped.
    """
    s = s.encode('utf-8')  # base64encode should only return ASCII.
    try:
        return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
    except (LookupError, BinasciiError) as e:
        raise ValueError(e)


def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
    return etags


def quote_etag(etag):
    """
    Wraps a string in double quotes escaping contents as necessary.
    """
    return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')


def same_origin(url1, url2):
    """
    Checks if two URLs are 'same-origin'
    """
    p1, p2 = urllib_parse.urlparse(url1), urllib_parse.urlparse(url2)
    try:
        return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
    except ValueError:
        return False


def is_safe_url(url, host=None):
    """
    Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
    a different host and uses a safe scheme).

    Always returns ``False`` on an empty url.
    """
    if not url:
        return False
    url_info = urllib_parse.urlparse(url)
    # SECURITY: checking only the netloc is insufficient -- URLs such as
    # 'javascript:alert(1)' have an empty netloc and would be reported as
    # safe, enabling XSS via redirects built from user input. Also require
    # an http(s) (or relative, i.e. empty) scheme, matching the upstream
    # Django security fix.
    return ((not url_info.netloc or url_info.netloc == host) and
            (not url_info.scheme or url_info.scheme in ['http', 'https']))
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5789_1
crossvul-python_data_good_5510_1
from __future__ import absolute_import, division, unicode_literals
from six import text_type

import re

from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape

spaceCharacters = "".join(spaceCharacters)

# Characters that force attribute-value quoting. "spec" mode quotes what the
# HTML spec requires; "legacy" additionally quotes characters that confused
# older browsers (controls, various Unicode spaces, '/', '`', etc.).
quoteAttributeSpecChars = spaceCharacters + "\"'=<>`"
quoteAttributeSpec = re.compile("[" + quoteAttributeSpecChars + "]")
quoteAttributeLegacy = re.compile("[" + quoteAttributeSpecChars +
                                 "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
                                 "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
                                 "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                                 "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
                                 "\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
                                 "\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
                                 "\u3000]")

try:
    from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
    # No codec error-handler support: fall back to strict encoding.
    unicode_encode_errors = "strict"
else:
    unicode_encode_errors = "htmlentityreplace"

    # Map a codepoint to its (preferred, lowercase) named entity, skipping
    # multi-character entities which cannot be emitted for a single codepoint.
    encode_entity_map = {}
    is_ucs4 = len("\U0010FFFF") == 1
    for k, v in list(entities.items()):
        # skip multi-character entities
        if ((is_ucs4 and len(v) > 1) or
                (not is_ucs4 and len(v) > 2)):
            continue
        if v != "&":
            if len(v) == 2:
                v = utils.surrogatePairToCodepoint(v)
            else:
                v = ord(v)
            if v not in encode_entity_map or k.islower():
                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
                encode_entity_map[v] = k

    def htmlentityreplace_errors(exc):
        """Codec error handler: replace unencodable characters with named
        entities where one exists, numeric character references otherwise."""
        if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
            res = []
            codepoints = []
            skip = False
            for i, c in enumerate(exc.object[exc.start:exc.end]):
                if skip:
                    skip = False
                    continue
                index = i + exc.start
                if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                    # Combine a surrogate pair into one codepoint and skip
                    # its second half on the next iteration.
                    codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                    skip = True
                else:
                    codepoint = ord(c)
                codepoints.append(codepoint)
            for cp in codepoints:
                e = encode_entity_map.get(cp)
                if e:
                    res.append("&")
                    res.append(e)
                    if not e.endswith(";"):
                        res.append(";")
                else:
                    res.append("&#x%s;" % (hex(cp)[2:]))
            return ("".join(res), exc.end)
        else:
            return xmlcharrefreplace_errors(exc)

    register_error(unicode_encode_errors, htmlentityreplace_errors)

    del register_error


class HTMLSerializer(object):

    # attribute quoting options
    quote_attr_values = "legacy"  # be secure by default
    quote_char = '"'
    use_best_quote_char = True

    # tag syntax options
    omit_optional_tags = True
    minimize_boolean_attributes = True
    use_trailing_solidus = False
    space_before_trailing_solidus = True

    # escaping options
    escape_lt_in_attrs = False
    escape_rcdata = False
    resolve_entities = True

    # miscellaneous options
    alphabetical_attributes = False
    inject_meta_charset = True
    strip_whitespace = False
    sanitize = False

    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
               "omit_optional_tags", "minimize_boolean_attributes",
               "use_trailing_solidus", "space_before_trailing_solidus",
               "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
               "alphabetical_attributes", "inject_meta_charset",
               "strip_whitespace", "sanitize")

    def __init__(self, **kwargs):
        """Initialize HTMLSerializer.

        Keyword options (default given first unless specified) include:

        inject_meta_charset=True|False
          Whether it insert a meta element to define the character set of the
          document.
        quote_attr_values="legacy"|"spec"|"always"
          Whether to quote attribute values that don't require quoting
          per legacy browser behaviour, when required by the standard,
          or always.
        quote_char=u'"'|u"'"
          Use given quote character for attribute quoting. Default is to
          use double quote unless attribute value contains a double quote,
          in which case single quotes are used instead.
        escape_lt_in_attrs=False|True
          Whether to escape < in attribute values.
        escape_rcdata=False|True
          Whether to escape characters that need to be escaped within normal
          elements within rcdata elements such as style.
        resolve_entities=True|False
          Whether to resolve named character entities that appear in the
          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
          are unaffected by this setting.
        strip_whitespace=False|True
          Whether to remove semantically meaningless whitespace. (This
          compresses all whitespace to a single space except within pre.)
        minimize_boolean_attributes=True|False
          Shortens boolean attributes to give just the attribute value,
          for example <input disabled="disabled"> becomes <input disabled>.
        use_trailing_solidus=False|True
          Includes a close-tag slash at the end of the start tag of void
          elements (empty elements whose end tag is forbidden). E.g. <hr/>.
        space_before_trailing_solidus=True|False
          Places a space immediately before the closing slash in a tag
          using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
        sanitize=False|True
          Strip all unsafe or unknown constructs from output.
          See `html5lib user documentation`_
        omit_optional_tags=True|False
          Omit start/end tags that are optional.
        alphabetical_attributes=False|True
          Reorder attributes to be in alphabetical order.

        .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
        """
        if 'quote_char' in kwargs:
            self.use_best_quote_char = False
        for attr in self.options:
            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
        self.errors = []
        self.strict = False

    def encode(self, string):
        # Encode using the entity-replacing error handler (content text).
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, unicode_encode_errors)
        else:
            return string

    def encodeStrict(self, string):
        # Encode markup (tag names etc.) -- must never need replacement.
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "strict")
        else:
            return string

    def serialize(self, treewalker, encoding=None):
        """Yield (byte)string fragments serializing the token stream from
        *treewalker*, applying the configured filters first."""
        self.encoding = encoding
        in_cdata = False
        self.errors = []

        if encoding and self.inject_meta_charset:
            from ..filters.inject_meta_charset import Filter
            treewalker = Filter(treewalker, encoding)
        # WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of the latter filter
        if self.strip_whitespace:
            from ..filters.whitespace import Filter
            treewalker = Filter(treewalker)
        if self.sanitize:
            from ..filters.sanitizer import Filter
            treewalker = Filter(treewalker)
        if self.omit_optional_tags:
            from ..filters.optionaltags import Filter
            treewalker = Filter(treewalker)
        # Alphabetical attributes must be last, as other filters
        # could add attributes and alter the order
        if self.alphabetical_attributes:
            from ..filters.alphabeticalattributes import Filter
            treewalker = Filter(treewalker)

        for token in treewalker:
            type = token["type"]
            if type == "Doctype":
                doctype = "<!DOCTYPE %s" % token["name"]

                if token["publicId"]:
                    doctype += ' PUBLIC "%s"' % token["publicId"]
                elif token["systemId"]:
                    doctype += " SYSTEM"
                if token["systemId"]:
                    if token["systemId"].find('"') >= 0:
                        if token["systemId"].find("'") >= 0:
                            self.serializeError("System identifer contains both single and double quote characters")
                        quote_char = "'"
                    else:
                        quote_char = '"'
                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)

                doctype += ">"
                yield self.encodeStrict(doctype)

            elif type in ("Characters", "SpaceCharacters"):
                if type == "SpaceCharacters" or in_cdata:
                    if in_cdata and token["data"].find("</") >= 0:
                        self.serializeError("Unexpected </ in CDATA")
                    yield self.encode(token["data"])
                else:
                    yield self.encode(escape(token["data"]))

            elif type in ("StartTag", "EmptyTag"):
                name = token["name"]
                yield self.encodeStrict("<%s" % name)
                if name in rcdataElements and not self.escape_rcdata:
                    in_cdata = True
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                for (attr_namespace, attr_name), attr_value in token["data"].items():
                    # TODO: Add namespace support here
                    k = attr_name
                    v = attr_value
                    yield self.encodeStrict(' ')

                    yield self.encodeStrict(k)
                    if not self.minimize_boolean_attributes or \
                        (k not in booleanAttributes.get(name, tuple()) and
                         k not in booleanAttributes.get("", tuple())):
                        yield self.encodeStrict("=")
                        if self.quote_attr_values == "always" or len(v) == 0:
                            quote_attr = True
                        elif self.quote_attr_values == "spec":
                            quote_attr = quoteAttributeSpec.search(v) is not None
                        elif self.quote_attr_values == "legacy":
                            quote_attr = quoteAttributeLegacy.search(v) is not None
                        else:
                            raise ValueError("quote_attr_values must be one of: "
                                             "'always', 'spec', or 'legacy'")
                        v = v.replace("&", "&amp;")
                        if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
                        if quote_attr:
                            quote_char = self.quote_char
                            if self.use_best_quote_char:
                                if "'" in v and '"' not in v:
                                    quote_char = '"'
                                elif '"' in v and "'" not in v:
                                    quote_char = "'"
                            if quote_char == "'":
                                v = v.replace("'", "&#39;")
                            else:
                                v = v.replace('"', "&quot;")
                            yield self.encodeStrict(quote_char)
                            yield self.encode(v)
                            yield self.encodeStrict(quote_char)
                        else:
                            yield self.encode(v)
                if name in voidElements and self.use_trailing_solidus:
                    if self.space_before_trailing_solidus:
                        yield self.encodeStrict(" /")
                    else:
                        yield self.encodeStrict("/")
                yield self.encode(">")

            elif type == "EndTag":
                name = token["name"]
                if name in rcdataElements:
                    in_cdata = False
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                yield self.encodeStrict("</%s>" % name)

            elif type == "Comment":
                data = token["data"]
                if data.find("--") >= 0:
                    self.serializeError("Comment contains --")
                yield self.encodeStrict("<!--%s-->" % token["data"])

            elif type == "Entity":
                name = token["name"]
                key = name + ";"
                if key not in entities:
                    self.serializeError("Entity %s not recognized" % name)
                if self.resolve_entities and key not in xmlEntities:
                    data = entities[key]
                else:
                    data = "&%s;" % name
                yield self.encodeStrict(data)

            else:
                self.serializeError(token["data"])

    def render(self, treewalker, encoding=None):
        """Serialize the whole stream to a single (byte)string."""
        if encoding:
            return b"".join(list(self.serialize(treewalker, encoding)))
        else:
            return "".join(list(self.serialize(treewalker)))

    def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
        # XXX The idea is to make data mandatory.
        self.errors.append(data)
        if self.strict:
            raise SerializeError


# BUG FIX: this was previously written as ``def SerializeError(Exception):``,
# which defines a *function*, so ``raise SerializeError`` in strict mode would
# fail with "exceptions must derive from BaseException" instead of raising
# the intended error. It must be a class.
class SerializeError(Exception):
    """Error in serialized tree"""
    pass
./CrossVul/dataset_final_sorted/CWE-79/py/good_5510_1
crossvul-python_data_good_3147_0
# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - feed some FCKeditor dialogues

    @copyright: 2005-2006 Bastian Blank, Florian Festi, Thomas Waldmann
    @license: GNU GPL, see COPYING for details.
"""
from MoinMoin import config, wikiutil
from MoinMoin.action.AttachFile import _get_files
from MoinMoin.Page import Page

import re

##############################################################################
### Macro dialog
##############################################################################

def macro_dialog(request):
    """Emit the FCKeditor "Insert Macro" dialog page.

    Lists all known macros with their prototypes and shows the per-macro
    help text extracted from the HelpOnMacros system page.
    """
    help = get_macro_help(request)
    request.write(
        '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
 <head>
  <title>Insert Macro</title>
  <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
  <meta content="noindex,nofollow" name="robots">
  <script src="%s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script>
  <script language="javascript">

var oEditor = window.parent.InnerDialogLoaded() ;
var FCKLang = oEditor.FCKLang ;
var FCKMacros = oEditor.FCKMacros ;

window.onload = function ()
{
 // First of all, translate the dialog box texts
 oEditor.FCKLanguageManager.TranslatePage( document ) ;

 OnChange( "BR" );

 // Show the "Ok" button.
 window.parent.SetOkButton( true ) ;
}

function Ok()
{
 if ( document.getElementById('txtName').value.length == 0 )
 {
  alert( FCKLang.MacroErrNoName ) ;
  return false ;
 }

 FCKMacros.Add( txtName.value ) ;
 return true ;
}

function OnChange( sMacro )
{
 // sMacro = GetE("txtName").value;
 oHelp = GetE("help");
 for (var i=0; i<oHelp.childNodes.length; i++)
 {
  var oDiv = oHelp.childNodes[i];
  if (oDiv.nodeType==1)
  {
   // oDiv.style.display = (GetAttribute(oDiv, "id", "")==sMacro) ? '' : 'none';
   if (GetAttribute(oDiv, "id", "") == sMacro)
   {
     oDiv.style.display = '' ;
     // alert("enabled div id " + sMacro) ;
   }
   else
   {
     oDiv.style.display = 'none' ;
   }
  }
 }
}

  </script>
 </head>
 <body scroll="no" style="OVERFLOW: hidden">
  <table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0">
   <tr>
    <td>
     <table cellSpacing="0" cellPadding="0" align="center" border="0">
      <tr>
       <td valign="top">
        <span fckLang="MacroDlgName">Macro Name</span><br>
        <select id="txtName" size="10" onchange="OnChange(this.value);">
''' % request.cfg.url_prefix_static)

    # Build the <option> list; "BR" is preselected as a harmless default.
    macros = []
    for macro in macro_list(request):
        if macro == "BR":
            selected = ' selected="selected"'
        else:
            selected = ''
        if macro in help:
            macros.append('<option value="%s"%s>%s</option>' %
                          (help[macro].group('prototype'), selected, macro))
        else:
            macros.append('<option value="%s"%s>%s</option>' %
                          (macro, selected, macro))

    request.write('\n'.join(macros))
    request.write('''
        </select>
       </td>
       <td id="help">''')

    # One hidden <div> of help text per macro; OnChange() reveals the match.
    helptexts = []
    for macro in macro_list(request):
        if macro in help:
            match = help[macro]
            prototype = match.group('prototype')
            helptext = match.group('help')
        else:
            prototype = macro
            helptext = ""
        helptexts.append(
            '''<div id="%s" style="DISPLAY: none">
               <b>&lt;&lt;%s&gt;&gt;</b>
               <br/>
               <textarea style="color:#000000" cols="37" rows="10" disabled="disabled">%s</textarea>
               </div>'''
            % (prototype, prototype, helptext))

    request.write(''.join(helptexts))
    request.write('''
       </td>
      </tr>
     </table>
    </td>
   </tr>
  </table>
 </body>
</html>
''')


def macro_list(request):
    """Return the sorted list of all macro names known to this wiki."""
    from MoinMoin import macro
    macros = macro.getNames(request.cfg)
    macros.sort()
    return macros


def get_macro_help(request):
    """ Read help texts from SystemPage('HelpOnMacros')"""
    helppage = wikiutil.getLocalizedPage(request, "HelpOnMacros")
    content = helppage.get_raw_body()
    macro_re = re.compile(
        r"\|\|(<.*?>)?\{\{\{" +
        r"<<(?P<prototype>(?P<macro>\w*).*)>>" +
        r"\}\}\}\s*\|\|" +
        r"[^|]*\|\|[^|]*\|\|<[^>]*>" +
        r"\s*(?P<help>.*?)\s*\|\|\s*(?P<example>.*?)\s*(<<[^>]*>>)*\s*\|\|$",
        re.U|re.M)
    help = {}
    for match in macro_re.finditer(content):
        help[match.group('macro')] = match
    return help

##############################################################################
### Link dialog
##############################################################################

def page_list(request):
    """Emit a page-name picker populated from a title search."""
    from MoinMoin import search
    name = request.values.get("pagename", "")
    if name:
        searchresult = search.searchPages(request, 't:"%s"' % name)
        pages = [p.page_name for p in searchresult.hits]
    else:
        pages = [name]
    request.write(
        '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
 <head>
  <title>Insert Page Link</title>
  <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
  <meta content="noindex,nofollow" name="robots">
 </head>
 <body scroll="no" style="OVERFLOW: hidden">
  <table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0">
   <tr>
    <td>
     <table cellSpacing="0" cellPadding="0" align="center" border="0">
      <tr>
       <td>
       <span fckLang="PageDlgName">Page name</span><br>
       <select id="txtName" size="1">
       %s
       </select>
      </td>
     </tr>
    </table>
   </td>
  </tr>
 </table>
 </body>
</html>
''' % "".join(["<option>%s</option>\n" % wikiutil.escape(p) for p in pages]))


def link_dialog(request):
    """Emit the FCKeditor link dialog (wiki page / interwiki / URL)."""
    # list of wiki pages
    name = request.values.get("pagename", "")
    # SECURITY FIX: quote=True -- these values are interpolated into
    # double-quoted HTML attributes below, so embedded '"' characters must
    # be escaped too, or a crafted page name breaks out of the attribute
    # (attribute-injection XSS). attachment_dialog() already does this.
    name_escaped = wikiutil.escape(name, quote=True)
    if name:
        from MoinMoin import search
        # XXX error handling!
        searchresult = search.searchPages(request, 't:"%s"' % name)
        pages = [p.page_name for p in searchresult.hits]
        pages.sort()
        pages[0:0] = [name]
        page_list = '''
         <tr>
          <td colspan=2>
           <select id="sctPagename" size="1" onchange="OnChangePagename(this.value);">
           %s
           </select>
          <td>
         </tr>
''' % "\n".join(['<option value="%s">%s</option>' %
                 (wikiutil.escape(page, quote=True), wikiutil.escape(page))
                 for page in pages])
    else:
        page_list = ""

    # list of interwiki names
    interwiki_list = wikiutil.load_wikimap(request)
    interwiki = interwiki_list.keys()
    interwiki.sort()
    iwpreferred = request.cfg.interwiki_preferred[:]
    if not iwpreferred or iwpreferred and iwpreferred[-1] is not None:
        # no preferred list (or an open-ended one): preferred first, then rest
        resultlist = iwpreferred
        for iw in interwiki:
            if not iw in iwpreferred:
                resultlist.append(iw)
    else:
        # a trailing None means "only show the preferred wikis"
        resultlist = iwpreferred[:-1]
    interwiki = "\n".join(
        ['<option value="%s">%s</option>' %
         (wikiutil.escape(key, quote=True), wikiutil.escape(key))
         for key in resultlist])

    # wiki url
    url_prefix_static = request.cfg.url_prefix_static
    scriptname = request.script_root + '/'
    action = scriptname
    # quote=True: interpolated into value="..." below (see above).
    basepage = wikiutil.escape(request.page.page_name, quote=True)
    request.write(u'''
<!--
 * FCKeditor - The text editor for internet
 * Copyright (C) 2003-2004 Frederico Caldeira Knabben
 *
 * Licensed under the terms of the GNU Lesser General Public License:
 *  http://www.opensource.org/licenses/lgpl-license.php
 *
 * For further information visit:
 *  http://www.fckeditor.net/
 *
 * File Name: fck_link.html
 *  Link dialog window.
 *
 * Version: 2.0 FC (Preview)
 * Modified: 2005-02-18 23:55:22
 *
 * File Authors:
 *  Frederico Caldeira Knabben (fredck@fckeditor.net)
-->
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<meta name="robots" content="index,nofollow">
<html>
 <head>
  <title>Link Properties</title>
  <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
  <meta name="robots" content="noindex,nofollow" />
  <script src="%(url_prefix_static)s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script>
  <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinlink/fck_link.js" type="text/javascript"></script>
  <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinurllib.js" type="text/javascript"></script>
 </head>
 <body scroll="no" style="OVERFLOW: hidden">
  <div id="divInfo" style="DISPLAY: none">
   <span fckLang="DlgLnkType">Link Type</span><br />
   <select id="cmbLinkType" onchange="SetLinkType(this.value);">
    <option value="wiki" selected="selected">WikiPage</option>
    <option value="interwiki">Interwiki</option>
    <option value="url" fckLang="DlgLnkTypeURL">URL</option>
   </select>
   <br />
   <br />
   <div id="divLinkTypeWiki">
    <table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0">
     <tr>
      <td>
       <form action=%(action)s method="GET">
       <input type="hidden" name="action" value="fckdialog">
       <input type="hidden" name="dialog" value="link">
       <input type="hidden" id="basepage" name="basepage" value="%(basepage)s">
       <table cellSpacing="0" cellPadding="0" align="center" border="0">
        <tr>
         <td>
          <span fckLang="PageDlgName">Page Name</span><br>
          <input id="txtPagename" name="pagename" size="30" value="%(name_escaped)s">
         </td>
         <td valign="bottom">
           <input id=btnSearchpage type="submit" value="Search">
         </td>
        </tr>
        %(page_list)s
       </table>
       </form>
      </td>
     </tr>
    </table>
   </div>
   <div id="divLinkTypeInterwiki">
    <table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0">
     <tr>
      <td>
       <table cellSpacing="0" cellPadding="0" align="center" border="0">
        <tr>
         <td>
          <span fckLang="WikiDlgName">Wiki:PageName</span><br>
          <select id="sctInterwiki" size="1">
          %(interwiki)s
          </select>:
          <input id="txtInterwikipagename"></input>
         </td>
        </tr>
       </table>
      </td>
     </tr>
    </table>
   </div>
   <div id="divLinkTypeUrl">
    <table cellspacing="0" cellpadding="0" width="100%%" border="0">
     <tr>
      <td nowrap="nowrap">
       <span fckLang="DlgLnkProto">Protocol</span><br />
       <select id="cmbLinkProtocol">
        <option value="http://" selected="selected">http://</option>
        <option value="https://">https://</option>
        <option value="ftp://">ftp://</option>
        <option value="file://">file://</option>
        <option value="news://">news://</option>
        <option value="mailto:">mailto:</option>
        <option value="" fckLang="DlgLnkProtoOther">&lt;other&gt;</option>
       </select>
      </td>
      <td nowrap="nowrap">&nbsp;</td>
      <td nowrap="nowrap" width="100%%">
       <span fckLang="DlgLnkURL">URL</span><br />
       <input id="txtUrl" style="WIDTH: 100%%" type="text" onkeyup="OnUrlChange();" onchange="OnUrlChange();" />
      </td>
     </tr>
    </table>
    <br />
   </div>
  </div>
 </body>
</html>
''' % locals())


def attachment_dialog(request):
    """ Attachment dialog for GUI editor.

    Features: This dialog can...
    - list attachments in a drop down list
    - list attachments also for a different page than the current one
    - create new attachment
    """
    _ = request.getText
    url_prefix_static = request.cfg.url_prefix_static

    # wiki url
    action = request.script_root + "/"

    # The following code lines implement the feature "list attachments for a different page".
    # Meaning of the variables:
    # - requestedPagename : Name of the page where attachments shall be listed from.
    # - attachmentsPagename : Name of the page where the attachments where retrieved from.
    # - destinationPagename : Name of the page where attachment will be placed on.
    requestedPagename = wikiutil.escape(request.values.get("requestedPagename", ""), quote=True)
    destinationPagename = wikiutil.escape(request.values.get("destinationPagename", request.page.page_name), quote=True)

    # NOTE(review): attachmentsPagename is HTML-escaped *before* it is used as
    # the _get_files() lookup key -- page names containing &/<"> therefore
    # resolve incorrectly. Pre-existing upstream behavior, left unchanged here.
    attachmentsPagename = requestedPagename or wikiutil.escape(request.page.page_name)
    attachments = _get_files(request, attachmentsPagename)
    attachments.sort()
    attachmentList = '''
        <select id="sctAttachments" size="10" style="width:100%%;visibility:hidden;" onchange="OnAttachmentListChange();">
        %s
        </select>
''' % "\n".join(['<option value="%s">%s</option>' %
                 (wikiutil.escape(attachment, quote=True), wikiutil.escape(attachment, quote=True))
                 for attachment in attachments])

    # Translation of dialog texts.
    langAttachmentLocation = _("Attachment location")
    langPagename = _("Page name")
    langAttachmentname = _("Attachment name")
    langListAttachmentsButton = _("Refresh attachment list")
    langAttachmentList = _("List of attachments")

    if len(attachmentsPagename) > 50:
        shortenedPagename = "%s ... %s" % (attachmentsPagename[0:25], attachmentsPagename[-25:])
    else:
        shortenedPagename = attachmentsPagename
    langAvailableAttachments = "%s: %s" % (_("Available attachments for page"), shortenedPagename)

    request.write('''
<!--
 * FCKeditor - The text editor for internet
 * Copyright (C) 2003-2004 Frederico Caldeira Knabben
 *
 * Licensed under the terms of the GNU Lesser General Public License:
 *  http://www.opensource.org/licenses/lgpl-license.php
 *
 * For further information visit:
 *  http://www.fckeditor.net/
 *
 * File Name: fck_attachment.html
 *  Attachment dialog window.
 *
 * Version: 2.0 FC (Preview)
 * Modified: 2005-02-18 23:55:22
 *
 * File Authors:
 *  Frederico Caldeira Knabben (fredck@fckeditor.net)
-->
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<meta name="robots" content="index,nofollow">
<html>
 <head>
  <title>Attachment Properties</title>
  <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
  <meta name="robots" content="noindex,nofollow" />
  <script src="%(url_prefix_static)s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script>
  <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinattachment/fck_attachment.js" type="text/javascript"></script>
  <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinurllib.js" type="text/javascript"></script>
 </head>
 <body scroll="no" style="OVERFLOW: hidden">
  <form id="DlgAttachmentForm" name="DlgAttachmentForm" action=%(action)s method="GET">
  <input type="hidden" name="action" value="fckdialog">
  <input type="hidden" name="dialog" value="attachment">
  <input type="hidden" id="requestedPagename" name="requestedPagename" value="%(requestedPagename)s">
  <input type="hidden" id="attachmentsPagename" name="attachmentsPagename" value="%(attachmentsPagename)s">
  <input type="hidden" id="destinationPagename" name="destinationPagename" value="%(destinationPagename)s">
  <div id="divInfo" style="valign=top;">
   <div id="divLinkTypeAttachment">
    <fieldset>
    <legend>%(langAttachmentLocation)s</legend>
    <table cellSpacing="0" cellPadding="0" width="100%%" border="0">
     <tr>
      <td valign="bottom" style="width:90%%" style="padding-bottom:10px">
       <span>%(langPagename)s</span><br>
      </td>
     </tr>
     <tr>
      <td valign="bottom" style="width:100%%" style="padding-bottom:10px;padding-right:10px;">
       <input id="txtPagename" type="text" onkeyup="OnPagenameChange();" onchange="OnPagenameChange();" style="width:98%%">
      </td>
     </tr>
     <tr>
      <td valign="bottom" style="width:90%%" style="padding-bottom:10px;">
       <span>%(langAttachmentname)s</span><br>
      </td>
     </tr>
     <tr valign="bottom">
      <td valign="bottom" style="width:100%%" style="padding-bottom:10px;padding-right:10px;">
       <input id="txtAttachmentname" type="text" onkeyup="OnAttachmentnameChange();" onchange="OnPagenameChange();" style="width:98%%"><br>
      </td>
     </tr>
    </table>
    </fieldset>
    <fieldset>
    <legend>%(langAvailableAttachments)s</legend>
    <table cellSpacing="0" cellPadding="0" width="100%%" border="0">
     <tr>
      <td valign="bottom" style="width:100%%" style="padding-bottom:10px">
       <input id="btnListAttachments" type="submit" value="%(langListAttachmentsButton)s">
      </td>
     </tr>
     <tr>
      <td valign="top" style="padding-top:10px">
       <label for="sctAttachments">%(langAttachmentList)s</label><br>
       %(attachmentList)s
      </td>
     </tr>
    </table>
    </fieldset>
   </div>
  </div>
  </form>
 </body>
</html>
''' % locals())


##############################################################################
### Image dialog
##############################################################################

def image_dialog(request):
    """Emit the FCKeditor image dialog (attachment: or external URL)."""
    url_prefix_static = request.cfg.url_prefix_static
    request.write('''
<!--
 * FCKeditor - The text editor for internet
 * Copyright (C) 2003-2004 Frederico Caldeira Knabben
 *
 * Licensed under the terms of the GNU Lesser General Public License:
 *  http://www.opensource.org/licenses/lgpl-license.php
 *
 * For further information visit:
 *  http://www.fckeditor.net/
 *
 * File Authors:
 *  Frederico Caldeira Knabben (fredck@fckeditor.net)
 *  Florian Festi
-->
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
 <head>
  <title>Link Properties</title>
  <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
  <meta name="robots" content="noindex,nofollow" />
  <script src="%(url_prefix_static)s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script>
  <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinimage/fck_image.js" type="text/javascript"></script>
  <script src="%(url_prefix_static)s/applets/moinFCKplugins/moinurllib.js" type="text/javascript"></script>
 </head>
 <body scroll="no" style="OVERFLOW: hidden">
    <table cellspacing="0" cellpadding="0" width="100%%" border="0">
     <tr>
      <td nowrap="nowrap">
       <span fckLang="DlgLnkProto">Protocol</span><br />
       <select id="cmbLinkProtocol" onchange="OnProtocolChange();">
        <option value="attachment:" selected="selected">attachment:</option>
        <option value="http://">http://</option>
        <option value="https://">https://</option>
        <!-- crashes often: <option value="drawing:">drawing:</option> -->
        <option value="" fckLang="DlgLnkProtoOther">&lt;other&gt;</option>
       </select>
      </td>
      <td nowrap="nowrap">&nbsp;</td>
      <td nowrap="nowrap" width="100%%">
       <span fckLang="DlgLnkURL">URL or File Name (attachment:)</span><br />
       <input id="txtUrl" style="WIDTH: 100%%" type="text" onkeyup="OnUrlChange();" onchange="OnUrlChange();" />
      </td>
     </tr>
    </table>
    <tr>
     <td colspan=2>
      <div id="divChkLink">
       <input id="chkLink" type="checkbox"> Link to
      </div>
     </td>
 </body>
</html>
''' % locals())


#############################################################################
### Main
#############################################################################

def execute(pagename, request):
    """Dispatch the requested FCKeditor dialog by its ``dialog`` parameter."""
    dialog = request.values.get("dialog", "")

    if dialog == "macro":
        macro_dialog(request)
    elif dialog == "macrolist":
        macro_list(request)
    elif dialog == "pagelist":
        page_list(request)
    elif dialog == "link":
        link_dialog(request)
    elif dialog == "attachment":
        attachment_dialog(request)
    elif dialog == 'image':
        image_dialog(request)
    else:
        from MoinMoin.Page import Page
        request.theme.add_msg("Dialog unknown!", "error")
        Page(request, pagename).send_page()
./CrossVul/dataset_final_sorted/CWE-79/py/good_3147_0
crossvul-python_data_good_2186_0
# # djblets_js.py -- JavaScript-related template tags # # Copyright (c) 2007-2009 Christian Hammond # Copyright (c) 2007-2009 David Trowbridge # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from __future__ import unicode_literals import json from django import template from django.core.serializers import serialize from django.db.models.query import QuerySet from django.utils import six from django.utils.encoding import force_text from django.utils.safestring import mark_safe from djblets.util.serializers import DjbletsJSONEncoder register = template.Library() _safe_js_escapes = { ord('&'): '\\u0026', ord('<'): '\\u003C', ord('>'): '\\u003E', } @register.simple_tag def form_dialog_fields(form): """ Translates a Django Form object into a JavaScript list of fields. The resulting list of fields can be used to represent the form dynamically. 
""" s = '' for field in form: s += "{ name: '%s', " % field.name if field.is_hidden: s += "hidden: true, " else: s += "label: '%s', " % field.label_tag(field.label + ":") if field.field.required: s += "required: true, " if field.field.help_text: s += "help_text: '%s', " % field.field.help_text s += "widget: '%s' }," % six.text_type(field) # Chop off the last ',' return "[ %s ]" % s[:-1] @register.filter def json_dumps(value, indent=None): if isinstance(value, QuerySet): result = serialize('json', value, indent=indent) else: result = json.dumps(value, indent=indent, cls=DjbletsJSONEncoder) return mark_safe(force_text(result).translate(_safe_js_escapes)) @register.filter def json_dumps_items(d, append=''): """Dumps a list of keys/values from a dictionary, without braces. This works very much like ``json_dumps``, but doesn't output the surrounding braces. This allows it to be used within a JavaScript object definition alongside other custom keys. If the dictionary is not empty, and ``append`` is passed, it will be appended onto the results. This is most useful when you want to append a comma after all the dictionary items, in order to provide further keys in the template. """ if not d: return '' return mark_safe(json_dumps(d)[1:-1] + append)
./CrossVul/dataset_final_sorted/CWE-79/py/good_2186_0
crossvul-python_data_good_1645_0
"""Base Tornado handlers for the notebook. Authors: * Brian Granger """ #----------------------------------------------------------------------------- # Copyright (C) 2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import functools import json import logging import os import re import sys import traceback try: # py3 from http.client import responses except ImportError: from httplib import responses from jinja2 import TemplateNotFound from tornado import web try: from tornado.log import app_log except ImportError: app_log = logging.getLogger() from IPython.config import Application from IPython.utils.path import filefind from IPython.utils.py3compat import string_types from IPython.html.utils import is_hidden #----------------------------------------------------------------------------- # Top-level handlers #----------------------------------------------------------------------------- non_alphanum = re.compile(r'[^A-Za-z0-9]') class AuthenticatedHandler(web.RequestHandler): """A RequestHandler with an authenticated user.""" def set_default_headers(self): headers = self.settings.get('headers', {}) if "X-Frame-Options" not in headers: headers["X-Frame-Options"] = "SAMEORIGIN" for header_name,value in headers.items() : try: self.set_header(header_name, value) except Exception: # tornado raise Exception (not a subclass) # if method is unsupported (websocket and Access-Control-Allow-Origin # for example, so just ignore) pass def clear_login_cookie(self): self.clear_cookie(self.cookie_name) def get_current_user(self): user_id = self.get_secure_cookie(self.cookie_name) # For now the user_id should not return 
empty, but it could eventually if user_id == '': user_id = 'anonymous' if user_id is None: # prevent extra Invalid cookie sig warnings: self.clear_login_cookie() if not self.login_available: user_id = 'anonymous' return user_id @property def cookie_name(self): default_cookie_name = non_alphanum.sub('-', 'username-{}'.format( self.request.host )) return self.settings.get('cookie_name', default_cookie_name) @property def password(self): """our password""" return self.settings.get('password', '') @property def logged_in(self): """Is a user currently logged in? """ user = self.get_current_user() return (user and not user == 'anonymous') @property def login_available(self): """May a user proceed to log in? This returns True if login capability is available, irrespective of whether the user is already logged in or not. """ return bool(self.settings.get('password', '')) class IPythonHandler(AuthenticatedHandler): """IPython-specific extensions to authenticated handling Mostly property shortcuts to IPython-specific settings. 
""" @property def config(self): return self.settings.get('config', None) @property def log(self): """use the IPython log by default, falling back on tornado's logger""" if Application.initialized(): return Application.instance().log else: return app_log #--------------------------------------------------------------- # URLs #--------------------------------------------------------------- @property def mathjax_url(self): return self.settings.get('mathjax_url', '') @property def base_url(self): return self.settings.get('base_url', '/') #--------------------------------------------------------------- # Manager objects #--------------------------------------------------------------- @property def kernel_manager(self): return self.settings['kernel_manager'] @property def notebook_manager(self): return self.settings['notebook_manager'] @property def cluster_manager(self): return self.settings['cluster_manager'] @property def session_manager(self): return self.settings['session_manager'] @property def project_dir(self): return self.notebook_manager.notebook_dir #--------------------------------------------------------------- # CORS #--------------------------------------------------------------- @property def allow_origin(self): """Normal Access-Control-Allow-Origin""" return self.settings.get('allow_origin', '') @property def allow_origin_pat(self): """Regular expression version of allow_origin""" return self.settings.get('allow_origin_pat', None) @property def allow_credentials(self): """Whether to set Access-Control-Allow-Credentials""" return self.settings.get('allow_credentials', False) def set_default_headers(self): """Add CORS headers, if defined""" super(IPythonHandler, self).set_default_headers() if self.allow_origin: self.set_header("Access-Control-Allow-Origin", self.allow_origin) elif self.allow_origin_pat: origin = self.get_origin() if origin and self.allow_origin_pat.match(origin): self.set_header("Access-Control-Allow-Origin", origin) if 
self.allow_credentials: self.set_header("Access-Control-Allow-Credentials", 'true') def get_origin(self): # Handle WebSocket Origin naming convention differences # The difference between version 8 and 13 is that in 8 the # client sends a "Sec-Websocket-Origin" header and in 13 it's # simply "Origin". if "Origin" in self.request.headers: origin = self.request.headers.get("Origin") else: origin = self.request.headers.get("Sec-Websocket-Origin", None) return origin #--------------------------------------------------------------- # template rendering #--------------------------------------------------------------- def get_template(self, name): """Return the jinja template object for a given name""" return self.settings['jinja2_env'].get_template(name) def render_template(self, name, **ns): ns.update(self.template_namespace) template = self.get_template(name) return template.render(**ns) @property def template_namespace(self): return dict( base_url=self.base_url, logged_in=self.logged_in, login_available=self.login_available, static_url=self.static_url, ) def get_json_body(self): """Return the body of the request as JSON data.""" if not self.request.body: return None # Do we need to call body.decode('utf-8') here? 
body = self.request.body.strip().decode(u'utf-8') try: model = json.loads(body) except Exception: self.log.debug("Bad JSON: %r", body) self.log.error("Couldn't parse JSON", exc_info=True) raise web.HTTPError(400, u'Invalid JSON in body of request') return model def write_error(self, status_code, **kwargs): """render custom error pages""" exc_info = kwargs.get('exc_info') message = '' status_message = responses.get(status_code, 'Unknown HTTP Error') if exc_info: exception = exc_info[1] # get the custom message, if defined try: message = exception.log_message % exception.args except Exception: pass # construct the custom reason, if defined reason = getattr(exception, 'reason', '') if reason: status_message = reason # build template namespace ns = dict( status_code=status_code, status_message=status_message, message=message, exception=exception, ) self.set_header('Content-Type', 'text/html') # render the template try: html = self.render_template('%s.html' % status_code, **ns) except TemplateNotFound: self.log.debug("No template for %d", status_code) html = self.render_template('error.html', **ns) self.write(html) class Template404(IPythonHandler): """Render our 404 template""" def prepare(self): raise web.HTTPError(404) class AuthenticatedFileHandler(IPythonHandler, web.StaticFileHandler): """static files should only be accessible when logged in""" @web.authenticated def get(self, path): if os.path.splitext(path)[1] == '.ipynb': name = os.path.basename(path) self.set_header('Content-Type', 'application/json') self.set_header('Content-Disposition','attachment; filename="%s"' % name) return web.StaticFileHandler.get(self, path) def compute_etag(self): return None def validate_absolute_path(self, root, absolute_path): """Validate and return the absolute path. Requires tornado 3.1 Adding to tornado's own handling, forbids the serving of hidden files. 
""" abs_path = super(AuthenticatedFileHandler, self).validate_absolute_path(root, absolute_path) abs_root = os.path.abspath(root) if is_hidden(abs_path, abs_root): self.log.info("Refusing to serve hidden file, via 404 Error") raise web.HTTPError(404) return abs_path def json_errors(method): """Decorate methods with this to return GitHub style JSON errors. This should be used on any JSON API on any handler method that can raise HTTPErrors. This will grab the latest HTTPError exception using sys.exc_info and then: 1. Set the HTTP status code based on the HTTPError 2. Create and return a JSON body with a message field describing the error in a human readable form. """ @functools.wraps(method) def wrapper(self, *args, **kwargs): try: result = method(self, *args, **kwargs) except web.HTTPError as e: status = e.status_code message = e.log_message self.log.warn(message) self.set_status(e.status_code) self.set_header('Content-Type', 'application/json') self.finish(json.dumps(dict(message=message))) except Exception: self.log.error("Unhandled error in API request", exc_info=True) status = 500 message = "Unknown server error" t, value, tb = sys.exc_info() self.set_status(status) tb_text = ''.join(traceback.format_exception(t, value, tb)) reply = dict(message=message, traceback=tb_text) self.set_header('Content-Type', 'application/json') self.finish(json.dumps(reply)) else: return result return wrapper #----------------------------------------------------------------------------- # File handler #----------------------------------------------------------------------------- # to minimize subclass changes: HTTPError = web.HTTPError class FileFindHandler(web.StaticFileHandler): """subclass of StaticFileHandler for serving files from a search path""" # cache search results, don't search for files more than once _static_paths = {} def initialize(self, path, default_filename=None): if isinstance(path, string_types): path = [path] self.root = tuple( 
os.path.abspath(os.path.expanduser(p)) + os.sep for p in path ) self.default_filename = default_filename def compute_etag(self): return None @classmethod def get_absolute_path(cls, roots, path): """locate a file to serve on our static file search path""" with cls._lock: if path in cls._static_paths: return cls._static_paths[path] try: abspath = os.path.abspath(filefind(path, roots)) except IOError: # IOError means not found return '' cls._static_paths[path] = abspath return abspath def validate_absolute_path(self, root, absolute_path): """check if the file should be served (raises 404, 403, etc.)""" if absolute_path == '': raise web.HTTPError(404) for root in self.root: if (absolute_path + os.sep).startswith(root): break return super(FileFindHandler, self).validate_absolute_path(root, absolute_path) class TrailingSlashHandler(web.RequestHandler): """Simple redirect handler that strips trailing slashes This should be the first, highest priority handler. """ SUPPORTED_METHODS = ['GET'] def get(self): self.redirect(self.request.uri.rstrip('/')) #----------------------------------------------------------------------------- # URL pattern fragments for re-use #----------------------------------------------------------------------------- path_regex = r"(?P<path>(?:/.*)*)" notebook_name_regex = r"(?P<name>[^/]+\.ipynb)" notebook_path_regex = "%s/%s" % (path_regex, notebook_name_regex) #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- default_handlers = [ (r".*/", TrailingSlashHandler) ]
./CrossVul/dataset_final_sorted/CWE-79/py/good_1645_0
crossvul-python_data_good_2103_9
# encoding:utf-8 """ :synopsis: views "read-only" for main textual content By main textual content is meant - text of Questions, Answers and Comments. The "read-only" requirement here is not 100% strict, as for example "question" view does allow adding new comments via Ajax form post. """ import datetime import logging import urllib import operator from django.shortcuts import get_object_or_404 from django.shortcuts import render from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseNotAllowed from django.core.paginator import Paginator, EmptyPage, InvalidPage from django.template.loader import get_template from django.template import RequestContext from django.utils import simplejson from django.utils.html import escape from django.utils.translation import ugettext as _ from django.utils.translation import ungettext from django.utils import translation from django.views.decorators import csrf from django.core.urlresolvers import reverse from django.core import exceptions as django_exceptions from django.contrib.humanize.templatetags import humanize from django.http import QueryDict from django.conf import settings as django_settings import askbot from askbot import exceptions from askbot.utils.diff import textDiff as htmldiff from askbot.forms import AnswerForm, ShowQuestionForm from askbot import conf from askbot import models from askbot import schedules from askbot.models.tag import Tag from askbot import const from askbot.utils import functions from askbot.utils.html import sanitize_html from askbot.utils.decorators import anonymous_forbidden, ajax_only, get_only from askbot.utils.loading import load_module from askbot.search.state_manager import SearchState, DummySearchState from askbot.templatetags import extra_tags from askbot.conf import settings as askbot_settings from askbot.views import context # used in index page #todo: - take these out of const or settings from askbot.models import Post, Vote INDEX_PAGE_SIZE = 30 
INDEX_AWARD_SIZE = 15
INDEX_TAGS_SIZE = 25
# used in tags list
DEFAULT_PAGE_SIZE = 60
# used in questions
# used in answers

#refactor? - we have these
#views that generate a listing of questions in one way or another:
#index, unanswered, questions, search, tag
#should we dry them up?
#related topics - information drill-down, search refinement

def index(request):#generates front page - shows listing of questions sorted in various ways
    """index view mapped to the root url of the Q&A site
    """
    return HttpResponseRedirect(reverse('questions'))

def questions(request, **kwargs):
    """
    List of Questions, Tagged questions, and Unanswered questions.
    matching search query or user selection
    """
    #before = datetime.datetime.now()
    if request.method != 'GET':
        return HttpResponseNotAllowed(['GET'])

    # Search parameters (scope, tags, query, sort, page) come from kwargs
    # resolved by the URL conf; SearchState normalizes them.
    search_state = SearchState(
        user_logged_in=request.user.is_authenticated(),
        **kwargs
    )
    page_size = int(askbot_settings.DEFAULT_QUESTIONS_PAGE_SIZE)

    qs, meta_data = models.Thread.objects.run_advanced_search(
        request_user=request.user, search_state=search_state
    )
    if meta_data['non_existing_tags']:
        # drop tags that matched nothing so the UI doesn't keep filtering on them
        search_state = search_state.remove_tags(meta_data['non_existing_tags'])

    paginator = Paginator(qs, page_size)
    if paginator.num_pages < search_state.page:
        search_state.page = 1
    page = paginator.page(search_state.page)
    page.object_list = list(page.object_list)  # evaluate the queryset

    # INFO: Because for the time being we need question posts and thread authors
    #       down the pipeline, we have to precache them in thread objects
    models.Thread.objects.precache_view_data_hack(threads=page.object_list)

    related_tags = Tag.objects.get_related_to_search(
        threads=page.object_list,
        ignored_tag_names=meta_data.get('ignored_tag_names', [])
    )
    tag_list_type = askbot_settings.TAG_LIST_FORMAT
    if tag_list_type == 'cloud':  # force cloud to sort by name
        related_tags = sorted(related_tags, key=operator.attrgetter('name'))

    contributors = list(
        models.Thread.objects.get_thread_contributors(
            thread_list=page.object_list
        ).only('id', 'username', 'gravatar')
    )

    paginator_context = {
        'is_paginated': (paginator.count > page_size),
        'pages': paginator.num_pages,
        'page': search_state.page,
        'has_previous': page.has_previous(),
        'has_next': page.has_next(),
        'previous': page.previous_page_number(),
        'next': page.next_page_number(),
        'base_url': search_state.query_string(),
        'page_size': page_size,
    }

    # We need to pass the rss feed url based
    # on the search state to the template.
    # We use QueryDict to get a querystring
    # from dicts and arrays. Much cleaner
    # than parsing and string formating.
    rss_query_dict = QueryDict("").copy()
    if search_state.query:
        # We have search string in session - pass it to
        # the QueryDict
        rss_query_dict.update({"q": search_state.query})
    if search_state.tags:
        # We have tags in session - pass it to the
        # QueryDict but as a list - we want tags+
        rss_query_dict.setlist("tags", search_state.tags)
    context_feed_url = '/%sfeeds/rss/?%s' % (
        django_settings.ASKBOT_URL,
        rss_query_dict.urlencode()
    )  # Format the url with the QueryDict

    # number of active search filters - used to show "reset" links
    reset_method_count = len(filter(None, [search_state.query,
                                           search_state.tags,
                                           meta_data.get('author_name', None)]))

    if request.is_ajax():
        # AJAX branch: render the question list and paginator fragments
        # server-side and hand them back as JSON.
        q_count = paginator.count

        question_counter = ungettext('%(q_num)s question', '%(q_num)s questions', q_count)
        question_counter = question_counter % {'q_num': humanize.intcomma(q_count),}

        if q_count > page_size:
            paginator_tpl = get_template('main_page/paginator.html')
            paginator_html = paginator_tpl.render(
                RequestContext(
                    request,
                    {
                        'context': functions.setup_paginator(paginator_context),
                        'questions_count': q_count,
                        'page_size': page_size,
                        'search_state': search_state,
                    }
                )
            )
        else:
            paginator_html = ''

        questions_tpl = get_template('main_page/questions_loop.html')
        questions_html = questions_tpl.render(
            RequestContext(
                request,
                {
                    'threads': page,
                    'search_state': search_state,
                    'reset_method_count': reset_method_count,
                    'request': request
                }
            )
        )

        ajax_data = {
            'query_data': {
                'tags': search_state.tags,
                'sort_order': search_state.sort,
                'ask_query_string': search_state.ask_query_string(),
            },
            'paginator': paginator_html,
            'question_counter': question_counter,
            'faces': [],  # [extra_tags.gravatar(contributor, 48) for contributor in contributors],
            'feed_url': context_feed_url,
            'query_string': search_state.query_string(),
            'page_size': page_size,
            'questions': questions_html.replace('\n', ''),
            'non_existing_tags': meta_data['non_existing_tags']
        }
        # tag names are escaped here because they are inserted into the DOM
        ajax_data['related_tags'] = [{
            'name': escape(tag.name),
            'used_count': humanize.intcomma(tag.local_used_count)
        } for tag in related_tags]

        return HttpResponse(simplejson.dumps(ajax_data), mimetype='application/json')

    else:  # non-AJAX branch: full page render
        template_data = {
            'active_tab': 'questions',
            'author_name': meta_data.get('author_name', None),
            'contributors': contributors,
            'context': paginator_context,
            'is_unanswered': False,  # remove this from template
            'interesting_tag_names': meta_data.get('interesting_tag_names', None),
            'ignored_tag_names': meta_data.get('ignored_tag_names', None),
            'subscribed_tag_names': meta_data.get('subscribed_tag_names', None),
            'language_code': translation.get_language(),
            'name_of_anonymous_user': models.get_name_of_anonymous_user(),
            'page_class': 'main-page',
            'page_size': page_size,
            'query': search_state.query,
            'threads': page,
            'questions_count': paginator.count,
            'reset_method_count': reset_method_count,
            'scope': search_state.scope,
            'show_sort_by_relevance': conf.should_show_sort_by_relevance(),
            'search_tags': search_state.tags,
            'sort': search_state.sort,
            'tab_id': search_state.sort,
            'tags': related_tags,
            'tag_list_type': tag_list_type,
            'font_size': extra_tags.get_tag_font_size(related_tags),
            'display_tag_filter_strategy_choices': conf.get_tag_display_filter_strategy_choices(),
            'email_tag_filter_strategy_choices': conf.get_tag_email_filter_strategy_choices(),
            'update_avatar_data': schedules.should_update_avatar_data(request),
            'query_string': search_state.query_string(),
            'search_state': search_state,
            'feed_url': context_feed_url,
        }

        return render(request, 'main_page.html', template_data)

def tags(request):#view showing a listing of available tags - plain list
    """Display the tag listing, either as a paginated plain list or as a
    tag cloud, optionally filtered by a substring query.
    """
    #1) Get parameters. This normally belongs to form cleaning.
    post_data = request.GET
    sortby = post_data.get('sort', 'used')
    try:
        page = int(post_data.get('page', '1'))
    except ValueError:
        page = 1

    if sortby == 'name':
        order_by = 'name'
    else:
        order_by = '-used_count'

    query = post_data.get('query', '').strip()
    tag_list_type = askbot_settings.TAG_LIST_FORMAT

    #2) Get query set for the tags.
    query_params = {'deleted': False}
    if query != '':
        query_params['name__icontains'] = query

    tags_qs = Tag.objects.filter(**query_params).exclude(used_count=0)
    tags_qs = tags_qs.order_by(order_by)

    #3) Start populating the template context.
    data = {
        'active_tab': 'tags',
        'page_class': 'tags-page',
        'tag_list_type': tag_list_type,
        'stag': query,
        'tab_id': sortby,
        'keywords': query,
        'search_state': SearchState(*[None for x in range(7)])
    }

    if tag_list_type == 'list':
        #plain listing is paginated
        objects_list = Paginator(tags_qs, DEFAULT_PAGE_SIZE)
        try:
            tags = objects_list.page(page)
        except (EmptyPage, InvalidPage):
            # out-of-range page number: clamp to the last page
            tags = objects_list.page(objects_list.num_pages)

        paginator_data = {
            'is_paginated': (objects_list.num_pages > 1),
            'pages': objects_list.num_pages,
            'page': page,
            'has_previous': tags.has_previous(),
            'has_next': tags.has_next(),
            'previous': tags.previous_page_number(),
            'next': tags.next_page_number(),
            'base_url': reverse('tags') + '?sort=%s&amp;' % sortby
        }
        paginator_context = functions.setup_paginator(paginator_data)
        data['paginator_context'] = paginator_context
    else:
        #tags for the tag cloud are given without pagination
        tags = tags_qs
        font_size = extra_tags.get_tag_font_size(tags)
        data['font_size'] = font_size

    data['tags'] = tags

    if request.is_ajax():
        template = get_template('tags/content.html')
        template_context = RequestContext(request, data)
        json_data = {'success': True, 'html': template.render(template_context)}
        json_string = simplejson.dumps(json_data)
        return HttpResponse(json_string, mimetype='application/json')
    else:
        return render(request, 'tags.html', data)

@csrf.csrf_protect
def question(request, id):#refactor - long subroutine. display question body, answers and comments
    """view that displays body of the question and all answers to it
    """
    #process url parameters
    #todo: fix inheritance of sort method from questions
    #before = datetime.datetime.now()
    form = ShowQuestionForm(request.GET)
    form.full_clean()#always valid
    show_answer = form.cleaned_data['show_answer']
    show_comment = form.cleaned_data['show_comment']
    show_page = form.cleaned_data['show_page']
    answer_sort_method = form.cleaned_data['answer_sort_method']

    #load question and maybe refuse showing deleted question
    #if the question does not exist - try mapping to old questions
    #and and if it is not found again - then give up
    try:
        question_post = models.Post.objects.filter(
            post_type = 'question',
            id = id
        ).select_related('thread')[0]
    except IndexError:
        # Handle URL mapping - from old Q/A/C/ URLs to the new one
        try:
            question_post = models.Post.objects.filter(
                post_type='question',
                old_question_id = id
            ).select_related('thread')[0]
        except IndexError:
            raise Http404

        if show_answer:
            try:
                old_answer = models.Post.objects.get_answers().get(old_answer_id=show_answer)
                return HttpResponseRedirect(old_answer.get_absolute_url())
            except models.Post.DoesNotExist:
                pass

        elif show_comment:
            try:
                old_comment = models.Post.objects.get_comments().get(old_comment_id=show_comment)
                return HttpResponseRedirect(old_comment.get_absolute_url())
            except models.Post.DoesNotExist:
                pass

    try:
        question_post.assert_is_visible_to(request.user)
    except exceptions.QuestionHidden, error:
        request.user.message_set.create(message = unicode(error))
        return HttpResponseRedirect(reverse('index'))

    #redirect if slug in the url is wrong
    if request.path.split('/')[-2] != question_post.slug:
        logging.debug('no slug match!')
        question_url = '?'.join((
            question_post.get_absolute_url(),
urllib.urlencode(request.GET) )) return HttpResponseRedirect(question_url) #resolve comment and answer permalinks #they go first because in theory both can be moved to another question #this block "returns" show_post and assigns actual comment and answer #to show_comment and show_answer variables #in the case if the permalinked items or their parents are gone - redirect #redirect also happens if id of the object's origin post != requested id show_post = None #used for permalinks if show_comment: #if url calls for display of a specific comment, #check that comment exists, that it belongs to #the current question #if it is an answer comment and the answer is hidden - #redirect to the default view of the question #if the question is hidden - redirect to the main page #in addition - if url points to a comment and the comment #is for the answer - we need the answer object try: show_comment = models.Post.objects.get_comments().get(id=show_comment) except models.Post.DoesNotExist: error_message = _( 'Sorry, the comment you are looking for has been ' 'deleted and is no longer accessible' ) request.user.message_set.create(message = error_message) return HttpResponseRedirect(question_post.thread.get_absolute_url()) if str(show_comment.thread._question_post().id) != str(id): return HttpResponseRedirect(show_comment.get_absolute_url()) show_post = show_comment.parent try: show_comment.assert_is_visible_to(request.user) except exceptions.AnswerHidden, error: request.user.message_set.create(message = unicode(error)) #use reverse function here because question is not yet loaded return HttpResponseRedirect(reverse('question', kwargs = {'id': id})) except exceptions.QuestionHidden, error: request.user.message_set.create(message = unicode(error)) return HttpResponseRedirect(reverse('index')) elif show_answer: #if the url calls to view a particular answer to #question - we must check whether the question exists #whether answer is actually corresponding to the current question #and 
that the visitor is allowed to see it show_post = get_object_or_404(models.Post, post_type='answer', id=show_answer) if str(show_post.thread._question_post().id) != str(id): return HttpResponseRedirect(show_post.get_absolute_url()) try: show_post.assert_is_visible_to(request.user) except django_exceptions.PermissionDenied, error: request.user.message_set.create(message = unicode(error)) return HttpResponseRedirect(reverse('question', kwargs = {'id': id})) thread = question_post.thread if getattr(django_settings, 'ASKBOT_MULTILINGUAL', False): if thread.language_code != translation.get_language(): return HttpResponseRedirect(thread.get_absolute_url()) logging.debug('answer_sort_method=' + unicode(answer_sort_method)) #load answers and post id's->athor_id mapping #posts are pre-stuffed with the correctly ordered comments updated_question_post, answers, post_to_author, published_answer_ids = thread.get_cached_post_data( sort_method = answer_sort_method, user = request.user ) question_post.set_cached_comments( updated_question_post.get_cached_comments() ) #Post.objects.precache_comments(for_posts=[question_post] + answers, visitor=request.user) user_votes = {} user_post_id_list = list() #todo: cache this query set, but again takes only 3ms! 
if request.user.is_authenticated(): user_votes = Vote.objects.filter( user=request.user, voted_post__id__in = post_to_author.keys() ).values_list('voted_post_id', 'vote') user_votes = dict(user_votes) #we can avoid making this query by iterating through #already loaded posts user_post_id_list = [ id for id in post_to_author if post_to_author[id] == request.user.id ] #resolve page number and comment number for permalinks show_comment_position = None if show_comment: show_page = show_comment.get_page_number(answer_posts=answers) show_comment_position = show_comment.get_order_number() elif show_answer: show_page = show_post.get_page_number(answer_posts=answers) objects_list = Paginator(answers, const.ANSWERS_PAGE_SIZE) if show_page > objects_list.num_pages: return HttpResponseRedirect(question_post.get_absolute_url()) page_objects = objects_list.page(show_page) #count visits #import ipdb; ipdb.set_trace() if functions.not_a_robot_request(request): #todo: split this out into a subroutine #todo: merge view counts per user and per session #1) view count per session update_view_count = False if 'question_view_times' not in request.session: request.session['question_view_times'] = {} last_seen = request.session['question_view_times'].get(question_post.id, None) if thread.last_activity_by_id != request.user.id: if last_seen: if last_seen < thread.last_activity_at: update_view_count = True else: update_view_count = True request.session['question_view_times'][question_post.id] = \ datetime.datetime.now() #2) run the slower jobs in a celery task from askbot import tasks tasks.record_question_visit.delay( question_post = question_post, user_id = request.user.id, update_view_count = update_view_count ) paginator_data = { 'is_paginated' : (objects_list.count > const.ANSWERS_PAGE_SIZE), 'pages': objects_list.num_pages, 'page': show_page, 'has_previous': page_objects.has_previous(), 'has_next': page_objects.has_next(), 'previous': page_objects.previous_page_number(), 'next': 
page_objects.next_page_number(), 'base_url' : request.path + '?sort=%s&amp;' % answer_sort_method, } paginator_context = functions.setup_paginator(paginator_data) #todo: maybe consolidate all activity in the thread #for the user into just one query? favorited = thread.has_favorite_by_user(request.user) is_cacheable = True if show_page != 1: is_cacheable = False elif show_comment_position > askbot_settings.MAX_COMMENTS_TO_SHOW: is_cacheable = False initial = { 'wiki': question_post.wiki and askbot_settings.WIKI_ON, 'email_notify': thread.is_followed_by(request.user) } #maybe load draft if request.user.is_authenticated(): #todo: refactor into methor on thread drafts = models.DraftAnswer.objects.filter( author=request.user, thread=thread ) if drafts.count() > 0: initial['text'] = drafts[0].text answer_form = AnswerForm(initial, user=request.user) user_can_post_comment = ( request.user.is_authenticated() and request.user.can_post_comment() ) user_already_gave_answer = False previous_answer = None if request.user.is_authenticated(): if askbot_settings.LIMIT_ONE_ANSWER_PER_USER: for answer in answers: if answer.author == request.user: user_already_gave_answer = True previous_answer = answer break data = { 'is_cacheable': False,#is_cacheable, #temporary, until invalidation fix 'long_time': const.LONG_TIME,#"forever" caching 'page_class': 'question-page', 'active_tab': 'questions', 'question' : question_post, 'thread': thread, 'thread_is_moderated': thread.is_moderated(), 'user_is_thread_moderator': thread.has_moderator(request.user), 'published_answer_ids': published_answer_ids, 'answer' : answer_form, 'answers' : page_objects.object_list, 'answer_count': thread.get_answer_count(request.user), 'category_tree_data': askbot_settings.CATEGORY_TREE, 'user_votes': user_votes, 'user_post_id_list': user_post_id_list, 'user_can_post_comment': user_can_post_comment,#in general 'user_already_gave_answer': user_already_gave_answer, 'oldest_answer_id': 
thread.get_oldest_answer_id(request.user), 'previous_answer': previous_answer, 'tab_id' : answer_sort_method, 'favorited' : favorited, 'similar_threads' : thread.get_similar_threads(), 'language_code': translation.get_language(), 'paginator_context' : paginator_context, 'show_post': show_post, 'show_comment': show_comment, 'show_comment_position': show_comment_position, } #shared with ... if askbot_settings.GROUPS_ENABLED: data['sharing_info'] = thread.get_sharing_info() data.update(context.get_for_tag_editor()) extra_context = getattr( django_settings, 'ASKBOT_QUESTION_PAGE_EXTRA_CONTEXT', None ) if extra_context: extra_context_getter = load_module(extra_context) extra_data = extra_context_getter(request, data) data.update(extra_data) return render(request, 'question.html', data) def revisions(request, id, post_type = None): assert post_type in ('question', 'answer') post = get_object_or_404(models.Post, post_type=post_type, id=id) revisions = list(models.PostRevision.objects.filter(post=post)) revisions.reverse() for i, revision in enumerate(revisions): if i == 0: revision.diff = sanitize_html(revisions[i].html) revision.summary = _('initial version') else: revision.diff = htmldiff( sanitize_html(revisions[i-1].html), sanitize_html(revision.html) ) data = { 'page_class':'revisions-page', 'active_tab':'questions', 'post': post, 'revisions': revisions, } return render(request, 'revisions.html', data) @csrf.csrf_exempt @ajax_only @anonymous_forbidden @get_only def get_comment(request): """returns text of a comment by id via ajax response requires request method get and request must be ajax """ id = int(request.GET['id']) comment = models.Post.objects.get(post_type='comment', id=id) request.user.assert_can_edit_comment(comment) return {'text': comment.text}
./CrossVul/dataset_final_sorted/CWE-79/py/good_2103_9
crossvul-python_data_bad_3530_2
from django import template from django.conf import settings from django.db.models import Q from django.template import NodeList, TemplateSyntaxError from django.template.loader import render_to_string from django.utils import simplejson from django.utils.translation import ugettext_lazy as _ from djblets.util.decorators import basictag, blocktag from djblets.util.misc import get_object_or_none from djblets.util.templatetags.djblets_utils import humanize_list from reviewboard.accounts.models import Profile from reviewboard.diffviewer.models import DiffSet from reviewboard.reviews.models import Comment, Group, ReviewRequest, \ ScreenshotComment register = template.Library() @register.tag @blocktag def forcomment(context, nodelist, filediff, review=None): """ Loops over a list of comments beloning to a filediff. This will populate a special ``comment`` variable for use in the content. This is of the type :model:`reviews.Comment`. """ new_nodelist = NodeList() context.push() if not review: comments = filediff.comments.all() else: comments = filediff.comments.filter(review=review) for comment in comments: context['comment'] = comment for node in nodelist: new_nodelist.append(node.render(context)) context.pop() return new_nodelist.render(context) @register.tag @blocktag def ifneatnumber(context, nodelist, rid): """ Returns whether or not the specified number is a "neat" number. This is a number with a special property, such as being a palindrome or having trailing zeroes. If the number is a neat number, the contained content is rendered, and two variables, ``milestone`` and ``palindrome`` are defined. 
""" if rid == None or rid < 1000: return "" ridstr = str(rid) interesting = False context.push() context['milestone'] = False context['palindrome'] = False if rid >= 1000: trailing = ridstr[1:] if trailing == "0" * len(trailing): context['milestone'] = True interesting = True if not interesting: if ridstr == ''.join(reversed(ridstr)): context['palindrome'] = True interesting = True if not interesting: context.pop() return "" s = nodelist.render(context) context.pop() return s @register.tag @basictag(takes_context=True) def commentcounts(context, filediff, interfilediff=None): """ Returns a JSON array of current comments for a filediff, sorted by line number. Each entry in the array has a dictionary containing the following keys: =========== ================================================== Key Description =========== ================================================== comment_id The ID of the comment text The text of the comment line The first line number num_lines The number of lines this comment spans user A dictionary containing "username" and "name" keys for the user url The URL to the comment localdraft True if this is the current user's draft comment =========== ================================================== """ comment_dict = {} user = context.get('user', None) if interfilediff: query = Comment.objects.filter(filediff=filediff, interfilediff=interfilediff) else: query = Comment.objects.filter(filediff=filediff, interfilediff__isnull=True) for comment in query: review = get_object_or_none(comment.review) if review and (review.public or review.user == user): key = (comment.first_line, comment.num_lines) comment_dict.setdefault(key, []).append({ 'comment_id': comment.id, 'text': comment.text, 'line': comment.first_line, 'num_lines': comment.num_lines, 'user': { 'username': review.user.username, 'name': review.user.get_full_name() or review.user.username, }, #'timestamp': comment.timestamp, 'url': comment.get_review_url(), 'localdraft': review.user == user 
and \ not review.public, }) comments_array = [] for key, value in comment_dict.iteritems(): comments_array.append({ 'linenum': key[0], 'num_lines': key[1], 'comments': value, }) comments_array.sort(cmp=lambda x, y: cmp(x['linenum'], y['linenum'] or cmp(x['num_lines'], y['num_lines']))) return simplejson.dumps(comments_array) @register.tag @basictag(takes_context=True) def screenshotcommentcounts(context, screenshot): """ Returns a JSON array of current comments for a screenshot. Each entry in the array has a dictionary containing the following keys: =========== ================================================== Key Description =========== ================================================== text The text of the comment localdraft True if this is the current user's draft comment x The X location of the comment's region y The Y location of the comment's region w The width of the comment's region h The height of the comment's region =========== ================================================== """ comments = {} user = context.get('user', None) for comment in screenshot.comments.all(): review = get_object_or_none(comment.review) if review and (review.public or review.user == user): position = '%dx%d+%d+%d' % (comment.w, comment.h, \ comment.x, comment.y) comments.setdefault(position, []).append({ 'id': comment.id, 'text': comment.text, 'user': { 'username': review.user.username, 'name': review.user.get_full_name() or review.user.username, }, 'url': comment.get_review_url(), 'localdraft' : review.user == user and \ not review.public, 'x' : comment.x, 'y' : comment.y, 'w' : comment.w, 'h' : comment.h, }) return simplejson.dumps(comments) @register.tag @basictag(takes_context=True) def reply_list(context, review, comment, context_type, context_id): """ Renders a list of comments of a specified type. This is a complex, confusing function accepts lots of inputs in order to display replies to a type of object. 
In each case, the replies will be rendered using the template :template:`reviews/review_reply.html`. If ``context_type`` is ``"comment"`` or ``"screenshot_comment"``, the generated list of replies are to ``comment``. If ``context_type`` is ``"body_top"`` or ```"body_bottom"``, the generated list of replies are to ``review``. Depending on the ``context_type``, these will either be replies to the top of the review body or to the bottom. The ``context_id`` parameter has to do with the internal IDs used by the JavaScript code for storing and categorizing the comments. """ def generate_reply_html(reply, timestamp, text): return render_to_string('reviews/review_reply.html', { 'context_id': context_id, 'id': reply.id, 'review': review, 'timestamp': timestamp, 'text': text, 'reply_user': reply.user, 'draft': not reply.public }) def process_body_replies(queryset, attrname, user): if user.is_anonymous(): queryset = queryset.filter(public=True) else: queryset = queryset.filter(Q(public=True) | Q(user=user)) s = "" for reply_comment in queryset: s += generate_reply_html(reply, reply.timestamp, getattr(reply, attrname)) return s user = context.get('user', None) if user.is_anonymous(): user = None s = "" if context_type == "comment" or context_type == "screenshot_comment": for reply_comment in comment.public_replies(user): s += generate_reply_html(reply_comment.review.get(), reply_comment.timestamp, reply_comment.text) elif context_type == "body_top" or context_type == "body_bottom": q = Q(public=True) if user: q = q | Q(user=user) replies = getattr(review, "%s_replies" % context_type).filter(q) for reply in replies: s += generate_reply_html(reply, reply.timestamp, getattr(reply, context_type)) return s else: raise TemplateSyntaxError, "Invalid context type passed" return s @register.inclusion_tag('reviews/review_reply_section.html', takes_context=True) def reply_section(context, review, comment, context_type, context_id): """ Renders a template for displaying a reply. 
This takes the same parameters as :tag:`reply_list`. The template rendered by this function, :template:`reviews/review_reply_section.html`, is responsible for invoking :tag:`reply_list` and as such passes these variables through. It does not make use of them itself. """ if comment != "": if type(comment) is ScreenshotComment: context_id += 's' context_id += str(comment.id) return { 'review': review, 'comment': comment, 'context_type': context_type, 'context_id': context_id, 'user': context.get('user', None) } @register.inclusion_tag('reviews/dashboard_entry.html', takes_context=True) def dashboard_entry(context, level, text, view, group=None): """ Renders an entry in the dashboard sidebar. This includes the name of the entry and the list of review requests associated with it. The entry is rendered by the template :template:`reviews/dashboard_entry.html`. """ user = context.get('user', None) datagrid = context.get('datagrid', None) starred = False show_count = True count = 0 if view == 'to-group': count = datagrid.counts['groups'].get(group.name, 0) elif view == 'watched-groups': starred = True show_count = False elif view in datagrid.counts: count = datagrid.counts[view] if view == 'starred': starred = True else: raise template.TemplateSyntaxError, \ "Invalid view type '%s' passed to 'dashboard_entry' tag." % view return { 'MEDIA_URL': settings.MEDIA_URL, 'MEDIA_SERIAL': settings.MEDIA_SERIAL, 'level': level, 'text': text, 'view': view, 'group': group, 'count': count, 'show_count': show_count, 'user': user, 'starred': starred, 'selected': context.get('view', None) == view and \ (not group or context.get('group', None) == group.name), } @register.simple_tag def reviewer_list(review_request): """ Returns a humanized list of target reviewers in a review request. 
""" return humanize_list([group.display_name or group.name \ for group in review_request.target_groups.all()] + \ [user.get_full_name() or user.username \ for user in review_request.target_people.all()]) @register.filter def bug_url(bug_id, review_request): """ Returns the URL based on a bug number on the specified review request. If the repository the review request belongs to doesn't have an associated bug tracker, this returns None. """ if (review_request.repository and review_request.repository.bug_tracker and '%s' in review_request.repository.bug_tracker): try: return review_request.repository.bug_tracker % bug_id except TypeError: logging.error("Error creating bug URL. The bug tracker URL '%s' " "is likely invalid." % review_request.repository.bug_tracker) return None @register.filter def diffsets_with_comments(review, current_pair): """ Returns a list of diffsets in the review that contain draft comments. """ if not review: return diffsets = DiffSet.objects.filter(files__comments__review=review) diffsets = diffsets.filter(files__comments__interfilediff__isnull=True) diffsets = diffsets.distinct() for diffset in diffsets: yield { 'diffset': diffset, 'is_current': current_pair[0] == diffset and current_pair[1] == None, } @register.filter def interdiffs_with_comments(review, current_pair): """ Returns a list of interdiffs in the review that contain draft comments. 
""" if not review: return diffsets = DiffSet.objects.filter(files__comments__review=review) diffsets = diffsets.filter(files__comments__interfilediff__isnull=False) diffsets = diffsets.distinct() for diffset in diffsets: interdiffs = DiffSet.objects.filter( files__interdiff_comments__filediff__diffset=diffset).distinct() for interdiff in interdiffs: yield { 'diffset': diffset, 'interdiff': interdiff, 'is_current': current_pair[0] == diffset and current_pair[1] == interdiff, } @register.filter def has_comments_in_diffsets_excluding(review, diffset_pair): """ Returns whether or not the specified review has any comments that aren't in the specified diffset or interdiff. """ if not review: return False current_diffset, interdiff = diffset_pair # See if there are any diffsets with comments on them in this review. q = DiffSet.objects.filter(files__comments__review=review) q = q.filter(files__comments__interfilediff__isnull=True).distinct() if not interdiff: # The user is browsing a standard diffset, so filter it out. q = q.exclude(pk=current_diffset.id) if q.count() > 0: return True # See if there are any interdiffs with comments on them in this review. q = DiffSet.objects.filter(files__comments__review=review) q = q.filter(files__comments__interfilediff__isnull=False) if interdiff: # The user is browsing an interdiff, so filter it out. q = q.exclude(pk=current_diffset.id, files__comments__interfilediff__diffset=interdiff) return q.count() > 0 @register.tag @basictag(takes_context=True) def star(context, obj): """ Renders the code for displaying a star used for starring items. The rendered code should handle click events so that the user can toggle the star. The star is rendered by the template :template:`reviews/star.html`. The passed object must be either a :model:`reviews.ReviewRequest` or a :model:`reviews.Group`. """ return render_star(context.get('user', None), obj) def render_star(user, obj): """ Does the actual work of rendering the star. 
The star tag is a wrapper around this. """ if user.is_anonymous(): return "" profile = None if not hasattr(obj, 'starred'): try: profile = user.get_profile() except Profile.DoesNotExist: return "" if isinstance(obj, ReviewRequest): obj_info = { 'type': 'reviewrequests', 'id': obj.id } if hasattr(obj, 'starred'): starred = obj.starred else: starred = \ profile.starred_review_requests.filter(pk=obj.id).count() > 0 elif isinstance(obj, Group): obj_info = { 'type': 'groups', 'id': obj.name } if hasattr(obj, 'starred'): starred = obj.starred else: starred = \ profile.starred_groups.filter(pk=obj.id).count() > 0 else: raise template.TemplateSyntaxError, \ "star tag received an incompatible object type (%s)" % \ type(obj) if starred: image_alt = _("Starred") else: image_alt = _("Click to star") return render_to_string('reviews/star.html', { 'object': obj_info, 'starred': int(starred), 'alt': image_alt, 'user': user, 'MEDIA_URL': settings.MEDIA_URL, })
./CrossVul/dataset_final_sorted/CWE-79/py/bad_3530_2
crossvul-python_data_bad_453_0
"""A cleanup tool for HTML. Removes unwanted tags and content. See the `Cleaner` class for details. """ import re import copy try: from urlparse import urlsplit except ImportError: # Python 3 from urllib.parse import urlsplit from lxml import etree from lxml.html import defs from lxml.html import fromstring, XHTML_NAMESPACE from lxml.html import xhtml_to_html, _transform_result try: unichr except NameError: # Python 3 unichr = chr try: unicode except NameError: # Python 3 unicode = str try: bytes except NameError: # Python < 2.6 bytes = str try: basestring except NameError: basestring = (str, bytes) __all__ = ['clean_html', 'clean', 'Cleaner', 'autolink', 'autolink_html', 'word_break', 'word_break_html'] # Look at http://code.sixapart.com/trac/livejournal/browser/trunk/cgi-bin/cleanhtml.pl # Particularly the CSS cleaning; most of the tag cleaning is integrated now # I have multiple kinds of schemes searched; but should schemes be # whitelisted instead? # max height? # remove images? Also in CSS? background attribute? # Some way to whitelist object, iframe, etc (e.g., if you want to # allow *just* embedded YouTube movies) # Log what was deleted and why? # style="behavior: ..." might be bad in IE? # Should we have something for just <meta http-equiv>? That's the worst of the # metas. # UTF-7 detections? Example: # <HEAD><META HTTP-EQUIV="CONTENT-TYPE" CONTENT="text/html; charset=UTF-7"> </HEAD>+ADw-SCRIPT+AD4-alert('XSS');+ADw-/SCRIPT+AD4- # you don't always have to have the charset set, if the page has no charset # and there's UTF7-like code in it. # Look at these tests: http://htmlpurifier.org/live/smoketests/xssAttacks.php # This is an IE-specific construct you can have in a stylesheet to # run some Javascript: _css_javascript_re = re.compile( r'expression\s*\(.*?\)', re.S|re.I) # Do I have to worry about @\nimport? 
_css_import_re = re.compile( r'@\s*import', re.I) # All kinds of schemes besides just javascript: that can cause # execution: _is_image_dataurl = re.compile( r'^data:image/.+;base64', re.I).search _is_possibly_malicious_scheme = re.compile( r'(?:javascript|jscript|livescript|vbscript|data|about|mocha):', re.I).search def _is_javascript_scheme(s): if _is_image_dataurl(s): return None return _is_possibly_malicious_scheme(s) _substitute_whitespace = re.compile(r'[\s\x00-\x08\x0B\x0C\x0E-\x19]+').sub # FIXME: should data: be blocked? # FIXME: check against: http://msdn2.microsoft.com/en-us/library/ms537512.aspx _conditional_comment_re = re.compile( r'\[if[\s\n\r]+.*?][\s\n\r]*>', re.I|re.S) _find_styled_elements = etree.XPath( "descendant-or-self::*[@style]") _find_external_links = etree.XPath( ("descendant-or-self::a [normalize-space(@href) and substring(normalize-space(@href),1,1) != '#'] |" "descendant-or-self::x:a[normalize-space(@href) and substring(normalize-space(@href),1,1) != '#']"), namespaces={'x':XHTML_NAMESPACE}) class Cleaner(object): """ Instances cleans the document of each of the possible offending elements. The cleaning is controlled by attributes; you can override attributes in a subclass, or set them in the constructor. ``scripts``: Removes any ``<script>`` tags. ``javascript``: Removes any Javascript, like an ``onclick`` attribute. Also removes stylesheets as they could contain Javascript. ``comments``: Removes any comments. ``style``: Removes any style tags. ``inline_style`` Removes any style attributes. Defaults to the value of the ``style`` option. ``links``: Removes any ``<link>`` tags ``meta``: Removes any ``<meta>`` tags ``page_structure``: Structural parts of a page: ``<head>``, ``<html>``, ``<title>``. ``processing_instructions``: Removes any processing instructions. 
``embedded``: Removes any embedded objects (flash, iframes) ``frames``: Removes any frame-related tags ``forms``: Removes any form tags ``annoying_tags``: Tags that aren't *wrong*, but are annoying. ``<blink>`` and ``<marquee>`` ``remove_tags``: A list of tags to remove. Only the tags will be removed, their content will get pulled up into the parent tag. ``kill_tags``: A list of tags to kill. Killing also removes the tag's content, i.e. the whole subtree, not just the tag itself. ``allow_tags``: A list of tags to include (default include all). ``remove_unknown_tags``: Remove any tags that aren't standard parts of HTML. ``safe_attrs_only``: If true, only include 'safe' attributes (specifically the list from the feedparser HTML sanitisation web site). ``safe_attrs``: A set of attribute names to override the default list of attributes considered 'safe' (when safe_attrs_only=True). ``add_nofollow``: If true, then any <a> tags will have ``rel="nofollow"`` added to them. ``host_whitelist``: A list or set of hosts that you can use for embedded content (for content like ``<object>``, ``<link rel="stylesheet">``, etc). You can also implement/override the method ``allow_embedded_url(el, url)`` or ``allow_element(el)`` to implement more complex rules for what can be embedded. Anything that passes this test will be shown, regardless of the value of (for instance) ``embedded``. Note that this parameter might not work as intended if you do not make the links absolute before doing the cleaning. Note that you may also need to set ``whitelist_tags``. ``whitelist_tags``: A set of tags that can be included with ``host_whitelist``. The default is ``iframe`` and ``embed``; you may wish to include other tags like ``script``, or you may want to implement ``allow_embedded_url`` for more control. Set to None to include all tags. This modifies the document *in place*. 
""" scripts = True javascript = True comments = True style = False inline_style = None links = True meta = True page_structure = True processing_instructions = True embedded = True frames = True forms = True annoying_tags = True remove_tags = None allow_tags = None kill_tags = None remove_unknown_tags = True safe_attrs_only = True safe_attrs = defs.safe_attrs add_nofollow = False host_whitelist = () whitelist_tags = set(['iframe', 'embed']) def __init__(self, **kw): for name, value in kw.items(): if not hasattr(self, name): raise TypeError( "Unknown parameter: %s=%r" % (name, value)) setattr(self, name, value) if self.inline_style is None and 'inline_style' not in kw: self.inline_style = self.style # Used to lookup the primary URL for a given tag that is up for # removal: _tag_link_attrs = dict( script='src', link='href', # From: http://java.sun.com/j2se/1.4.2/docs/guide/misc/applet.html # From what I can tell, both attributes can contain a link: applet=['code', 'object'], iframe='src', embed='src', layer='src', # FIXME: there doesn't really seem like a general way to figure out what # links an <object> tag uses; links often go in <param> tags with values # that we don't really know. You'd have to have knowledge about specific # kinds of plugins (probably keyed off classid), and match against those. ##object=?, # FIXME: not looking at the action currently, because it is more complex # than than -- if you keep the form, you should keep the form controls. ##form='action', a='href', ) def __call__(self, doc): """ Cleans the document. """ if hasattr(doc, 'getroot'): # ElementTree instance, instead of an element doc = doc.getroot() # convert XHTML to HTML xhtml_to_html(doc) # Normalize a case that IE treats <image> like <img>, and that # can confuse either this step or later steps. 
for el in doc.iter('image'): el.tag = 'img' if not self.comments: # Of course, if we were going to kill comments anyway, we don't # need to worry about this self.kill_conditional_comments(doc) kill_tags = set(self.kill_tags or ()) remove_tags = set(self.remove_tags or ()) allow_tags = set(self.allow_tags or ()) if self.scripts: kill_tags.add('script') if self.safe_attrs_only: safe_attrs = set(self.safe_attrs) for el in doc.iter(etree.Element): attrib = el.attrib for aname in attrib.keys(): if aname not in safe_attrs: del attrib[aname] if self.javascript: if not (self.safe_attrs_only and self.safe_attrs == defs.safe_attrs): # safe_attrs handles events attributes itself for el in doc.iter(etree.Element): attrib = el.attrib for aname in attrib.keys(): if aname.startswith('on'): del attrib[aname] doc.rewrite_links(self._remove_javascript_link, resolve_base_href=False) # If we're deleting style then we don't have to remove JS links # from styles, otherwise... if not self.inline_style: for el in _find_styled_elements(doc): old = el.get('style') new = _css_javascript_re.sub('', old) new = _css_import_re.sub('', new) if self._has_sneaky_javascript(new): # Something tricky is going on... del el.attrib['style'] elif new != old: el.set('style', new) if not self.style: for el in list(doc.iter('style')): if el.get('type', '').lower().strip() == 'text/javascript': el.drop_tree() continue old = el.text or '' new = _css_javascript_re.sub('', old) # The imported CSS can do anything; we just can't allow: new = _css_import_re.sub('', old) if self._has_sneaky_javascript(new): # Something tricky is going on... el.text = '/* deleted */' elif new != old: el.text = new if self.comments or self.processing_instructions: # FIXME: why either? I feel like there's some obscure reason # because you can put PIs in comments...? 
But I've already # forgotten it kill_tags.add(etree.Comment) if self.processing_instructions: kill_tags.add(etree.ProcessingInstruction) if self.style: kill_tags.add('style') if self.inline_style: etree.strip_attributes(doc, 'style') if self.links: kill_tags.add('link') elif self.style or self.javascript: # We must get rid of included stylesheets if Javascript is not # allowed, as you can put Javascript in them for el in list(doc.iter('link')): if 'stylesheet' in el.get('rel', '').lower(): # Note this kills alternate stylesheets as well if not self.allow_element(el): el.drop_tree() if self.meta: kill_tags.add('meta') if self.page_structure: remove_tags.update(('head', 'html', 'title')) if self.embedded: # FIXME: is <layer> really embedded? # We should get rid of any <param> tags not inside <applet>; # These are not really valid anyway. for el in list(doc.iter('param')): found_parent = False parent = el.getparent() while parent is not None and parent.tag not in ('applet', 'object'): parent = parent.getparent() if parent is None: el.drop_tree() kill_tags.update(('applet',)) # The alternate contents that are in an iframe are a good fallback: remove_tags.update(('iframe', 'embed', 'layer', 'object', 'param')) if self.frames: # FIXME: ideally we should look at the frame links, but # generally frames don't mix properly with an HTML # fragment anyway. kill_tags.update(defs.frame_tags) if self.forms: remove_tags.add('form') kill_tags.update(('button', 'input', 'select', 'textarea')) if self.annoying_tags: remove_tags.update(('blink', 'marquee')) _remove = [] _kill = [] for el in doc.iter(): if el.tag in kill_tags: if self.allow_element(el): continue _kill.append(el) elif el.tag in remove_tags: if self.allow_element(el): continue _remove.append(el) if _remove and _remove[0] == doc: # We have to drop the parent-most tag, which we can't # do. 
Instead we'll rewrite it: el = _remove.pop(0) el.tag = 'div' el.attrib.clear() elif _kill and _kill[0] == doc: # We have to drop the parent-most element, which we can't # do. Instead we'll clear it: el = _kill.pop(0) if el.tag != 'html': el.tag = 'div' el.clear() _kill.reverse() # start with innermost tags for el in _kill: el.drop_tree() for el in _remove: el.drop_tag() if self.remove_unknown_tags: if allow_tags: raise ValueError( "It does not make sense to pass in both allow_tags and remove_unknown_tags") allow_tags = set(defs.tags) if allow_tags: bad = [] for el in doc.iter(): if el.tag not in allow_tags: bad.append(el) if bad: if bad[0] is doc: el = bad.pop(0) el.tag = 'div' el.attrib.clear() for el in bad: el.drop_tag() if self.add_nofollow: for el in _find_external_links(doc): if not self.allow_follow(el): rel = el.get('rel') if rel: if ('nofollow' in rel and ' nofollow ' in (' %s ' % rel)): continue rel = '%s nofollow' % rel else: rel = 'nofollow' el.set('rel', rel) def allow_follow(self, anchor): """ Override to suppress rel="nofollow" on some anchors. """ return False def allow_element(self, el): if el.tag not in self._tag_link_attrs: return False attr = self._tag_link_attrs[el.tag] if isinstance(attr, (list, tuple)): for one_attr in attr: url = el.get(one_attr) if not url: return False if not self.allow_embedded_url(el, url): return False return True else: url = el.get(attr) if not url: return False return self.allow_embedded_url(el, url) def allow_embedded_url(self, el, url): if (self.whitelist_tags is not None and el.tag not in self.whitelist_tags): return False scheme, netloc, path, query, fragment = urlsplit(url) netloc = netloc.lower().split(':', 1)[0] if scheme not in ('http', 'https'): return False if netloc in self.host_whitelist: return True return False def kill_conditional_comments(self, doc): """ IE conditional comments basically embed HTML that the parser doesn't normally see. 
We can't allow anything like that, so we'll kill any comments that could be conditional. """ bad = [] self._kill_elements( doc, lambda el: _conditional_comment_re.search(el.text), etree.Comment) def _kill_elements(self, doc, condition, iterate=None): bad = [] for el in doc.iter(iterate): if condition(el): bad.append(el) for el in bad: el.drop_tree() def _remove_javascript_link(self, link): # links like "j a v a s c r i p t:" might be interpreted in IE new = _substitute_whitespace('', link) if _is_javascript_scheme(new): # FIXME: should this be None to delete? return '' return link _substitute_comments = re.compile(r'/\*.*?\*/', re.S).sub def _has_sneaky_javascript(self, style): """ Depending on the browser, stuff like ``e x p r e s s i o n(...)`` can get interpreted, or ``expre/* stuff */ssion(...)``. This checks for attempt to do stuff like this. Typically the response will be to kill the entire style; if you have just a bit of Javascript in the style another rule will catch that and remove only the Javascript from the style; this catches more sneaky attempts. 
""" style = self._substitute_comments('', style) style = style.replace('\\', '') style = _substitute_whitespace('', style) style = style.lower() if 'javascript:' in style: return True if 'expression(' in style: return True return False def clean_html(self, html): result_type = type(html) if isinstance(html, basestring): doc = fromstring(html) else: doc = copy.deepcopy(html) self(doc) return _transform_result(result_type, doc) clean = Cleaner() clean_html = clean.clean_html ############################################################ ## Autolinking ############################################################ _link_regexes = [ re.compile(r'(?P<body>https?://(?P<host>[a-z0-9._-]+)(?:/[/\-_.,a-z0-9%&?;=~]*)?(?:\([/\-_.,a-z0-9%&?;=~]*\))?)', re.I), # This is conservative, but autolinking can be a bit conservative: re.compile(r'mailto:(?P<body>[a-z0-9._-]+@(?P<host>[a-z0-9_.-]+[a-z]))', re.I), ] _avoid_elements = ['textarea', 'pre', 'code', 'head', 'select', 'a'] _avoid_hosts = [ re.compile(r'^localhost', re.I), re.compile(r'\bexample\.(?:com|org|net)$', re.I), re.compile(r'^127\.0\.0\.1$'), ] _avoid_classes = ['nolink'] def autolink(el, link_regexes=_link_regexes, avoid_elements=_avoid_elements, avoid_hosts=_avoid_hosts, avoid_classes=_avoid_classes): """ Turn any URLs into links. It will search for links identified by the given regular expressions (by default mailto and http(s) links). It won't link text in an element in avoid_elements, or an element with a class in avoid_classes. It won't link to anything with a host that matches one of the regular expressions in avoid_hosts (default localhost and 127.0.0.1). If you pass in an element, the element's tail will not be substituted, only the contents of the element. 
""" if el.tag in avoid_elements: return class_name = el.get('class') if class_name: class_name = class_name.split() for match_class in avoid_classes: if match_class in class_name: return for child in list(el): autolink(child, link_regexes=link_regexes, avoid_elements=avoid_elements, avoid_hosts=avoid_hosts, avoid_classes=avoid_classes) if child.tail: text, tail_children = _link_text( child.tail, link_regexes, avoid_hosts, factory=el.makeelement) if tail_children: child.tail = text index = el.index(child) el[index+1:index+1] = tail_children if el.text: text, pre_children = _link_text( el.text, link_regexes, avoid_hosts, factory=el.makeelement) if pre_children: el.text = text el[:0] = pre_children def _link_text(text, link_regexes, avoid_hosts, factory): leading_text = '' links = [] last_pos = 0 while 1: best_match, best_pos = None, None for regex in link_regexes: regex_pos = last_pos while 1: match = regex.search(text, pos=regex_pos) if match is None: break host = match.group('host') for host_regex in avoid_hosts: if host_regex.search(host): regex_pos = match.end() break else: break if match is None: continue if best_pos is None or match.start() < best_pos: best_match = match best_pos = match.start() if best_match is None: # No more matches if links: assert not links[-1].tail links[-1].tail = text else: assert not leading_text leading_text = text break link = best_match.group(0) end = best_match.end() if link.endswith('.') or link.endswith(','): # These punctuation marks shouldn't end a link end -= 1 link = link[:-1] prev_text = text[:best_match.start()] if links: assert not links[-1].tail links[-1].tail = prev_text else: assert not leading_text leading_text = prev_text anchor = factory('a') anchor.set('href', link) body = best_match.group('body') if not body: body = link if body.endswith('.') or body.endswith(','): body = body[:-1] anchor.text = body links.append(anchor) text = text[end:] return leading_text, links def autolink_html(html, *args, **kw): result_type 
= type(html) if isinstance(html, basestring): doc = fromstring(html) else: doc = copy.deepcopy(html) autolink(doc, *args, **kw) return _transform_result(result_type, doc) autolink_html.__doc__ = autolink.__doc__ ############################################################ ## Word wrapping ############################################################ _avoid_word_break_elements = ['pre', 'textarea', 'code'] _avoid_word_break_classes = ['nobreak'] def word_break(el, max_width=40, avoid_elements=_avoid_word_break_elements, avoid_classes=_avoid_word_break_classes, break_character=unichr(0x200b)): """ Breaks any long words found in the body of the text (not attributes). Doesn't effect any of the tags in avoid_elements, by default ``<textarea>`` and ``<pre>`` Breaks words by inserting &#8203;, which is a unicode character for Zero Width Space character. This generally takes up no space in rendering, but does copy as a space, and in monospace contexts usually takes up space. See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion """ # Character suggestion of &#8203 comes from: # http://www.cs.tut.fi/~jkorpela/html/nobr.html if el.tag in _avoid_word_break_elements: return class_name = el.get('class') if class_name: dont_break = False class_name = class_name.split() for avoid in avoid_classes: if avoid in class_name: dont_break = True break if dont_break: return if el.text: el.text = _break_text(el.text, max_width, break_character) for child in el: word_break(child, max_width=max_width, avoid_elements=avoid_elements, avoid_classes=avoid_classes, break_character=break_character) if child.tail: child.tail = _break_text(child.tail, max_width, break_character) def word_break_html(html, *args, **kw): result_type = type(html) doc = fromstring(html) word_break(doc, *args, **kw) return _transform_result(result_type, doc) def _break_text(text, max_width, break_character): words = text.split() for word in words: if len(word) > max_width: replacement = _insert_break(word, 
max_width, break_character) text = text.replace(word, replacement) return text _break_prefer_re = re.compile(r'[^a-z]', re.I) def _insert_break(word, width, break_character): orig_word = word result = '' while len(word) > width: start = word[:width] breaks = list(_break_prefer_re.finditer(start)) if breaks: last_break = breaks[-1] # Only walk back up to 10 characters to find a nice break: if last_break.end() > width-10: # FIXME: should the break character be at the end of the # chunk, or the beginning of the next chunk? start = word[:last_break.end()] result += start + break_character word = word[len(start):] result += word return result
./CrossVul/dataset_final_sorted/CWE-79/py/bad_453_0
crossvul-python_data_good_5790_1
import calendar
import datetime
import re
import sys
import urllib
import urlparse
from email.utils import formatdate

from django.utils.datastructures import MultiValueDict
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy

# Matches one ETag, optionally weak ("W/"), capturing the quoted value.
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')

MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))


def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() that accepts unicode strings.

    The url is UTF-8 encoded before quoting.  The result can safely be
    passed to a later iri_to_uri() call without double-quoting.
    """
    return force_unicode(urllib.quote(smart_str(url), smart_str(safe)))
urlquote = allow_lazy(urlquote, unicode)


def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() that accepts unicode strings.

    The url is UTF-8 encoded before quoting.  The result can safely be
    passed to a later iri_to_uri() call without double-quoting.
    """
    return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, unicode)


def urlunquote(quoted_url):
    """
    A wrapper around urllib.unquote() that reverses
    django.utils.http.urlquote().
    """
    return force_unicode(urllib.unquote(smart_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, unicode)


def urlunquote_plus(quoted_url):
    """
    A wrapper around urllib.unquote_plus() that reverses
    django.utils.http.urlquote_plus().
    """
    return force_unicode(urllib.unquote_plus(smart_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, unicode)


def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() that accepts unicode strings.

    Keys and values are cast to UTF-8 byte strings before encoding.
    Accepts a MultiValueDict, a mapping, or a sequence of pairs.
    """
    if isinstance(query, MultiValueDict):
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    pairs = [
        (smart_str(k),
         isinstance(v, (list, tuple)) and [smart_str(i) for i in v] or smart_str(v))
        for k, v in query
    ]
    return urllib.urlencode(pairs, doseq)


def cookie_date(epoch_seconds=None):
    """
    Format a timestamp per Netscape's cookie standard:
    'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.

    ``epoch_seconds`` is a float of seconds since the epoch in UTC (as
    produced by time.time()); None means "now".
    """
    rfcdate = formatdate(epoch_seconds)
    # Splice dashes into the RFC 2822 date produced by formatdate().
    return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])


def http_date(epoch_seconds=None):
    """
    Format a timestamp per the RFC 1123 date format required by HTTP
    (RFC 2616 section 3.3.1): 'Wdy, DD Mon YYYY HH:MM:SS GMT'.

    ``epoch_seconds`` is a float of seconds since the epoch in UTC (as
    produced by time.time()); None means "now".
    """
    rfcdate = formatdate(epoch_seconds)
    return '%s GMT' % rfcdate[:25]


def parse_http_date(date):
    """
    Parse a date in any of the three formats allowed by RFC 2616
    section 3.3.1 and return seconds since the epoch, in UTC.

    Raises ValueError for unparseable input.
    """
    # email.utils.parsedate handles RFC 1123, but RFC 2616 also mandates
    # RFC 850 and asctime() support, so we roll our own parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        match = regex.match(date)
        if match is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(match.group('year'))
        if year < 100:
            # Two-digit years: 70-99 -> 19xx, 00-69 -> 20xx.
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(match.group('mon').lower()) + 1
        day = int(match.group('day'))
        hour = int(match.group('hour'))
        minute = int(match.group('min'))
        sec = int(match.group('sec'))
        result = datetime.datetime(year, month, day, hour, minute, sec)
        return calendar.timegm(result.utctimetuple())
    except Exception:
        raise ValueError("%r is not a valid date" % date)


def parse_http_date_safe(date):
    """
    Same as parse_http_date(), but returns None instead of raising on
    invalid input.
    """
    try:
        return parse_http_date(date)
    except Exception:
        pass


# Base 36 functions: useful for generating compact URLs

def base36_to_int(s):
    """
    Convert a base 36 string to an ``int``.

    Raises ValueError if the input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any base36
    # string longer than 13 digits (13 digits is sufficient to
    # base36-encode any 64-bit integer).
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value fits into an int.
    if value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value


def int_to_base36(i):
    """
    Convert a non-negative integer (at most sys.maxint) to a base 36
    string.
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    if not 0 <= i <= sys.maxint:
        raise ValueError("Base36 conversion input too large or incorrect type.")
    # Find the highest power of 36 not exceeding i.
    factor = 0
    while True:
        factor += 1
        if i < 36 ** factor:
            factor -= 1
            break
    # Emit digits from the most significant position downwards.
    base36 = []
    while factor >= 0:
        j = 36 ** factor
        base36.append(digits[i // j])
        i = i % j
        factor -= 1
    return ''.join(base36)


def parse_etags(etag_str):
    """
    Parse a string of one or several etags (as passed in If-None-Match /
    If-Match headers) per RFC 2616.

    Returns a list of etags without surrounding double quotes (") and
    with backslash escapes resolved.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has the wrong format; treat it as an opaque string.
        return [etag_str]
    return [e.decode('string_escape') for e in etags]


def quote_etag(etag):
    """
    Wrap a string in double quotes, escaping the contents as necessary.
    """
    return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')


if sys.version_info >= (2, 6):
    def same_origin(url1, url2):
        """
        Check whether two URLs are 'same-origin' (scheme, host and port
        all match).
        """
        p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
        return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
else:
    # Python 2.5 compatibility. This also works on 2.6+, but the
    # definition above is more obviously correct and preferred.
    def same_origin(url1, url2):
        """
        Check whether two URLs are 'same-origin'.
        """
        p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
        return p1[0:2] == p2[0:2]


def is_safe_url(url, host=None):
    """
    Return True if the url is a safe redirection target, i.e. it does
    not point to a different host and uses a safe scheme.

    Always returns False for an empty url.
    """
    if not url:
        return False
    url_info = urlparse.urlparse(url)
    return (not url_info[1] or url_info[1] == host) and \
        (not url_info[0] or url_info[0] in ['http', 'https'])
./CrossVul/dataset_final_sorted/CWE-79/py/good_5790_1
crossvul-python_data_bad_5510_1
from __future__ import absolute_import, division, unicode_literals
from six import text_type

import re

from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape

spaceCharacters = "".join(spaceCharacters)

# Attribute values matching this must be quoted (whitespace or any of
# the characters that would terminate/confuse an unquoted value).
quoteAttributeSpec = re.compile("[" + spaceCharacters + "\"'=<>`]")

try:
    from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
    unicode_encode_errors = "strict"
else:
    unicode_encode_errors = "htmlentityreplace"

    # Map codepoint -> shortest/preferred named entity (lowercase wins).
    encode_entity_map = {}
    is_ucs4 = len("\U0010FFFF") == 1
    for k, v in list(entities.items()):
        # skip multi-character entities
        if ((is_ucs4 and len(v) > 1) or
                (not is_ucs4 and len(v) > 2)):
            continue
        if v != "&":
            if len(v) == 2:
                v = utils.surrogatePairToCodepoint(v)
            else:
                v = ord(v)
            if v not in encode_entity_map or k.islower():
                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
                encode_entity_map[v] = k

    def htmlentityreplace_errors(exc):
        """codecs error handler: replace unencodable characters with
        named entities where possible, numeric references otherwise."""
        if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
            res = []
            codepoints = []
            skip = False
            for i, c in enumerate(exc.object[exc.start:exc.end]):
                if skip:
                    skip = False
                    continue
                index = i + exc.start
                # On narrow builds a non-BMP character arrives as a
                # surrogate pair; consume both halves as one codepoint.
                if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                    codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                    skip = True
                else:
                    codepoint = ord(c)
                codepoints.append(codepoint)
            for cp in codepoints:
                e = encode_entity_map.get(cp)
                if e:
                    res.append("&")
                    res.append(e)
                    if not e.endswith(";"):
                        res.append(";")
                else:
                    res.append("&#x%s;" % (hex(cp)[2:]))
            return ("".join(res), exc.end)
        else:
            return xmlcharrefreplace_errors(exc)

    register_error(unicode_encode_errors, htmlentityreplace_errors)

    del register_error


class HTMLSerializer(object):
    """Serializes a token stream (from a treewalker) back to (X)HTML text."""

    # attribute quoting options
    quote_attr_values = False
    quote_char = '"'
    use_best_quote_char = True

    # tag syntax options
    omit_optional_tags = True
    minimize_boolean_attributes = True
    use_trailing_solidus = False
    space_before_trailing_solidus = True

    # escaping options
    escape_lt_in_attrs = False
    escape_rcdata = False
    resolve_entities = True

    # miscellaneous options
    alphabetical_attributes = False
    inject_meta_charset = True
    strip_whitespace = False
    sanitize = False

    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
               "omit_optional_tags", "minimize_boolean_attributes",
               "use_trailing_solidus", "space_before_trailing_solidus",
               "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
               "alphabetical_attributes", "inject_meta_charset",
               "strip_whitespace", "sanitize")

    def __init__(self, **kwargs):
        """Initialize HTMLSerializer.

        Keyword options (default given first unless specified) include:

        inject_meta_charset=True|False
          Whether to insert a meta element declaring the document's
          character set.
        quote_attr_values=True|False
          Whether to quote attribute values that don't require quoting
          per HTML5 parsing rules.
        quote_char=u'"'|u"'"
          Quote character for attributes. Defaults to double quote
          unless the value contains one, in which case single quotes
          are used.
        escape_lt_in_attrs=False|True
          Whether to escape < in attribute values.
        escape_rcdata=False|True
          Whether to escape, inside rcdata elements such as style,
          characters that would need escaping in normal elements.
        resolve_entities=True|False
          Whether to resolve named character entities from the source
          tree. The XML predefined entities &lt; &gt; &amp; &quot;
          &apos; are unaffected.
        strip_whitespace=False|True
          Whether to collapse semantically meaningless whitespace to a
          single space (except within pre).
        minimize_boolean_attributes=True|False
          Shortens boolean attributes, e.g. <input disabled="disabled">
          becomes <input disabled>.
        use_trailing_solidus=False|True
          Emit a close-tag slash on void elements, e.g. <hr/>.
        space_before_trailing_solidus=True|False
          Emit a space before the trailing solidus, e.g. <hr />.
          Requires use_trailing_solidus.
        sanitize=False|True
          Strip all unsafe or unknown constructs from output.
        omit_optional_tags=True|False
          Omit start/end tags that are optional.
        alphabetical_attributes=False|True
          Reorder attributes alphabetically.
        """
        if 'quote_char' in kwargs:
            self.use_best_quote_char = False
        for attr in self.options:
            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
        self.errors = []
        self.strict = False

    def encode(self, string):
        """Encode with entity-replacement for unencodable characters."""
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, unicode_encode_errors)
        else:
            return string

    def encodeStrict(self, string):
        """Encode markup text that must never need entity replacement."""
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "strict")
        else:
            return string

    def serialize(self, treewalker, encoding=None):
        """Yield (encoded) output chunks for the given token stream."""
        self.encoding = encoding
        in_cdata = False
        self.errors = []

        if encoding and self.inject_meta_charset:
            from ..filters.inject_meta_charset import Filter
            treewalker = Filter(treewalker, encoding)
        # WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiently of this latter filter
        if self.strip_whitespace:
            from ..filters.whitespace import Filter
            treewalker = Filter(treewalker)
        if self.sanitize:
            from ..filters.sanitizer import Filter
            treewalker = Filter(treewalker)
        if self.omit_optional_tags:
            from ..filters.optionaltags import Filter
            treewalker = Filter(treewalker)
        # Alphabetical attributes must be last, as other filters
        # could add attributes and alter the order
        if self.alphabetical_attributes:
            from ..filters.alphabeticalattributes import Filter
            treewalker = Filter(treewalker)

        for token in treewalker:
            type = token["type"]
            if type == "Doctype":
                doctype = "<!DOCTYPE %s" % token["name"]

                if token["publicId"]:
                    doctype += ' PUBLIC "%s"' % token["publicId"]
                elif token["systemId"]:
                    doctype += " SYSTEM"
                if token["systemId"]:
                    if token["systemId"].find('"') >= 0:
                        if token["systemId"].find("'") >= 0:
                            self.serializeError("System identifer contains both single and double quote characters")
                        quote_char = "'"
                    else:
                        quote_char = '"'
                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)

                doctype += ">"
                yield self.encodeStrict(doctype)

            elif type in ("Characters", "SpaceCharacters"):
                if type == "SpaceCharacters" or in_cdata:
                    if in_cdata and token["data"].find("</") >= 0:
                        self.serializeError("Unexpected </ in CDATA")
                    yield self.encode(token["data"])
                else:
                    yield self.encode(escape(token["data"]))

            elif type in ("StartTag", "EmptyTag"):
                name = token["name"]
                yield self.encodeStrict("<%s" % name)
                if name in rcdataElements and not self.escape_rcdata:
                    in_cdata = True
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                for (attr_namespace, attr_name), attr_value in token["data"].items():
                    # TODO: Add namespace support here
                    k = attr_name
                    v = attr_value
                    yield self.encodeStrict(' ')

                    yield self.encodeStrict(k)
                    if not self.minimize_boolean_attributes or \
                        (k not in booleanAttributes.get(name, tuple()) and
                         k not in booleanAttributes.get("", tuple())):
                        yield self.encodeStrict("=")
                        if self.quote_attr_values:
                            quote_attr = True
                        else:
                            # Quote when empty or when containing any
                            # character unsafe in an unquoted value.
                            quote_attr = len(v) == 0 or quoteAttributeSpec.search(v)
                        v = v.replace("&", "&amp;")
                        if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
                        if quote_attr:
                            quote_char = self.quote_char
                            if self.use_best_quote_char:
                                if "'" in v and '"' not in v:
                                    quote_char = '"'
                                elif '"' in v and "'" not in v:
                                    quote_char = "'"
                            if quote_char == "'":
                                v = v.replace("'", "&#39;")
                            else:
                                v = v.replace('"', "&quot;")
                            yield self.encodeStrict(quote_char)
                            yield self.encode(v)
                            yield self.encodeStrict(quote_char)
                        else:
                            yield self.encode(v)
                if name in voidElements and self.use_trailing_solidus:
                    if self.space_before_trailing_solidus:
                        yield self.encodeStrict(" /")
                    else:
                        yield self.encodeStrict("/")
                yield self.encode(">")

            elif type == "EndTag":
                name = token["name"]
                if name in rcdataElements:
                    in_cdata = False
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                yield self.encodeStrict("</%s>" % name)

            elif type == "Comment":
                data = token["data"]
                if data.find("--") >= 0:
                    self.serializeError("Comment contains --")
                yield self.encodeStrict("<!--%s-->" % token["data"])

            elif type == "Entity":
                name = token["name"]
                key = name + ";"
                if key not in entities:
                    self.serializeError("Entity %s not recognized" % name)
                if self.resolve_entities and key not in xmlEntities:
                    data = entities[key]
                else:
                    data = "&%s;" % name
                yield self.encodeStrict(data)

            else:
                self.serializeError(token["data"])

    def render(self, treewalker, encoding=None):
        """Serialize the token stream to a single bytes/text object."""
        if encoding:
            return b"".join(list(self.serialize(treewalker, encoding)))
        else:
            return "".join(list(self.serialize(treewalker)))

    def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
        # XXX The idea is to make data mandatory.
        self.errors.append(data)
        if self.strict:
            raise SerializeError


# BUGFIX: this was previously declared with ``def``, which made
# SerializeError a plain function; ``raise SerializeError`` in strict
# mode then raised TypeError instead of the intended exception.
class SerializeError(Exception):
    """Error in serialized tree"""
    pass
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5510_1
crossvul-python_data_good_1644_8
"""Tornado handlers for security logging.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from tornado import gen, web from ...base.handlers import APIHandler, json_errors from . import csp_report_uri class CSPReportHandler(APIHandler): '''Accepts a content security policy violation report''' @web.authenticated @json_errors def post(self): '''Log a content security policy violation report''' csp_report = self.get_json_body() self.log.warn("Content security violation: %s", self.request.body.decode('utf8', 'replace')) default_handlers = [ (csp_report_uri, CSPReportHandler) ]
./CrossVul/dataset_final_sorted/CWE-79/py/good_1644_8
crossvul-python_data_bad_1644_10
import json

from tornado import web, gen

from ..base.handlers import IPythonHandler, json_errors
from ..utils import url_path_join


class TerminalRootHandler(IPythonHandler):
    """Collection endpoint: list existing terminals, create new ones."""

    @web.authenticated
    @json_errors
    def get(self):
        """GET /terminals returns the list of running terminal names."""
        manager = self.terminal_manager
        listing = [{'name': term_name} for term_name in manager.terminals]
        self.finish(json.dumps(listing))

    @web.authenticated
    @json_errors
    def post(self):
        """POST /terminals creates a new terminal and redirects to it"""
        term_name, _ = self.terminal_manager.new_named_terminal()
        self.finish(json.dumps({'name': term_name}))


class TerminalHandler(IPythonHandler):
    """Single-terminal endpoint: inspect or terminate one terminal."""

    SUPPORTED_METHODS = ('GET', 'DELETE')

    @web.authenticated
    @json_errors
    def get(self, name):
        """GET /terminals/<name> returns the terminal's model, or 404."""
        manager = self.terminal_manager
        if name not in manager.terminals:
            raise web.HTTPError(404, "Terminal not found: %r" % name)
        self.finish(json.dumps({'name': name}))

    @web.authenticated
    @json_errors
    @gen.coroutine
    def delete(self, name):
        """DELETE /terminals/<name> force-terminates the terminal, or 404."""
        manager = self.terminal_manager
        if name not in manager.terminals:
            raise web.HTTPError(404, "Terminal not found: %r" % name)
        yield manager.terminate(name, force=True)
        self.set_status(204)
        self.finish()
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1644_10
crossvul-python_data_bad_3890_0
"""Comparison helpers that render the difference between two versions of a
model instance (e.g. two page revisions) as HTML for the admin diff view."""
import difflib

from bs4 import BeautifulSoup
from django.utils.encoding import force_str
from django.utils.html import escape, format_html, format_html_join
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _

from wagtail.core import blocks


class FieldComparison:
    # Flags consumed by templates to tell plain fields apart from child relations.
    is_field = True
    is_child_relation = False

    def __init__(self, field, obj_a, obj_b):
        # obj_a is the "old" version, obj_b the "new" one.
        self.field = field
        self.val_a = field.value_from_object(obj_a)
        self.val_b = field.value_from_object(obj_b)

    def field_label(self):
        """
        Returns a label for this field to be displayed to the user
        """
        verbose_name = getattr(self.field, 'verbose_name', None)

        if verbose_name is None:
            # Relations don't have a verbose_name
            verbose_name = self.field.name.replace('_', ' ')

        return capfirst(verbose_name)

    def htmldiff(self):
        """Render the change as HTML: old value struck out, new value added."""
        if self.val_a != self.val_b:
            return TextDiff([('deletion', self.val_a), ('addition', self.val_b)]).to_html()
        else:
            return escape(self.val_a)

    def has_changed(self):
        """
        Returns True if the field has changed
        """
        return self.val_a != self.val_b


class TextFieldComparison(FieldComparison):
    # Plain text fields get a word-level diff instead of whole-value replace.
    def htmldiff(self):
        return diff_text(self.val_a, self.val_b).to_html()


class RichTextFieldComparison(TextFieldComparison):
    # Rich text is reduced to its plain-text content before diffing.
    def htmldiff(self):
        return diff_text(
            BeautifulSoup(force_str(self.val_a), 'html5lib').getText(),
            BeautifulSoup(force_str(self.val_b), 'html5lib').getText()
        ).to_html()


def get_comparison_class_for_block(block):
    """Pick the BlockComparison subclass appropriate for a stream block type."""
    if hasattr(block, 'get_comparison_class'):
        # Block defines its own comparison class; defer to it.
        return block.get_comparison_class()
    elif isinstance(block, blocks.CharBlock):
        return CharBlockComparison
    elif isinstance(block, blocks.RichTextBlock):
        return RichTextBlockComparison
    elif isinstance(block, blocks.StructBlock):
        return StructBlockComparison
    else:
        # As all stream field blocks have a HTML representation, fall back to diffing that.
        return RichTextBlockComparison


class BlockComparison:
    """Base comparison for one block inside a StreamField value."""

    def __init__(self, block, exists_a, exists_b, val_a, val_b):
        # exists_a/exists_b flag whether the block is present in each version.
        self.block = block
        self.exists_a = exists_a
        self.exists_b = exists_b
        self.val_a = val_a
        self.val_b = val_b

    def is_new(self):
        return self.exists_b and not self.exists_a

    def is_deleted(self):
        return self.exists_a and not self.exists_b

    def has_changed(self):
        return self.val_a != self.val_b

    def htmlvalue(self, val):
        # NOTE(review): render_basic() output is later wrapped in mark_safe()
        # by StreamBlockComparison.htmldiff(). This assumes every block type
        # renders user data pre-escaped — TODO confirm; if any block can emit
        # raw user input here this is an XSS sink (CWE-79).
        return self.block.render_basic(val)


class CharBlockComparison(BlockComparison):
    def htmldiff(self):
        return diff_text(
            force_str(self.val_a),
            force_str(self.val_b)
        ).to_html()


class RichTextBlockComparison(BlockComparison):
    def htmldiff(self):
        return diff_text(
            BeautifulSoup(force_str(self.val_a), 'html5lib').getText(),
            BeautifulSoup(force_str(self.val_b), 'html5lib').getText()
        ).to_html()


class StructBlockComparison(BlockComparison):
    def htmlvalue(self, val):
        # Render each child block as a <dt>/<dd> pair; format_html escapes
        # the label, the value comes from the child comparison's htmlvalue().
        htmlvalues = []
        for name, block in self.block.child_blocks.items():
            label = self.block.child_blocks[name].label
            comparison_class = get_comparison_class_for_block(block)
            htmlvalues.append((label, comparison_class(block, True, True, val[name], val[name]).htmlvalue(val[name])))

        return format_html('<dl>\n{}\n</dl>', format_html_join(
            '\n', ' <dt>{}</dt>\n <dd>{}</dd>', htmlvalues))

    def htmldiff(self):
        # Same layout as htmlvalue(), but each child shows its diff instead.
        htmldiffs = []
        for name, block in self.block.child_blocks.items():
            label = self.block.child_blocks[name].label
            comparison_class = get_comparison_class_for_block(block)
            htmldiffs.append((label, comparison_class(block, self.exists_a, self.exists_b, self.val_a[name], self.val_b[name]).htmldiff()))

        return format_html('<dl>\n{}\n</dl>', format_html_join(
            '\n', ' <dt>{}</dt>\n <dd>{}</dd>', htmldiffs))


class StreamBlockComparison(BlockComparison):
    def get_block_comparisons(self):
        """Build one BlockComparison per block, matched across versions by
        the block's UUID; deleted blocks are re-inserted near their old spot."""
        a_blocks = list(self.val_a) or []
        b_blocks = list(self.val_b) or []

        a_blocks_by_id = {block.id: block for block in a_blocks}
        b_blocks_by_id = {block.id: block for block in b_blocks}

        deleted_ids = a_blocks_by_id.keys() - b_blocks_by_id.keys()

        comparisons = []
        for block in b_blocks:
            comparison_class = get_comparison_class_for_block(block.block)
            if block.id in a_blocks_by_id:
                # Changed/existing block
                comparisons.append(comparison_class(block.block, True, True, a_blocks_by_id[block.id].value, block.value))
            else:
                # New block
                comparisons.append(comparison_class(block.block, False, True, None, block.value))

        # Insert deleted blocks at the index where they used to be
        deleted_block_indices = [(block, i) for i, block in enumerate(a_blocks) if block.id in deleted_ids]
        for block, index in deleted_block_indices:
            comparison_class = get_comparison_class_for_block(block.block)
            comparison_to_insert = comparison_class(block.block, True, False, block.value, None)

            # Insert the block back in where it was before it was deleted.
            # Note: we need to account for new blocks when finding the position.
            current_index = 0
            block_inserted = False
            for i, comparison in enumerate(comparisons):
                if comparison.is_new():
                    # New blocks don't advance the "old" index.
                    continue

                if current_index == index:
                    comparisons.insert(i, comparison_to_insert)
                    block_inserted = True
                    break

                current_index += 1

            # Deleted block was from the end
            if not block_inserted:
                comparisons.append(comparison_to_insert)

        return comparisons

    def htmldiff(self):
        comparisons_html = []

        for comparison in self.get_block_comparisons():
            classes = ['comparison__child-object']
            if comparison.is_new():
                classes.append('addition')
                block_rendered = comparison.htmlvalue(comparison.val_b)
            elif comparison.is_deleted():
                classes.append('deletion')
                block_rendered = comparison.htmlvalue(comparison.val_a)
            elif comparison.has_changed():
                block_rendered = comparison.htmldiff()
            else:
                block_rendered = comparison.htmlvalue(comparison.val_a)

            classes = ' '.join(classes)
            # NOTE(review): block_rendered is interpolated without escaping
            # and the joined result is mark_safe()'d below. Safe only if
            # htmlvalue()/htmldiff() always return escaped HTML — verify.
            comparisons_html.append('<div class="{0}">{1}</div>'.format(classes, block_rendered))

        return mark_safe('\n'.join(comparisons_html))


class StreamFieldComparison(FieldComparison):
    def has_block_ids(self, val):
        # Empty values are treated as "has ids" so an empty side doesn't
        # force the plain-text fallback path.
        if not val:
            return True

        return bool(val[0].id)

    def htmldiff(self):
        # Our method for diffing streamfields relies on the blocks in both revisions having UUIDs.
        # But as UUIDs were added in Wagtail 1.11 we can't compare revisions that were created before
        # that Wagtail version.
        if self.has_block_ids(self.val_a) and self.has_block_ids(self.val_b):
            return StreamBlockComparison(self.field.stream_block, True, True, self.val_a, self.val_b).htmldiff()
        else:
            # Fall back to diffing the HTML representation
            return diff_text(
                BeautifulSoup(force_str(self.val_a), 'html5lib').getText(),
                BeautifulSoup(force_str(self.val_b), 'html5lib').getText()
            ).to_html()


class ChoiceFieldComparison(FieldComparison):
    def htmldiff(self):
        # Display the human-readable choice labels rather than raw values.
        val_a = force_str(dict(self.field.flatchoices).get(self.val_a, self.val_a), strings_only=True)
        val_b = force_str(dict(self.field.flatchoices).get(self.val_b, self.val_b), strings_only=True)

        if self.val_a != self.val_b:
            diffs = []
            if val_a:
                diffs += [('deletion', val_a)]
            if val_b:
                diffs += [('addition', val_b)]
            return TextDiff(diffs).to_html()
        else:
            return escape(val_a)


class M2MFieldComparison(FieldComparison):
    """Comparison for many-to-many relations, diffed as ordered item lists."""

    def get_items(self):
        return list(self.val_a), list(self.val_b)

    def get_item_display(self, item):
        return str(item)

    def htmldiff(self):
        # Get tags
        items_a, items_b = self.get_items()

        # Calculate changes
        sm = difflib.SequenceMatcher(0, items_a, items_b)
        changes = []
        for op, i1, i2, j1, j2 in sm.get_opcodes():
            if op == 'replace':
                for item in items_a[i1:i2]:
                    changes.append(('deletion', self.get_item_display(item)))
                for item in items_b[j1:j2]:
                    changes.append(('addition', self.get_item_display(item)))
            elif op == 'delete':
                for item in items_a[i1:i2]:
                    changes.append(('deletion', self.get_item_display(item)))
            elif op == 'insert':
                for item in items_b[j1:j2]:
                    changes.append(('addition', self.get_item_display(item)))
            elif op == 'equal':
                for item in items_a[i1:i2]:
                    changes.append(('equal', self.get_item_display(item)))

        # Convert changelist to HTML
        return TextDiff(changes, separator=", ").to_html()

    def has_changed(self):
        items_a, items_b = self.get_items()
        return items_a != items_b


class TagsFieldComparison(M2MFieldComparison):
    # Tags are displayed by slug rather than str().
    def get_item_display(self, tag):
        return tag.slug


class ForeignObjectComparison(FieldComparison):
    """Comparison for FK fields: the stored values are PKs, so look the
    related objects up and compare their string representations."""

    def get_objects(self):
        model = self.field.related_model
        obj_a = model.objects.filter(pk=self.val_a).first()
        obj_b = model.objects.filter(pk=self.val_b).first()
        return obj_a, obj_b

    def htmldiff(self):
        obj_a, obj_b = self.get_objects()

        if obj_a != obj_b:
            if obj_a and obj_b:
                # Changed
                return TextDiff([('deletion', force_str(obj_a)), ('addition', force_str(obj_b))]).to_html()
            elif obj_b:
                # Added
                return TextDiff([('addition', force_str(obj_b))]).to_html()
            elif obj_a:
                # Removed
                return TextDiff([('deletion', force_str(obj_a))]).to_html()
        else:
            if obj_a:
                return escape(force_str(obj_a))
            else:
                return mark_safe(_("None"))


class ChildRelationComparison:
    # Flags consumed by templates (mirrors FieldComparison).
    is_field = False
    is_child_relation = True

    def __init__(self, field, field_comparisons, obj_a, obj_b):
        self.field = field
        self.field_comparisons = field_comparisons
        self.val_a = getattr(obj_a, field.related_name)
        self.val_b = getattr(obj_b, field.related_name)

    def field_label(self):
        """
        Returns a label for this field to be displayed to the user
        """
        verbose_name = getattr(self.field, 'verbose_name', None)

        if verbose_name is None:
            # Relations don't have a verbose_name
            verbose_name = self.field.name.replace('_', ' ')

        return capfirst(verbose_name)

    def get_mapping(self, objs_a, objs_b):
        """
        This bit of code attempts to match the objects in the A revision with
        their counterpart in the B revision.

        A match is firstly attempted by PK (where a matching ID indicates
        they're the same). We compare remaining the objects by their field
        data; the objects with the fewest fields changed are matched until
        there are no more possible matches left.

        This returns 4 values:
         - map_forwards => a mapping of object indexes from the B version to the A version
         - map_backwards => a mapping of object indexes from the A version to the B version
         - added => a list of indices for objects that didn't exist in the B version
         - deleted => a list of indices for objects that didn't exist in the A version

        Note the indices are 0-based array indices indicating the location of
        the object in either the objs_a or objs_b arrays.

        For example:

        objs_a => A, B, C, D
        objs_b => B, C, D, E

        Will return the following:

        map_forwards = {
            1: 0,  # B (objs_a: objs_b)
            2: 1,  # C (objs_a: objs_b)
            3: 2,  # D (objs_a: objs_b)
        }
        map_backwards = {
            0: 1,  # B (objs_b: objs_a)
            1: 2,  # C (objs_b: objs_a)
            2: 3,  # D (objs_b: objs_a)
        }
        added = [4]  # D in objs_b
        deleted = [0]  # A in objs_a
        """
        map_forwards = {}
        map_backwards = {}
        added = []
        deleted = []

        # Match child objects on PK (ID)
        for a_idx, a_child in enumerate(objs_a):
            for b_idx, b_child in enumerate(objs_b):
                if b_idx in map_backwards:
                    continue

                if a_child.pk is not None and b_child.pk is not None and a_child.pk == b_child.pk:
                    map_forwards[a_idx] = b_idx
                    map_backwards[b_idx] = a_idx

        # Now try to match them by data
        matches = []
        for a_idx, a_child in enumerate(objs_a):
            if a_idx not in map_forwards:
                for b_idx, b_child in enumerate(objs_b):
                    if b_idx not in map_backwards:
                        # If they both have a PK (ID) that is different, they can't be the same child object
                        if a_child.pk and b_child.pk and a_child.pk != b_child.pk:
                            continue

                        comparison = self.get_child_comparison(objs_a[a_idx], objs_b[b_idx])
                        num_differences = comparison.get_num_differences()

                        matches.append((a_idx, b_idx, num_differences))

        # Objects with the least differences will be matched first. So only the best possible matches are made
        matches.sort(key=lambda match: match[2])
        for a_idx, b_idx, num_differences in matches:
            # Make sure both objects were not matched previously
            if a_idx in map_forwards or b_idx in map_backwards:
                continue

            # Match!
            map_forwards[a_idx] = b_idx
            map_backwards[b_idx] = a_idx

        # Mark unmapped objects as added/deleted
        for a_idx, a_child in enumerate(objs_a):
            if a_idx not in map_forwards:
                deleted.append(a_idx)

        for b_idx, b_child in enumerate(objs_b):
            if b_idx not in map_backwards:
                added.append(b_idx)

        return map_forwards, map_backwards, added, deleted

    def get_child_comparison(self, obj_a, obj_b):
        return ChildObjectComparison(self.field.related_model, self.field_comparisons, obj_a, obj_b)

    def get_child_comparisons(self):
        """
        Returns a list of ChildObjectComparison objects. Representing all
        child objects that existed in either version.

        They are returned in the order they appear in the B version with
        deletions appended at the end.

        All child objects are returned, regardless of whether they were
        actually changed.
        """
        objs_a = list(self.val_a.all())
        objs_b = list(self.val_b.all())

        map_forwards, map_backwards, added, deleted = self.get_mapping(objs_a, objs_b)
        objs_a = dict(enumerate(objs_a))
        objs_b = dict(enumerate(objs_b))

        comparisons = []
        for b_idx, b_child in objs_b.items():
            if b_idx in added:
                comparisons.append(self.get_child_comparison(None, b_child))
            else:
                comparisons.append(self.get_child_comparison(objs_a[map_backwards[b_idx]], b_child))

        for a_idx, a_child in objs_a.items():
            if a_idx in deleted:
                comparisons.append(self.get_child_comparison(a_child, None))

        return comparisons

    def has_changed(self):
        """
        Returns true if any changes were made to any of the child objects.
        This includes adding, deleting and reordering.
        """
        objs_a = list(self.val_a.all())
        objs_b = list(self.val_b.all())

        map_forwards, map_backwards, added, deleted = self.get_mapping(objs_a, objs_b)

        if added or deleted:
            return True

        for a_idx, b_idx in map_forwards.items():
            comparison = self.get_child_comparison(objs_a[a_idx], objs_b[b_idx])
            if comparison.has_changed():
                return True

        return False


class ChildObjectComparison:
    """Compares one child object (inline panel row) across two versions."""

    def __init__(self, model, field_comparisons, obj_a, obj_b):
        self.model = model
        self.field_comparisons = field_comparisons
        self.obj_a = obj_a
        self.obj_b = obj_b

    def is_addition(self):
        """
        Returns True if this child object was created since obj_a
        """
        return self.obj_b and not self.obj_a

    def is_deletion(self):
        """
        Returns True if this child object was deleted in obj_b
        """
        return self.obj_a and not self.obj_b

    def get_position_change(self):
        """
        Returns the change in position as an integer. Positive if the object
        was moved down, negative if it moved up.

        For example: '3' indicates the object moved down three spaces. '-1'
        indicates the object moved up one space.
        """
        # Returns None implicitly for additions/deletions.
        if not self.is_addition() and not self.is_deletion():
            sort_a = getattr(self.obj_a, 'sort_order', 0) or 0
            sort_b = getattr(self.obj_b, 'sort_order', 0) or 0
            return sort_b - sort_a

    def get_field_comparisons(self):
        """
        Returns a list of comparisons for all the fields in this object.
        Fields that haven't changed are included as well.
        """
        comparisons = []

        if self.is_addition() or self.is_deletion():
            # Display the fields without diff as one of the versions are missing
            obj = self.obj_a or self.obj_b

            for field_comparison in self.field_comparisons:
                comparisons.append(field_comparison(obj, obj))
        else:
            for field_comparison in self.field_comparisons:
                comparisons.append(field_comparison(self.obj_a, self.obj_b))

        return comparisons

    def has_changed(self):
        for comparison in self.get_field_comparisons():
            if comparison.has_changed():
                return True

        return False

    def get_num_differences(self):
        """
        Returns the number of fields that differ between the two objects.
        """
        num_differences = 0

        for comparison in self.get_field_comparisons():
            if comparison.has_changed():
                num_differences += 1

        return num_differences


class TextDiff:
    """A list of (change_type, text) pairs renderable as HTML."""

    def __init__(self, changes, separator=""):
        self.changes = changes
        self.separator = separator

    def to_html(self, tag='span', addition_class='addition', deletion_class='deletion'):
        # Values are escaped here, so plain-text diffs are XSS-safe.
        html = []

        for change_type, value in self.changes:
            if change_type == 'equal':
                html.append(escape(value))
            elif change_type == 'addition':
                html.append('<{tag} class="{classname}">{value}</{tag}>'.format(
                    tag=tag,
                    classname=addition_class,
                    value=escape(value)
                ))
            elif change_type == 'deletion':
                html.append('<{tag} class="{classname}">{value}</{tag}>'.format(
                    tag=tag,
                    classname=deletion_class,
                    value=escape(value)
                ))

        return mark_safe(self.separator.join(html))


def diff_text(a, b):
    """
    Performs a diffing algorithm on two pieces of text. Returns a string of
    HTML containing the content of both texts with <span> tags inserted
    indicating where the differences are.
    """
    def tokenise(text):
        """
        Tokenises a string by spliting it into individual characters and
        grouping the alphanumeric ones together.

        This means that punctuation, whitespace, CJK characters, etc become
        separate tokens and words/numbers are merged together to form bigger
        tokens.

        This makes the output of the diff easier to read as words are not
        broken up.
        """
        tokens = []
        current_token = ""

        for c in text or "":
            if c.isalnum():
                current_token += c
            else:
                if current_token:
                    tokens.append(current_token)
                    current_token = ""

                tokens.append(c)

        if current_token:
            tokens.append(current_token)

        return tokens

    a_tok = tokenise(a)
    b_tok = tokenise(b)

    # Tokens of length <= 4 are treated as junk to speed up matching.
    sm = difflib.SequenceMatcher(lambda t: len(t) <= 4, a_tok, b_tok)

    changes = []
    for op, i1, i2, j1, j2 in sm.get_opcodes():
        if op == 'replace':
            for token in a_tok[i1:i2]:
                changes.append(('deletion', token))
            for token in b_tok[j1:j2]:
                changes.append(('addition', token))
        elif op == 'delete':
            for token in a_tok[i1:i2]:
                changes.append(('deletion', token))
        elif op == 'insert':
            for token in b_tok[j1:j2]:
                changes.append(('addition', token))
        elif op == 'equal':
            for token in a_tok[i1:i2]:
                changes.append(('equal', token))

    # Merge ajacent changes which have the same type. This just cleans up the HTML a bit
    merged_changes = []
    current_value = []
    current_change_type = None
    for change_type, value in changes:
        if change_type != current_change_type:
            if current_change_type is not None:
                merged_changes.append((current_change_type, ''.join(current_value)))
                current_value = []

            current_change_type = change_type

        current_value.append(value)

    if current_value:
        merged_changes.append((current_change_type, ''.join(current_value)))

    return TextDiff(merged_changes)
./CrossVul/dataset_final_sorted/CWE-79/py/bad_3890_0
crossvul-python_data_good_5525_1
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Zope-specific Python Expression Handler

Handler for Python expressions that uses the RestrictedPython package.

$Id$
"""
# NOTE: this is Python 2 code (uses the `unicode` builtin).

from AccessControl import safe_builtins
from AccessControl.ZopeGuards import guarded_getattr, get_safe_globals
from RestrictedPython import compile_restricted_eval
from zope.tales.tales import CompilerError
from zope.tales.pythonexpr import PythonExpr


class PythonExpr(PythonExpr):
    """TALES `python:` expression compiled and run under RestrictedPython.

    Subclasses (and shadows) zope.tales' PythonExpr, replacing the plain
    compile/eval with the restricted-compilation toolchain so template
    authors only get guarded attribute access and safe builtins.
    """

    # Globals available to every compiled expression: the guarded
    # environment from AccessControl plus the restricted getattr hook.
    _globals = get_safe_globals()
    _globals['_getattr_'] = guarded_getattr
    _globals['__debug__'] = __debug__

    def __init__(self, name, expr, engine):
        # Expressions must be single-line for the TALES engine; newlines
        # are folded into spaces.
        self.text = self.expr = text = expr.strip().replace('\n', ' ')
        # Unicode expression are not handled properly by RestrictedPython
        # We convert the expression to UTF-8 (ajung)
        if isinstance(text, unicode):
            text = text.encode('utf-8')

        code, err, warn, use = compile_restricted_eval(text, self.__class__.__name__)
        if err:
            # Surface restricted-compile errors through the engine's own
            # CompilerError type.
            raise engine.getCompilerError()('Python expression error:\n%s' % '\n'.join(err))

        # Names the expression reads; used to bind TALES context variables.
        self._varnames = use.keys()
        self._code = code

    def __call__(self, econtext):
        __traceback_info__ = self.text
        # Bind only the names the expression actually uses, then overlay
        # the restricted globals so they cannot be shadowed away.
        vars = self._bind_used_names(econtext, {})
        vars.update(self._globals)
        # eval() here runs *restricted-compiled* bytecode, not raw source.
        return eval(self._code, vars, {})


class _SecureModuleImporter:
    """Mapping-style importer exposing modules via the guarded __import__."""

    __allow_access_to_unprotected_subobjects__ = True

    def __getitem__(self, module):
        # __import__ returns the top-level package; walk down to the
        # requested dotted submodule.
        mod = safe_builtins['__import__'](module)
        path = module.split('.')
        for name in path[1:]:
            mod = getattr(mod, name)
        return mod


from DocumentTemplate.DT_Util import TemplateDict, InstanceDict
from AccessControl.DTML import RestrictedDTML


class Rtd(RestrictedDTML, TemplateDict):
    """TemplateDict with restricted DTML security checks applied."""
    this = None


def call_with_ns(f, ns, arg=1):
    """Call *f* with a DTML-style namespace stack built from the TALES
    namespace *ns*; `arg=2` callables receive (None, td) instead of (td)."""
    td = Rtd()
    # prefer 'context' to 'here'; fall back to 'None'
    this = ns.get('context', ns.get('here'))
    td.this = this

    request = ns.get('request', {})
    if hasattr(request, 'taintWrapper'):
        # Wrap the request so tainted (unescaped user) values stay flagged.
        request = request.taintWrapper()
    td._push(request)
    td._push(InstanceDict(td.this, td))
    td._push(ns)
    try:
        if arg == 2:
            return f(None, td)
        else:
            return f(td)
    finally:
        # Always unwind the three frames pushed above.
        td._pop(3)
./CrossVul/dataset_final_sorted/CWE-79/py/good_5525_1
crossvul-python_data_good_4953_0
"""HTTP helper utilities (URL quoting, HTTP dates, base36, ETags, and
redirect-safety checks). Python 2/3 compatible via django.utils.six."""
from __future__ import unicode_literals

import base64
import calendar
import datetime
import re
import sys
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate

from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.functional import keep_lazy_text
from django.utils.six.moves.urllib.parse import (
    quote, quote_plus, unquote, unquote_plus, urlencode as original_urlencode,
    urlparse,
)

# Matches an (optionally weak, W/-prefixed) quoted ETag per RFC 2616.
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')

MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))

# RFC 3986 delimiter sets (kept as native str on both Python 2 and 3).
RFC3986_GENDELIMS = str(":/?#[]@")
RFC3986_SUBDELIMS = str("!$&'()*+,;=")

PROTOCOL_TO_PORT = {
    'http': 80,
    'https': 443,
}


@keep_lazy_text
def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned
    string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_text(quote(force_str(url), force_str(safe)))


@keep_lazy_text
def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_text(quote_plus(force_str(url), force_str(safe)))


@keep_lazy_text
def urlunquote(quoted_url):
    """
    A wrapper for Python's urllib.unquote() function that can operate on
    the result of django.utils.http.urlquote().
    """
    return force_text(unquote(force_str(quoted_url)))


@keep_lazy_text
def urlunquote_plus(quoted_url):
    """
    A wrapper for Python's urllib.unquote_plus() function that can operate on
    the result of django.utils.http.urlquote_plus().
    """
    return force_text(unquote_plus(force_str(quoted_url)))


def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings
    and then encoded as per normal.
    """
    if isinstance(query, MultiValueDict):
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    return original_urlencode(
        [(force_str(k),
          [force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
         for k, v in query],
        doseq)


def cookie_date(epoch_seconds=None):
    """
    Formats the time to ensure compatibility with Netscape's cookie standard.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    # Re-slice the RFC 1123 date into the dash-separated Netscape format.
    rfcdate = formatdate(epoch_seconds)
    return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])


def http_date(epoch_seconds=None):
    """
    Formats the time to match the RFC1123 date format as specified by HTTP
    RFC2616 section 3.3.1.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    return formatdate(epoch_seconds, usegmt=True)


def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC2616 section 3.3.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Returns an integer expressed in seconds since the epoch, in UTC.
    """
    # emails.Util.parsedate does the job for RFC1123 dates; unfortunately
    # RFC2616 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # Two-digit (RFC850) years: 00-69 => 2000s, 70-99 => 1900s.
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        min = int(m.group('min'))
        sec = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, min, sec)
        return calendar.timegm(result.utctimetuple())
    except Exception:
        six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])


def parse_http_date_safe(date):
    """
    Same as parse_http_date, but returns None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        pass


# Base 36 functions: useful for generating compact URLs


def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is long than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value will fit into an int to avoid
    # returning a long (#15067). The long type was removed in Python 3.
    if six.PY2 and value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value


def int_to_base36(i):
    """
    Converts an integer to a base36 string
    """
    char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if six.PY2:
        if not isinstance(i, six.integer_types):
            raise TypeError("Non-integer base36 conversion input.")
        if i > sys.maxint:
            raise ValueError("Base36 conversion input too large.")
    if i < 36:
        return char_set[i]
    b36 = ''
    while i != 0:
        i, n = divmod(i, 36)
        b36 = char_set[n] + b36
    return b36


def urlsafe_base64_encode(s):
    """
    Encodes a bytestring in base64 for use in URLs, stripping any trailing
    equal signs.
    """
    return base64.urlsafe_b64encode(s).rstrip(b'\n=')


def urlsafe_base64_decode(s):
    """
    Decodes a base64 encoded string, adding back any trailing equal signs that
    might have been stripped.
    """
    s = force_bytes(s)
    try:
        # Re-pad to a multiple of 4 before decoding.
        return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
    except (LookupError, BinasciiError) as e:
        raise ValueError(e)


def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \\<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
    return etags


def quote_etag(etag):
    """
    Wraps a string in double quotes escaping contents as necessary.
    """
    return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')


def unquote_etag(etag):
    """
    Unquote an ETag string; i.e. revert quote_etag().
    """
    return etag.strip('"').replace('\\"', '"').replace('\\\\', '\\') if etag else etag


def is_same_domain(host, pattern):
    """
    Return ``True`` if the host is either an exact match or a match
    to the wildcard pattern.

    Any pattern beginning with a period matches a domain and all of its
    subdomains. (e.g. ``.example.com`` matches ``example.com`` and
    ``foo.example.com``). Anything else is an exact string match.
    """
    if not pattern:
        return False

    pattern = pattern.lower()
    return (
        pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
        pattern == host
    )


def is_safe_url(url, host=None):
    """
    Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
    a different host and uses a safe scheme).

    Always returns ``False`` on an empty url.
    """
    if url is not None:
        url = url.strip()
    if not url:
        return False
    # Chrome treats \ completely as / in paths but it could be part of some
    # basic auth credentials so we need to check both URLs.
    return _is_safe_url(url, host) and _is_safe_url(url.replace('\\', '/'), host)


def _is_safe_url(url, host):
    # Chrome considers any URL with more than two slashes to be absolute, but
    # urlparse is not so flexible. Treat any url with three slashes as unsafe.
    if url.startswith('///'):
        return False
    url_info = urlparse(url)
    # Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but, a path component. However,
    # Chrome will still consider example.com to be the hostname, so we must not
    # allow this syntax.
    if not url_info.netloc and url_info.scheme:
        return False
    # Forbid URLs that start with control characters. Some browsers (like
    # Chrome) ignore quite a few control characters at the start of a
    # URL and might consider the URL as scheme relative.
    if unicodedata.category(url[0])[0] == 'C':
        return False
    return ((not url_info.netloc or url_info.netloc == host) and
            (not url_info.scheme or url_info.scheme in ['http', 'https']))
crossvul-python_data_bad_4097_3
# -*- coding: utf-8 -*- from wagtail.core.models import Page from wagtail.tests.testapp.models import ( FormField, FormFieldWithCustomSubmission, FormPage, FormPageWithCustomSubmission, FormPageWithRedirect, RedirectFormField) def make_form_page(**kwargs): kwargs.setdefault('title', "Contact us") kwargs.setdefault('slug', "contact-us") kwargs.setdefault('to_address', "to@email.com") kwargs.setdefault('from_address', "from@email.com") kwargs.setdefault('subject', "The subject") home_page = Page.objects.get(url_path='/home/') form_page = home_page.add_child(instance=FormPage(**kwargs)) FormField.objects.create( page=form_page, sort_order=1, label="Your email", field_type='email', required=True, ) FormField.objects.create( page=form_page, sort_order=2, label="Your message", field_type='multiline', required=True, ) FormField.objects.create( page=form_page, sort_order=3, label="Your choices", field_type='checkboxes', required=False, choices='foo,bar,baz', ) return form_page def make_form_page_with_custom_submission(**kwargs): kwargs.setdefault('title', "Contact us") kwargs.setdefault('intro', "<p>Boring intro text</p>") kwargs.setdefault('thank_you_text', "<p>Thank you for your patience!</p>") kwargs.setdefault('slug', "contact-us") kwargs.setdefault('to_address', "to@email.com") kwargs.setdefault('from_address', "from@email.com") kwargs.setdefault('subject', "The subject") home_page = Page.objects.get(url_path='/home/') form_page = home_page.add_child(instance=FormPageWithCustomSubmission(**kwargs)) FormFieldWithCustomSubmission.objects.create( page=form_page, sort_order=1, label="Your email", field_type='email', required=True, ) FormFieldWithCustomSubmission.objects.create( page=form_page, sort_order=2, label="Your message", field_type='multiline', required=True, ) FormFieldWithCustomSubmission.objects.create( page=form_page, sort_order=3, label="Your choices", field_type='checkboxes', required=False, choices='foo,bar,baz', ) return form_page def 
make_form_page_with_redirect(**kwargs): kwargs.setdefault('title', "Contact us") kwargs.setdefault('slug', "contact-us") kwargs.setdefault('to_address', "to@email.com") kwargs.setdefault('from_address', "from@email.com") kwargs.setdefault('subject', "The subject") home_page = Page.objects.get(url_path='/home/') kwargs.setdefault('thank_you_redirect_page', home_page) form_page = home_page.add_child(instance=FormPageWithRedirect(**kwargs)) # form_page.thank_you_redirect_page = home_page RedirectFormField.objects.create( page=form_page, sort_order=1, label="Your email", field_type='email', required=True, ) RedirectFormField.objects.create( page=form_page, sort_order=2, label="Your message", field_type='multiline', required=True, ) RedirectFormField.objects.create( page=form_page, sort_order=3, label="Your choices", field_type='checkboxes', required=False, choices='foo,bar,baz', ) return form_page def make_types_test_form_page(**kwargs): kwargs.setdefault('title', "Contact us") kwargs.setdefault('slug', "contact-us") kwargs.setdefault('to_address', "to@email.com") kwargs.setdefault('from_address', "from@email.com") kwargs.setdefault('subject', "The subject") home_page = Page.objects.get(url_path='/home/') form_page = home_page.add_child(instance=FormPage(**kwargs)) FormField.objects.create( page=form_page, sort_order=1, label="Single line text", field_type='singleline', required=False, ) FormField.objects.create( page=form_page, sort_order=2, label="Multiline", field_type='multiline', required=False, ) FormField.objects.create( page=form_page, sort_order=3, label="Email", field_type='email', required=False, ) FormField.objects.create( page=form_page, sort_order=4, label="Number", field_type='number', required=False, ) FormField.objects.create( page=form_page, sort_order=5, label="URL", field_type='url', required=False, ) FormField.objects.create( page=form_page, sort_order=6, label="Checkbox", field_type='checkbox', required=False, ) FormField.objects.create( 
page=form_page, sort_order=7, label="Checkboxes", field_type='checkboxes', required=False, choices='foo,bar,baz', ) FormField.objects.create( page=form_page, sort_order=8, label="Drop down", field_type='dropdown', required=False, choices='spam,ham,eggs', ) FormField.objects.create( page=form_page, sort_order=9, label="Multiple select", field_type='multiselect', required=False, choices='qux,quux,quuz,corge', ) FormField.objects.create( page=form_page, sort_order=10, label="Radio buttons", field_type='radio', required=False, choices='wibble,wobble,wubble', ) FormField.objects.create( page=form_page, sort_order=11, label="Date", field_type='date', required=False, ) FormField.objects.create( page=form_page, sort_order=12, label="Datetime", field_type='datetime', required=False, ) return form_page
./CrossVul/dataset_final_sorted/CWE-79/py/bad_4097_3
crossvul-python_data_good_5191_4
# -*- coding: utf-8 -*-
# Model definitions exercised by the Django admin-views test suite.  Most
# classes are intentionally minimal: each one exists to trigger a single
# specific admin behaviour (ordering, proxies, prepopulated fields, generic
# relations, unusual primary keys, ...).  Ticket numbers in docstrings and
# comments refer to entries in the Django issue tracker.
from __future__ import unicode_literals

import datetime
import os
import tempfile
import uuid

from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
    GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible


@python_2_unicode_compatible
class Section(models.Model):
    """
    A simple section that links to articles, to test linking to
    related items in admin views.
    """
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name

    @property
    def name_property(self):
        """
        A property that simply returns the name. Used to test #24461
        """
        return self.name


@python_2_unicode_compatible
class Article(models.Model):
    """
    A simple article to test admin views. Test backwards compatibility.
    """
    title = models.CharField(max_length=100)
    content = models.TextField()
    date = models.DateTimeField()
    section = models.ForeignKey(Section, null=True, blank=True)
    sub_section = models.ForeignKey(Section, null=True, blank=True,
                                    on_delete=models.SET_NULL,
                                    related_name='+')

    def __str__(self):
        return self.title

    def model_year(self):
        # Exposed as a sortable admin column, ordered by the date field.
        return self.date.year
    model_year.admin_order_field = 'date'
    model_year.short_description = ''

    def model_year_reversed(self):
        # Same value, but sorts in descending date order in the changelist.
        return self.date.year
    model_year_reversed.admin_order_field = '-date'
    model_year_reversed.short_description = ''


@python_2_unicode_compatible
class Book(models.Model):
    """
    A simple book that has chapters.
    """
    name = models.CharField(max_length=100, verbose_name='¿Name?')

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Promo(models.Model):
    name = models.CharField(max_length=100, verbose_name='¿Name?')
    book = models.ForeignKey(Book)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Chapter(models.Model):
    title = models.CharField(max_length=100, verbose_name='¿Title?')
    content = models.TextField()
    book = models.ForeignKey(Book)

    def __str__(self):
        return self.title

    class Meta:
        # Use a utf-8 bytestring to ensure it works (see #11710)
        verbose_name = '¿Chapter?'


@python_2_unicode_compatible
class ChapterXtra1(models.Model):
    chap = models.OneToOneField(Chapter, verbose_name='¿Chap?')
    xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')

    def __str__(self):
        return '¿Xtra1: %s' % self.xtra


@python_2_unicode_compatible
class ChapterXtra2(models.Model):
    chap = models.OneToOneField(Chapter, verbose_name='¿Chap?')
    xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')

    def __str__(self):
        return '¿Xtra2: %s' % self.xtra


class RowLevelChangePermissionModel(models.Model):
    name = models.CharField(max_length=100, blank=True)


class CustomArticle(models.Model):
    content = models.TextField()
    date = models.DateTimeField()


@python_2_unicode_compatible
class ModelWithStringPrimaryKey(models.Model):
    # Non-integer primary key, used to exercise admin URL quoting.
    string_pk = models.CharField(max_length=255, primary_key=True)

    def __str__(self):
        return self.string_pk

    def get_absolute_url(self):
        return '/dummy/%s/' % self.string_pk


@python_2_unicode_compatible
class Color(models.Model):
    value = models.CharField(max_length=10)
    warm = models.BooleanField(default=False)

    def __str__(self):
        return self.value


# we replicate Color to register with another ModelAdmin
class Color2(Color):
    class Meta:
        proxy = True


@python_2_unicode_compatible
class Thing(models.Model):
    title = models.CharField(max_length=20)
    color = models.ForeignKey(Color, limit_choices_to={'warm': True})
    pub_date = models.DateField(blank=True, null=True)

    def __str__(self):
        return self.title


@python_2_unicode_compatible
class Actor(models.Model):
    name = models.CharField(max_length=50)
    age = models.IntegerField()
    title = models.CharField(max_length=50, null=True, blank=True)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Inquisition(models.Model):
    expected = models.BooleanField(default=False)
    leader = models.ForeignKey(Actor)
    country = models.CharField(max_length=20)

    def __str__(self):
        return "by %s from %s" % (self.leader, self.country)


@python_2_unicode_compatible
class Sketch(models.Model):
    title = models.CharField(max_length=100)
    # Static (dict) limit_choices_to across a relation.
    inquisition = models.ForeignKey(Inquisition,
                                    limit_choices_to={'leader__name': 'Palin',
                                                      'leader__age': 27,
                                                      'expected': False,
                                                      })
    defendant0 = models.ForeignKey(Actor,
                                   limit_choices_to={'title__isnull': False},
                                   related_name='as_defendant0')
    defendant1 = models.ForeignKey(Actor,
                                   limit_choices_to={'title__isnull': True},
                                   related_name='as_defendant1')

    def __str__(self):
        return self.title


def today_callable_dict():
    # Callable limit_choices_to returning a filter dict (evaluated lazily).
    return {"last_action__gte": datetime.datetime.today()}


def today_callable_q():
    # Callable limit_choices_to returning a Q object (evaluated lazily).
    return models.Q(last_action__gte=datetime.datetime.today())


@python_2_unicode_compatible
class Character(models.Model):
    username = models.CharField(max_length=100)
    last_action = models.DateTimeField()

    def __str__(self):
        return self.username


@python_2_unicode_compatible
class StumpJoke(models.Model):
    variation = models.CharField(max_length=100)
    most_recently_fooled = models.ForeignKey(
        Character,
        limit_choices_to=today_callable_dict,
        related_name="+")
    has_fooled_today = models.ManyToManyField(
        Character,
        limit_choices_to=today_callable_q,
        related_name="+")

    def __str__(self):
        return self.variation


class Fabric(models.Model):
    # Grouped choices (nested tuples) for the admin select widget.
    NG_CHOICES = (
        ('Textured', (
            ('x', 'Horizontal'),
            ('y', 'Vertical'),
        )),
        ('plain', 'Smooth'),
    )
    surface = models.CharField(max_length=20, choices=NG_CHOICES)


@python_2_unicode_compatible
class Person(models.Model):
    GENDER_CHOICES = (
        (1, "Male"),
        (2, "Female"),
    )
    name = models.CharField(max_length=100)
    gender = models.IntegerField(choices=GENDER_CHOICES)
    age = models.IntegerField(default=21)
    alive = models.BooleanField(default=True)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Persona(models.Model):
    """
    A simple persona associated with accounts, to test inlining of
    related accounts which inherit from a common accounts class.
    """
    name = models.CharField(blank=False, max_length=80)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Account(models.Model):
    """
    A simple, generic account encapsulating the information shared by
    all types of accounts.
    """
    username = models.CharField(blank=False, max_length=80)
    persona = models.ForeignKey(Persona, related_name="accounts")
    # Overridden by subclasses to identify the concrete account type.
    servicename = 'generic service'

    def __str__(self):
        return "%s: %s" % (self.servicename, self.username)


class FooAccount(Account):
    """A service-specific account of type Foo."""
    servicename = 'foo'


class BarAccount(Account):
    """A service-specific account of type Bar."""
    servicename = 'bar'


@python_2_unicode_compatible
class Subscriber(models.Model):
    name = models.CharField(blank=False, max_length=80)
    email = models.EmailField(blank=False, max_length=175)

    def __str__(self):
        return "%s (%s)" % (self.name, self.email)


class ExternalSubscriber(Subscriber):
    pass


class OldSubscriber(Subscriber):
    pass


class Media(models.Model):
    name = models.CharField(max_length=60)


class Podcast(Media):
    release_date = models.DateField()

    class Meta:
        ordering = ('release_date',)  # overridden in PodcastAdmin


class Vodcast(Media):
    media = models.OneToOneField(Media, primary_key=True, parent_link=True)
    released = models.BooleanField(default=False)


class Parent(models.Model):
    name = models.CharField(max_length=128)


class Child(models.Model):
    parent = models.ForeignKey(Parent, editable=False)
    name = models.CharField(max_length=30, blank=True)


@python_2_unicode_compatible
class EmptyModel(models.Model):
    def __str__(self):
        return "Primary key = %s" % self.id


# NOTE(review): a fresh temp dir is created at import time for file-upload
# tests; nothing visible here cleans it up afterwards.
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')


class Gallery(models.Model):
    name = models.CharField(max_length=100)


class Picture(models.Model):
    name = models.CharField(max_length=100)
    image = models.FileField(storage=temp_storage, upload_to='test_upload')
    gallery = models.ForeignKey(Gallery, related_name="pictures")


class Language(models.Model):
    iso = models.CharField(max_length=5, primary_key=True)
    name = models.CharField(max_length=50)
    english_name = models.CharField(max_length=50)
    shortlist = models.BooleanField(default=False)

    class Meta:
        ordering = ('iso',)


# a base class for Recommender and Recommendation
class Title(models.Model):
    pass


class TitleTranslation(models.Model):
    title = models.ForeignKey(Title)
    text = models.CharField(max_length=100)


class Recommender(Title):
    pass


class Recommendation(Title):
    recommender = models.ForeignKey(Recommender)


class Collector(models.Model):
    name = models.CharField(max_length=100)


class Widget(models.Model):
    owner = models.ForeignKey(Collector)
    name = models.CharField(max_length=100)


class DooHickey(models.Model):
    # Character primary key.
    code = models.CharField(max_length=10, primary_key=True)
    owner = models.ForeignKey(Collector)
    name = models.CharField(max_length=100)


class Grommet(models.Model):
    # Explicit AutoField primary key.
    code = models.AutoField(primary_key=True)
    owner = models.ForeignKey(Collector)
    name = models.CharField(max_length=100)


class Whatsit(models.Model):
    # Plain integer (non-auto) primary key.
    index = models.IntegerField(primary_key=True)
    owner = models.ForeignKey(Collector)
    name = models.CharField(max_length=100)


class Doodad(models.Model):
    name = models.CharField(max_length=100)


class FancyDoodad(Doodad):
    owner = models.ForeignKey(Collector)
    expensive = models.BooleanField(default=True)


@python_2_unicode_compatible
class Category(models.Model):
    collector = models.ForeignKey(Collector)
    order = models.PositiveIntegerField()

    class Meta:
        ordering = ('order',)

    def __str__(self):
        return '%s:o%s' % (self.id, self.order)


class Link(models.Model):
    # NOTE(review): lambda defaults are not serializable by migrations;
    # kept as-is because the tests depend on this exact declaration.
    posted = models.DateField(
        default=lambda: datetime.date.today() - datetime.timedelta(days=7)
    )
    url = models.URLField()
    post = models.ForeignKey("Post")
    readonly_link_content = models.TextField()


class PrePopulatedPost(models.Model):
    title = models.CharField(max_length=100)
    published = models.BooleanField(default=False)
    slug = models.SlugField()


class PrePopulatedSubPost(models.Model):
    post = models.ForeignKey(PrePopulatedPost)
    subtitle = models.CharField(max_length=100)
    subslug = models.SlugField()


class Post(models.Model):
    title = models.CharField(max_length=100,
                             help_text="Some help text for the title (with unicode ŠĐĆŽćžšđ)")
    content = models.TextField(help_text="Some help text for the content (with unicode ŠĐĆŽćžšđ)")
    readonly_content = models.TextField()
    posted = models.DateField(
        default=datetime.date.today,
        help_text="Some help text for the date (with unicode ŠĐĆŽćžšđ)"
    )
    public = models.NullBooleanField()

    def awesomeness_level(self):
        return "Very awesome."


# Proxy model to test overridden fields attrs on Post model so as not to
# interfere with other tests.
class FieldOverridePost(Post):
    class Meta:
        proxy = True


@python_2_unicode_compatible
class Gadget(models.Model):
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Villain(models.Model):
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name


class SuperVillain(Villain):
    pass


@python_2_unicode_compatible
class FunkyTag(models.Model):
    "Because we all know there's only one real use case for GFKs."
    name = models.CharField(max_length=25)
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Plot(models.Model):
    name = models.CharField(max_length=100)
    team_leader = models.ForeignKey(Villain, related_name='lead_plots')
    contact = models.ForeignKey(Villain, related_name='contact_plots')
    tags = GenericRelation(FunkyTag)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class PlotDetails(models.Model):
    details = models.CharField(max_length=100)
    plot = models.OneToOneField(Plot, null=True, blank=True)

    def __str__(self):
        return self.details


class PlotProxy(Plot):
    class Meta:
        proxy = True


@python_2_unicode_compatible
class SecretHideout(models.Model):
    """ Secret! Not registered with the admin! """
    location = models.CharField(max_length=100)
    villain = models.ForeignKey(Villain)

    def __str__(self):
        return self.location


@python_2_unicode_compatible
class SuperSecretHideout(models.Model):
    """ Secret! Not registered with the admin! """
    location = models.CharField(max_length=100)
    supervillain = models.ForeignKey(SuperVillain)

    def __str__(self):
        return self.location


@python_2_unicode_compatible
class CyclicOne(models.Model):
    # CyclicOne and CyclicTwo form a deliberate FK cycle.
    name = models.CharField(max_length=25)
    two = models.ForeignKey('CyclicTwo')

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class CyclicTwo(models.Model):
    name = models.CharField(max_length=25)
    one = models.ForeignKey(CyclicOne)

    def __str__(self):
        return self.name


class Topping(models.Model):
    name = models.CharField(max_length=20)


class Pizza(models.Model):
    name = models.CharField(max_length=20)
    toppings = models.ManyToManyField('Topping', related_name='pizzas')


class Album(models.Model):
    owner = models.ForeignKey(User, null=True, blank=True,
                              on_delete=models.SET_NULL)
    title = models.CharField(max_length=30)


class Employee(Person):
    code = models.CharField(max_length=20)


class WorkHour(models.Model):
    datum = models.DateField()
    employee = models.ForeignKey(Employee)


class Question(models.Model):
    question = models.CharField(max_length=20)


@python_2_unicode_compatible
class Answer(models.Model):
    question = models.ForeignKey(Question, on_delete=models.PROTECT)
    answer = models.CharField(max_length=20)

    def __str__(self):
        return self.answer


class Reservation(models.Model):
    start_date = models.DateTimeField()
    price = models.IntegerField()


DRIVER_CHOICES = (
    ('bill', 'Bill G'),
    ('steve', 'Steve J'),
)

RESTAURANT_CHOICES = (
    ('indian', 'A Taste of India'),
    ('thai', 'Thai Pography'),
    ('pizza', 'Pizza Mama'),
)


class FoodDelivery(models.Model):
    reference = models.CharField(max_length=100)
    driver = models.CharField(max_length=100, choices=DRIVER_CHOICES,
                              blank=True)
    restaurant = models.CharField(max_length=100,
                                  choices=RESTAURANT_CHOICES, blank=True)

    class Meta:
        unique_together = (("driver", "restaurant"),)


@python_2_unicode_compatible
class CoverLetter(models.Model):
    author = models.CharField(max_length=30)
    date_written = models.DateField(null=True, blank=True)

    def __str__(self):
        return self.author


class Paper(models.Model):
    title = models.CharField(max_length=30)
    author = models.CharField(max_length=30, blank=True, null=True)


class ShortMessage(models.Model):
    content = models.CharField(max_length=140)
    timestamp = models.DateTimeField(null=True, blank=True)


@python_2_unicode_compatible
class Telegram(models.Model):
    title = models.CharField(max_length=30)
    date_sent = models.DateField(null=True, blank=True)

    def __str__(self):
        return self.title


class Story(models.Model):
    title = models.CharField(max_length=100)
    content = models.TextField()


class OtherStory(models.Model):
    title = models.CharField(max_length=100)
    content = models.TextField()


class ComplexSortedPerson(models.Model):
    name = models.CharField(max_length=100)
    age = models.PositiveIntegerField()
    is_employee = models.NullBooleanField()


class PluggableSearchPerson(models.Model):
    name = models.CharField(max_length=100)
    age = models.PositiveIntegerField()


class PrePopulatedPostLargeSlug(models.Model):
    """
    Regression test for #15938: a large max_length for the slugfield
    must not be localized in prepopulated_fields_js.html or it might
    end up breaking the javascript (ie, using THOUSAND_SEPARATOR ends
    up with maxLength=1,000)
    """
    title = models.CharField(max_length=100)
    published = models.BooleanField(default=False)
    # `db_index=False` because MySQL cannot index large CharField (#21196).
    slug = models.SlugField(max_length=1000, db_index=False)


class AdminOrderedField(models.Model):
    order = models.IntegerField()
    stuff = models.CharField(max_length=200)


class AdminOrderedModelMethod(models.Model):
    order = models.IntegerField()
    stuff = models.CharField(max_length=200)

    def some_order(self):
        return self.order
    some_order.admin_order_field = 'order'


class AdminOrderedAdminMethod(models.Model):
    order = models.IntegerField()
    stuff = models.CharField(max_length=200)


class AdminOrderedCallable(models.Model):
    order = models.IntegerField()
    stuff = models.CharField(max_length=200)


@python_2_unicode_compatible
class Report(models.Model):
    title = models.CharField(max_length=100)

    def __str__(self):
        return self.title


class MainPrepopulated(models.Model):
    name = models.CharField(max_length=100)
    pubdate = models.DateField()
    status = models.CharField(
        max_length=20,
        choices=(('option one', 'Option One'),
                 ('option two', 'Option Two')))
    slug1 = models.SlugField(blank=True)
    slug2 = models.SlugField(blank=True)


class RelatedPrepopulated(models.Model):
    parent = models.ForeignKey(MainPrepopulated)
    name = models.CharField(max_length=75)
    pubdate = models.DateField()
    status = models.CharField(
        max_length=20,
        choices=(('option one', 'Option One'),
                 ('option two', 'Option Two')))
    slug1 = models.SlugField(max_length=50)
    slug2 = models.SlugField(max_length=60)


class UnorderedObject(models.Model):
    """
    Model without any defined `Meta.ordering`.
    Refs #16819.
    """
    name = models.CharField(max_length=255)
    # NOTE(review): field name shadows the builtin `bool`; kept verbatim.
    bool = models.BooleanField(default=True)


class UndeletableObject(models.Model):
    """
    Model whose show_delete in admin change_view has been disabled
    Refs #10057.
    """
    name = models.CharField(max_length=255)


class UnchangeableObject(models.Model):
    """
    Model whose change_view is disabled in admin
    Refs #20640.
    """


class UserMessenger(models.Model):
    """
    Dummy class for testing message_user functions on ModelAdmin
    """


class Simple(models.Model):
    """
    Simple model with nothing on it for use in testing
    """


class Choice(models.Model):
    choice = models.IntegerField(blank=True, null=True,
                                 choices=((1, 'Yes'),
                                          (0, 'No'),
                                          (None, 'No opinion')))


class ParentWithDependentChildren(models.Model):
    """
    Issue #20522
    Model where the validation of child foreign-key relationships depends
    on validation of the parent
    """
    some_required_info = models.PositiveIntegerField()
    family_name = models.CharField(max_length=255, blank=False)


class DependentChild(models.Model):
    """
    Issue #20522
    Model that depends on validation of the parent class for one of its
    fields to validate during clean
    """
    parent = models.ForeignKey(ParentWithDependentChildren)
    family_name = models.CharField(max_length=255)


class _Manager(models.Manager):
    # Filtered default-candidate manager used by FilteredManager below.
    def get_queryset(self):
        return super(_Manager, self).get_queryset().filter(pk__gt=1)


class FilteredManager(models.Model):
    def __str__(self):
        return "PK=%d" % self.pk

    # Declared first, so this filtered manager is the default manager.
    pk_gt_1 = _Manager()
    objects = models.Manager()


class EmptyModelVisible(models.Model):
    """ See ticket #11277. """


class EmptyModelHidden(models.Model):
    """ See ticket #11277. """


class EmptyModelMixin(models.Model):
    """ See ticket #11277. """


class State(models.Model):
    name = models.CharField(max_length=100)


class City(models.Model):
    state = models.ForeignKey(State)
    name = models.CharField(max_length=100)

    def get_absolute_url(self):
        return '/dummy/%s/' % self.pk


class Restaurant(models.Model):
    city = models.ForeignKey(City)
    name = models.CharField(max_length=100)

    def get_absolute_url(self):
        return '/dummy/%s/' % self.pk


class Worker(models.Model):
    work_at = models.ForeignKey(Restaurant)
    name = models.CharField(max_length=50)
    surname = models.CharField(max_length=50)


# Models for #23329
class ReferencedByParent(models.Model):
    name = models.CharField(max_length=20, unique=True)


class ParentWithFK(models.Model):
    fk = models.ForeignKey(
        ReferencedByParent,
        to_field='name',
        related_name='hidden+',
    )


class ChildOfReferer(ParentWithFK):
    pass


# Models for #23431
class ReferencedByInline(models.Model):
    name = models.CharField(max_length=20, unique=True)


class InlineReference(models.Model):
    fk = models.ForeignKey(
        ReferencedByInline,
        to_field='name',
        related_name='hidden+',
    )


class InlineReferer(models.Model):
    refs = models.ManyToManyField(InlineReference)


# Models for #23604 and #23915
class Recipe(models.Model):
    rname = models.CharField(max_length=20, unique=True)


class Ingredient(models.Model):
    iname = models.CharField(max_length=20, unique=True)
    recipes = models.ManyToManyField(Recipe, through='RecipeIngredient')


class RecipeIngredient(models.Model):
    ingredient = models.ForeignKey(Ingredient, to_field='iname')
    recipe = models.ForeignKey(Recipe, to_field='rname')


# Model for #23839
class NotReferenced(models.Model):
    # Don't point any FK at this model.
    pass


# Models for #23934
class ExplicitlyProvidedPK(models.Model):
    name = models.IntegerField(primary_key=True)


class ImplicitlyGeneratedPK(models.Model):
    name = models.IntegerField(unique=True)


# Models for #25622
class ReferencedByGenRel(models.Model):
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')


class GenRelReference(models.Model):
    references = GenericRelation(ReferencedByGenRel)


class ParentWithUUIDPK(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4,
                          editable=False)
    title = models.CharField(max_length=100)

    def __str__(self):
        return str(self.id)


class RelatedWithUUIDPKModel(models.Model):
    parent = models.ForeignKey(ParentWithUUIDPK, on_delete=models.CASCADE)
./CrossVul/dataset_final_sorted/CWE-79/py/good_5191_4
crossvul-python_data_bad_1644_8
"""Tornado handlers for security logging."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from tornado import gen, web

from ...base.handlers import IPythonHandler, json_errors
from . import csp_report_uri


class CSPReportHandler(IPythonHandler):
    '''Accepts a content security policy violation report'''

    def skip_check_origin(self):
        """Don't check origin when reporting origin-check violations!

        CSP reports are POSTed by the browser itself; rejecting them on
        an origin mismatch would suppress exactly the violations this
        handler exists to record.
        """
        return True

    @web.authenticated
    @json_errors
    def post(self):
        '''Log a content security policy violation report'''
        # Browsers send reports with Content-Type: application/csp-report,
        # not application/json, so don't try to parse the body as JSON
        # (the old ``csp_report = self.get_json_body()`` was unused and
        # could reject valid reports).  Log the raw payload instead.
        self.log.warning("Content security violation: %s",
                         self.request.body.decode('utf8', 'replace'))


# Route table consumed by the notebook web application at startup.
default_handlers = [
    (csp_report_uri, CSPReportHandler)
]
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1644_8
crossvul-python_data_good_2105_2
"""
:synopsis: most ajax processors for askbot

This module contains most (but not all) processors for Ajax requests.
Not so clear if this subdivision was necessary as separation of Ajax
and non-ajax views is not always very clean.
"""
import datetime
import logging
from bs4 import BeautifulSoup
from django.conf import settings as django_settings
from django.core import exceptions
#from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseRedirect
from django.http import HttpResponseForbidden
from django.forms import ValidationError, IntegerField, CharField
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template.loader import get_template
from django.views.decorators import csrf
from django.utils import simplejson
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.translation import string_concat
from askbot.utils.slug import slugify
from askbot import models
from askbot import forms
from askbot import conf
from askbot import const
from askbot import mail
from askbot.conf import settings as askbot_settings
from askbot.utils import category_tree
from askbot.utils import decorators
from askbot.utils import url_utils
from askbot.utils.forms import get_db_object_or_404
from django.template import RequestContext
from askbot.skins.loaders import render_into_skin_as_string
from askbot.skins.loaders import render_text_into_skin
from askbot.models.tag import get_tags_by_names


@csrf.csrf_exempt
def manage_inbox(request):
    """delete, mark as new or seen user's response memo objects,
    excluding flags.

    request data is ``memo_list`` - list of integer id's of the
    ActivityAuditStatus items and ``action_type`` - string - one of
    delete|mark_new|mark_seen
    """
    response_data = dict()
    try:
        if request.is_ajax():
            if request.method == 'POST':
                post_data = simplejson.loads(request.raw_post_data)
                if request.user.is_authenticated():
                    # Restrict the queryset to the requesting user's own
                    # memos of displayable/moderation activity types.
                    activity_types = const.RESPONSE_ACTIVITY_TYPES_FOR_DISPLAY
                    activity_types += (
                        const.TYPE_ACTIVITY_MENTION,
                        const.TYPE_ACTIVITY_MARK_OFFENSIVE,
                        const.TYPE_ACTIVITY_MODERATED_NEW_POST,
                        const.TYPE_ACTIVITY_MODERATED_POST_EDIT
                    )
                    user = request.user
                    memo_set = models.ActivityAuditStatus.objects.filter(
                        id__in = post_data['memo_list'],
                        activity__activity_type__in = activity_types,
                        user = user
                    )
                    action_type = post_data['action_type']
                    if action_type == 'delete':
                        memo_set.delete()
                    elif action_type == 'mark_new':
                        memo_set.update(status = models.ActivityAuditStatus.STATUS_NEW)
                    elif action_type == 'mark_seen':
                        memo_set.update(status = models.ActivityAuditStatus.STATUS_SEEN)
                    elif action_type == 'remove_flag':
                        # Either cancel an offensive flag or approve a
                        # moderated revision, depending on the memo type.
                        for memo in memo_set:
                            activity_type = memo.activity.activity_type
                            if activity_type == const.TYPE_ACTIVITY_MARK_OFFENSIVE:
                                request.user.flag_post(
                                    post = memo.activity.content_object,
                                    cancel_all = True
                                )
                            elif activity_type in \
                                (
                                    const.TYPE_ACTIVITY_MODERATED_NEW_POST,
                                    const.TYPE_ACTIVITY_MODERATED_POST_EDIT
                                ):
                                post_revision = memo.activity.content_object
                                request.user.approve_post_revision(post_revision)
                                memo.delete()
                    #elif action_type == 'close':
                    #    for memo in memo_set:
                    #        if memo.activity.content_object.post_type == "question":
                    #            request.user.close_question(question = memo.activity.content_object, reason = 7)
                    #        memo.delete()
                    elif action_type == 'delete_post':
                        # Reject the post and email the author the reason.
                        for memo in memo_set:
                            content_object = memo.activity.content_object
                            if isinstance(content_object, models.PostRevision):
                                post = content_object.post
                            else:
                                post = content_object
                            request.user.delete_post(post)
                            reject_reason = models.PostFlagReason.objects.get(
                                id = post_data['reject_reason_id']
                            )
                            template = get_template('email/rejected_post.html')
                            data = {
                                'post': post.html,
                                'reject_reason': reject_reason.details.html
                            }
                            body_text = template.render(RequestContext(request, data))
                            mail.send_mail(
                                subject_line = _('your post was not accepted'),
                                body_text = unicode(body_text),
                                recipient_list = [post.author.email,]
                            )
                            memo.delete()

                    user.update_response_counts()
                    response_data['success'] = True
                    data = simplejson.dumps(response_data)
                    return HttpResponse(data, mimetype="application/json")
                else:
                    raise exceptions.PermissionDenied(
                        _('Sorry, but anonymous users cannot access the inbox')
                    )
            else:
                raise exceptions.PermissionDenied('must use POST request')
        else:
            #todo: show error page but no-one is likely to get here
            return HttpResponseRedirect(reverse('index'))
    except Exception, e:
        # Any failure is reported back as a JSON error payload.
        message = unicode(e)
        if message == '':
            message = _('Oops, apologies - there was some error')
        response_data['message'] = message
        response_data['success'] = False
        data = simplejson.dumps(response_data)
        return HttpResponse(data, mimetype="application/json")


def process_vote(user = None, vote_direction = None, post = None):
    """function (non-view) that actually processes user votes - i.e.
    up- or down- votes.

    in the future this needs to be converted into a real view function
    for that url and javascript will need to be adjusted

    also in the future make keys in response data be more meaningful
    right now they are kind of cryptic - "status", "count"
    """
    if user.is_anonymous():
        raise exceptions.PermissionDenied(_(
            'Sorry, anonymous users cannot vote'
        ))

    user.assert_can_vote_for_post(post = post, direction = vote_direction)
    vote = user.get_old_vote_for_post(post)
    response_data = {}
    if vote != None:
        # A repeated vote in the same direction cancels the old vote.
        user.assert_can_revoke_old_vote(vote)
        score_delta = vote.cancel()
        response_data['count'] = post.points + score_delta
        response_data['status'] = 1 #this means "cancel"
    else:
        #this is a new vote
        votes_left = user.get_unused_votes_today()
        if votes_left <= 0:
            raise exceptions.PermissionDenied(
                _('Sorry you ran out of votes for today')
            )

        votes_left -= 1
        if votes_left <= \
            askbot_settings.VOTES_LEFT_WARNING_THRESHOLD:
            msg = _('You have %(votes_left)s votes left for today') \
                % {'votes_left': votes_left }
            response_data['message'] = msg

        if vote_direction == 'up':
            vote = user.upvote(post = post)
        else:
            vote = user.downvote(post = post)

        response_data['count'] = post.points
        response_data['status'] = 0 #this means "not cancel", normal operation

    response_data['success'] = 1
    return response_data


@csrf.csrf_exempt
def vote(request):
    """
    todo: this subroutine needs serious refactoring
    it's too long and is hard to understand

    vote_type:
        acceptAnswer : 0,
        questionUpVote : 1,
        questionDownVote : 2,
        favorite : 4,
        answerUpVote: 5,
        answerDownVote:6,
        offensiveQuestion : 7,
        remove offensiveQuestion flag : 7.5,
        remove all offensiveQuestion flag : 7.6,
        offensiveAnswer:8,
        remove offensiveAnswer flag : 8.5,
        remove all offensiveAnswer flag : 8.6,
        removeQuestion: 9,
        removeAnswer:10
        questionSubscribeUpdates:11
        questionUnSubscribeUpdates:12

    accept answer code:
        response_data['allowed'] = -1, Accept his own answer
                                    0, no allowed - Anonymous
                                    1, Allowed - by default
        response_data['success'] =  0, failed
                                    1, Success - by default
        response_data['status']  =  0, By default
                                    1, Answer has been accepted already(Cancel)

    vote code:
        allowed = -3, Don't have enough votes left
                  -2, Don't have enough reputation score
                  -1, Vote his own post
                   0, no allowed - Anonymous
                   1, Allowed - by default
        status  =  0, By default
                   1, Cancel
                   2, Vote is too old to be canceled

    offensive code:
        allowed = -3, Don't have enough flags left
                  -2, Don't have enough reputation score to do this
                   0, not allowed
                   1, allowed
        status  =  0, by default
                   1, can't do it again
    """
    response_data = {
        "allowed": 1,
        "success": 1,
        "status" : 0,
        "count"  : 0,
        "message" : ''
    }
    try:
        if request.is_ajax() and request.method == 'POST':
            vote_type = request.POST.get('type')
        else:
            raise Exception(_('Sorry, something is not right here...'))

        id = request.POST.get('postId')
        if vote_type == '0':
            # Accept/unaccept best answer.
            if askbot_settings.ACCEPTING_ANSWERS_ENABLED is False:
                # NOTE(review): returns None here, not an HttpResponse —
                # presumably unreachable via normal UI; verify upstream.
                return
            if request.user.is_authenticated():
                answer_id = request.POST.get('postId')
                answer = get_object_or_404(models.Post, post_type='answer', id = answer_id)
                # make sure question author is current user
                if answer.accepted():
                    request.user.unaccept_best_answer(answer)
                    response_data['status'] = 1 #cancelation
                else:
                    request.user.accept_best_answer(answer)

                ####################################################################
                answer.thread.update_summary_html() # regenerate question/thread summary html
                ####################################################################
            else:
                raise exceptions.PermissionDenied(
                    _('Sorry, but anonymous users cannot accept answers')
                )
        elif vote_type in ('1', '2', '5', '6'):#Q&A up/down votes
            ###############################
            # all this can be avoided with
            # better query parameters
            vote_direction = 'up'
            if vote_type in ('2','6'):
                vote_direction = 'down'

            if vote_type in ('5', '6'):
                #todo: fix this weirdness - why postId here
                #and not with question?
                post_id = request.POST.get('postId')
                post = get_object_or_404(models.Post, post_type='answer', id=post_id)
            else:
                post = get_object_or_404(models.Post, post_type='question', id=id)
            #
            ######################
            response_data = process_vote(
                user = request.user,
                vote_direction = vote_direction,
                post = post
            )

            ####################################################################
            if vote_type in ('1', '2'): # up/down-vote question
                post.thread.update_summary_html() # regenerate question/thread summary html
            ####################################################################
        elif vote_type in ['7', '8']:
            #flag question or answer
            if vote_type == '7':
                post = get_object_or_404(models.Post, post_type='question', id=id)
            if vote_type == '8':
                id = request.POST.get('postId')
                post = get_object_or_404(models.Post, post_type='answer', id=id)

            request.user.flag_post(post)

            response_data['count'] = post.offensive_flag_count
            response_data['success'] = 1
        elif vote_type in ['7.5', '8.5']:
            #flag question or answer
            if vote_type == '7.5':
                post = get_object_or_404(models.Post, post_type='question', id=id)
            if vote_type == '8.5':
                id = request.POST.get('postId')
                post = get_object_or_404(models.Post, post_type='answer', id=id)

            request.user.flag_post(post, cancel = True)

            response_data['count'] = post.offensive_flag_count
            response_data['success'] = 1
        elif vote_type in ['7.6', '8.6']:
            #flag question or answer
            if vote_type == '7.6':
                post = get_object_or_404(models.Post, id=id)
            if vote_type == '8.6':
                id = request.POST.get('postId')
                post = get_object_or_404(models.Post, id=id)

            request.user.flag_post(post, cancel_all = True)

            response_data['count'] = post.offensive_flag_count
            response_data['success'] = 1
        elif vote_type in ['9', '10']:
            #delete question or answer
            post = get_object_or_404(models.Post, post_type='question', id=id)
            if vote_type == '10':
                id = request.POST.get('postId')
                post = get_object_or_404(models.Post, post_type='answer', id=id)

            if post.deleted == True:
                request.user.restore_post(post = post)
            else:
                request.user.delete_post(post = post)
        elif request.is_ajax() and request.method == 'POST':
            # Favorite / (un)subscribe actions (vote_type 4, 11, 12).
            if not request.user.is_authenticated():
                response_data['allowed'] = 0
                response_data['success'] = 0

            question = get_object_or_404(models.Post, post_type='question', id=id)
            vote_type = request.POST.get('type')

            #accept answer
            if vote_type == '4':
                fave = request.user.toggle_favorite_question(question)
                response_data['count'] = models.FavoriteQuestion.objects.filter(thread = question.thread).count()
                if fave == False:
                    response_data['status'] = 1
            elif vote_type == '11':#subscribe q updates
                user = request.user
                if user.is_authenticated():
                    if user not in question.thread.followed_by.all():
                        user.follow_question(question)
                        if askbot_settings.EMAIL_VALIDATION == True \
                            and user.email_isvalid == False:
                            response_data['message'] = \
                                    _(
                                        'Your subscription is saved, but email address '
                                        '%(email)s needs to be validated, please see '
                                        '<a href="%(details_url)s">more details here</a>'
                                    ) % {'email':user.email,'details_url':reverse('faq') + '#validate'}

                    subscribed = user.subscribe_for_followed_question_alerts()
                    if subscribed:
                        if 'message' in response_data:
                            response_data['message'] += '<br/>'
                        response_data['message'] += _('email update frequency has been set to daily')
                    #response_data['status'] = 1
                    #responst_data['allowed'] = 1
                else:
                    pass
                    #response_data['status'] = 0
                    #response_data['allowed'] = 0
            elif vote_type == '12':#unsubscribe q updates
                user = request.user
                if user.is_authenticated():
                    user.unfollow_question(question)
        else:
            response_data['success'] = 0
            response_data['message'] = u'Request mode is not supported. Please try again.'

        if vote_type not in (1, 2, 4, 5, 6, 11, 12):
            #favorite or subscribe/unsubscribe
            #upvote or downvote question or answer - those
            #are handled within user.upvote and user.downvote
            # NOTE(review): vote_type is a string here, so this `in` test
            # against ints always succeeds — presumably intentional cache
            # invalidation for all paths; verify against upstream.
            post = models.Post.objects.get(id = id)
            post.thread.invalidate_cached_data()

        data = simplejson.dumps(response_data)
    except Exception, e:
        response_data['message'] = unicode(e)
        response_data['success'] = 0
        data = simplejson.dumps(response_data)

    return HttpResponse(data, mimetype="application/json")


#internally grouped views - used by the tagging system
@csrf.csrf_exempt
@decorators.post_only
@decorators.ajax_login_required
def mark_tag(request, **kwargs):#tagging system
    # Mark tags (or wildcards) as good/bad/subscribed for a user, then
    # return a mapping of tag name -> usage count for the client.
    action = kwargs['action']
    post_data = simplejson.loads(request.raw_post_data)
    raw_tagnames = post_data['tagnames']
    reason = post_data['reason']
    assert reason in ('good', 'bad', 'subscribed')
    #separate plain tag names and wildcard tags
    tagnames, wildcards = forms.clean_marked_tagnames(raw_tagnames)

    if request.user.is_administrator() and 'user' in post_data:
        # Admins may mark tags on behalf of another user.
        user = get_object_or_404(models.User, pk=post_data['user'])
    else:
        user = request.user

    cleaned_tagnames, cleaned_wildcards = user.mark_tags(
        tagnames,
        wildcards,
        reason = reason,
        action = action
    )

    #lastly - calculate tag usage counts
    tag_usage_counts = dict()
    for name in tagnames:
        if name in cleaned_tagnames:
            tag_usage_counts[name] = 1
        else:
            tag_usage_counts[name] = 0

    for name in wildcards:
        if name in cleaned_wildcards:
            tag_usage_counts[name] = models.Tag.objects.filter(
                name__startswith = name[:-1]
            ).count()
        else:
            tag_usage_counts[name] = 0

    return HttpResponse(simplejson.dumps(tag_usage_counts), mimetype="application/json")


#@decorators.ajax_only
@decorators.get_only
def get_tags_by_wildcard(request):
    """returns an json encoded array of tag names
    in the response to a wildcard tag name
    """
    wildcard = request.GET.get('wildcard', None)
    if wildcard is None:
        return HttpResponseForbidden()

    matching_tags = models.Tag.objects.get_by_wildcards(
        [wildcard,]
    )
    count =
matching_tags.count() names = matching_tags.values_list('name', flat = True)[:20] re_data = simplejson.dumps({'tag_count': count, 'tag_names': list(names)}) return HttpResponse(re_data, mimetype = 'application/json') @decorators.get_only def get_thread_shared_users(request): """returns snippet of html with users""" thread_id = request.GET['thread_id'] thread_id = IntegerField().clean(thread_id) thread = models.Thread.objects.get(id=thread_id) users = thread.get_users_shared_with() data = { 'users': users, } html = render_into_skin_as_string('widgets/user_list.html', data, request) re_data = simplejson.dumps({ 'html': html, 'users_count': users.count(), 'success': True }) return HttpResponse(re_data, mimetype='application/json') @decorators.get_only def get_thread_shared_groups(request): """returns snippet of html with groups""" thread_id = request.GET['thread_id'] thread_id = IntegerField().clean(thread_id) thread = models.Thread.objects.get(id=thread_id) groups = thread.get_groups_shared_with() data = {'groups': groups} html = render_into_skin_as_string('widgets/groups_list.html', data, request) re_data = simplejson.dumps({ 'html': html, 'groups_count': groups.count(), 'success': True }) return HttpResponse(re_data, mimetype='application/json') @decorators.ajax_only def get_html_template(request): """returns rendered template""" template_name = request.REQUEST.get('template_name', None) allowed_templates = ( 'widgets/tag_category_selector.html', ) #have allow simple context for the templates if template_name not in allowed_templates: raise Http404 return { 'html': get_template(template_name).render() } @decorators.get_only def get_tag_list(request): """returns tags to use in the autocomplete function """ tags = models.Tag.objects.filter( deleted = False, status = models.Tag.STATUS_ACCEPTED ) tag_names = tags.values_list( 'name', flat = True ) output = '\n'.join(map(escape, tag_names)) return HttpResponse(output, mimetype = 'text/plain') @decorators.get_only def 
load_object_description(request): """returns text of the object description in text""" obj = get_db_object_or_404(request.GET)#askbot forms utility text = getattr(obj.description, 'text', '').strip() return HttpResponse(text, mimetype = 'text/plain') @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only @decorators.admins_only def save_object_description(request): """if object description does not exist, creates a new record, otherwise edits an existing one""" obj = get_db_object_or_404(request.POST) text = request.POST['text'] if obj.description: request.user.edit_post(obj.description, body_text=text) else: request.user.post_object_description(obj, body_text=text) return {'html': obj.description.html} @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only def rename_tag(request): if request.user.is_anonymous() \ or not request.user.is_administrator_or_moderator(): raise exceptions.PermissionDenied() post_data = simplejson.loads(request.raw_post_data) to_name = forms.clean_tag(post_data['to_name']) from_name = forms.clean_tag(post_data['from_name']) path = post_data['path'] #kwargs = {'from': old_name, 'to': new_name} #call_command('rename_tags', **kwargs) tree = category_tree.get_data() category_tree.rename_category( tree, from_name = from_name, to_name = to_name, path = path ) category_tree.save_data(tree) @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only def delete_tag(request): """todo: actually delete tags now it is only deletion of category from the tree""" if request.user.is_anonymous() \ or not request.user.is_administrator_or_moderator(): raise exceptions.PermissionDenied() try: post_data = simplejson.loads(request.raw_post_data) tag_name = post_data['tag_name'] path = post_data['path'] tree = category_tree.get_data() category_tree.delete_category(tree, tag_name, path) category_tree.save_data(tree) except Exception: if 'tag_name' in locals(): logging.critical('could not delete tag %s' % tag_name) else: logging.critical('failed to 
parse post data %s' % request.raw_post_data) raise exceptions.PermissionDenied(_('Sorry, could not delete tag')) return {'tree_data': tree} @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only def add_tag_category(request): """adds a category at the tip of a given path expects the following keys in the ``request.POST`` * path - array starting with zero giving path to the category page where to add the category * new_category_name - string that must satisfy the same requiremets as a tag return json with the category tree data todo: switch to json stored in the live settings now we have indented input """ if request.user.is_anonymous() \ or not request.user.is_administrator_or_moderator(): raise exceptions.PermissionDenied() post_data = simplejson.loads(request.raw_post_data) category_name = forms.clean_tag(post_data['new_category_name']) path = post_data['path'] tree = category_tree.get_data() if category_tree.path_is_valid(tree, path) == False: raise ValueError('category insertion path is invalid') new_path = category_tree.add_category(tree, category_name, path) category_tree.save_data(tree) return { 'tree_data': tree, 'new_path': new_path } @decorators.get_only def get_groups_list(request): """returns names of group tags for the autocomplete function""" global_group = models.Group.objects.get_global_group() groups = models.Group.objects.exclude_personal() group_names = groups.exclude( name=global_group.name ).values_list( 'name', flat = True ) output = '\n'.join(group_names) return HttpResponse(output, mimetype = 'text/plain') @csrf.csrf_protect def subscribe_for_tags(request): """process subscription of users by tags""" #todo - use special separator to split tags tag_names = request.REQUEST.get('tags','').strip().split() pure_tag_names, wildcards = forms.clean_marked_tagnames(tag_names) if request.user.is_authenticated(): if request.method == 'POST': if 'ok' in request.POST: request.user.mark_tags( pure_tag_names, wildcards, reason = 'good', action = 
'add' ) request.user.message_set.create( message = _('Your tag subscription was saved, thanks!') ) else: message = _( 'Tag subscription was canceled (<a href="%(url)s">undo</a>).' ) % {'url': escape(request.path) + '?tags=' + request.REQUEST['tags']} request.user.message_set.create(message = message) return HttpResponseRedirect(reverse('index')) else: data = {'tags': tag_names} return render(request, 'subscribe_for_tags.html', data) else: all_tag_names = pure_tag_names + wildcards message = _('Please sign in to subscribe for: %(tags)s') \ % {'tags': ', '.join(all_tag_names)} request.user.message_set.create(message = message) request.session['subscribe_for_tags'] = (pure_tag_names, wildcards) return HttpResponseRedirect(url_utils.get_login_url()) @decorators.admins_only def list_bulk_tag_subscription(request): if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED is False: raise Http404 object_list = models.BulkTagSubscription.objects.all() data = {'object_list': object_list} return render(request, 'tags/list_bulk_tag_subscription.html', data) @decorators.admins_only def create_bulk_tag_subscription(request): if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED is False: raise Http404 data = {'action': _('Create')} if request.method == "POST": form = forms.BulkTagSubscriptionForm(request.POST) if form.is_valid(): tag_names = form.cleaned_data['tags'].split(' ') user_list = form.cleaned_data.get('users') group_list = form.cleaned_data.get('groups') bulk_subscription = models.BulkTagSubscription.objects.create( tag_names=tag_names, tag_author=request.user, user_list=user_list, group_list=group_list ) return HttpResponseRedirect(reverse('list_bulk_tag_subscription')) else: data['form'] = form else: data['form'] = forms.BulkTagSubscriptionForm() return render(request, 'tags/form_bulk_tag_subscription.html', data) @decorators.admins_only def edit_bulk_tag_subscription(request, pk): if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED is False: raise Http404 bulk_subscription = 
get_object_or_404(models.BulkTagSubscription, pk=pk) data = {'action': _('Edit')} if request.method == "POST": form = forms.BulkTagSubscriptionForm(request.POST) if form.is_valid(): bulk_subscription.tags.clear() bulk_subscription.users.clear() bulk_subscription.groups.clear() if 'groups' in form.cleaned_data: group_ids = [user.id for user in form.cleaned_data['groups']] bulk_subscription.groups.add(*group_ids) tags, new_tag_names = get_tags_by_names(form.cleaned_data['tags'].split(' ')) tag_id_list = [tag.id for tag in tags] for new_tag_name in new_tag_names: new_tag = models.Tag.objects.create(name=new_tag_name, created_by=request.user) tag_id_list.append(new_tag.id) bulk_subscription.tags.add(*tag_id_list) user_ids = [] for user in form.cleaned_data['users']: user_ids.append(user) user.mark_tags(bulk_subscription.tag_list(), reason='subscribed', action='add') bulk_subscription.users.add(*user_ids) return HttpResponseRedirect(reverse('list_bulk_tag_subscription')) else: form_initial = { 'users': bulk_subscription.users.all(), 'groups': bulk_subscription.groups.all(), 'tags': ' '.join([tag.name for tag in bulk_subscription.tags.all()]), } data.update({ 'bulk_subscription': bulk_subscription, 'form': forms.BulkTagSubscriptionForm(initial=form_initial), }) return render(request, 'tags/form_bulk_tag_subscription.html', data) @decorators.admins_only @decorators.post_only def delete_bulk_tag_subscription(request): if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED is False: raise Http404 pk = request.POST.get('pk') if pk: bulk_subscription = get_object_or_404(models.BulkTagSubscription, pk=pk) bulk_subscription.delete() return HttpResponseRedirect(reverse('list_bulk_tag_subscription')) else: return HttpResponseRedirect(reverse('list_bulk_tag_subscription')) @decorators.get_only def api_get_questions(request): """json api for retrieving questions by title match""" query = request.GET.get('query_text', '').strip() tag_name = request.GET.get('tag_name', None) if 
askbot_settings.GROUPS_ENABLED: threads = models.Thread.objects.get_visible(user=request.user) else: threads = models.Thread.objects.all() if tag_name: threads = threads.filter(tags__name=tag_name) if query: threads = threads.get_for_title_query(query) #todo: filter out deleted threads, for now there is no way threads = threads.distinct()[:30] thread_list = list() for thread in threads:#todo: this is a temp hack until thread model is fixed try: thread_list.append({ 'title': escape(thread.title), 'url': thread.get_absolute_url(), 'answer_count': thread.get_answer_count(request.user) }) except: continue json_data = simplejson.dumps(thread_list) return HttpResponse(json_data, mimetype = "application/json") @csrf.csrf_exempt @decorators.post_only @decorators.ajax_login_required def set_tag_filter_strategy(request): """saves data in the ``User.[email/display]_tag_filter_strategy`` for the current user """ filter_type = request.POST['filter_type'] filter_value = int(request.POST['filter_value']) assert(filter_type in ('display', 'email')) if filter_type == 'display': allowed_values_dict = dict(conf.get_tag_display_filter_strategy_choices()) assert(filter_value in allowed_values_dict) request.user.display_tag_filter_strategy = filter_value else: allowed_values_dict = dict(conf.get_tag_email_filter_strategy_choices()) assert(filter_value in allowed_values_dict) request.user.email_tag_filter_strategy = filter_value request.user.save() return HttpResponse('', mimetype = "application/json") @login_required @csrf.csrf_protect def close(request, id):#close question """view to initiate and process question close """ question = get_object_or_404(models.Post, post_type='question', id=id) try: if request.method == 'POST': form = forms.CloseForm(request.POST) if form.is_valid(): reason = form.cleaned_data['reason'] request.user.close_question( question = question, reason = reason ) return HttpResponseRedirect(question.get_absolute_url()) else: 
request.user.assert_can_close_question(question) form = forms.CloseForm() data = { 'question': question, 'form': form, } return render(request, 'close.html', data) except exceptions.PermissionDenied, e: request.user.message_set.create(message = unicode(e)) return HttpResponseRedirect(question.get_absolute_url()) @login_required @csrf.csrf_protect def reopen(request, id):#re-open question """view to initiate and process question close this is not an ajax view """ question = get_object_or_404(models.Post, post_type='question', id=id) # open question try: if request.method == 'POST' : request.user.reopen_question(question) return HttpResponseRedirect(question.get_absolute_url()) else: request.user.assert_can_reopen_question(question) closed_by_profile_url = question.thread.closed_by.get_profile_url() closed_by_username = question.thread.closed_by.username data = { 'question' : question, 'closed_by_profile_url': closed_by_profile_url, 'closed_by_username': closed_by_username, } return render(request, 'reopen.html', data) except exceptions.PermissionDenied, e: request.user.message_set.create(message = unicode(e)) return HttpResponseRedirect(question.get_absolute_url()) @csrf.csrf_exempt @decorators.ajax_only def swap_question_with_answer(request): """receives two json parameters - answer id and new question title the view is made to be used only by the site administrator or moderators """ if request.user.is_authenticated(): if request.user.is_administrator() or request.user.is_moderator(): answer = models.Post.objects.get_answers( request.user ).get( id=request.POST['answer_id'] ) new_question = answer.swap_with_question(new_title = request.POST['new_title']) return {'question_url': new_question.get_absolute_url() } raise Http404 @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only def upvote_comment(request): if request.user.is_anonymous(): raise exceptions.PermissionDenied(_('Please sign in to vote')) form = forms.VoteForm(request.POST) if form.is_valid(): 
comment_id = form.cleaned_data['post_id'] cancel_vote = form.cleaned_data['cancel_vote'] comment = get_object_or_404(models.Post, post_type='comment', id=comment_id) process_vote( post = comment, vote_direction = 'up', user = request.user ) else: raise ValueError #FIXME: rename js return {'score': comment.points} @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only def delete_post(request): if request.user.is_anonymous(): raise exceptions.PermissionDenied(_('Please sign in to delete/restore posts')) form = forms.VoteForm(request.POST) if form.is_valid(): post_id = form.cleaned_data['post_id'] post = get_object_or_404( models.Post, post_type__in = ('question', 'answer'), id = post_id ) if form.cleaned_data['cancel_vote']: request.user.restore_post(post) else: request.user.delete_post(post) else: raise ValueError return {'is_deleted': post.deleted} #askbot-user communication system @csrf.csrf_exempt def read_message(request):#marks message a read if request.method == "POST": if request.POST['formdata'] == 'required': request.session['message_silent'] = 1 if request.user.is_authenticated(): request.user.delete_messages() return HttpResponse('') @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only @decorators.admins_only def edit_group_membership(request): #todo: this call may need to go. 
#it used to be the one creating groups #from the user profile page #we have a separate method form = forms.EditGroupMembershipForm(request.POST) if form.is_valid(): group_name = form.cleaned_data['group_name'] user_id = form.cleaned_data['user_id'] try: user = models.User.objects.get(id=user_id) except models.User.DoesNotExist: raise exceptions.PermissionDenied( 'user with id %d not found' % user_id ) action = form.cleaned_data['action'] #warning: possible race condition if action == 'add': group_params = {'name': group_name, 'user': user} group = models.Group.objects.get_or_create(**group_params) request.user.edit_group_membership(user, group, 'add') template = get_template('widgets/group_snippet.html') return { 'name': group.name, 'description': getattr(group.tag_wiki, 'text', ''), 'html': template.render({'group': group}) } elif action == 'remove': try: group = models.Group.objects.get(group_name = group_name) request.user.edit_group_membership(user, group, 'remove') except models.Group.DoesNotExist: raise exceptions.PermissionDenied() else: raise exceptions.PermissionDenied() else: raise exceptions.PermissionDenied() @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only @decorators.admins_only def save_group_logo_url(request): """saves urls for the group logo""" form = forms.GroupLogoURLForm(request.POST) if form.is_valid(): group_id = form.cleaned_data['group_id'] image_url = form.cleaned_data['image_url'] group = models.Group.objects.get(id = group_id) group.logo_url = image_url group.save() else: raise ValueError('invalid data found when saving group logo') @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only @decorators.admins_only def add_group(request): group_name = request.POST.get('group') if group_name: group = models.Group.objects.get_or_create( name=group_name, openness=models.Group.OPEN, user=request.user, ) url = reverse('users_by_group', kwargs={'group_id': group.id, 'group_slug': slugify(group_name)}) response_dict = 
dict(group_name = group_name, url = url ) return response_dict @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only @decorators.admins_only def delete_group_logo(request): group_id = IntegerField().clean(int(request.POST['group_id'])) group = models.Group.objects.get(id = group_id) group.logo_url = None group.save() @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only @decorators.admins_only def delete_post_reject_reason(request): reason_id = IntegerField().clean(int(request.POST['reason_id'])) reason = models.PostFlagReason.objects.get(id = reason_id) reason.delete() @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only @decorators.admins_only def toggle_group_profile_property(request): #todo: this might be changed to more general "toggle object property" group_id = IntegerField().clean(int(request.POST['group_id'])) property_name = CharField().clean(request.POST['property_name']) assert property_name in ( 'moderate_email', 'moderate_answers_to_enquirers', 'is_vip' ) group = models.Group.objects.get(id = group_id) new_value = not getattr(group, property_name) setattr(group, property_name, new_value) group.save() return {'is_enabled': new_value} @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only @decorators.admins_only def set_group_openness(request): group_id = IntegerField().clean(int(request.POST['group_id'])) value = IntegerField().clean(int(request.POST['value'])) group = models.Group.objects.get(id=group_id) group.openness = value group.save() @csrf.csrf_exempt @decorators.ajax_only @decorators.admins_only def edit_object_property_text(request): model_name = CharField().clean(request.REQUEST['model_name']) object_id = IntegerField().clean(request.REQUEST['object_id']) property_name = CharField().clean(request.REQUEST['property_name']) accessible_fields = ( ('Group', 'preapproved_emails'), ('Group', 'preapproved_email_domains') ) if (model_name, property_name) not in accessible_fields: raise exceptions.PermissionDenied() 
obj = models.get_model(model_name).objects.get(id=object_id) if request.method == 'POST': text = CharField().clean(request.POST['text']) setattr(obj, property_name, text) obj.save() elif request.method == 'GET': return {'text': getattr(obj, property_name)} else: raise exceptions.PermissionDenied() @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only def join_or_leave_group(request): """called when user wants to join/leave ask to join/cancel join request, depending on the groups acceptance level for the given user returns resulting "membership_level" """ if request.user.is_anonymous(): raise exceptions.PermissionDenied() Group = models.Group Membership = models.GroupMembership group_id = IntegerField().clean(request.POST['group_id']) group = Group.objects.get(id=group_id) membership = request.user.get_group_membership(group) if membership is None: membership = request.user.join_group(group) new_level = membership.get_level_display() else: membership.delete() new_level = Membership.get_level_value_display(Membership.NONE) return {'membership_level': new_level} @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only @decorators.admins_only def save_post_reject_reason(request): """saves post reject reason and returns the reason id if reason_id is not given in the input - a new reason is created, otherwise a reason with the given id is edited and saved """ form = forms.EditRejectReasonForm(request.POST) if form.is_valid(): title = form.cleaned_data['title'] details = form.cleaned_data['details'] if form.cleaned_data['reason_id'] is None: reason = request.user.create_post_reject_reason( title = title, details = details ) else: reason_id = form.cleaned_data['reason_id'] reason = models.PostFlagReason.objects.get(id = reason_id) request.user.edit_post_reject_reason( reason, title = title, details = details ) return { 'reason_id': reason.id, 'title': title, 'details': details } else: raise Exception(forms.format_form_errors(form)) @csrf.csrf_exempt 
@decorators.ajax_only @decorators.post_only @decorators.admins_only def moderate_suggested_tag(request): """accepts or rejects a suggested tag if thread id is given, then tag is applied to or removed from only one thread, otherwise the decision applies to all threads """ form = forms.ModerateTagForm(request.POST) if form.is_valid(): tag_id = form.cleaned_data['tag_id'] thread_id = form.cleaned_data.get('thread_id', None) try: tag = models.Tag.objects.get(id=tag_id)#can tag not exist? except models.Tag.DoesNotExist: return if thread_id: threads = models.Thread.objects.filter(id=thread_id) else: threads = tag.threads.none() if form.cleaned_data['action'] == 'accept': #todo: here we lose ability to come back #to the tag moderation and approve tag to #other threads later for the case where tag.used_count > 1 tag.status = models.Tag.STATUS_ACCEPTED tag.save() for thread in threads: thread.add_tag( tag_name = tag.name, user = tag.created_by, timestamp = datetime.datetime.now(), silent = True ) else: if tag.threads.count() > len(threads): for thread in threads: thread.tags.remove(tag) tag.used_count = tag.threads.count() tag.save() elif tag.status == models.Tag.STATUS_SUGGESTED: tag.delete() else: raise Exception(forms.format_form_errors(form)) @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only def save_draft_question(request): """saves draft questions""" #todo: allow drafts for anonymous users if request.user.is_anonymous(): return form = forms.DraftQuestionForm(request.POST) if form.is_valid(): title = form.cleaned_data.get('title', '') text = form.cleaned_data.get('text', '') tagnames = form.cleaned_data.get('tagnames', '') if title or text or tagnames: try: draft = models.DraftQuestion.objects.get(author=request.user) except models.DraftQuestion.DoesNotExist: draft = models.DraftQuestion() draft.title = title draft.text = text draft.tagnames = tagnames draft.author = request.user draft.save() @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only def 
save_draft_answer(request): """saves draft answers""" #todo: allow drafts for anonymous users if request.user.is_anonymous(): return form = forms.DraftAnswerForm(request.POST) if form.is_valid(): thread_id = form.cleaned_data['thread_id'] try: thread = models.Thread.objects.get(id=thread_id) except models.Thread.DoesNotExist: return try: draft = models.DraftAnswer.objects.get( thread=thread, author=request.user ) except models.DraftAnswer.DoesNotExist: draft = models.DraftAnswer() draft.author = request.user draft.thread = thread draft.text = form.cleaned_data.get('text', '') draft.save() @decorators.get_only def get_users_info(request): """retuns list of user names and email addresses of "fake" users - so that admins can post on their behalf""" if request.user.is_anonymous(): return HttpResponseForbidden() query = request.GET['q'] limit = IntegerField().clean(request.GET['limit']) users = models.User.objects user_info_list = users.filter(username__istartswith=query) if request.user.is_administrator_or_moderator(): user_info_list = user_info_list.values_list('username', 'email') else: user_info_list = user_info_list.values_list('username') result_list = ['|'.join(info) for info in user_info_list[:limit]] return HttpResponse('\n'.join(result_list), mimetype = 'text/plain') @csrf.csrf_protect def share_question_with_group(request): form = forms.ShareQuestionForm(request.POST) try: if form.is_valid(): thread_id = form.cleaned_data['thread_id'] group_name = form.cleaned_data['recipient_name'] thread = models.Thread.objects.get(id=thread_id) question_post = thread._question_post() #get notif set before sets1 = question_post.get_notify_sets( mentioned_users=list(), exclude_list=[request.user,] ) #share the post if group_name == askbot_settings.GLOBAL_GROUP_NAME: thread.make_public(recursive=True) else: group = models.Group.objects.get(name=group_name) thread.add_to_groups((group,), recursive=True) #get notif sets after sets2 = question_post.get_notify_sets( 
mentioned_users=list(), exclude_list=[request.user,] ) notify_sets = { 'for_mentions': sets2['for_mentions'] - sets1['for_mentions'], 'for_email': sets2['for_email'] - sets1['for_email'], 'for_inbox': sets2['for_inbox'] - sets1['for_inbox'] } question_post.issue_update_notifications( updated_by=request.user, notify_sets=notify_sets, activity_type=const.TYPE_ACTIVITY_POST_SHARED, timestamp=datetime.datetime.now() ) return HttpResponseRedirect(thread.get_absolute_url()) except Exception: error_message = _('Sorry, looks like sharing request was invalid') request.user.message_set.create(message=error_message) return HttpResponseRedirect(thread.get_absolute_url()) @csrf.csrf_protect def share_question_with_user(request): form = forms.ShareQuestionForm(request.POST) try: if form.is_valid(): thread_id = form.cleaned_data['thread_id'] username = form.cleaned_data['recipient_name'] thread = models.Thread.objects.get(id=thread_id) user = models.User.objects.get(username=username) group = user.get_personal_group() thread.add_to_groups([group], recursive=True) #notify the person #todo: see if user could already see the post - b/f the sharing notify_sets = { 'for_inbox': set([user]), 'for_mentions': set([user]), 'for_email': set([user]) } thread._question_post().issue_update_notifications( updated_by=request.user, notify_sets=notify_sets, activity_type=const.TYPE_ACTIVITY_POST_SHARED, timestamp=datetime.datetime.now() ) return HttpResponseRedirect(thread.get_absolute_url()) except Exception: error_message = _('Sorry, looks like sharing request was invalid') request.user.message_set.create(message=error_message) return HttpResponseRedirect(thread.get_absolute_url()) @csrf.csrf_protect def moderate_group_join_request(request): """moderator of the group can accept or reject a new user""" request_id = IntegerField().clean(request.POST['request_id']) action = request.POST['action'] assert(action in ('approve', 'deny')) activity = get_object_or_404(models.Activity, pk=request_id) 
group = activity.content_object applicant = activity.user if group.has_moderator(request.user): group_membership = models.GroupMembership.objects.get( user=applicant, group=group ) if action == 'approve': group_membership.level = models.GroupMembership.FULL group_membership.save() msg_data = {'user': applicant.username, 'group': group.name} message = _('%(user)s, welcome to group %(group)s!') % msg_data applicant.message_set.create(message=message) else: group_membership.delete() activity.delete() url = request.user.get_absolute_url() + '?sort=inbox&section=join_requests' return HttpResponseRedirect(url) else: raise Http404 @decorators.get_only def get_editor(request): """returns bits of html for the tinymce editor in a dictionary with keys: * html - the editor element * scripts - an array of script tags * success - True """ if 'config' not in request.GET: return HttpResponseForbidden() config = simplejson.loads(request.GET['config']) element_id = request.GET.get('id', 'editor') form = forms.EditorForm( attrs={'id': element_id}, editor_attrs=config, user=request.user ) editor_html = render_text_into_skin( '{{ form.media }} {{ form.editor }}', {'form': form}, request ) #parse out javascript and dom, and return them separately #we need that, because js needs to be added in a special way html_soup = BeautifulSoup(editor_html) parsed_scripts = list() for script in html_soup.find_all('script'): parsed_scripts.append({ 'contents': script.string, 'src': script.get('src', None) }) data = { 'html': str(html_soup.textarea), 'scripts': parsed_scripts, 'success': True } return HttpResponse(simplejson.dumps(data), mimetype='application/json') @csrf.csrf_exempt @decorators.ajax_only @decorators.post_only def publish_answer(request): """will publish or unpublish answer, if current thread is moderated """ denied_msg = _('Sorry, only thread moderators can use this function') if request.user.is_authenticated(): if request.user.is_administrator_or_moderator() is False: raise 
exceptions.PermissionDenied(denied_msg) #todo: assert permission answer_id = IntegerField().clean(request.POST['answer_id']) answer = models.Post.objects.get(id=answer_id, post_type='answer') if answer.thread.has_moderator(request.user) is False: raise exceptions.PermissionDenied(denied_msg) enquirer = answer.thread._question_post().author enquirer_group = enquirer.get_personal_group() if answer.has_group(enquirer_group): message = _('The answer is now unpublished') answer.remove_from_groups([enquirer_group]) else: answer.add_to_groups([enquirer_group]) message = _('The answer is now published') #todo: notify enquirer by email about the post request.user.message_set.create(message=message) return {'redirect_url': answer.get_absolute_url()}
./CrossVul/dataset_final_sorted/CWE-79/py/good_2105_2
crossvul-python_data_bad_1091_1
# -*- coding: utf-8 -*- from django.http import HttpRequest, HttpResponse, HttpResponseForbidden, \ HttpResponseNotFound from django.shortcuts import redirect from django.utils.translation import ugettext as _ from zerver.lib.response import json_success, json_error from zerver.lib.upload import upload_message_image_from_request, get_local_file_path, \ get_signed_upload_url, check_upload_within_quota from zerver.models import UserProfile, validate_attachment_request from django.conf import settings from sendfile import sendfile from mimetypes import guess_type def serve_s3(request: HttpRequest, url_path: str) -> HttpResponse: uri = get_signed_upload_url(url_path) return redirect(uri) def serve_local(request: HttpRequest, path_id: str) -> HttpResponse: local_path = get_local_file_path(path_id) if local_path is None: return HttpResponseNotFound('<p>File not found</p>') # Here we determine whether a browser should treat the file like # an attachment (and thus clicking a link to it should download) # or like a link (and thus clicking a link to it should display it # in a browser tab). This is controlled by the # Content-Disposition header; `django-sendfile` sends the # attachment-style version of that header if and only if the # attachment argument is passed to it. For attachments, # django-sendfile sets the response['Content-disposition'] like # this: `attachment; filename="b'zulip.txt'"; filename*=UTF-8''zulip.txt`. # # The "filename" field (used to name the file when downloaded) is # unreliable because it doesn't have a well-defined encoding; the # newer filename* field takes precedence, since it uses a # consistent format (urlquoted). 
For more details on filename* # and filename, see the below docs: # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition attachment = True file_type = guess_type(local_path)[0] if file_type is not None and (file_type.startswith("image/") or file_type == "application/pdf"): attachment = False return sendfile(request, local_path, attachment=attachment) def serve_file_backend(request: HttpRequest, user_profile: UserProfile, realm_id_str: str, filename: str) -> HttpResponse: path_id = "%s/%s" % (realm_id_str, filename) is_authorized = validate_attachment_request(user_profile, path_id) if is_authorized is None: return HttpResponseNotFound(_("<p>File not found.</p>")) if not is_authorized: return HttpResponseForbidden(_("<p>You are not authorized to view this file.</p>")) if settings.LOCAL_UPLOADS_DIR is not None: return serve_local(request, path_id) return serve_s3(request, path_id) def upload_file_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse: if len(request.FILES) == 0: return json_error(_("You must specify a file to upload")) if len(request.FILES) != 1: return json_error(_("You may only upload one file at a time")) user_file = list(request.FILES.values())[0] file_size = user_file._get_size() if settings.MAX_FILE_UPLOAD_SIZE * 1024 * 1024 < file_size: return json_error(_("Uploaded file is larger than the allowed limit of %s MB") % ( settings.MAX_FILE_UPLOAD_SIZE)) check_upload_within_quota(user_profile.realm, file_size) if not isinstance(user_file.name, str): # It seems that in Python 2 unicode strings containing bytes are # rendered differently than ascii strings containing same bytes. # # Example: # >>> print('\xd3\x92') # Ӓ # >>> print(u'\xd3\x92') # Ó # # This is the cause of the problem as user_file.name variable # is received as a unicode which is converted into unicode # strings containing bytes and is rendered incorrectly. 
# # Example: # >>> import urllib.parse # >>> name = u'%D0%97%D0%B4%D1%80%D0%B0%D0%B2%D0%B5%D0%B8%CC%86%D1%82%D0%B5.txt' # >>> print(urllib.parse.unquote(name)) # Здравейте # This is wrong # # >>> name = '%D0%97%D0%B4%D1%80%D0%B0%D0%B2%D0%B5%D0%B8%CC%86%D1%82%D0%B5.txt' # >>> print(urllib.parse.unquote(name)) # Здравейте.txt # This is correct user_file.name = user_file.name.encode('ascii') uri = upload_message_image_from_request(request, user_file, user_profile) return json_success({'uri': uri})
./CrossVul/dataset_final_sorted/CWE-79/py/bad_1091_1
crossvul-python_data_bad_5790_1
import calendar import datetime import re import sys import urllib import urlparse from email.utils import formatdate from django.utils.datastructures import MultiValueDict from django.utils.encoding import smart_str, force_unicode from django.utils.functional import allow_lazy ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"') MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split() __D = r'(?P<day>\d{2})' __D2 = r'(?P<day>[ \d]\d)' __M = r'(?P<mon>\w{3})' __Y = r'(?P<year>\d{4})' __Y2 = r'(?P<year>\d{2})' __T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})' RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T)) RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T)) ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y)) def urlquote(url, safe='/'): """ A version of Python's urllib.quote() function that can operate on unicode strings. The url is first UTF-8 encoded before quoting. The returned string can safely be used as part of an argument to a subsequent iri_to_uri() call without double-quoting occurring. """ return force_unicode(urllib.quote(smart_str(url), smart_str(safe))) urlquote = allow_lazy(urlquote, unicode) def urlquote_plus(url, safe=''): """ A version of Python's urllib.quote_plus() function that can operate on unicode strings. The url is first UTF-8 encoded before quoting. The returned string can safely be used as part of an argument to a subsequent iri_to_uri() call without double-quoting occurring. """ return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe))) urlquote_plus = allow_lazy(urlquote_plus, unicode) def urlunquote(quoted_url): """ A wrapper for Python's urllib.unquote() function that can operate on the result of django.utils.http.urlquote(). 
""" return force_unicode(urllib.unquote(smart_str(quoted_url))) urlunquote = allow_lazy(urlunquote, unicode) def urlunquote_plus(quoted_url): """ A wrapper for Python's urllib.unquote_plus() function that can operate on the result of django.utils.http.urlquote_plus(). """ return force_unicode(urllib.unquote_plus(smart_str(quoted_url))) urlunquote_plus = allow_lazy(urlunquote_plus, unicode) def urlencode(query, doseq=0): """ A version of Python's urllib.urlencode() function that can operate on unicode strings. The parameters are first case to UTF-8 encoded strings and then encoded as per normal. """ if isinstance(query, MultiValueDict): query = query.lists() elif hasattr(query, 'items'): query = query.items() return urllib.urlencode( [(smart_str(k), isinstance(v, (list,tuple)) and [smart_str(i) for i in v] or smart_str(v)) for k, v in query], doseq) def cookie_date(epoch_seconds=None): """ Formats the time to ensure compatibility with Netscape's cookie standard. Accepts a floating point number expressed in seconds since the epoch, in UTC - such as that outputted by time.time(). If set to None, defaults to the current time. Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'. """ rfcdate = formatdate(epoch_seconds) return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25]) def http_date(epoch_seconds=None): """ Formats the time to match the RFC1123 date format as specified by HTTP RFC2616 section 3.3.1. Accepts a floating point number expressed in seconds since the epoch, in UTC - such as that outputted by time.time(). If set to None, defaults to the current time. Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'. """ rfcdate = formatdate(epoch_seconds) return '%s GMT' % rfcdate[:25] def parse_http_date(date): """ Parses a date format as specified by HTTP RFC2616 section 3.3.1. The three formats allowed by the RFC are accepted, even if only the first one is still in widespread use. 
Returns an floating point number expressed in seconds since the epoch, in UTC. """ # emails.Util.parsedate does the job for RFC1123 dates; unfortunately # RFC2616 makes it mandatory to support RFC850 dates too. So we roll # our own RFC-compliant parsing. for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE: m = regex.match(date) if m is not None: break else: raise ValueError("%r is not in a valid HTTP date format" % date) try: year = int(m.group('year')) if year < 100: if year < 70: year += 2000 else: year += 1900 month = MONTHS.index(m.group('mon').lower()) + 1 day = int(m.group('day')) hour = int(m.group('hour')) min = int(m.group('min')) sec = int(m.group('sec')) result = datetime.datetime(year, month, day, hour, min, sec) return calendar.timegm(result.utctimetuple()) except Exception: raise ValueError("%r is not a valid date" % date) def parse_http_date_safe(date): """ Same as parse_http_date, but returns None if the input is invalid. """ try: return parse_http_date(date) except Exception: pass # Base 36 functions: useful for generating compact URLs def base36_to_int(s): """ Converts a base 36 string to an ``int``. Raises ``ValueError` if the input won't fit into an int. """ # To prevent overconsumption of server resources, reject any # base36 string that is long than 13 base36 digits (13 digits # is sufficient to base36-encode any 64-bit integer) if len(s) > 13: raise ValueError("Base36 input too large") value = int(s, 36) # ... then do a final check that the value will fit into an int. 
if value > sys.maxint: raise ValueError("Base36 input too large") return value def int_to_base36(i): """ Converts an integer to a base36 string """ digits = "0123456789abcdefghijklmnopqrstuvwxyz" factor = 0 if not 0 <= i <= sys.maxint: raise ValueError("Base36 conversion input too large or incorrect type.") # Find starting factor while True: factor += 1 if i < 36 ** factor: factor -= 1 break base36 = [] # Construct base36 representation while factor >= 0: j = 36 ** factor base36.append(digits[i // j]) i = i % j factor -= 1 return ''.join(base36) def parse_etags(etag_str): """ Parses a string with one or several etags passed in If-None-Match and If-Match headers by the rules in RFC 2616. Returns a list of etags without surrounding double quotes (") and unescaped from \<CHAR>. """ etags = ETAG_MATCH.findall(etag_str) if not etags: # etag_str has wrong format, treat it as an opaque string then return [etag_str] etags = [e.decode('string_escape') for e in etags] return etags def quote_etag(etag): """ Wraps a string in double quotes escaping contents as necesary. """ return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"') if sys.version_info >= (2, 6): def same_origin(url1, url2): """ Checks if two URLs are 'same-origin' """ p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2) return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port) else: # Python 2.5 compatibility. This actually works for Python 2.6 and above, # but the above definition is much more obviously correct and so is # preferred going forward. def same_origin(url1, url2): """ Checks if two URLs are 'same-origin' """ p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2) return p1[0:2] == p2[0:2] def is_safe_url(url, host=None): """ Return ``True`` if the url is a safe redirection (i.e. it doesn't point to a different host). Always returns ``False`` on an empty url. """ if not url: return False netloc = urlparse.urlparse(url)[1] return not netloc or netloc == host
./CrossVul/dataset_final_sorted/CWE-79/py/bad_5790_1
crossvul-python_data_bad_2196_0
import socketio import traceback from ajenti.http import HttpHandler from ajenti.api import BasePlugin, plugin, persistent, rootcontext from ajenti.api.http import HttpPlugin, SocketPlugin from ajenti.plugins import manager from ajenti.profiler import * class SocketIORouteHandler (HttpHandler): def __init__(self): self.namespaces = {} for cls in SocketPlugin.get_classes(): self.namespaces[cls.name] = cls def handle(self, context): return str(socketio.socketio_manage(context.env, self.namespaces, context)) class InvalidRouteHandler (HttpHandler): def handle(self, context): context.respond_not_found() return 'Invalid URL' @plugin @persistent @rootcontext class CentralDispatcher (BasePlugin, HttpHandler): def __init__(self): self.invalid = InvalidRouteHandler() self.io = SocketIORouteHandler() @profiled(lambda a, k: 'HTTP %s' % a[1].path) def handle(self, context): """ Dispatch the request to every HttpPlugin """ if hasattr(context.session, 'appcontext'): self.context = context.session.appcontext else: self.context = manager.context if context.path.startswith('/ajenti:socket'): return context.fallthrough(self.io) if not hasattr(self.context, 'http_handlers'): self.context.http_handlers = HttpPlugin.get_all() for instance in self.context.http_handlers: try: output = instance.handle(context) except Exception, e: return [self.respond_error(context, e)] if output is not None: return output return context.fallthrough(self.invalid) def respond_error(self, context, exception): context.respond_server_error() stack = traceback.format_exc() return """ <html> <body> <style> body { font-family: sans-serif; color: #888; text-align: center; } body pre { width: 600px; text-align: left; margin: auto; font-family: monospace; } </style> <img src="/ajenti:static/main/error.jpeg" /> <br/> <p> Server error </p> <pre> %s </pre> </body> </html> """ % stack
./CrossVul/dataset_final_sorted/CWE-79/py/bad_2196_0