code
stringlengths
1
1.49M
vector
listlengths
0
7.38k
snippet
listlengths
0
7.38k
#!/usr/bin/python # Copyright 2011 Google, Inc. All Rights Reserved. # simple script to walk source tree looking for third-party licenses # dumps resulting html page to stdout import os, re, mimetypes, sys # read source directories to scan from command line SOURCE = sys.argv[1:] # regex to find /* */ style comment blocks COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL) # regex used to detect if comment block is a license COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE) COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE) EXCLUDE_TYPES = [ "application/xml", "image/png", ] # list of known licenses; keys are derived by stripping all whitespace and # forcing to lowercase to help combine multiple files that have same license. KNOWN_LICENSES = {} class License: def __init__(self, license_text): self.license_text = license_text self.filenames = [] # add filename to the list of files that have the same license text def add_file(self, filename): if filename not in self.filenames: self.filenames.append(filename) LICENSE_KEY = re.compile(r"[^\w]") def find_license(license_text): # TODO(alice): a lot these licenses are almost identical Apache licenses. # Most of them differ in origin/modifications. Consider combining similar # licenses. 
license_key = LICENSE_KEY.sub("", license_text).lower() if license_key not in KNOWN_LICENSES: KNOWN_LICENSES[license_key] = License(license_text) return KNOWN_LICENSES[license_key] def discover_license(exact_path, filename): # when filename ends with LICENSE, assume applies to filename prefixed if filename.endswith("LICENSE"): with open(exact_path) as file: license_text = file.read() target_filename = filename[:-len("LICENSE")] if target_filename.endswith("."): target_filename = target_filename[:-1] find_license(license_text).add_file(target_filename) return None # try searching for license blocks in raw file mimetype = mimetypes.guess_type(filename) if mimetype in EXCLUDE_TYPES: return None with open(exact_path) as file: raw_file = file.read() # include comments that have both "license" and "copyright" in the text for comment in COMMENT_BLOCK.finditer(raw_file): comment = comment.group(1) if COMMENT_LICENSE.search(comment) is None: continue if COMMENT_COPYRIGHT.search(comment) is None: continue find_license(comment).add_file(filename) for source in SOURCE: for root, dirs, files in os.walk(source): for name in files: discover_license(os.path.join(root, name), name) print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>" for license in KNOWN_LICENSES.values(): print "<h3>Notices for files:</h3><ul>" filenames = license.filenames filenames.sort() for filename in filenames: print "<li>%s</li>" % (filename) print "</ul>" print "<pre>%s</pre>" % license.license_text print "</body></html>"
[ [ 1, 0, 0.0816, 0.0102, 0, 0.66, 0, 688, 0, 4, 0, 0, 688, 0, 0 ], [ 14, 0, 0.1224, 0.0102, 0, 0.66, 0.0714, 792, 6, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1531, 0.0102, 0, ...
[ "import os, re, mimetypes, sys", "SOURCE = sys.argv[1:]", "COMMENT_BLOCK = re.compile(r\"(/\\*.+?\\*/)\", re.MULTILINE | re.DOTALL)", "COMMENT_LICENSE = re.compile(r\"(license)\", re.IGNORECASE)", "COMMENT_COPYRIGHT = re.compile(r\"(copyright)\", re.IGNORECASE)", "EXCLUDE_TYPES = [\n \"application/xml\...
#!/usr/bin/python2.6 # # Simple http server to emulate api.playfoursquare.com import logging import shutil import sys import urlparse import SimpleHTTPServer import BaseHTTPServer class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """Handle playfoursquare.com requests, for testing.""" def do_GET(self): logging.warn('do_GET: %s, %s', self.command, self.path) url = urlparse.urlparse(self.path) logging.warn('do_GET: %s', url) query = urlparse.parse_qs(url.query) query_keys = [pair[0] for pair in query] response = self.handle_url(url) if response != None: self.send_200() shutil.copyfileobj(response, self.wfile) self.wfile.close() do_POST = do_GET def handle_url(self, url): path = None if url.path == '/v1/venue': path = '../captures/api/v1/venue.xml' elif url.path == '/v1/addvenue': path = '../captures/api/v1/venue.xml' elif url.path == '/v1/venues': path = '../captures/api/v1/venues.xml' elif url.path == '/v1/user': path = '../captures/api/v1/user.xml' elif url.path == '/v1/checkcity': path = '../captures/api/v1/checkcity.xml' elif url.path == '/v1/checkins': path = '../captures/api/v1/checkins.xml' elif url.path == '/v1/cities': path = '../captures/api/v1/cities.xml' elif url.path == '/v1/switchcity': path = '../captures/api/v1/switchcity.xml' elif url.path == '/v1/tips': path = '../captures/api/v1/tips.xml' elif url.path == '/v1/checkin': path = '../captures/api/v1/checkin.xml' elif url.path == '/history/12345.rss': path = '../captures/api/v1/feed.xml' if path is None: self.send_error(404) else: logging.warn('Using: %s' % path) return open(path) def send_200(self): self.send_response(200) self.send_header('Content-type', 'text/xml') self.end_headers() def main(): if len(sys.argv) > 1: port = int(sys.argv[1]) else: port = 8080 server_address = ('0.0.0.0', port) httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler) sa = httpd.socket.getsockname() print "Serving HTTP on", sa[0], "port", sa[1], "..." 
httpd.serve_forever() if __name__ == '__main__': main()
[ [ 1, 0, 0.0588, 0.0118, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0706, 0.0118, 0, 0.66, 0.125, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0824, 0.0118, 0, 0...
[ "import logging", "import shutil", "import sys", "import urlparse", "import SimpleHTTPServer", "import BaseHTTPServer", "class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n \"\"\"Handle playfoursquare.com requests, for testing.\"\"\"\n\n def do_GET(self):\n logging.warn('do_GET: %s, %s',...
#!/usr/bin/python import datetime import sys import textwrap import common from xml.dom import pulldom PARSER = """\ /** * Copyright 2009 Joe LaPenna */ package com.joelapenna.foursquare.parsers; import com.joelapenna.foursquare.Foursquare; import com.joelapenna.foursquare.error.FoursquareError; import com.joelapenna.foursquare.error.FoursquareParseException; import com.joelapenna.foursquare.types.%(type_name)s; import org.xmlpull.v1.XmlPullParser; import org.xmlpull.v1.XmlPullParserException; import java.io.IOException; import java.util.logging.Level; import java.util.logging.Logger; /** * Auto-generated: %(timestamp)s * * @author Joe LaPenna (joe@joelapenna.com) * @param <T> */ public class %(type_name)sParser extends AbstractParser<%(type_name)s> { private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName()); private static final boolean DEBUG = Foursquare.PARSER_DEBUG; @Override public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException, FoursquareError, FoursquareParseException { parser.require(XmlPullParser.START_TAG, null, null); %(type_name)s %(top_node_name)s = new %(type_name)s(); while (parser.nextTag() == XmlPullParser.START_TAG) { String name = parser.getName(); %(stanzas)s } else { // Consume something we don't understand. 
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name); skipSubTree(parser); } } return %(top_node_name)s; } }""" BOOLEAN_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText())); """ GROUP_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser)); """ COMPLEX_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser)); """ STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(parser.nextText()); """ def main(): type_name, top_node_name, attributes = common.WalkNodesForAttributes( sys.argv[1]) GenerateClass(type_name, top_node_name, attributes) def GenerateClass(type_name, top_node_name, attributes): """generate it. type_name: the type of object the parser returns top_node_name: the name of the object the parser returns. per common.WalkNodsForAttributes """ stanzas = [] for name in sorted(attributes): typ, children = attributes[name] replacements = Replacements(top_node_name, name, typ, children) if typ == common.BOOLEAN: stanzas.append(BOOLEAN_STANZA % replacements) elif typ == common.GROUP: stanzas.append(GROUP_STANZA % replacements) elif typ in common.COMPLEX: stanzas.append(COMPLEX_STANZA % replacements) else: stanzas.append(STANZA % replacements) if stanzas: # pop off the extranious } else for the first conditional stanza. 
stanzas[0] = stanzas[0].replace('} else ', '', 1) replacements = Replacements(top_node_name, name, typ, [None]) replacements['stanzas'] = '\n'.join(stanzas).strip() print PARSER % replacements def Replacements(top_node_name, name, typ, children): # CameCaseClassName type_name = ''.join([word.capitalize() for word in top_node_name.split('_')]) # CamelCaseClassName camel_name = ''.join([word.capitalize() for word in name.split('_')]) # camelCaseLocalName attribute_name = camel_name.lower().capitalize() # mFieldName field_name = 'm' + camel_name if children[0]: sub_parser_camel_case = children[0] + 'Parser' else: sub_parser_camel_case = (camel_name[:-1] + 'Parser') return { 'type_name': type_name, 'name': name, 'top_node_name': top_node_name, 'camel_name': camel_name, 'parser_name': typ + 'Parser', 'attribute_name': attribute_name, 'field_name': field_name, 'typ': typ, 'timestamp': datetime.datetime.now(), 'sub_parser_camel_case': sub_parser_camel_case, 'sub_type': children[0] } if __name__ == '__main__': main()
[ [ 1, 0, 0.0201, 0.0067, 0, 0.66, 0, 426, 0, 1, 0, 0, 426, 0, 0 ], [ 1, 0, 0.0268, 0.0067, 0, 0.66, 0.0769, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0336, 0.0067, 0, ...
[ "import datetime", "import sys", "import textwrap", "import common", "from xml.dom import pulldom", "PARSER = \"\"\"\\\n/**\n * Copyright 2009 Joe LaPenna\n */\n\npackage com.joelapenna.foursquare.parsers;\n\nimport com.joelapenna.foursquare.Foursquare;", "BOOLEAN_STANZA = \"\"\"\\\n } else i...
#!/usr/bin/python """ Pull a oAuth protected page from foursquare. Expects ~/.oget to contain (one on each line): CONSUMER_KEY CONSUMER_KEY_SECRET USERNAME PASSWORD Don't forget to chmod 600 the file! """ import httplib import os import re import sys import urllib import urllib2 import urlparse import user from xml.dom import pulldom from xml.dom import minidom import oauth """From: http://groups.google.com/group/foursquare-api/web/oauth @consumer = OAuth::Consumer.new("consumer_token","consumer_secret", { :site => "http://foursquare.com", :scheme => :header, :http_method => :post, :request_token_path => "/oauth/request_token", :access_token_path => "/oauth/access_token", :authorize_path => "/oauth/authorize" }) """ SERVER = 'api.foursquare.com:80' CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'} SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1() AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange' def parse_auth_response(auth_response): return ( re.search('<oauth_token>(.*)</oauth_token>', auth_response).groups()[0], re.search('<oauth_token_secret>(.*)</oauth_token_secret>', auth_response).groups()[0] ) def create_signed_oauth_request(username, password, consumer): oauth_request = oauth.OAuthRequest.from_consumer_and_token( consumer, http_method='POST', http_url=AUTHEXCHANGE_URL, parameters=dict(fs_username=username, fs_password=password)) oauth_request.sign_request(SIGNATURE_METHOD, consumer, None) return oauth_request def main(): url = urlparse.urlparse(sys.argv[1]) # Nevermind that the query can have repeated keys. 
parameters = dict(urlparse.parse_qsl(url.query)) password_file = open(os.path.join(user.home, '.oget')) lines = [line.strip() for line in password_file.readlines()] if len(lines) == 4: cons_key, cons_key_secret, username, password = lines access_token = None else: cons_key, cons_key_secret, username, password, token, secret = lines access_token = oauth.OAuthToken(token, secret) consumer = oauth.OAuthConsumer(cons_key, cons_key_secret) if not access_token: oauth_request = create_signed_oauth_request(username, password, consumer) connection = httplib.HTTPConnection(SERVER) headers = {'Content-Type' :'application/x-www-form-urlencoded'} connection.request(oauth_request.http_method, AUTHEXCHANGE_URL, body=oauth_request.to_postdata(), headers=headers) auth_response = connection.getresponse().read() token = parse_auth_response(auth_response) access_token = oauth.OAuthToken(*token) open(os.path.join(user.home, '.oget'), 'w').write('\n'.join(( cons_key, cons_key_secret, username, password, token[0], token[1]))) oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, access_token, http_method='POST', http_url=url.geturl(), parameters=parameters) oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token) connection = httplib.HTTPConnection(SERVER) connection.request(oauth_request.http_method, oauth_request.to_url(), body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER) print connection.getresponse().read() #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ') if __name__ == '__main__': main()
[ [ 8, 0, 0.0631, 0.0991, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1261, 0.009, 0, 0.66, 0.05, 2, 0, 1, 0, 0, 2, 0, 0 ], [ 1, 0, 0.1351, 0.009, 0, 0.66, 0....
[ "\"\"\"\nPull a oAuth protected page from foursquare.\n\nExpects ~/.oget to contain (one on each line):\nCONSUMER_KEY\nCONSUMER_KEY_SECRET\nUSERNAME\nPASSWORD", "import httplib", "import os", "import re", "import sys", "import urllib", "import urllib2", "import urlparse", "import user", "from xml....
#!/usr/bin/python import os import subprocess import sys BASEDIR = '../main/src/com/joelapenna/foursquare' TYPESDIR = '../captures/types/v1' captures = sys.argv[1:] if not captures: captures = os.listdir(TYPESDIR) for f in captures: basename = f.split('.')[0] javaname = ''.join([c.capitalize() for c in basename.split('_')]) fullpath = os.path.join(TYPESDIR, f) typepath = os.path.join(BASEDIR, 'types', javaname + '.java') parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java') cmd = 'python gen_class.py %s > %s' % (fullpath, typepath) print cmd subprocess.call(cmd, stdout=sys.stdout, shell=True) cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath) print cmd subprocess.call(cmd, stdout=sys.stdout, shell=True)
[ [ 1, 0, 0.1111, 0.037, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.1481, 0.037, 0, 0.66, 0.1429, 394, 0, 1, 0, 0, 394, 0, 0 ], [ 1, 0, 0.1852, 0.037, 0, 0.6...
[ "import os", "import subprocess", "import sys", "BASEDIR = '../main/src/com/joelapenna/foursquare'", "TYPESDIR = '../captures/types/v1'", "captures = sys.argv[1:]", "if not captures:\n captures = os.listdir(TYPESDIR)", " captures = os.listdir(TYPESDIR)", "for f in captures:\n basename = f.split('...
#!/usr/bin/python import logging from xml.dom import minidom from xml.dom import pulldom BOOLEAN = "boolean" STRING = "String" GROUP = "Group" # Interfaces that all FoursquareTypes implement. DEFAULT_INTERFACES = ['FoursquareType'] # Interfaces that specific FoursqureTypes implement. INTERFACES = { } DEFAULT_CLASS_IMPORTS = [ ] CLASS_IMPORTS = { # 'Checkin': DEFAULT_CLASS_IMPORTS + [ # 'import com.joelapenna.foursquare.filters.VenueFilterable' # ], # 'Venue': DEFAULT_CLASS_IMPORTS + [ # 'import com.joelapenna.foursquare.filters.VenueFilterable' # ], # 'Tip': DEFAULT_CLASS_IMPORTS + [ # 'import com.joelapenna.foursquare.filters.VenueFilterable' # ], } COMPLEX = [ 'Group', 'Badge', 'Beenhere', 'Checkin', 'CheckinResponse', 'City', 'Credentials', 'Data', 'Mayor', 'Rank', 'Score', 'Scoring', 'Settings', 'Stats', 'Tags', 'Tip', 'User', 'Venue', ] TYPES = COMPLEX + ['boolean'] def WalkNodesForAttributes(path): """Parse the xml file getting all attributes. <venue> <attribute>value</attribute> </venue> Returns: type_name - The java-style name the top node will have. "Venue" top_node_name - unadultured name of the xml stanza, probably the type of java class we're creating. "venue" attributes - {'attribute': 'value'} """ doc = pulldom.parse(path) type_name = None top_node_name = None attributes = {} level = 0 for event, node in doc: # For skipping parts of a tree. if level > 0: if event == pulldom.END_ELEMENT: level-=1 logging.warn('(%s) Skip end: %s' % (str(level), node)) continue elif event == pulldom.START_ELEMENT: logging.warn('(%s) Skipping: %s' % (str(level), node)) level+=1 continue if event == pulldom.START_ELEMENT: logging.warn('Parsing: ' + node.tagName) # Get the type name to use. 
if type_name is None: type_name = ''.join([word.capitalize() for word in node.tagName.split('_')]) top_node_name = node.tagName logging.warn('Found Top Node Name: ' + top_node_name) continue typ = node.getAttribute('type') child = node.getAttribute('child') # We don't want to walk complex types. if typ in COMPLEX: logging.warn('Found Complex: ' + node.tagName) level = 1 elif typ not in TYPES: logging.warn('Found String: ' + typ) typ = STRING else: logging.warn('Found Type: ' + typ) logging.warn('Adding: ' + str((node, typ))) attributes.setdefault(node.tagName, (typ, [child])) logging.warn('Attr: ' + str((type_name, top_node_name, attributes))) return type_name, top_node_name, attributes
[ [ 1, 0, 0.0263, 0.0088, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0439, 0.0088, 0, 0.66, 0.0833, 290, 0, 1, 0, 0, 290, 0, 0 ], [ 1, 0, 0.0526, 0.0088, 0, ...
[ "import logging", "from xml.dom import minidom", "from xml.dom import pulldom", "BOOLEAN = \"boolean\"", "STRING = \"String\"", "GROUP = \"Group\"", "DEFAULT_INTERFACES = ['FoursquareType']", "INTERFACES = {\n}", "DEFAULT_CLASS_IMPORTS = [\n]", "CLASS_IMPORTS = {\n# 'Checkin': DEFAULT_CLASS_IMP...
#!/usr/bin/python2.6 # # Simple http server to emulate api.playfoursquare.com import logging import shutil import sys import urlparse import SimpleHTTPServer import BaseHTTPServer class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """Handle playfoursquare.com requests, for testing.""" def do_GET(self): logging.warn('do_GET: %s, %s', self.command, self.path) url = urlparse.urlparse(self.path) logging.warn('do_GET: %s', url) query = urlparse.parse_qs(url.query) query_keys = [pair[0] for pair in query] response = self.handle_url(url) if response != None: self.send_200() shutil.copyfileobj(response, self.wfile) self.wfile.close() do_POST = do_GET def handle_url(self, url): path = None if url.path == '/v1/venue': path = '../captures/api/v1/venue.xml' elif url.path == '/v1/addvenue': path = '../captures/api/v1/venue.xml' elif url.path == '/v1/venues': path = '../captures/api/v1/venues.xml' elif url.path == '/v1/user': path = '../captures/api/v1/user.xml' elif url.path == '/v1/checkcity': path = '../captures/api/v1/checkcity.xml' elif url.path == '/v1/checkins': path = '../captures/api/v1/checkins.xml' elif url.path == '/v1/cities': path = '../captures/api/v1/cities.xml' elif url.path == '/v1/switchcity': path = '../captures/api/v1/switchcity.xml' elif url.path == '/v1/tips': path = '../captures/api/v1/tips.xml' elif url.path == '/v1/checkin': path = '../captures/api/v1/checkin.xml' elif url.path == '/history/12345.rss': path = '../captures/api/v1/feed.xml' if path is None: self.send_error(404) else: logging.warn('Using: %s' % path) return open(path) def send_200(self): self.send_response(200) self.send_header('Content-type', 'text/xml') self.end_headers() def main(): if len(sys.argv) > 1: port = int(sys.argv[1]) else: port = 8080 server_address = ('0.0.0.0', port) httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler) sa = httpd.socket.getsockname() print "Serving HTTP on", sa[0], "port", sa[1], "..." 
httpd.serve_forever() if __name__ == '__main__': main()
[ [ 1, 0, 0.0588, 0.0118, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0706, 0.0118, 0, 0.66, 0.125, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0824, 0.0118, 0, 0...
[ "import logging", "import shutil", "import sys", "import urlparse", "import SimpleHTTPServer", "import BaseHTTPServer", "class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n \"\"\"Handle playfoursquare.com requests, for testing.\"\"\"\n\n def do_GET(self):\n logging.warn('do_GET: %s, %s',...
#!/usr/bin/python import datetime import sys import textwrap import common from xml.dom import pulldom PARSER = """\ /** * Copyright 2009 Joe LaPenna */ package com.joelapenna.foursquare.parsers; import com.joelapenna.foursquare.Foursquare; import com.joelapenna.foursquare.error.FoursquareError; import com.joelapenna.foursquare.error.FoursquareParseException; import com.joelapenna.foursquare.types.%(type_name)s; import org.xmlpull.v1.XmlPullParser; import org.xmlpull.v1.XmlPullParserException; import java.io.IOException; import java.util.logging.Level; import java.util.logging.Logger; /** * Auto-generated: %(timestamp)s * * @author Joe LaPenna (joe@joelapenna.com) * @param <T> */ public class %(type_name)sParser extends AbstractParser<%(type_name)s> { private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName()); private static final boolean DEBUG = Foursquare.PARSER_DEBUG; @Override public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException, FoursquareError, FoursquareParseException { parser.require(XmlPullParser.START_TAG, null, null); %(type_name)s %(top_node_name)s = new %(type_name)s(); while (parser.nextTag() == XmlPullParser.START_TAG) { String name = parser.getName(); %(stanzas)s } else { // Consume something we don't understand. 
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name); skipSubTree(parser); } } return %(top_node_name)s; } }""" BOOLEAN_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText())); """ GROUP_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser)); """ COMPLEX_STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser)); """ STANZA = """\ } else if ("%(name)s".equals(name)) { %(top_node_name)s.set%(camel_name)s(parser.nextText()); """ def main(): type_name, top_node_name, attributes = common.WalkNodesForAttributes( sys.argv[1]) GenerateClass(type_name, top_node_name, attributes) def GenerateClass(type_name, top_node_name, attributes): """generate it. type_name: the type of object the parser returns top_node_name: the name of the object the parser returns. per common.WalkNodsForAttributes """ stanzas = [] for name in sorted(attributes): typ, children = attributes[name] replacements = Replacements(top_node_name, name, typ, children) if typ == common.BOOLEAN: stanzas.append(BOOLEAN_STANZA % replacements) elif typ == common.GROUP: stanzas.append(GROUP_STANZA % replacements) elif typ in common.COMPLEX: stanzas.append(COMPLEX_STANZA % replacements) else: stanzas.append(STANZA % replacements) if stanzas: # pop off the extranious } else for the first conditional stanza. 
stanzas[0] = stanzas[0].replace('} else ', '', 1) replacements = Replacements(top_node_name, name, typ, [None]) replacements['stanzas'] = '\n'.join(stanzas).strip() print PARSER % replacements def Replacements(top_node_name, name, typ, children): # CameCaseClassName type_name = ''.join([word.capitalize() for word in top_node_name.split('_')]) # CamelCaseClassName camel_name = ''.join([word.capitalize() for word in name.split('_')]) # camelCaseLocalName attribute_name = camel_name.lower().capitalize() # mFieldName field_name = 'm' + camel_name if children[0]: sub_parser_camel_case = children[0] + 'Parser' else: sub_parser_camel_case = (camel_name[:-1] + 'Parser') return { 'type_name': type_name, 'name': name, 'top_node_name': top_node_name, 'camel_name': camel_name, 'parser_name': typ + 'Parser', 'attribute_name': attribute_name, 'field_name': field_name, 'typ': typ, 'timestamp': datetime.datetime.now(), 'sub_parser_camel_case': sub_parser_camel_case, 'sub_type': children[0] } if __name__ == '__main__': main()
[ [ 1, 0, 0.0201, 0.0067, 0, 0.66, 0, 426, 0, 1, 0, 0, 426, 0, 0 ], [ 1, 0, 0.0268, 0.0067, 0, 0.66, 0.0769, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0336, 0.0067, 0, ...
[ "import datetime", "import sys", "import textwrap", "import common", "from xml.dom import pulldom", "PARSER = \"\"\"\\\n/**\n * Copyright 2009 Joe LaPenna\n */\n\npackage com.joelapenna.foursquare.parsers;\n\nimport com.joelapenna.foursquare.Foursquare;", "BOOLEAN_STANZA = \"\"\"\\\n } else i...
#!/usr/bin/python """ Pull a oAuth protected page from foursquare. Expects ~/.oget to contain (one on each line): CONSUMER_KEY CONSUMER_KEY_SECRET USERNAME PASSWORD Don't forget to chmod 600 the file! """ import httplib import os import re import sys import urllib import urllib2 import urlparse import user from xml.dom import pulldom from xml.dom import minidom import oauth """From: http://groups.google.com/group/foursquare-api/web/oauth @consumer = OAuth::Consumer.new("consumer_token","consumer_secret", { :site => "http://foursquare.com", :scheme => :header, :http_method => :post, :request_token_path => "/oauth/request_token", :access_token_path => "/oauth/access_token", :authorize_path => "/oauth/authorize" }) """ SERVER = 'api.foursquare.com:80' CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'} SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1() AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange' def parse_auth_response(auth_response): return ( re.search('<oauth_token>(.*)</oauth_token>', auth_response).groups()[0], re.search('<oauth_token_secret>(.*)</oauth_token_secret>', auth_response).groups()[0] ) def create_signed_oauth_request(username, password, consumer): oauth_request = oauth.OAuthRequest.from_consumer_and_token( consumer, http_method='POST', http_url=AUTHEXCHANGE_URL, parameters=dict(fs_username=username, fs_password=password)) oauth_request.sign_request(SIGNATURE_METHOD, consumer, None) return oauth_request def main(): url = urlparse.urlparse(sys.argv[1]) # Nevermind that the query can have repeated keys. 
parameters = dict(urlparse.parse_qsl(url.query)) password_file = open(os.path.join(user.home, '.oget')) lines = [line.strip() for line in password_file.readlines()] if len(lines) == 4: cons_key, cons_key_secret, username, password = lines access_token = None else: cons_key, cons_key_secret, username, password, token, secret = lines access_token = oauth.OAuthToken(token, secret) consumer = oauth.OAuthConsumer(cons_key, cons_key_secret) if not access_token: oauth_request = create_signed_oauth_request(username, password, consumer) connection = httplib.HTTPConnection(SERVER) headers = {'Content-Type' :'application/x-www-form-urlencoded'} connection.request(oauth_request.http_method, AUTHEXCHANGE_URL, body=oauth_request.to_postdata(), headers=headers) auth_response = connection.getresponse().read() token = parse_auth_response(auth_response) access_token = oauth.OAuthToken(*token) open(os.path.join(user.home, '.oget'), 'w').write('\n'.join(( cons_key, cons_key_secret, username, password, token[0], token[1]))) oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, access_token, http_method='POST', http_url=url.geturl(), parameters=parameters) oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token) connection = httplib.HTTPConnection(SERVER) connection.request(oauth_request.http_method, oauth_request.to_url(), body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER) print connection.getresponse().read() #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ') if __name__ == '__main__': main()
[ [ 8, 0, 0.0631, 0.0991, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1261, 0.009, 0, 0.66, 0.05, 2, 0, 1, 0, 0, 2, 0, 0 ], [ 1, 0, 0.1351, 0.009, 0, 0.66, 0....
[ "\"\"\"\nPull a oAuth protected page from foursquare.\n\nExpects ~/.oget to contain (one on each line):\nCONSUMER_KEY\nCONSUMER_KEY_SECRET\nUSERNAME\nPASSWORD", "import httplib", "import os", "import re", "import sys", "import urllib", "import urllib2", "import urlparse", "import user", "from xml....
#!/usr/bin/python import os import subprocess import sys BASEDIR = '../main/src/com/joelapenna/foursquare' TYPESDIR = '../captures/types/v1' captures = sys.argv[1:] if not captures: captures = os.listdir(TYPESDIR) for f in captures: basename = f.split('.')[0] javaname = ''.join([c.capitalize() for c in basename.split('_')]) fullpath = os.path.join(TYPESDIR, f) typepath = os.path.join(BASEDIR, 'types', javaname + '.java') parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java') cmd = 'python gen_class.py %s > %s' % (fullpath, typepath) print cmd subprocess.call(cmd, stdout=sys.stdout, shell=True) cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath) print cmd subprocess.call(cmd, stdout=sys.stdout, shell=True)
[ [ 1, 0, 0.1111, 0.037, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.1481, 0.037, 0, 0.66, 0.1429, 394, 0, 1, 0, 0, 394, 0, 0 ], [ 1, 0, 0.1852, 0.037, 0, 0.6...
[ "import os", "import subprocess", "import sys", "BASEDIR = '../main/src/com/joelapenna/foursquare'", "TYPESDIR = '../captures/types/v1'", "captures = sys.argv[1:]", "if not captures:\n captures = os.listdir(TYPESDIR)", " captures = os.listdir(TYPESDIR)", "for f in captures:\n basename = f.split('...
#!/usr/bin/python import logging from xml.dom import minidom from xml.dom import pulldom BOOLEAN = "boolean" STRING = "String" GROUP = "Group" # Interfaces that all FoursquareTypes implement. DEFAULT_INTERFACES = ['FoursquareType'] # Interfaces that specific FoursqureTypes implement. INTERFACES = { } DEFAULT_CLASS_IMPORTS = [ ] CLASS_IMPORTS = { # 'Checkin': DEFAULT_CLASS_IMPORTS + [ # 'import com.joelapenna.foursquare.filters.VenueFilterable' # ], # 'Venue': DEFAULT_CLASS_IMPORTS + [ # 'import com.joelapenna.foursquare.filters.VenueFilterable' # ], # 'Tip': DEFAULT_CLASS_IMPORTS + [ # 'import com.joelapenna.foursquare.filters.VenueFilterable' # ], } COMPLEX = [ 'Group', 'Badge', 'Beenhere', 'Checkin', 'CheckinResponse', 'City', 'Credentials', 'Data', 'Mayor', 'Rank', 'Score', 'Scoring', 'Settings', 'Stats', 'Tags', 'Tip', 'User', 'Venue', ] TYPES = COMPLEX + ['boolean'] def WalkNodesForAttributes(path): """Parse the xml file getting all attributes. <venue> <attribute>value</attribute> </venue> Returns: type_name - The java-style name the top node will have. "Venue" top_node_name - unadultured name of the xml stanza, probably the type of java class we're creating. "venue" attributes - {'attribute': 'value'} """ doc = pulldom.parse(path) type_name = None top_node_name = None attributes = {} level = 0 for event, node in doc: # For skipping parts of a tree. if level > 0: if event == pulldom.END_ELEMENT: level-=1 logging.warn('(%s) Skip end: %s' % (str(level), node)) continue elif event == pulldom.START_ELEMENT: logging.warn('(%s) Skipping: %s' % (str(level), node)) level+=1 continue if event == pulldom.START_ELEMENT: logging.warn('Parsing: ' + node.tagName) # Get the type name to use. 
if type_name is None: type_name = ''.join([word.capitalize() for word in node.tagName.split('_')]) top_node_name = node.tagName logging.warn('Found Top Node Name: ' + top_node_name) continue typ = node.getAttribute('type') child = node.getAttribute('child') # We don't want to walk complex types. if typ in COMPLEX: logging.warn('Found Complex: ' + node.tagName) level = 1 elif typ not in TYPES: logging.warn('Found String: ' + typ) typ = STRING else: logging.warn('Found Type: ' + typ) logging.warn('Adding: ' + str((node, typ))) attributes.setdefault(node.tagName, (typ, [child])) logging.warn('Attr: ' + str((type_name, top_node_name, attributes))) return type_name, top_node_name, attributes
[ [ 1, 0, 0.0263, 0.0088, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0439, 0.0088, 0, 0.66, 0.0833, 290, 0, 1, 0, 0, 290, 0, 0 ], [ 1, 0, 0.0526, 0.0088, 0, ...
[ "import logging", "from xml.dom import minidom", "from xml.dom import pulldom", "BOOLEAN = \"boolean\"", "STRING = \"String\"", "GROUP = \"Group\"", "DEFAULT_INTERFACES = ['FoursquareType']", "INTERFACES = {\n}", "DEFAULT_CLASS_IMPORTS = [\n]", "CLASS_IMPORTS = {\n# 'Checkin': DEFAULT_CLASS_IMP...
#!/usr/bin/env python from distutils.core import setup from crawle import VERSION setup(name='CRAWL-E', version=VERSION, description='Highly distributed web crawling framework', author='Bryce Boe', author_email='bboe (_at_) cs.ucsb.edu', url='http://code.google.com/p/crawl-e', py_modules = ['crawle'] )
[ [ 1, 0, 0.1667, 0.0833, 0, 0.66, 0, 152, 0, 1, 0, 0, 152, 0, 0 ], [ 1, 0, 0.25, 0.0833, 0, 0.66, 0.5, 441, 0, 1, 0, 0, 441, 0, 0 ], [ 8, 0, 0.7083, 0.6667, 0, 0.66,...
[ "from distutils.core import setup", "from crawle import VERSION", "setup(name='CRAWL-E',\n version=VERSION,\n description='Highly distributed web crawling framework',\n author='Bryce Boe',\n author_email='bboe (_at_) cs.ucsb.edu',\n url='http://code.google.com/p/crawl-e',\n py_modu...
print("输入:") year=int(input()) print(year) for i in range(1,100): if (year%400==0 or (year%4==0 and year%100!=0)): print('year') else: print("平年!")
[ [ 8, 0, 0.0909, 0.0909, 0, 0.66, 0, 535, 3, 1, 0, 0, 0, 0, 1 ], [ 14, 0, 0.1818, 0.0909, 0, 0.66, 0.3333, 34, 3, 1, 0, 0, 901, 10, 2 ], [ 8, 0, 0.3636, 0.0909, 0, 0...
[ "print(\"输入:\")", "year=int(input())", "print(year)", "for i in range(1,100):\n if (year%400==0 or (year%4==0 and year%100!=0)):\n print('year')\n else:\n print(\"平年!\")", " if (year%400==0 or (year%4==0 and year%100!=0)):\n print('year')\n else:\n print(\"平年!\")", ...
#!/usr/bin/python # Filename: using_file.py import re i=1000 poem="" f = open('poem.txt', 'w') # open for 'w'riting while i>0: poem=poem+"B" i=i-1 f.write(poem) # write text to file f.close() # close the file
[ [ 1, 0, 0.25, 0.0833, 0, 0.66, 0, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 14, 0, 0.3333, 0.0833, 0, 0.66, 0.1667, 826, 1, 0, 0, 0, 0, 1, 0 ], [ 14, 0, 0.4167, 0.0833, 0, 0....
[ "import re", "i=1000", "poem=\"\"", "f = open('poem.txt', 'w') # open for 'w'riting", "while i>0:\n poem=poem+\"B\"\n i=i-1", " poem=poem+\"B\"", " i=i-1", "f.write(poem) # write text to file", "f.close() # close the file" ]
bigFile= open("big.txt", 'w') bigFile.seek(28132) #大小自己定,需要几个G, fileSize就是几,速度绝对快 bigFile.write('\x00') bigFile.close()
[ [ 14, 0, 0.25, 0.25, 0, 0.66, 0, 846, 3, 2, 0, 0, 693, 10, 1 ], [ 8, 0, 0.5, 0.25, 0, 0.66, 0.3333, 66, 3, 1, 0, 0, 0, 0, 1 ], [ 8, 0, 0.75, 0.25, 0, 0.66, 0.66...
[ "bigFile= open(\"big.txt\", 'w')", "bigFile.seek(28132) #大小自己定,需要几个G, fileSize就是几,速度绝对快", "bigFile.write('\\x00')", "bigFile.close()" ]
import os def DirFile(rootDir): list_dirs = os.walk(rootDir) for root,dirs,files in list_dirs: for d in dirs: print (os.path.join(root,d)) for f in files: print (os.path.join(root,f)) DirFile('c:')
[ [ 1, 0, 0.1111, 0.1111, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 2, 0, 0.5556, 0.7778, 0, 0.66, 0.5, 298, 0, 1, 0, 0, 0, 0, 5 ], [ 14, 1, 0.3333, 0.1111, 1, 0.43...
[ "import os", "def DirFile(rootDir):\n\tlist_dirs = os.walk(rootDir)\n\tfor root,dirs,files in list_dirs:\n\t\tfor d in dirs:\n\t\t\tprint (os.path.join(root,d))\n\t\tfor f in files:\n\t\t\tprint (os.path.join(root,f))", "\tlist_dirs = os.walk(rootDir)", "\tfor root,dirs,files in list_dirs:\n\t\tfor d in dirs:...
from sys import stdin a, b, c = map(int, stdin.readline().strip().split()) print "%.3lf" % ((a+b+c)/3.0)
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 256, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "a, b, c = map(int, stdin.readline().strip().split())", "print(\"%.3lf\" % ((a+b+c)/3.0))" ]
from sys import stdin from math import * r, h = map(float, stdin.readline().strip().split()) print "Area = %.3lf" % (pi*r*r*2 + 2*pi*r*h)
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.5, 0.25, 0, 0.66, 0.3333, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 14, 0, 0.75, 0.25, 0, 0.66, 0....
[ "from sys import stdin", "from math import *", "r, h = map(float, stdin.readline().strip().split())", "print(\"Area = %.3lf\" % (pi*r*r*2 + 2*pi*r*h))" ]
from sys import stdin from math import * n, = map(int, stdin.readline().strip().split()) rad = radians(n) print "%.3lf %.3lf" % (sin(rad), cos(rad))
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.4, 0.2, 0, 0.66, 0.25, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.5, ...
[ "from sys import stdin", "from math import *", "n, = map(int, stdin.readline().strip().split())", "rad = radians(n)", "print(\"%.3lf %.3lf\" % (sin(rad), cos(rad)))" ]
from sys import stdin n, = map(int, stdin.readline().strip().split()) print n*(n+1)/2
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 773, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "n, = map(int, stdin.readline().strip().split())", "print(n*(n+1)/2)" ]
from sys import stdin a, b = map(int, stdin.readline().strip().split()) print b, a
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.5, 0.25, 0, 0.66, 0.5, 127, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 0.75, 0.25, 0, 0.66, 1, ...
[ "from sys import stdin", "a, b = map(int, stdin.readline().strip().split())", "print(b, a)" ]
from sys import stdin n, m = map(int, stdin.readline().strip().split()) a = (4*n-m)/2 b = n-a if m % 2 == 1 or a < 0 or b < 0: print "No answer" else: print a, b
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.4, 0.2, 0, 0.66, 0.3333, 51, 3, 2, 0, 0, 53, 10, 4 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.6667,...
[ "from sys import stdin", "n, m = map(int, stdin.readline().strip().split())", "a = (4*n-m)/2", "b = n-a" ]
from sys import stdin n, = map(int, stdin.readline().strip().split()) money = n * 95 if money >= 300: money *= 0.85 print "%.2lf" % money
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.4, 0.2, 0, 0.66, 0.25, 773, 3, 2, 0, 0, 53, 10, 4 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.5, ...
[ "from sys import stdin", "n, = map(int, stdin.readline().strip().split())", "money = n * 95", "if money >= 300: money *= 0.85", "print(\"%.2lf\" % money)" ]
from sys import stdin n, = map(int, stdin.readline().strip().split()) print ["yes", "no"][n % 2]
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 773, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "n, = map(int, stdin.readline().strip().split())", "print [\"yes\", \"no\"][n % 2]" ]
from sys import stdin from math import * x1, y1, x2, y2 = map(float, stdin.readline().strip().split()) print "%.3lf" % hypot((x1-x2), (y1-y2))
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.5, 0.25, 0, 0.66, 0.3333, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 14, 0, 0.75, 0.25, 0, 0.66, 0....
[ "from sys import stdin", "from math import *", "x1, y1, x2, y2 = map(float, stdin.readline().strip().split())", "print(\"%.3lf\" % hypot((x1-x2), (y1-y2)))" ]
from sys import stdin n = stdin.readline().strip().split()[0] print '%c%c%c' % (n[2], n[1], n[0])
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.5, 0.25, 0, 0.66, 0.5, 773, 6, 0, 0, 0, 0, 0, 3 ], [ 8, 0, 0.75, 0.25, 0, 0.66, 1, ...
[ "from sys import stdin", "n = stdin.readline().strip().split()[0]", "print('%c%c%c' % (n[2], n[1], n[0]))" ]
from sys import stdin x, = map(float, stdin.readline().strip().split()) print abs(x)
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 190, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "x, = map(float, stdin.readline().strip().split())", "print(abs(x))" ]
from sys import stdin from calendar import isleap year, = map(int, stdin.readline().strip().split()) if isleap(year): print "yes" else: print "no"
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 917, 0, 1, 0, 0, 917, 0, 0 ], [ 14, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "from calendar import isleap", "year, = map(int, stdin.readline().strip().split())" ]
from sys import stdin f, = map(float, stdin.readline().strip().split()) print "%.3lf" % (5*(f-32)/9)
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 899, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "f, = map(float, stdin.readline().strip().split())", "print(\"%.3lf\" % (5*(f-32)/9))" ]
from sys import stdin a, b, c = map(int, stdin.readline().strip().split()) if a*a + b*b == c*c or a*a + c*c == b*b or b*b + c*c == a*a: print "yes" elif a + b <= c or a + c <= b or b + c <= a: print "not a triangle" else: print "no"
[ [ 1, 0, 1, 1, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ] ]
[ "from sys import stdin" ]
from sys import stdin a = map(int, stdin.readline().strip().split()) a.sort() print a[0], a[1], a[2]
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.4, 0.2, 0, 0.66, 0.3333, 475, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 0.6, 0.2, 0, 0.66, 0.6667,...
[ "from sys import stdin", "a = map(int, stdin.readline().strip().split())", "a.sort()", "print(a[0], a[1], a[2])" ]
s = i = 0 while True: term = 1.0 / (i*2+1) s += term * ((-1)**i) if term < 1e-6: break i += 1 print "%.6lf" % s
[ [ 14, 0, 0.1429, 0.1429, 0, 0.66, 0, 553, 1, 0, 0, 0, 0, 1, 0 ], [ 5, 0, 0.5714, 0.7143, 0, 0.66, 0.5, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 1, 0.4286, 0.1429, 1, 0.77, ...
[ "s = i = 0", "while True:\n term = 1.0 / (i*2+1)\n s += term * ((-1)**i)\n if term < 1e-6: break\n i += 1", " term = 1.0 / (i*2+1)", " if term < 1e-6: break", "print(\"%.6lf\" % s)" ]
from sys import stdin from decimal import * a, b, c = map(int, stdin.readline().strip().split()) getcontext().prec = c print Decimal(a) / Decimal(b)
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.4, 0.2, 0, 0.66, 0.25, 349, 0, 1, 0, 0, 349, 0, 0 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.5, ...
[ "from sys import stdin", "from decimal import *", "a, b, c = map(int, stdin.readline().strip().split())", "getcontext().prec = c", "print(Decimal(a) / Decimal(b))" ]
from sys import stdin n = int(stdin.readline().strip()) print "%.3lf" % sum([1.0/x for x in range(1,n+1)])
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 773, 3, 1, 0, 0, 901, 10, 3 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "n = int(stdin.readline().strip())", "print(\"%.3lf\" % sum([1.0/x for x in range(1,n+1)]))" ]
from sys import stdin print len(stdin.readline().strip())
[ [ 1, 0, 0.5, 0.5, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 8, 0, 1, 0.5, 0, 0.66, 1, 535, 3, 1, 0, 0, 0, 0, 4 ] ]
[ "from sys import stdin", "print(len(stdin.readline().strip()))" ]
from itertools import product from math import * def issqrt(n): s = int(floor(sqrt(n))) return s*s == n aabb = [a*1100+b*11 for a,b in product(range(1,10),range(10))] print ' '.join(map(str, filter(issqrt, aabb)))
[ [ 1, 0, 0.1111, 0.1111, 0, 0.66, 0, 808, 0, 1, 0, 0, 808, 0, 0 ], [ 1, 0, 0.2222, 0.1111, 0, 0.66, 0.25, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 2, 0, 0.5556, 0.3333, 0, 0....
[ "from itertools import product", "from math import *", "def issqrt(n):\n s = int(floor(sqrt(n)))\n return s*s == n", " s = int(floor(sqrt(n)))", " return s*s == n", "aabb = [a*1100+b*11 for a,b in product(range(1,10),range(10))]", "print(' '.join(map(str, filter(issqrt, aabb))))" ]
from sys import stdin a = map(int, stdin.readline().strip().split()) print "%d %d %.3lf" % (min(a), max(a), float(sum(a)) / len(a))
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 475, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "a = map(int, stdin.readline().strip().split())", "print(\"%d %d %.3lf\" % (min(a), max(a), float(sum(a)) / len(a)))" ]
from sys import stdin def cycle(n): if n == 1: return 0 elif n % 2 == 1: return cycle(n*3+1) + 1 else: return cycle(n/2) + 1 n = int(stdin.readline().strip()) print cycle(n)
[ [ 1, 0, 0.1111, 0.1111, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 2, 0, 0.5, 0.4444, 0, 0.66, 0.3333, 276, 0, 1, 1, 0, 0, 0, 2 ], [ 4, 1, 0.5556, 0.3333, 1, 0.95,...
[ "from sys import stdin", "def cycle(n):\n if n == 1: return 0\n elif n % 2 == 1: return cycle(n*3+1) + 1\n else: return cycle(n/2) + 1", " if n == 1: return 0\n elif n % 2 == 1: return cycle(n*3+1) + 1\n else: return cycle(n/2) + 1", " if n == 1: return 0", " elif n % 2 == 1: return cycle(n*3+1) + 1...
from sys import stdin n = int(stdin.readline().strip()) count = n*2-1 for i in range(n): print ' '*i + '#'*count count -= 2
[ [ 1, 0, 0.1667, 0.1667, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.3333, 0.1667, 0, 0.66, 0.3333, 773, 3, 1, 0, 0, 901, 10, 3 ], [ 14, 0, 0.5, 0.1667, 0, ...
[ "from sys import stdin", "n = int(stdin.readline().strip())", "count = n*2-1", "for i in range(n):\n print(' '*i + '#'*count)\n count -= 2", " print(' '*i + '#'*count)" ]
for abc in range(123, 329): big = str(abc) + str(abc*2) + str(abc*3) if(''.join(sorted(big)) == '123456789'): print abc, abc*2, abc*3
[ [ 6, 0, 0.75, 1, 0, 0.66, 0, 38, 3, 0, 0, 0, 0, 0, 4 ], [ 14, 1, 1, 0.5, 1, 0.89, 0, 235, 4, 0, 0, 0, 0, 0, 3 ] ]
[ "for abc in range(123, 329):\n big = str(abc) + str(abc*2) + str(abc*3)", " big = str(abc) + str(abc*2) + str(abc*3)" ]
from sys import stdin n, m = map(int, stdin.readline().strip().split()) print "%.5lf" % sum([1.0/i/i for i in range(n,m+1)])
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 51, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "n, m = map(int, stdin.readline().strip().split())", "print(\"%.5lf\" % sum([1.0/i/i for i in range(n,m+1)]))" ]
from sys import stdin data = map(int, stdin.readline().strip().split()) n, m = data[0], data[-1] data = data[1:-1] print len(filter(lambda x: x < m, data))
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.4, 0.2, 0, 0.66, 0.25, 929, 3, 2, 0, 0, 53, 10, 4 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.5, ...
[ "from sys import stdin", "data = map(int, stdin.readline().strip().split())", "n, m = data[0], data[-1]", "data = data[1:-1]", "print(len(filter(lambda x: x < m, data)))" ]
from sys import stdin def solve(a, b, c): for i in range(10, 101): if i % 3 == a and i % 5 == b and i % 7 == c: print i return print 'No answer' a, b, c = map(int, stdin.readline().strip().split()) solve(a, b, c)
[ [ 1, 0, 0.0909, 0.0909, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 2, 0, 0.5, 0.5455, 0, 0.66, 0.3333, 599, 0, 3, 0, 0, 0, 0, 3 ], [ 6, 1, 0.5, 0.3636, 1, 0.3, ...
[ "from sys import stdin", "def solve(a, b, c):\n for i in range(10, 101):\n if i % 3 == a and i % 5 == b and i % 7 == c:\n print(i)\n return\n print('No answer')", " for i in range(10, 101):\n if i % 3 == a and i % 5 == b and i % 7 == c:\n print(i)\n return", " if i % 3 == a and...
from itertools import product sol = [a*100+b*10+c for a,b,c in product(range(1,10), range(10), range(10)) if a**3+b**3+c**3 == a*100+b*10+c] print '\n'.join(map(str, sol))
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 808, 0, 1, 0, 0, 808, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 723, 5, 0, 0, 0, 0, 0, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from itertools import product", "sol = [a*100+b*10+c for a,b,c in product(range(1,10), range(10), range(10)) if a**3+b**3+c**3 == a*100+b*10+c]", "print('\\n'.join(map(str, sol)))" ]
from sys import stdin from math import * n = int(stdin.readline().strip()) print sum(map(factorial, range(1,n+1))) % (10**6)
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.4, 0.2, 0, 0.66, 0.3333, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.6667,...
[ "from sys import stdin", "from math import *", "n = int(stdin.readline().strip())", "print(sum(map(factorial, range(1,n+1))) % (10**6))" ]
#!/usr/bin/python # Copyright 2011 Google, Inc. All Rights Reserved. # simple script to walk source tree looking for third-party licenses # dumps resulting html page to stdout import os, re, mimetypes, sys # read source directories to scan from command line SOURCE = sys.argv[1:] # regex to find /* */ style comment blocks COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL) # regex used to detect if comment block is a license COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE) COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE) EXCLUDE_TYPES = [ "application/xml", "image/png", ] # list of known licenses; keys are derived by stripping all whitespace and # forcing to lowercase to help combine multiple files that have same license. KNOWN_LICENSES = {} class License: def __init__(self, license_text): self.license_text = license_text self.filenames = [] # add filename to the list of files that have the same license text def add_file(self, filename): if filename not in self.filenames: self.filenames.append(filename) LICENSE_KEY = re.compile(r"[^\w]") def find_license(license_text): # TODO(alice): a lot these licenses are almost identical Apache licenses. # Most of them differ in origin/modifications. Consider combining similar # licenses. 
license_key = LICENSE_KEY.sub("", license_text).lower() if license_key not in KNOWN_LICENSES: KNOWN_LICENSES[license_key] = License(license_text) return KNOWN_LICENSES[license_key] def discover_license(exact_path, filename): # when filename ends with LICENSE, assume applies to filename prefixed if filename.endswith("LICENSE"): with open(exact_path) as file: license_text = file.read() target_filename = filename[:-len("LICENSE")] if target_filename.endswith("."): target_filename = target_filename[:-1] find_license(license_text).add_file(target_filename) return None # try searching for license blocks in raw file mimetype = mimetypes.guess_type(filename) if mimetype in EXCLUDE_TYPES: return None with open(exact_path) as file: raw_file = file.read() # include comments that have both "license" and "copyright" in the text for comment in COMMENT_BLOCK.finditer(raw_file): comment = comment.group(1) if COMMENT_LICENSE.search(comment) is None: continue if COMMENT_COPYRIGHT.search(comment) is None: continue find_license(comment).add_file(filename) for source in SOURCE: for root, dirs, files in os.walk(source): for name in files: discover_license(os.path.join(root, name), name) print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>" for license in KNOWN_LICENSES.values(): print "<h3>Notices for files:</h3><ul>" filenames = license.filenames filenames.sort() for filename in filenames: print "<li>%s</li>" % (filename) print "</ul>" print "<pre>%s</pre>" % license.license_text print "</body></html>"
[ [ 1, 0, 0.0816, 0.0102, 0, 0.66, 0, 688, 0, 4, 0, 0, 688, 0, 0 ], [ 14, 0, 0.1224, 0.0102, 0, 0.66, 0.0714, 792, 6, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1531, 0.0102, 0, ...
[ "import os, re, mimetypes, sys", "SOURCE = sys.argv[1:]", "COMMENT_BLOCK = re.compile(r\"(/\\*.+?\\*/)\", re.MULTILINE | re.DOTALL)", "COMMENT_LICENSE = re.compile(r\"(license)\", re.IGNORECASE)", "COMMENT_COPYRIGHT = re.compile(r\"(copyright)\", re.IGNORECASE)", "EXCLUDE_TYPES = [\n \"application/xml\...
#!/usr/bin/env python
# App Engine handler that serves the voter info gadget XML.
# (Dead commented-out CGI/debug-dump code removed.)

from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app


class GadgetHandler(webapp.RequestHandler):
    """Serves voter-info-gadget.xml, filling in the {{debug}} placeholder."""

    def get(self, dump, debug):
        # The route captures an optional "dump-" prefix (currently unused)
        # and an optional debug path segment substituted into the gadget.
        self.response.headers['Content-Type'] = 'application/xml'
        if debug is None:
            debug = ''
        # Context manager closes the handle even if the read fails.
        with open('voter-info-gadget.xml', 'r') as f:
            xml = f.read()
        xml = xml.replace('{{debug}}', debug)  # poor man's template
        self.response.out.write(xml)


application = webapp.WSGIApplication(
    [(r'/(dump-)?(.+)?voter-info-gadget\.xml', GadgetHandler)],
    debug=True)


def main():
    run_wsgi_app(application)


if __name__ == '__main__':
    main()
[ [ 1, 0, 0.2549, 0.0196, 0, 0.66, 0, 167, 0, 1, 0, 0, 167, 0, 0 ], [ 1, 0, 0.2745, 0.0196, 0, 0.66, 0.2, 327, 0, 1, 0, 0, 327, 0, 0 ], [ 3, 0, 0.7059, 0.2157, 0, 0.6...
[ "from google.appengine.ext import webapp", "from google.appengine.ext.webapp.util import run_wsgi_app", "class GadgetHandler( webapp.RequestHandler ):\n\tdef get( self, dump, debug ):\n\t\tself.response.headers['Content-Type'] = 'application/xml'\n\t\tif debug == None: debug = ''\n\t\tf = open( 'voter-info-gadg...
#!/usr/bin/env python
"""Geographic helpers: lat/lng bounds arithmetic and Mercator projection."""

import math


# z() and zz() are a quick and dirty hack to deal with the Aleutian Islands,
# which straddle the 180-degree meridian.  We should use a more correct
# algorithm for extendBounds like the one in the Maps API, but this is good
# enough to fix the immediate problem.
def z(n):
    """Map an eastern-hemisphere longitude into a continuous -360..0 range."""
    if n > 0.0:
        return n - 360.0
    return n


def zz(n):
    """Inverse of z(): bring a shifted longitude back into -180..180."""
    if n < -180.0:
        return n + 360.0
    return n


def minlat(a, b):
    """Smaller of two latitudes; a None accumulator yields the other value."""
    if a is None:
        return b
    return min(a, b)


def maxlat(a, b):
    """Larger of two latitudes; a None accumulator yields the other value."""
    if a is None:
        return b
    return max(a, b)


def minlng(a, b):
    """Western-most longitude, tolerant of the 180 meridian (see z/zz)."""
    if a is None:
        return b
    return zz(min(z(a), z(b)))


def maxlng(a, b):
    """Eastern-most longitude, tolerant of the 180 meridian (see z/zz)."""
    if a is None:
        return b
    return zz(max(z(a), z(b)))


class Geo:
    """Converts geographic coordinates/bounds to pixel space at a zoom level.

    Bounds are [[west, south], [east, north]] pairs; points are [lng, lat].
    """

    def __init__(self, zoom=0, tilesize=256):
        self.zoom = zoom
        self.tilesize = tilesize

    def extendBounds(self, a, b):
        """Return bounds a grown to also cover bounds b (None entries yield)."""
        return [
            [minlng(a[0][0], b[0][0]), minlat(a[0][1], b[0][1])],
            [maxlng(a[1][0], b[1][0]), maxlat(a[1][1], b[1][1])],
        ]

    def inflateBounds(self, a, n):
        """Return bounds a padded outward by n units on every side."""
        return [[a[0][0] - n, a[0][1] - n], [a[1][0] + n, a[1][1] + n]]

    def offsetBounds(self, a, pt):
        """Return bounds a translated by +pt."""
        return [[a[0][0] + pt[0], a[0][1] + pt[1]],
                [a[1][0] + pt[0], a[1][1] + pt[1]]]

    def offsetBoundsMinus(self, a, pt):
        """Return bounds a translated by -pt."""
        return [[a[0][0] - pt[0], a[0][1] - pt[1]],
                [a[1][0] - pt[0], a[1][1] - pt[1]]]

    def scalePoint(self, pt, scale):
        """Return point pt multiplied by a scalar."""
        return [pt[0] * scale, pt[1] * scale]

    def scaleBounds(self, a, scale):
        """Return bounds a with both corners multiplied by a scalar."""
        return [self.scalePoint(a[0], scale), self.scalePoint(a[1], scale)]

    def tileBounds(self, bounds):
        """Snap pixel bounds outward to tile edges; return (offset, size)."""
        def lo(n):
            return int(n / self.tilesize) * self.tilesize

        def hi(n):
            return (int(n / self.tilesize) + 1) * self.tilesize

        # Renamed from min/max: the originals shadowed the builtins.
        sw, ne = bounds[0], bounds[1]
        offset = [lo(sw[0]), lo(sw[1])]
        size = [hi(ne[0]) - offset[0], hi(ne[1]) - offset[1]]
        return offset, size

    def pixFromGeoPoint(self, point):
        """Project a [lng, lat] point to integer pixels (Web Mercator)."""
        lng = point[0]
        if lng > 180.0:
            lng -= 360.0
        lng = lng / 360.0 + 0.5
        lat = point[1]
        # Mercator y: 0.5 at the equator, 0 at the northern map edge.
        lat = 0.5 - (math.log(math.tan(
            (math.pi / 4.0) + (lat * math.pi / 360.0))) / math.pi / 2.0)
        scale = (1 << self.zoom) * self.tilesize
        return [int(lng * scale), int(lat * scale)]

    def pixFromGeoBounds(self, bounds):
        """Project geographic bounds to pixel bounds (the y axis flips)."""
        a = self.pixFromGeoPoint(bounds[0])
        b = self.pixFromGeoPoint(bounds[1])
        return [[a[0], b[1]], [b[0], a[1]]]
[ [ 1, 0, 0.0291, 0.0097, 0, 0.66, 0, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 2, 0, 0.0922, 0.0388, 0, 0.66, 0.1429, 859, 0, 1, 1, 0, 0, 0, 0 ], [ 4, 1, 0.0922, 0.0194, 1, 0....
[ "import math", "def z( n ):\n\tif n > 0.0:\n\t\treturn n - 360.0\n\treturn n", "\tif n > 0.0:\n\t\treturn n - 360.0", "\t\treturn n - 360.0", "\treturn n", "def zz( n ):\n\tif n < -180.0:\n\t\treturn n + 360.0\n\treturn n", "\tif n < -180.0:\n\t\treturn n + 360.0", "\t\treturn n + 360.0", "\treturn ...
#!/usr/bin/env python
"""2008 presidential primary/caucus calendar for the 50 states, DC, and PR.

Exposes three module-level views of the same records:
  array  - list in alphabetical order
  byAbbr - indexed by postal abbreviation
  byName - indexed by full state name
"""


def _party(date, caucus=False):
    """One party's contest: a date (MM-DD) plus an optional caucus marker."""
    contest = {'date': date}
    if caucus:
        contest['type'] = 'caucus'
    return contest


def _state(abbr, name, dem, gop, votesby=None):
    """One state record; votesby flags non-county reporting units."""
    record = {'abbr': abbr, 'name': name, 'parties': {'dem': dem, 'gop': gop}}
    if votesby:
        record['votesby'] = votesby
    return record


array = [
    _state('AL', 'Alabama', _party('02-05'), _party('02-05')),
    _state('AK', 'Alaska', _party('02-05', caucus=True), _party('02-05', caucus=True)),
    _state('AZ', 'Arizona', _party('02-05'), _party('02-05')),
    _state('AR', 'Arkansas', _party('02-05'), _party('02-05')),
    _state('CA', 'California', _party('02-05'), _party('02-05')),
    _state('CO', 'Colorado', _party('02-05', caucus=True), _party('02-05', caucus=True)),
    _state('CT', 'Connecticut', _party('02-05'), _party('02-05'), votesby='town'),
    _state('DE', 'Delaware', _party('02-05'), _party('02-05')),
    _state('DC', 'District of Columbia', _party('02-12'), _party('02-12')),
    _state('FL', 'Florida', _party('01-29'), _party('01-29')),
    _state('GA', 'Georgia', _party('02-05'), _party('02-05')),
    _state('HI', 'Hawaii', _party('02-19', caucus=True), _party('01-25', caucus=True)),
    # Idaho Democrats moved from the 02-05 caucus to the 05-27 primary.
    _state('ID', 'Idaho', _party('05-27'), _party('05-27')),
    _state('IL', 'Illinois', _party('02-05'), _party('02-05')),
    _state('IN', 'Indiana', _party('05-06'), _party('05-06')),
    _state('IA', 'Iowa', _party('01-03', caucus=True), _party('01-03', caucus=True)),
    _state('KS', 'Kansas', _party('02-05', caucus=True), _party('02-09', caucus=True), votesby='district'),
    _state('KY', 'Kentucky', _party('05-20'), _party('05-20')),
    _state('LA', 'Louisiana', _party('02-09'), _party('01-22', caucus=True)),
    _state('ME', 'Maine', _party('02-10', caucus=True), _party('02-01', caucus=True)),
    _state('MD', 'Maryland', _party('02-12'), _party('02-12')),
    _state('MA', 'Massachusetts', _party('02-05'), _party('02-05'), votesby='town'),
    _state('MI', 'Michigan', _party('01-15'), _party('01-15')),
    _state('MN', 'Minnesota', _party('02-05', caucus=True), _party('02-05', caucus=True)),
    _state('MS', 'Mississippi', _party('03-11'), _party('03-11')),
    _state('MO', 'Missouri', _party('02-05'), _party('02-05')),
    _state('MT', 'Montana', _party('06-03'), _party('02-05', caucus=True)),
    _state('NE', 'Nebraska', _party('02-09', caucus=True), _party('05-13')),
    _state('NV', 'Nevada', _party('01-19', caucus=True), _party('01-19', caucus=True)),
    _state('NH', 'New Hampshire', _party('01-08'), _party('01-08'), votesby='town'),
    _state('NJ', 'New Jersey', _party('02-05'), _party('02-05')),
    _state('NM', 'New Mexico', _party('02-05', caucus=True), _party('06-03')),
    _state('NY', 'New York', _party('02-05'), _party('02-05')),
    _state('NC', 'North Carolina', _party('05-06'), _party('05-06')),
    _state('ND', 'North Dakota', _party('02-05', caucus=True), _party('02-05', caucus=True)),
    _state('OH', 'Ohio', _party('03-04'), _party('03-04')),
    _state('OK', 'Oklahoma', _party('02-05'), _party('02-05')),
    _state('OR', 'Oregon', _party('05-20'), _party('05-20')),
    _state('PA', 'Pennsylvania', _party('04-22'), _party('04-22')),
    _state('PR', 'Puerto Rico', _party('06-01'), _party('02-24')),
    _state('RI', 'Rhode Island', _party('03-04'), _party('03-04')),
    _state('SC', 'South Carolina', _party('01-26'), _party('01-19')),
    _state('SD', 'South Dakota', _party('06-03'), _party('06-03')),
    _state('TN', 'Tennessee', _party('02-05'), _party('02-05')),
    _state('TX', 'Texas', _party('03-04'), _party('03-04')),
    _state('UT', 'Utah', _party('02-05'), _party('02-05')),
    _state('VT', 'Vermont', _party('03-04'), _party('03-04'), votesby='town'),
    _state('VA', 'Virginia', _party('02-12'), _party('02-12')),
    _state('WA', 'Washington', _party('02-09', caucus=True), _party('02-09', caucus=True)),
    _state('WV', 'West Virginia', _party('05-13'), _party('05-13')),
    _state('WI', 'Wisconsin', _party('02-19'), _party('02-19')),
    _state('WY', 'Wyoming', _party('03-08', caucus=True), _party('01-05', caucus=True)),
]

# Same records, indexed for direct lookup.
byAbbr = dict((s['abbr'], s) for s in array)
byName = dict((s['name'], s) for s in array)
[ [ 14, 0, 0.4943, 0.977, 0, 0.66, 0, 80, 0, 0, 0, 0, 0, 5, 0 ], [ 14, 0, 0.9862, 0.0023, 0, 0.66, 0.25, 489, 0, 0, 0, 0, 0, 6, 0 ], [ 6, 0, 0.9897, 0.0046, 0, 0.66, ...
[ "array = [\n\t{\n\t\t'abbr': 'AL',\n\t\t'name': 'Alabama',\n\t\t'parties': {\n\t\t\t'dem': { 'date': '02-05' },\n\t\t\t'gop': { 'date': '02-05' }\n\t\t}", "byAbbr = {}", "for state in array:\n\tbyAbbr[ state['abbr'] ] = state", "\tbyAbbr[ state['abbr'] ] = state", "byName = {}", "for state in array:\n\tby...
#!/usr/bin/env python
# shpUtils.py
# Original version by Zachary Forest Johnson
# http://indiemaps.com/blog/index.php/code/pyShapefile.txt
# This version modified by Michael Geary
"""Minimal ESRI shapefile (.shp + .dbf) reader plus polygon metrics."""

from struct import unpack

XY_POINT_RECORD_LENGTH = 16

# Rows of the companion .dbf file; row 0 holds the field names.
db = []


def loadShapefile(filename):
    """Load a .shp file and its .dbf sibling.

    Returns {'type', 'bounds', 'features'}; each feature carries its shape
    geometry plus the matching .dbf attribute row.
    """
    global db
    # Imported lazily so the module is usable without dbfUtils present.
    import dbfUtils
    # with-blocks close the handles (the originals were leaked).
    with open(filename[0:-4] + '.dbf', 'rb') as dbfile:
        db = list(dbfUtils.dbfreader(dbfile))
    with open(filename, 'rb') as fp:
        # Basic configuration lives in the 100-byte file header.
        fp.seek(32)
        filetype = readAndUnpack('i', fp.read(4))
        bounds = readBounds(fp)
        # Records start right after the header.
        fp.seek(100)
        features = []
        while True:
            feature = createRecord(fp)
            if feature is False:
                break
            getPolyInfo(feature)
            features.append(feature)
    return {'type': filetype, 'bounds': bounds, 'features': features}


record_class = {0: 'RecordNull', 1: 'RecordPoint', 8: 'RecordMultiPoint',
                3: 'RecordPolyLine', 5: 'RecordPolygon'}


def createRecord(fp):
    """Read one record; returns {'shape', 'info'} or False at end of file."""
    # Read the big-endian record header; a short read means clean EOF.
    # (The old `record_number == ''` test only worked for text-mode reads.)
    header = fp.read(4)
    if len(header) < 4:
        return False
    record_number = readAndUnpack('>L', header)
    content_length = readAndUnpack('>L', fp.read(4))
    rectype = readAndUnpack('<L', fp.read(4))

    shape = readRecordAny(fp, rectype)
    shape['type'] = rectype

    # Attach the matching .dbf row, keyed by field name.
    info = {}
    for name, value in zip(db[0], db[record_number + 1]):
        if isinstance(value, str):
            value = value.strip()
        info[name] = value
    return {'shape': shape, 'info': info}


# Reading defs

def readRecordAny(fp, rectype):
    """Dispatch to the reader for this shape type; False if unsupported."""
    if rectype == 0:
        return readRecordNull(fp)
    elif rectype == 1:
        return readRecordPoint(fp)
    elif rectype == 8:
        return readRecordMultiPoint(fp)
    elif rectype == 3 or rectype == 5:
        return readRecordPolyLine(fp)
    else:
        return False


def readRecordNull(fp):
    """Null shapes carry no payload."""
    return {}


# Running total of points read; handy for progress reporting.
point_count = 0


def readRecordPoint(fp):
    """Read a single [x, y] point (two little-endian doubles)."""
    global point_count
    point = [readAndUnpack('d', fp.read(8)), readAndUnpack('d', fp.read(8))]
    point_count += 1
    return point


def readRecordMultiPoint(fp):
    """Read a MultiPoint record: bounds, then a counted list of points."""
    shape = {'bounds': readBounds(fp)}
    points = shape['points'] = []
    nPoints = readAndUnpack('i', fp.read(4))
    for _ in range(nPoints):
        points.append(readRecordPoint(fp))
    return shape


def readRecordPolyLine(fp):
    """Read a PolyLine/Polygon record into {'bounds', 'parts'}."""
    shape = {'bounds': readBounds(fp)}
    nParts = readAndUnpack('i', fp.read(4))
    nPoints = readAndUnpack('i', fp.read(4))
    if readAndUnpack('i', fp.read(4)):
        print('ERROR: First part offset must be 0')
    # Convert the remaining part offsets into per-part point counts.
    counts = []
    prev = 0
    for _ in range(nParts - 1):
        next_offset = readAndUnpack('i', fp.read(4))
        counts.append(next_offset - prev)
        prev = next_offset
    counts.append(nPoints - prev)

    parts = shape['parts'] = []
    for i in range(nParts):
        part = {}
        parts.append(part)
        points = part['points'] = []
        for _ in range(counts[i]):
            points.append(readRecordPoint(fp))
    return shape


# General defs

def readBounds(fp):
    """Read [[xmin, ymin], [xmax, ymax]] as four consecutive doubles."""
    return [[readAndUnpack('d', fp.read(8)), readAndUnpack('d', fp.read(8))],
            [readAndUnpack('d', fp.read(8)), readAndUnpack('d', fp.read(8))]]


def readAndUnpack(fieldtype, data):
    """Unpack a single value; empty data (EOF) is passed through unchanged."""
    if not data:
        return data
    return unpack(fieldtype, data)[0]


def getPolyInfo(feature):
    """Compute area/centroid/bounds for each part of a polygon feature."""
    shape = feature['shape']
    shapetype = shape['type']
    if shapetype == 3 or shapetype == 5:
        for part in shape['parts']:
            getPartInfo(part)


def getPartInfo(part):
    """Annotate one ring with area, bounds, center, centroid and extent.

    Uses the shoelace formula; centroid is None for degenerate rings.
    """
    points = part['points']
    n = len(points)
    area = cx = cy = 0
    xmin = ymin = 360
    xmax = ymax = -360
    pt = points[n - 1]
    xx, yy = pt[0], pt[1]
    for pt in points:
        x, y = pt[0], pt[1]
        # bounds
        xmin = min(x, xmin)
        ymin = min(y, ymin)
        xmax = max(x, xmax)
        ymax = max(y, ymax)
        # area and centroid accumulation (shoelace terms)
        a = xx * y - x * yy
        area += a
        cx += (x + xx) * a
        cy += (y + yy) * a
        # next
        xx, yy = x, y
    area /= 2
    if area:
        centroid = [cx / area / 6, cy / area / 6]
    else:
        centroid = None
    part.update({
        'area': abs(area),
        'bounds': [[xmin, ymin], [xmax, ymax]],
        'center': [(xmin + xmax) / 2, (ymin + ymax) / 2],
        'centroid': centroid,
        'extent': [abs(xmax - xmin), abs(ymax - ymin)],
    })


def getBoundCenters(features):
    """Store the bounding-box center on each feature's shape.

    Bug fix: the center used to be assigned to the bounds *list*
    (`bounds['center'] = ...`), which raised TypeError; it now lives on
    the shape dict next to 'bounds'.
    """
    for feature in features:
        shape = feature['shape']
        lo, hi = shape['bounds']
        shape['center'] = [(lo[0] + hi[0]) / 2, (lo[1] + hi[1]) / 2]


def getMAT(features):
    """Medial-axis transform placeholder; not implemented yet."""
    print('feature not yet available')


def dumpFeatureInfo(features):
    """Render every feature's .dbf info as CSV text (header from 1st row)."""
    fields = []
    rows = []
    for feature in features:
        info = feature['info']
        if not len(fields):
            for key in info:
                fields.append(key)
            rows.append(','.join(fields))
        cols = []
        for field in fields:
            cols.append(str(feature['info'][field]))
        rows.append(','.join(cols))
    return '\r\n'.join(rows)
[ [ 1, 0, 0.0421, 0.0053, 0, 0.66, 0, 399, 0, 1, 0, 0, 399, 0, 0 ], [ 1, 0, 0.0474, 0.0053, 0, 0.66, 0.0526, 699, 0, 1, 0, 0, 699, 0, 0 ], [ 14, 0, 0.0526, 0.0053, 0, ...
[ "from struct import unpack", "import dbfUtils", "XY_POINT_RECORD_LENGTH = 16", "db = []", "def loadShapefile( filename ):\n\t# open dbf file and get features as a list\n\tglobal db\n\tdbfile = open( filename[0:-4] + '.dbf', 'rb' )\n\tdb = list( dbfUtils.dbfreader(dbfile) )\n\tdbfile.close()\n\t\n\tfp = open...
#!/usr/bin/env python
# makepolys.py
"""Convert Census shapefiles into the JS shape files used by the map UI.

Dead commented-out code (randomColor, filterCONUS, writeStates,
generateStates, alternate output formats) has been removed.
"""

import codecs
import json
import math
import os
import random
import re
import shutil
import stat
import sys
import time

from geo import Geo
import shpUtils
import states

jsonpath = 'json'
shapespath = 'shapefiles'

geo = Geo()
keysep = '|'

states.byNumber = {}

# States whose sub-state data comes from an alternate shapefile
# (towns in New England, congressional districts elsewhere).
useOther = {
    'CT': ('towns', 'cs09_d00'),
    'MA': ('towns', 'cs25_d00'),
    'NH': ('towns', 'cs33_d00'),
    'VT': ('towns', 'cs50_d00'),
    'KS': ('congressional', 'cd20_110'),
    'NE': ('congressional', 'cd31_110'),
    'NM': ('congressional', 'cd35_110'),
}

districtNames = {
    'CD1': 'First Congressional District',
    'CD2': 'Second Congressional District',
    'CD3': 'Third Congressional District',
    'CD4': 'Fourth Congressional District',
}


def loadshapefile(filename):
    """Load one shapefile under shapespath, reporting elapsed time."""
    print('Loading shapefile %s' % filename)
    t1 = time.time()
    shapefile = shpUtils.loadShapefile('%s/%s' % (shapespath, filename))
    t2 = time.time()
    print('%0.3f seconds load time' % (t2 - t1))
    return shapefile


featuresByName = {}


def featureByName(feature):
    """Memoize features by their NAME attribute; first occurrence wins."""
    info = feature['info']
    name = info['NAME']
    if name not in featuresByName:
        featuresByName[name] = {'feature': feature}
    return featuresByName[name]


def featuresBounds(features):
    """Union the bounds of every polygon part in the feature list."""
    bounds = [[None, None], [None, None]]
    for feature in features:
        shape = feature['shape']
        if shape['type'] == 5:
            for part in shape['parts']:
                bounds = geo.extendBounds(bounds, part['bounds'])
    return bounds


def writeFile(filename, data):
    """Write data to filename in binary mode, closing the file reliably."""
    with open(filename, 'wb') as f:
        f.write(data)


def readShapefile(filename):
    """Read a shapefile and bucket its polygon parts by place (name|state).

    Returns (shapefile, places); each place records its name, Census state
    number, union bounds, dominant-ring centroid, and JS shape literals.
    """
    print('----------------------------------------')
    print('Loading %s' % filename)
    shapefile = loadshapefile(filename)
    features = shapefile['features']
    print('%d features' % len(features))

    nPoints = nPolys = 0
    places = {}
    for feature in features:
        shape = feature['shape']
        if shape['type'] != 5:
            continue
        info = feature['info']
        # .dbf strings are cp850; normalize congressional district names.
        name = info['NAME'].decode('cp850').encode('utf-8')
        name = re.sub('^(\d+)\x00.*$', 'CD\\1', name)
        name = districtNames.get(name, name)
        state = info['STATE']
        key = name + keysep + state
        if key not in places:
            places[key] = {
                'name': name,
                'state': state,
                'maxarea': 0.0,
                'bounds': [[None, None], [None, None]],
                'shapes': [],
            }
        place = places[key]
        shapes = place['shapes']
        for part in shape['parts']:
            nPolys += 1
            points = part['points']
            n = len(points) - 1
            nPoints += n
            pts = []
            area = part['area']
            if area == 0:
                continue  # skip degenerate rings
            place['bounds'] = geo.extendBounds(place['bounds'], part['bounds'])
            # The largest ring's centroid represents the whole place.
            centroid = part['centroid']
            if area > place['maxarea']:
                place['centroid'] = centroid
                place['maxarea'] = area
            for j in range(n):
                point = points[j]
                pts.append('{x:%.4f,y:%.4f}' % (float(point[0]), float(point[1])))
            shapes.append('{points:[%s]}' % (','.join(pts)))
    print('%d points in %d places' % (nPoints, len(places)))
    return shapefile, places


def writeUS(places, path):
    """Write one <abbr>.js file per place, keyed through states.byNumber.

    (A stray `json = []` local that shadowed the json module was removed.)
    """
    for key in sorted(places.keys()):
        abbr = states.byNumber[places[key]['state']]['abbr'].lower()
        writeJSON('%s.js' % abbr, getPlaceJSON(places, key, abbr, 'state'))


def writeJSON(path, json):
    """Wrap a shape literal in the map callback and write it under jsonpath."""
    # Renamed the local from `file`, which shadowed the builtin.
    target = '%s/%s' % (jsonpath, path)
    print('Writing %s' % target)
    writeFile(target, 'GoogleElectionMap.shapeReady(%s)' % (json))


def getPlaceJSON(places, key, state, type):
    """Format one place as a JS object literal; '' if the place is missing."""
    place = places[key]
    if not place:
        return ''
    bounds = place['bounds']
    centroid = place['centroid']
    return '{name:"%s", type:"%s",state:"%s",bounds:[[%.4f,%.4f],[%.4f,%.4f]],centroid:[%.4f,%.4f],shapes:[%s]}' % (
        key.split(keysep)[0], type, state,
        bounds[0][0], bounds[0][1],
        bounds[1][0], bounds[1][1],
        centroid[0], centroid[1],
        ','.join(place['shapes']))


def generateUS(detail, path=''):
    """Generate the per-state JS files from the national state shapefile."""
    shapefile, places = readShapefile(
        'states/st99_d00_shp-%s/st99_d00.shp' % detail)
    # Index states by Census state number before writing.
    for key in places:
        name, number = key.split(keysep)
        state = states.byName[name]
        state['json'] = []
        state['counties'] = []
        state['number'] = number
        states.byNumber[number] = state
    writeUS(places, path)


generateUS('00')
print('Done!')
[ [ 1, 0, 0.0201, 0.004, 0, 0.66, 0, 220, 0, 1, 0, 0, 220, 0, 0 ], [ 1, 0, 0.0241, 0.004, 0, 0.66, 0.0323, 463, 0, 1, 0, 0, 463, 0, 0 ], [ 1, 0, 0.0281, 0.004, 0, 0.6...
[ "import codecs", "import json", "import math", "import os", "import random", "import re", "import shutil", "import stat", "import sys", "import time", "from geo import Geo", "import shpUtils", "import states", "jsonpath = 'json'", "shapespath = 'shapefiles'", "geo = Geo()", "keysep =...
#!/usr/bin/env python
# dbfUtils.py
# By Raymond Hettinger
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362715
"""Read and write Xbase (.dbf) database files."""

import struct, datetime, decimal, itertools


def dbfreader(f):
    """Yield rows from an Xbase DBF file opened for binary reading.

    The first yielded row is the list of field names, the second the list
    of field specs (type, size, decimal places); every later row is one
    data record.  Records flagged as deleted are skipped.
    """
    record_count, header_len = struct.unpack('<xxxxLH22x', f.read(32))
    field_count = (header_len - 33) // 32

    specs = []
    for _ in xrange(field_count):
        name, typ, size, deci = struct.unpack('<11sc4xBB14x', f.read(32))
        # Field names are NUL-padded to 11 bytes; strip the padding.
        specs.append((name.replace('\0', ''), typ, size, deci))
    yield [spec[0] for spec in specs]
    yield [tuple(spec[1:]) for spec in specs]

    assert f.read(1) == '\r'  # header terminator byte

    # Prepend the one-byte deletion flag so a single struct format
    # string covers a whole record.
    specs.insert(0, ('DeletionFlag', 'C', 1, 0))
    record_fmt = ''.join('%ds' % spec[2] for spec in specs)
    record_size = struct.calcsize(record_fmt)

    for _ in xrange(record_count):
        raw = struct.unpack(record_fmt, f.read(record_size))
        if raw[0] != ' ':
            continue  # deleted record
        row = []
        for (name, typ, size, deci), value in itertools.izip(specs, raw):
            if name == 'DeletionFlag':
                continue
            if typ == "N":
                # Numbers are padded text; empty means zero.
                value = value.replace('\0', '').lstrip()
                if value == '':
                    value = 0
                elif deci:
                    value = decimal.Decimal(value)
                else:
                    value = int(value)
            elif typ == 'D':
                # Dates are stored as YYYYMMDD text.
                value = datetime.date(
                    int(value[:4]), int(value[4:6]), int(value[6:8]))
            elif typ == 'L':
                # Logical: truthy/falsy letters map to 'T'/'F', else unknown.
                value = (value in 'YyTt' and 'T') or (value in 'NnFf' and 'F') or '?'
            row.append(value)
        yield row


def dbfwriter(f, fieldnames, fieldspecs, records):
    """Write records to f as a DBF file.

    f must be open for binary writing.  fieldnames must be at most ten
    characters and contain no NULs.  fieldspecs are (type, size, deci)
    tuples with type one of:
        C ascii character data
        M ascii character memo data (real memo fields not supported)
        D datetime objects
        N ints or decimal objects
        L logical values 'T', 'F', or '?'
    records is an iterable of sequences of field values.
    """
    # --- file header ---
    version = 3
    today = datetime.datetime.now()
    record_count = len(records)
    field_count = len(fieldspecs)
    header_len = field_count * 32 + 33
    record_len = sum(spec[1] for spec in fieldspecs) + 1  # +1 deletion flag
    f.write(struct.pack('<BBBBLHH20x', version, today.year - 1900,
                        today.month, today.day, record_count,
                        header_len, record_len))

    # --- field descriptors ---
    for name, (typ, size, deci) in itertools.izip(fieldnames, fieldspecs):
        f.write(struct.pack('<11sc4xBB14x', name.ljust(11, '\x00'),
                            typ, size, deci))

    # --- header terminator ---
    f.write('\r')

    # --- data records ---
    for record in records:
        f.write(' ')  # deletion flag: not deleted
        for (typ, size, deci), value in itertools.izip(fieldspecs, record):
            if typ == "N":
                value = str(value).rjust(size, ' ')
            elif typ == 'D':
                value = value.strftime('%Y%m%d')
            elif typ == 'L':
                value = str(value)[0].upper()
            else:
                value = str(value)[:size].ljust(size, ' ')
            assert len(value) == size
            f.write(value)

    # --- end-of-file marker ---
    f.write('\x1A')
[ [ 1, 0, 0.0588, 0.0084, 0, 0.66, 0, 399, 0, 4, 0, 0, 399, 0, 0 ], [ 2, 0, 0.2983, 0.4538, 0, 0.66, 0.5, 887, 0, 1, 0, 0, 0, 0, 25 ], [ 8, 1, 0.1218, 0.084, 1, 0.01,...
[ "import struct, datetime, decimal, itertools", "def dbfreader(f):\n\t\"\"\"Returns an iterator over records in a Xbase DBF file.\n\n\tThe first row returned contains the field names.\n\tThe second row contains field specs: (type, size, decimal places).\n\tSubsequent rows contain the data records.\n\tIf a record i...
#!/usr/bin/env python
# get-strings.py
# By Michael Geary - http://mg.to/
# See UNLICENSE or http://unlicense.org/ for public domain notice.
"""Download gadget UI strings from a Google Docs spreadsheet feed and write
one loadStrings(...) JSONP file per language.

The JSONP output has line breaks and alphabetized keys for better version
control, e.g.:

    loadStrings({
    "areYouRegistered": "Are you registered to vote?",
    "dateFormat": "{{monthName}} {{dayOfMonth}}",
    "yourHome": "Your Home",
    "yourVotingLocation": "Your Voting Location"
    })
"""

import json, re

url = 'https://spreadsheets.google.com/feeds/list/0AuiC0EUz_p_xdHE3R2U5cTE0aFdHcWpTVVhPQVlzUmc/1/public/values?alt=json'


def collectStrings(feed):
    """Map language code -> {string id -> translated text} from the feed."""
    langs = {}
    for entry in feed['entry']:
        string_id = entry['gsx$id']['$t']  # renamed: `id` shadowed the builtin
        for col in entry:
            # Translation columns are named gsx$text-<lang>.
            match = re.match(r'gsx\$text-(\w+)$', col)
            if match:
                lang = match.group(1)
                if lang not in langs:
                    langs[lang] = {}
                langs[lang][string_id] = entry[col]['$t']
    return langs


def main():
    # urllib2 is imported lazily so the parsing helper above stays
    # importable (and testable) without network-era Python 2 modules.
    import urllib2
    feed = json.load(urllib2.urlopen(url))['feed']
    langs = collectStrings(feed)
    for lang in langs:
        j = json.dumps(langs[lang], indent=0, sort_keys=True)
        path = 'lang-%s.js' % lang  # renamed: `file` shadowed the builtin
        print('Writing ' + path)
        # Close the output promptly instead of leaking the handle.
        with open(path, 'wb') as out:
            out.write('loadStrings(%s)' % j)


if __name__ == '__main__':
    main()
[ [ 1, 0, 0.4878, 0.0244, 0, 0.66, 0, 463, 0, 3, 0, 0, 463, 0, 0 ], [ 14, 0, 0.5366, 0.0244, 0, 0.66, 0.2, 789, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.5854, 0.0244, 0, 0.6...
[ "import json, re, urllib2", "url = 'https://spreadsheets.google.com/feeds/list/0AuiC0EUz_p_xdHE3R2U5cTE0aFdHcWpTVVhPQVlzUmc/1/public/values?alt=json'", "langs = {}", "feed = json.load( urllib2.urlopen(url) )['feed']", "for entry in feed['entry']:\n\tid = entry['gsx$id']['$t']\n\tfor col in entry:\n\t\tmatch...
#!/usr/bin/env python
# coding: utf-8
# make-hi.py - special HI processing for 2010
# Copyright (c) 2010 Michael Geary - http://mg.to/
# Use under either the MIT or GPL license
# http://www.opensource.org/licenses/mit-license.php
# http://www.opensource.org/licenses/gpl-2.0.php
"""Extract precinct -> sample-ballot PDF numbers from a Hawaii TSV file."""

import re

# Matches the trailing numeric id in .../<digits>EN.pdf ballot URLs.
# Hoisted to module level so it is compiled once, not per line.
PDF_NUMBER = re.compile(r'/(\d+)EN\.pdf$')


def convert(input, output):
    """Convert the TSV at `input` into a JSON-ish map written to `output`.

    Column 10 is the precinct id and column 12 the ballot URL; rows with
    too few columns or a non-matching URL are skipped.
    """
    print('Converting %s to %s' % (input, output))
    # with-blocks close both files even if a row fails to parse.
    # Output is opened in text mode ('wb' broke text writes on Python 3;
    # identical output on POSIX).
    with open(input, 'r') as infile, open(output, 'w') as outfile:
        outfile.write('{\n')
        for line in infile:
            fields = line.rstrip('\n').split('\t')
            if len(fields) > 12:
                precinct = fields[10]
                pdfnum = PDF_NUMBER.findall(fields[12])
                if len(pdfnum):
                    outfile.write('"%s":"%s",\n' % (precinct, pdfnum[0]))
        outfile.write('}\n')


def main():
    convert('hi-ballot-urls.tsv', 'hi-ballot-urls.json')
    print('Done!')


if __name__ == "__main__":
    main()
[ [ 1, 0, 0.2941, 0.0294, 0, 0.66, 0, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 2, 0, 0.5735, 0.4706, 0, 0.66, 0.3333, 438, 0, 2, 0, 0, 0, 0, 13 ], [ 8, 1, 0.3824, 0.0294, 1, 0...
[ "import re", "def convert( input, output ):\n\tprint('Converting %s to %s' %( input, output ))\n\tinput = open( input, 'r' )\n\toutput = open( output, 'wb' )\n\toutput.write( '{\\n' )\n\tfor line in input:\n\t\tline = line.rstrip('\\n').split('\\t')\n\t\tif len(line) > 12:", "\tprint('Converting %s to %s' %( in...
#!/usr/bin/env python
"""Compile slides.md ('---'-separated slides) into an HTML presentation."""

import codecs
import re


def process_slides():
    """Render slides.md through base.html into presentation-output.html."""
    # Third-party renderers are imported lazily so the pure helpers below
    # stay importable without them installed.
    import jinja2
    import markdown

    with codecs.open('../../presentation-output.html', 'w', encoding='utf8') as outfile:
        # Close the source file instead of leaking the handle.
        with codecs.open('slides.md', encoding='utf8') as infile:
            md = infile.read()
        md_slides = md.split('\n---\n')
        print('Compiled %s slides.' % len(md_slides))

        slides = []
        # Process each slide separately.
        for md_slide in md_slides:
            slide = {}
            sections = md_slide.split('\n\n')

            # Extract metadata at the beginning of the slide
            # (look for key: value pairs).
            metadata_section = sections[0]
            metadata = parse_metadata(metadata_section)
            slide.update(metadata)
            remainder_index = metadata and 1 or 0

            # Get the content from the rest of the slide.
            content_section = '\n\n'.join(sections[remainder_index:])
            html = markdown.markdown(content_section)
            slide['content'] = postprocess_html(html, metadata)
            slides.append(slide)

        with open('base.html') as template_file:
            template = jinja2.Template(template_file.read())
        outfile.write(template.render(locals()))


def parse_metadata(section):
    """Given the first part of a slide, returns metadata associated with it."""
    metadata = {}
    for line in section.split('\n'):
        colon_index = line.find(':')
        if colon_index != -1:
            key = line[:colon_index].strip()
            val = line[colon_index + 1:].strip()
            metadata[key] = val
    return metadata


def postprocess_html(html, metadata):
    """Returns processed HTML to fit into the slide template format."""
    if metadata.get('build_lists') and metadata['build_lists'] == 'true':
        html = html.replace('<ul>', '<ul class="build">')
        html = html.replace('<ol>', '<ol class="build">')
    return html


if __name__ == '__main__':
    process_slides()
[ [ 1, 0, 0.0526, 0.0175, 0, 0.66, 0, 220, 0, 1, 0, 0, 220, 0, 0 ], [ 1, 0, 0.0702, 0.0175, 0, 0.66, 0.1429, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0877, 0.0175, 0, ...
[ "import codecs", "import re", "import jinja2", "import markdown", "def process_slides():\n with codecs.open('../../presentation-output.html', 'w', encoding='utf8') as outfile:\n md = codecs.open('slides.md', encoding='utf8').read()\n md_slides = md.split('\\n---\\n')\n print('Compiled %s slides.' ...
#!/usr/bin/env python ## Copyright (c) 2012 The WebM project authors. All Rights Reserved. ## ## Use of this source code is governed by a BSD-style license ## that can be found in the LICENSE file in the root of the source ## tree. An additional intellectual property rights grant can be found ## in the file PATENTS. All contributing project authors may ## be found in the AUTHORS file in the root of the source tree. ## """Classes for representing diff pieces.""" __author__ = "jkoleszar@google.com" import re class DiffLines(object): """A container for one half of a diff.""" def __init__(self, filename, offset, length): self.filename = filename self.offset = offset self.length = length self.lines = [] self.delta_line_nums = [] def Append(self, line): l = len(self.lines) if line[0] != " ": self.delta_line_nums.append(self.offset + l) self.lines.append(line[1:]) assert l+1 <= self.length def Complete(self): return len(self.lines) == self.length def __contains__(self, item): return item >= self.offset and item <= self.offset + self.length - 1 class DiffHunk(object): """A container for one diff hunk, consisting of two DiffLines.""" def __init__(self, header, file_a, file_b, start_a, len_a, start_b, len_b): self.header = header self.left = DiffLines(file_a, start_a, len_a) self.right = DiffLines(file_b, start_b, len_b) self.lines = [] def Append(self, line): """Adds a line to the DiffHunk and its DiffLines children.""" if line[0] == "-": self.left.Append(line) elif line[0] == "+": self.right.Append(line) elif line[0] == " ": self.left.Append(line) self.right.Append(line) else: assert False, ("Unrecognized character at start of diff line " "%r" % line[0]) self.lines.append(line) def Complete(self): return self.left.Complete() and self.right.Complete() def __repr__(self): return "DiffHunk(%s, %s, len %d)" % ( self.left.filename, self.right.filename, max(self.left.length, self.right.length)) def ParseDiffHunks(stream): """Walk a file-like object, yielding DiffHunks as they're 
parsed.""" file_regex = re.compile(r"(\+\+\+|---) (\S+)") range_regex = re.compile(r"@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?") hunk = None while True: line = stream.readline() if not line: break if hunk is None: # Parse file names diff_file = file_regex.match(line) if diff_file: if line.startswith("---"): a_line = line a = diff_file.group(2) continue if line.startswith("+++"): b_line = line b = diff_file.group(2) continue # Parse offset/lengths diffrange = range_regex.match(line) if diffrange: if diffrange.group(2): start_a = int(diffrange.group(1)) len_a = int(diffrange.group(3)) else: start_a = 1 len_a = int(diffrange.group(1)) if diffrange.group(5): start_b = int(diffrange.group(4)) len_b = int(diffrange.group(6)) else: start_b = 1 len_b = int(diffrange.group(4)) header = [a_line, b_line, line] hunk = DiffHunk(header, a, b, start_a, len_a, start_b, len_b) else: # Add the current line to the hunk hunk.Append(line) # See if the whole hunk has been parsed. If so, yield it and prepare # for the next hunk. if hunk.Complete(): yield hunk hunk = None # Partial hunks are a parse error assert hunk is None
[ [ 8, 0, 0.0787, 0.0079, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.0945, 0.0079, 0, 0.66, 0.2, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.1102, 0.0079, 0, 0.66, ...
[ "\"\"\"Classes for representing diff pieces.\"\"\"", "__author__ = \"jkoleszar@google.com\"", "import re", "class DiffLines(object):\n \"\"\"A container for one half of a diff.\"\"\"\n\n def __init__(self, filename, offset, length):\n self.filename = filename\n self.offset = offset\n ...
#!/usr/bin/env python ## Copyright (c) 2012 The WebM project authors. All Rights Reserved. ## ## Use of this source code is governed by a BSD-style license ## that can be found in the LICENSE file in the root of the source ## tree. An additional intellectual property rights grant can be found ## in the file PATENTS. All contributing project authors may ## be found in the AUTHORS file in the root of the source tree. ## """Wraps paragraphs of text, preserving manual formatting This is like fold(1), but has the special convention of not modifying lines that start with whitespace. This allows you to intersperse blocks with special formatting, like code blocks, with written prose. The prose will be wordwrapped, and the manual formatting will be preserved. * This won't handle the case of a bulleted (or ordered) list specially, so manual wrapping must be done. Occasionally it's useful to put something with explicit formatting that doesn't look at all like a block of text inline. indicator = has_leading_whitespace(line); if (indicator) preserve_formatting(line); The intent is that this docstring would make it through the transform and still be legible and presented as it is in the source. If additional cases are handled, update this doc to describe the effect. """ __author__ = "jkoleszar@google.com" import textwrap import sys def wrap(text): if text: return textwrap.fill(text, break_long_words=False) + '\n' return "" def main(fileobj): text = "" output = "" while True: line = fileobj.readline() if not line: break if line.lstrip() == line: text += line else: output += wrap(text) text="" output += line output += wrap(text) # Replace the file or write to stdout. if fileobj == sys.stdin: fileobj = sys.stdout else: fileobj.seek(0) fileobj.truncate(0) fileobj.write(output) if __name__ == "__main__": if len(sys.argv) > 1: main(open(sys.argv[1], "r+")) else: main(sys.stdin)
[ [ 8, 0, 0.2857, 0.3, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.4571, 0.0143, 0, 0.66, 0.1667, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.4714, 0.0143, 0, 0.66, ...
[ "\"\"\"Wraps paragraphs of text, preserving manual formatting\n\nThis is like fold(1), but has the special convention of not modifying lines\nthat start with whitespace. This allows you to intersperse blocks with\nspecial formatting, like code blocks, with written prose. The prose will\nbe wordwrapped, and the manu...
#!/usr/bin/env python ## Copyright (c) 2012 The WebM project authors. All Rights Reserved. ## ## Use of this source code is governed by a BSD-style license ## that can be found in the LICENSE file in the root of the source ## tree. An additional intellectual property rights grant can be found ## in the file PATENTS. All contributing project authors may ## be found in the AUTHORS file in the root of the source tree. ## """Calculates the "intersection" of two unified diffs. Given two diffs, A and B, it finds all hunks in B that had non-context lines in A and prints them to stdout. This is useful to determine the hunks in B that are relevant to A. The resulting file can be applied with patch(1) on top of A. """ __author__ = "jkoleszar@google.com" import sys import diff def FormatDiffHunks(hunks): """Re-serialize a list of DiffHunks.""" r = [] last_header = None for hunk in hunks: this_header = hunk.header[0:2] if last_header != this_header: r.extend(hunk.header) last_header = this_header else: r.extend(hunk.header[2]) r.extend(hunk.lines) r.append("\n") return "".join(r) def ZipHunks(rhs_hunks, lhs_hunks): """Join two hunk lists on filename.""" for rhs_hunk in rhs_hunks: rhs_file = rhs_hunk.right.filename.split("/")[1:] for lhs_hunk in lhs_hunks: lhs_file = lhs_hunk.left.filename.split("/")[1:] if lhs_file != rhs_file: continue yield (rhs_hunk, lhs_hunk) def main(): old_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[1], "r"))] new_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[2], "r"))] out_hunks = [] # Join the right hand side of the older diff with the left hand side of the # newer diff. 
for old_hunk, new_hunk in ZipHunks(old_hunks, new_hunks): if new_hunk in out_hunks: continue old_lines = old_hunk.right new_lines = new_hunk.left # Determine if this hunk overlaps any non-context line from the other for i in old_lines.delta_line_nums: if i in new_lines: out_hunks.append(new_hunk) break if out_hunks: print FormatDiffHunks(out_hunks) sys.exit(1) if __name__ == "__main__": main()
[ [ 8, 0, 0.1645, 0.0789, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.2237, 0.0132, 0, 0.66, 0.1429, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.25, 0.0132, 0, 0.66, ...
[ "\"\"\"Calculates the \"intersection\" of two unified diffs.\n\nGiven two diffs, A and B, it finds all hunks in B that had non-context lines\nin A and prints them to stdout. This is useful to determine the hunks in B that\nare relevant to A. The resulting file can be applied with patch(1) on top of A.\n\"\"\"", "...
""" * Copyright (c) 2012 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. """ #!/usr/bin/env python import sys,string,os,re,math,numpy scale = 2**16 def dist(p1,p2): x1,y1 = p1 x2,y2 = p2 if x1==x2 and y1==y2 : return 1.0 return 1/ math.sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)) def gettaps(p): def l(b): return int(math.floor(b)) def h(b): return int(math.ceil(b)) def t(b,p,s): return int((scale*dist(b,p)+s/2)/s) r,c = p ul=[l(r),l(c)] ur=[l(r),h(c)] ll=[h(r),l(c)] lr=[h(r),h(c)] sum = dist(ul,p)+dist(ur,p)+dist(ll,p)+dist(lr,p) t4 = scale - t(ul,p,sum) - t(ur,p,sum) - t(ll,p,sum); return [[ul,t(ul,p,sum)],[ur,t(ur,p,sum)], [ll,t(ll,p,sum)],[lr,t4]] def print_mb_taps(angle,blocksize): theta = angle / 57.2957795; affine = [[math.cos(theta),-math.sin(theta)], [math.sin(theta),math.cos(theta)]] radius = (float(blocksize)-1)/2 print " // angle of",angle,"degrees" for y in range(blocksize) : for x in range(blocksize) : r,c = numpy.dot(affine,[y-radius, x-radius]) tps = gettaps([r+radius,c+radius]) for t in tps : p,t = t tr,tc = p print " %2d, %2d, %5d, " % (tr,tc,t,), print " // %2d,%2d " % (y,x) i=float(sys.argv[1]) while i <= float(sys.argv[2]) : print_mb_taps(i,float(sys.argv[4])) i=i+float(sys.argv[3]) """ taps = [] pt=dict() ptr=dict() for y in range(16) : for x in range(16) : r,c = numpy.dot(affine,[y-7.5, x-7.5]) tps = gettaps([r+7.5,c+7.5]) j=0 for tp in tps : p,i = tp r,c = p pt[y,x,j]= [p,i] try: ptr[r,j,c].append([y,x]) except: ptr[r,j,c]=[[y,x]] j = j+1 for key in sorted(pt.keys()) : print key,pt[key] lr = -99 lj = -99 lc = 0 shuf="" mask="" for r,j,c in sorted(ptr.keys()) : for y,x in ptr[r,j,c] : if lr != r or lj != j : print 
"shuf_"+str(lr)+"_"+str(lj)+"_"+shuf.ljust(16,"0"), lc shuf="" lc = 0 for i in range(lc,c-1) : shuf = shuf +"0" shuf = shuf + hex(x)[2] lc =c break lr = r lj = j # print r,j,c,ptr[r,j,c] # print for r,j,c in sorted(ptr.keys()) : for y,x in ptr[r,j,c] : print r,j,c,y,x break """
[ [ 8, 0, 0.0472, 0.0849, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1038, 0.0094, 0, 0.66, 0.125, 509, 0, 6, 0, 0, 509, 0, 0 ], [ 14, 0, 0.1132, 0.0094, 0, 0.66...
[ "\"\"\"\n * Copyright (c) 2012 The WebM project authors. All Rights Reserved.\n *\n * Use of this source code is governed by a BSD-style license\n * that can be found in the LICENSE file in the root of the source\n * tree. An additional intellectual property rights grant can be found\n * in the file PATENTS. ...
import csv, os, glob import sys import numpy class affycel: def _int_(self, filename, version, header, intensityCells, intensity, maskscells, masks, outlierCells, outliers, modifiedCells, modified): self.filename = filename self.version = version self.header = {} self.intensityCells = intensityCells self.intensity = intensity self.masksCells = maskscells self.masks = masks self.outliersCells = outlierCells self.outliers = outliers self.modifiedCells = modifiedCells self.modified = modified self.custom = {} # plan to allow a custom section to be added to the CEL file def read_cel(self, filename): reader = csv.reader(open(filename, "U"),delimiter='\t') self.filename = os.path.split(filename)[1] def read_selector(areader): for row in areader: if row: if any(("[CEL]" in row, "[HEADER]" in row, "[INTENSITY]" in row, "[MASKS]" in row, "[OUTLIERS]" in row, "[MODIFIED]" in row)): rsel[row[0]](row, areader) else: print '*****something went wrong*******' def Rcel(row, areader): if '[CEL]' in row: #row passed in should contain '[CEL]' for row in areader: #Skips '[CEL]' row that was passed in if row: # skips blank rows #print 'cell', row if not any(("[HEADER]" in row, "[INTENSITY]" in row, "[MASKS]" in row, "[OUTLIERS]" in row, "[MODIFIED]" in row)): self.version = int(row[0].partition('=')[2]) #print self.version #self.version = row else: rsel[row[0]](row, areader) # Go to correct section def Rheader(row, areader): if '[HEADER]' in row: #row passed in should contain '[HEADER]' self.header = {} #self.header is a dictionary for row in reader: # skips the section heading row if row: #skips blank rows if not any(("[CEL]" in row, "[INTENSITY]" in row, "[MASKS]" in row, "[OUTLIERS]" in row, "[MODIFIED]" in row)): self.header[str(row[0].partition('=')[0])] = str(row[0].partition('=')[2]) else: rsel[row[0]](row, areader) # Go to correct section def Rintensity(row, areader): #print 'start intencity', row data = [] if "[INTENSITY]" in row: #row passed in should contain '[INTENSITY]' 
row = areader.next() # moves to the row after "[INTENSITY]" self.intensityCells = int(row[0].partition('=')[2]) #gets the number of intensities areader.next() #skips the colmn headings for row in reader: if row: if not any(("[CEL]" in row, "[HEADER]" in row, "[MASKS]" in row, "[OUTLIERS]" in row, "[MODIFIED]" in row)): data.append(tuple(row)) else: self.intensity = numpy.array(data, [('x',numpy.int),('y',numpy.int),('mean',numpy.float64),('stdv',numpy.float64),('npixcels',numpy.int)]) rsel[row[0]](row, areader) def Rmasks(row, areader): data = [] maskstype = [('x', int), ('y', int)] if "[MASKS]" in row: row = areader.next() # moves to the row after "[INTENSITY]" self.masksCells = int(row[0].partition('=')[2]) #gets the number of intensities areader.next() #skips the colmn headings for row in reader: if row: if not any(("[CEL]" in row, "[HEADER]" in row, "[INTESITY]" in row, "[OUTLIERS]" in row, "[MODIFIED]" in row)): data.append(tuple(row)) else: self.masks = numpy.array(data, [('x',numpy.int),('y',numpy.int)]) rsel[row[0]](row, areader) def Routliers(row, areader): data = [] if "[OUTLIERS]" in row: row = areader.next() # moves to the row after "[INTENSITY]" self.outliersCells = int(row[0].partition('=')[2]) #gets the number of intensities areader.next() #skips the colmn headings for row in reader: if row: if not any(("[CEL]" in row, "[HEADER]" in row, "[INTESITY]" in row, "[MASKS]" in row, "[MODIFIED]" in row)): data.append(tuple(row)) else: self.outliers = numpy.array(data, [('x', numpy.int), ('y', numpy.int)]) rsel[row[0]](row, areader) def Rmodified(row, areader): data = [] if "[MODIFIED]" in row: row = areader.next() # moves to the row after "[INTENSITY]" self.modifiedCells = int(row[0].partition('=')[2]) #gets the number of intensities areader.next() #skips the colmn headings for row in reader: if row: if not any(("[CEL]" in row, "[HEADER]" in row, "[INTESITY]" in row, "[MASKS]" in row, "[OUTLIERS]" in row)): print 'modified1' data.append(tuple(row)) #else, 
there is no else statment when there are now more rows continue on to convert data to array self.modified = numpy.array(data, [('x', numpy.int), ('y', numpy.int), ('origmean', numpy.float64)] ) #rsel[row[0]](row, areader) This should be the last item in the file rsel = {} rsel['[CEL]'] = Rcel rsel['[HEADER]']= Rheader rsel['[INTENSITY]']= Rintensity rsel['[MASKS]']= Rmasks rsel['[OUTLIERS]']= Routliers rsel['[MODIFIED]']= Rmodified read_selector(reader) def simple_normilize(self): """empty""" def cmean(self): return numpy.mean(self.intensity['mean']) def csum(self): return numpy.sum(self.intensity['mean']) def csumDobs(self): return self.csum()/len(self.intensity['mean']) if __name__ == "__main__": a = affycel() a.read_cel('example.CEL') print a.cmean() print a.csum() print a.csumDobs() testlist = (a.filename, a.version, a.header.items(), a.intensityCells, a.intensity[:5], a.masksCells, a.masks, a.outliersCells, a.outliers[:5], a.modifiedCells, a.modified[:5]) for test in testlist: print 'Test', test
[ [ 1, 0, 0.014, 0.007, 0, 0.66, 0, 312, 0, 3, 0, 0, 312, 0, 0 ], [ 1, 0, 0.021, 0.007, 0, 0.66, 0.25, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.028, 0.007, 0, 0.66, ...
[ "import csv, os, glob", "import sys", "import numpy", "class affycel:\n\n def _int_(self, filename, version, header, intensityCells, intensity, maskscells, masks, outlierCells, outliers, modifiedCells, modified):\n self.filename = filename\n self.version = version\n self.header = {}\n ...
import web import json from base import Base class XTask(Base): def GET(self): results = self.db.query("SELECT * FROM Pet") return self.query_serializer(results)
[ [ 1, 0, 0.1, 0.1, 0, 0.66, 0, 183, 0, 1, 0, 0, 183, 0, 0 ], [ 1, 0, 0.2, 0.1, 0, 0.66, 0.3333, 463, 0, 1, 0, 0, 463, 0, 0 ], [ 1, 0, 0.4, 0.1, 0, 0.66, 0.6667, ...
[ "import web", "import json", "from base import Base", "class XTask(Base):\n\t\n\tdef GET(self):\n\t\tresults = self.db.query(\"SELECT * FROM Pet\")\n\t\treturn self.query_serializer(results)", "\tdef GET(self):\n\t\tresults = self.db.query(\"SELECT * FROM Pet\")\n\t\treturn self.query_serializer(results)", ...
import web import json from datetime import datetime #thanks: http://stackoverflow.com/questions/11875770/how-to-overcome-datetime-datetime-not-json-serializable-in-python class DateTimeEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, datetime): return obj.isoformat() return json.JSONEncoder.default(self, obj) class Base: """A base class that contains a static mysql database connector that connects to our database in the CoC""" db = web.database( dbn="mysql", host="academic-mysql.cc.gatech.edu", db="cs4400_Team_11", user="cs4400_Team_11", pw="w01goZoX" ) def query_serializer(self, query): query = [dict(result) for result in query] query = DateTimeEncoder().encode(query) return query
[ [ 1, 0, 0.037, 0.037, 0, 0.66, 0, 183, 0, 1, 0, 0, 183, 0, 0 ], [ 1, 0, 0.0741, 0.037, 0, 0.66, 0.25, 463, 0, 1, 0, 0, 463, 0, 0 ], [ 1, 0, 0.1111, 0.037, 0, 0.66, ...
[ "import web", "import json", "from datetime import datetime", "class DateTimeEncoder(json.JSONEncoder):\n\tdef default(self, obj):\n\t\tif isinstance(obj, datetime):\n\t\t\treturn obj.isoformat()\n\t\treturn json.JSONEncoder.default(self, obj)", "\tdef default(self, obj):\n\t\tif isinstance(obj, datetime):\...
import web import time from base import Base class AddPet(Base): def POST(self): input = web.input() input["age"] = int(input["age"]) print(input) input = dict(input) results = self.db.query("INSERT into Pet(Shelter_Name, Age, Gender, Pet_Name) values ($shelter_name, $age, $gender, $pet_name)", vars=input ) return str(results)
[ [ 1, 0, 0.0588, 0.0588, 0, 0.66, 0, 183, 0, 1, 0, 0, 183, 0, 0 ], [ 1, 0, 0.1176, 0.0588, 0, 0.66, 0.3333, 654, 0, 1, 0, 0, 654, 0, 0 ], [ 1, 0, 0.1765, 0.0588, 0, ...
[ "import web", "import time", "from base import Base", "class AddPet(Base):\n\t\n\tdef POST(self):\n\t\tinput = web.input()\n\t\tinput[\"age\"] = int(input[\"age\"])\n\t\tprint(input)\n\t\tinput = dict(input)\n\t\tresults = self.db.query(\"INSERT into Pet(Shelter_Name, Age, Gender, Pet_Name) values ($shelter_n...
import web from addpet import AddPet from xtask import XTask urls = ( '/addpet', 'AddPet', '/xtask', 'XTask' #sample task ) app = web.application(urls, globals()) if __name__ == "__main__": app.run()
[ [ 1, 0, 0.0769, 0.0769, 0, 0.66, 0, 183, 0, 1, 0, 0, 183, 0, 0 ], [ 1, 0, 0.1538, 0.0769, 0, 0.66, 0.2, 163, 0, 1, 0, 0, 163, 0, 0 ], [ 1, 0, 0.2308, 0.0769, 0, 0.6...
[ "import web", "from addpet import AddPet", "from xtask import XTask", "urls = (\n '/addpet', 'AddPet',\n '/xtask', 'XTask' #sample task\n)", "app = web.application(urls, globals())", "if __name__ == \"__main__\":\n app.run()", " app.run()" ]
from sys import stdin a, b, c = map(int, stdin.readline().strip().split()) print "%.3lf" % ((a+b+c)/3.0)
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 256, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "a, b, c = map(int, stdin.readline().strip().split())", "print(\"%.3lf\" % ((a+b+c)/3.0))" ]
from sys import stdin from math import * r, h = map(float, stdin.readline().strip().split()) print "Area = %.3lf" % (pi*r*r*2 + 2*pi*r*h)
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.5, 0.25, 0, 0.66, 0.3333, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 14, 0, 0.75, 0.25, 0, 0.66, 0....
[ "from sys import stdin", "from math import *", "r, h = map(float, stdin.readline().strip().split())", "print(\"Area = %.3lf\" % (pi*r*r*2 + 2*pi*r*h))" ]
from sys import stdin from math import * n, = map(int, stdin.readline().strip().split()) rad = radians(n) print "%.3lf %.3lf" % (sin(rad), cos(rad))
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.4, 0.2, 0, 0.66, 0.25, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.5, ...
[ "from sys import stdin", "from math import *", "n, = map(int, stdin.readline().strip().split())", "rad = radians(n)", "print(\"%.3lf %.3lf\" % (sin(rad), cos(rad)))" ]
from sys import stdin n, = map(int, stdin.readline().strip().split()) print n*(n+1)/2
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 773, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "n, = map(int, stdin.readline().strip().split())", "print(n*(n+1)/2)" ]
from sys import stdin a, b = map(int, stdin.readline().strip().split()) print b, a
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.5, 0.25, 0, 0.66, 0.5, 127, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 0.75, 0.25, 0, 0.66, 1, ...
[ "from sys import stdin", "a, b = map(int, stdin.readline().strip().split())", "print(b, a)" ]
from sys import stdin n, m = map(int, stdin.readline().strip().split()) a = (4*n-m)/2 b = n-a if m % 2 == 1 or a < 0 or b < 0: print "No answer" else: print a, b
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.4, 0.2, 0, 0.66, 0.3333, 51, 3, 2, 0, 0, 53, 10, 4 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.6667,...
[ "from sys import stdin", "n, m = map(int, stdin.readline().strip().split())", "a = (4*n-m)/2", "b = n-a" ]
from sys import stdin n, = map(int, stdin.readline().strip().split()) money = n * 95 if money >= 300: money *= 0.85 print "%.2lf" % money
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.4, 0.2, 0, 0.66, 0.25, 773, 3, 2, 0, 0, 53, 10, 4 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.5, ...
[ "from sys import stdin", "n, = map(int, stdin.readline().strip().split())", "money = n * 95", "if money >= 300: money *= 0.85", "print(\"%.2lf\" % money)" ]
from sys import stdin n, = map(int, stdin.readline().strip().split()) print ["yes", "no"][n % 2]
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 773, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "n, = map(int, stdin.readline().strip().split())", "print [\"yes\", \"no\"][n % 2]" ]
from sys import stdin from math import * x1, y1, x2, y2 = map(float, stdin.readline().strip().split()) print "%.3lf" % hypot((x1-x2), (y1-y2))
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.5, 0.25, 0, 0.66, 0.3333, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 14, 0, 0.75, 0.25, 0, 0.66, 0....
[ "from sys import stdin", "from math import *", "x1, y1, x2, y2 = map(float, stdin.readline().strip().split())", "print(\"%.3lf\" % hypot((x1-x2), (y1-y2)))" ]
from sys import stdin n = stdin.readline().strip().split()[0] print '%c%c%c' % (n[2], n[1], n[0])
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.5, 0.25, 0, 0.66, 0.5, 773, 6, 0, 0, 0, 0, 0, 3 ], [ 8, 0, 0.75, 0.25, 0, 0.66, 1, ...
[ "from sys import stdin", "n = stdin.readline().strip().split()[0]", "print('%c%c%c' % (n[2], n[1], n[0]))" ]
from sys import stdin x, = map(float, stdin.readline().strip().split()) print abs(x)
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 190, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "x, = map(float, stdin.readline().strip().split())", "print(abs(x))" ]
from sys import stdin from calendar import isleap year, = map(int, stdin.readline().strip().split()) if isleap(year): print "yes" else: print "no"
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 917, 0, 1, 0, 0, 917, 0, 0 ], [ 14, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "from calendar import isleap", "year, = map(int, stdin.readline().strip().split())" ]
from sys import stdin f, = map(float, stdin.readline().strip().split()) print "%.3lf" % (5*(f-32)/9)
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 899, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "f, = map(float, stdin.readline().strip().split())", "print(\"%.3lf\" % (5*(f-32)/9))" ]
from sys import stdin a, b, c = map(int, stdin.readline().strip().split()) if a*a + b*b == c*c or a*a + c*c == b*b or b*b + c*c == a*a: print "yes" elif a + b <= c or a + c <= b or b + c <= a: print "not a triangle" else: print "no"
[ [ 1, 0, 1, 1, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ] ]
[ "from sys import stdin" ]
from sys import stdin a = map(int, stdin.readline().strip().split()) a.sort() print a[0], a[1], a[2]
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.4, 0.2, 0, 0.66, 0.3333, 475, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 0.6, 0.2, 0, 0.66, 0.6667,...
[ "from sys import stdin", "a = map(int, stdin.readline().strip().split())", "a.sort()", "print(a[0], a[1], a[2])" ]
s = i = 0 while True: term = 1.0 / (i*2+1) s += term * ((-1)**i) if term < 1e-6: break i += 1 print "%.6lf" % s
[ [ 14, 0, 0.1429, 0.1429, 0, 0.66, 0, 553, 1, 0, 0, 0, 0, 1, 0 ], [ 5, 0, 0.5714, 0.7143, 0, 0.66, 0.5, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 1, 0.4286, 0.1429, 1, 0.67, ...
[ "s = i = 0", "while True:\n term = 1.0 / (i*2+1)\n s += term * ((-1)**i)\n if term < 1e-6: break\n i += 1", " term = 1.0 / (i*2+1)", " if term < 1e-6: break", "print(\"%.6lf\" % s)" ]
from sys import stdin from decimal import * a, b, c = map(int, stdin.readline().strip().split()) getcontext().prec = c print Decimal(a) / Decimal(b)
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.4, 0.2, 0, 0.66, 0.25, 349, 0, 1, 0, 0, 349, 0, 0 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.5, ...
[ "from sys import stdin", "from decimal import *", "a, b, c = map(int, stdin.readline().strip().split())", "getcontext().prec = c", "print(Decimal(a) / Decimal(b))" ]
from sys import stdin n = int(stdin.readline().strip()) print "%.3lf" % sum([1.0/x for x in range(1,n+1)])
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 773, 3, 1, 0, 0, 901, 10, 3 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "n = int(stdin.readline().strip())", "print(\"%.3lf\" % sum([1.0/x for x in range(1,n+1)]))" ]
from sys import stdin print len(stdin.readline().strip())
[ [ 1, 0, 0.5, 0.5, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 8, 0, 1, 0.5, 0, 0.66, 1, 535, 3, 1, 0, 0, 0, 0, 4 ] ]
[ "from sys import stdin", "print(len(stdin.readline().strip()))" ]
from itertools import product from math import * def issqrt(n): s = int(floor(sqrt(n))) return s*s == n aabb = [a*1100+b*11 for a,b in product(range(1,10),range(10))] print ' '.join(map(str, filter(issqrt, aabb)))
[ [ 1, 0, 0.1111, 0.1111, 0, 0.66, 0, 808, 0, 1, 0, 0, 808, 0, 0 ], [ 1, 0, 0.2222, 0.1111, 0, 0.66, 0.25, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 2, 0, 0.5556, 0.3333, 0, 0....
[ "from itertools import product", "from math import *", "def issqrt(n):\n s = int(floor(sqrt(n)))\n return s*s == n", " s = int(floor(sqrt(n)))", " return s*s == n", "aabb = [a*1100+b*11 for a,b in product(range(1,10),range(10))]", "print(' '.join(map(str, filter(issqrt, aabb))))" ]
from sys import stdin a = map(int, stdin.readline().strip().split()) print "%d %d %.3lf" % (min(a), max(a), float(sum(a)) / len(a))
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 475, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "a = map(int, stdin.readline().strip().split())", "print(\"%d %d %.3lf\" % (min(a), max(a), float(sum(a)) / len(a)))" ]
from sys import stdin def cycle(n): if n == 1: return 0 elif n % 2 == 1: return cycle(n*3+1) + 1 else: return cycle(n/2) + 1 n = int(stdin.readline().strip()) print cycle(n)
[ [ 1, 0, 0.1111, 0.1111, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 2, 0, 0.5, 0.4444, 0, 0.66, 0.3333, 276, 0, 1, 1, 0, 0, 0, 2 ], [ 4, 1, 0.5556, 0.3333, 1, 0.52,...
[ "from sys import stdin", "def cycle(n):\n if n == 1: return 0\n elif n % 2 == 1: return cycle(n*3+1) + 1\n else: return cycle(n/2) + 1", " if n == 1: return 0\n elif n % 2 == 1: return cycle(n*3+1) + 1\n else: return cycle(n/2) + 1", " if n == 1: return 0", " elif n % 2 == 1: return cycle(n*3+1) + 1...
from sys import stdin n = int(stdin.readline().strip()) count = n*2-1 for i in range(n): print ' '*i + '#'*count count -= 2
[ [ 1, 0, 0.1667, 0.1667, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.3333, 0.1667, 0, 0.66, 0.3333, 773, 3, 1, 0, 0, 901, 10, 3 ], [ 14, 0, 0.5, 0.1667, 0, ...
[ "from sys import stdin", "n = int(stdin.readline().strip())", "count = n*2-1", "for i in range(n):\n print(' '*i + '#'*count)\n count -= 2", " print(' '*i + '#'*count)" ]
for abc in range(123, 329): big = str(abc) + str(abc*2) + str(abc*3) if(''.join(sorted(big)) == '123456789'): print abc, abc*2, abc*3
[ [ 6, 0, 0.75, 1, 0, 0.66, 0, 38, 3, 0, 0, 0, 0, 0, 4 ], [ 14, 1, 1, 0.5, 1, 0.74, 0, 235, 4, 0, 0, 0, 0, 0, 3 ] ]
[ "for abc in range(123, 329):\n big = str(abc) + str(abc*2) + str(abc*3)", " big = str(abc) + str(abc*2) + str(abc*3)" ]
from sys import stdin n, m = map(int, stdin.readline().strip().split()) print "%.5lf" % sum([1.0/i/i for i in range(n,m+1)])
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 51, 3, 2, 0, 0, 53, 10, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from sys import stdin", "n, m = map(int, stdin.readline().strip().split())", "print(\"%.5lf\" % sum([1.0/i/i for i in range(n,m+1)]))" ]
from sys import stdin data = map(int, stdin.readline().strip().split()) n, m = data[0], data[-1] data = data[1:-1] print len(filter(lambda x: x < m, data))
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.4, 0.2, 0, 0.66, 0.25, 929, 3, 2, 0, 0, 53, 10, 4 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.5, ...
[ "from sys import stdin", "data = map(int, stdin.readline().strip().split())", "n, m = data[0], data[-1]", "data = data[1:-1]", "print(len(filter(lambda x: x < m, data)))" ]
from sys import stdin def solve(a, b, c): for i in range(10, 101): if i % 3 == a and i % 5 == b and i % 7 == c: print i return print 'No answer' a, b, c = map(int, stdin.readline().strip().split()) solve(a, b, c)
[ [ 1, 0, 0.0909, 0.0909, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 2, 0, 0.5, 0.5455, 0, 0.66, 0.3333, 599, 0, 3, 0, 0, 0, 0, 3 ], [ 6, 1, 0.5, 0.3636, 1, 0.7, ...
[ "from sys import stdin", "def solve(a, b, c):\n for i in range(10, 101):\n if i % 3 == a and i % 5 == b and i % 7 == c:\n print(i)\n return\n print('No answer')", " for i in range(10, 101):\n if i % 3 == a and i % 5 == b and i % 7 == c:\n print(i)\n return", " if i % 3 == a and...
from itertools import product sol = [a*100+b*10+c for a,b,c in product(range(1,10), range(10), range(10)) if a**3+b**3+c**3 == a*100+b*10+c] print '\n'.join(map(str, sol))
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 808, 0, 1, 0, 0, 808, 0, 0 ], [ 14, 0, 0.6667, 0.3333, 0, 0.66, 0.5, 723, 5, 0, 0, 0, 0, 0, 4 ], [ 8, 0, 1, 0.3333, 0, 0.66, ...
[ "from itertools import product", "sol = [a*100+b*10+c for a,b,c in product(range(1,10), range(10), range(10)) if a**3+b**3+c**3 == a*100+b*10+c]", "print('\\n'.join(map(str, sol)))" ]
from sys import stdin from math import * n = int(stdin.readline().strip()) print sum(map(factorial, range(1,n+1))) % (10**6)
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.4, 0.2, 0, 0.66, 0.3333, 526, 0, 1, 0, 0, 526, 0, 0 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.6667,...
[ "from sys import stdin", "from math import *", "n = int(stdin.readline().strip())", "print(sum(map(factorial, range(1,n+1))) % (10**6))" ]
''' Module which brings history information about files from Mercurial. @author: Rodrigo Damazio ''' import re import subprocess REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*') def _GetOutputLines(args): ''' Runs an external process and returns its output as a list of lines. @param args: the arguments to run ''' process = subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines = True, shell = False) output = process.communicate()[0] return output.splitlines() def FillMercurialRevisions(filename, parsed_file): ''' Fills the revs attribute of all strings in the given parsed file with a list of revisions that touched the lines corresponding to that string. @param filename: the name of the file to get history for @param parsed_file: the parsed file to modify ''' # Take output of hg annotate to get revision of each line output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename]) # Create a map of line -> revision (key is list index, line 0 doesn't exist) line_revs = ['dummy'] for line in output_lines: rev_match = REVISION_REGEX.match(line) if not rev_match: raise 'Unexpected line of output from hg: %s' % line rev_hash = rev_match.group('hash') line_revs.append(rev_hash) for str in parsed_file.itervalues(): # Get the lines that correspond to each string start_line = str['startLine'] end_line = str['endLine'] # Get the revisions that touched those lines revs = [] for line_number in range(start_line, end_line + 1): revs.append(line_revs[line_number]) # Merge with any revisions that were already there # (for explict revision specification) if 'revs' in str: revs += str['revs'] # Assign the revisions to the string str['revs'] = frozenset(revs) def DoesRevisionSuperceed(filename, rev1, rev2): ''' Tells whether a revision superceeds another. This essentially means that the older revision is an ancestor of the newer one. This also returns True if the two revisions are the same. 
@param rev1: the revision that may be superceeding the other @param rev2: the revision that may be superceeded @return: True if rev1 superceeds rev2 or they're the same ''' if rev1 == rev2: return True # TODO: Add filename args = ['hg', 'log', '-r', 'ancestors(%s)' % rev1, '--template', '{node|short}\n', filename] output_lines = _GetOutputLines(args) return rev2 in output_lines def NewestRevision(filename, rev1, rev2): ''' Returns which of two revisions is closest to the head of the repository. If none of them is the ancestor of the other, then we return either one. @param rev1: the first revision @param rev2: the second revision ''' if DoesRevisionSuperceed(filename, rev1, rev2): return rev1 return rev2
[ [ 8, 0, 0.0319, 0.0532, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0745, 0.0106, 0, 0.66, 0.1429, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0851, 0.0106, 0, 0.66...
[ "'''\nModule which brings history information about files from Mercurial.\n\n@author: Rodrigo Damazio\n'''", "import re", "import subprocess", "REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')", "def _GetOutputLines(args):\n '''\n Runs an external process and returns its output as a list of lines...
#!/usr/bin/python ''' Entry point for My Tracks i18n tool. @author: Rodrigo Damazio ''' import mytracks.files import mytracks.translate import mytracks.validate import sys def Usage(): print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0] print 'Commands are:' print ' cleanup' print ' translate' print ' validate' sys.exit(1) def Translate(languages): ''' Asks the user to interactively translate any missing or oudated strings from the files for the given languages. @param languages: the languages to translate ''' validator = mytracks.validate.Validator(languages) validator.Validate() missing = validator.missing_in_lang() outdated = validator.outdated_in_lang() for lang in languages: untranslated = missing[lang] + outdated[lang] if len(untranslated) == 0: continue translator = mytracks.translate.Translator(lang) translator.Translate(untranslated) def Validate(languages): ''' Computes and displays errors in the string files for the given languages. @param languages: the languages to compute for ''' validator = mytracks.validate.Validator(languages) validator.Validate() error_count = 0 if (validator.valid()): print 'All files OK' else: for lang, missing in validator.missing_in_master().iteritems(): print 'Missing in master, present in %s: %s:' % (lang, str(missing)) error_count = error_count + len(missing) for lang, missing in validator.missing_in_lang().iteritems(): print 'Missing in %s, present in master: %s:' % (lang, str(missing)) error_count = error_count + len(missing) for lang, outdated in validator.outdated_in_lang().iteritems(): print 'Outdated in %s: %s:' % (lang, str(outdated)) error_count = error_count + len(outdated) return error_count if __name__ == '__main__': argv = sys.argv argc = len(argv) if argc < 2: Usage() languages = mytracks.files.GetAllLanguageFiles() if argc == 3: langs = set(argv[2:]) if not langs.issubset(languages): raise 'Language(s) not found' # Filter just to the languages specified languages = dict((lang, lang_file) for lang, 
lang_file in languages.iteritems() if lang in langs or lang == 'en' ) cmd = argv[1] if cmd == 'translate': Translate(languages) elif cmd == 'validate': error_count = Validate(languages) else: Usage() error_count = 0 print '%d errors found.' % error_count
[ [ 8, 0, 0.0417, 0.0521, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0833, 0.0104, 0, 0.66, 0.125, 640, 0, 1, 0, 0, 640, 0, 0 ], [ 1, 0, 0.0938, 0.0104, 0, 0.66,...
[ "'''\nEntry point for My Tracks i18n tool.\n\n@author: Rodrigo Damazio\n'''", "import mytracks.files", "import mytracks.translate", "import mytracks.validate", "import sys", "def Usage():\n print('Usage: %s <command> [<language> ...]\\n' % sys.argv[0])\n print('Commands are:')\n print(' cleanup')\n p...
''' Module which prompts the user for translations and saves them. TODO: implement @author: Rodrigo Damazio ''' class Translator(object): ''' classdocs ''' def __init__(self, language): ''' Constructor ''' self._language = language def Translate(self, string_names): print string_names
[ [ 8, 0, 0.1905, 0.3333, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.7143, 0.619, 0, 0.66, 1, 229, 0, 2, 0, 0, 186, 0, 1 ], [ 8, 1, 0.5238, 0.1429, 1, 0.79, ...
[ "'''\nModule which prompts the user for translations and saves them.\n\nTODO: implement\n\n@author: Rodrigo Damazio\n'''", "class Translator(object):\n '''\n classdocs\n '''\n\n def __init__(self, language):\n '''\n Constructor", " '''\n classdocs\n '''", " def __init__(self, language):\n '''...
''' Module which compares languague files to the master file and detects issues. @author: Rodrigo Damazio ''' import os from mytracks.parser import StringsParser import mytracks.history class Validator(object): def __init__(self, languages): ''' Builds a strings file validator. Params: @param languages: a dictionary mapping each language to its corresponding directory ''' self._langs = {} self._master = None self._language_paths = languages parser = StringsParser() for lang, lang_dir in languages.iteritems(): filename = os.path.join(lang_dir, 'strings.xml') parsed_file = parser.Parse(filename) mytracks.history.FillMercurialRevisions(filename, parsed_file) if lang == 'en': self._master = parsed_file else: self._langs[lang] = parsed_file self._Reset() def Validate(self): ''' Computes whether all the data in the files for the given languages is valid. ''' self._Reset() self._ValidateMissingKeys() self._ValidateOutdatedKeys() def valid(self): return (len(self._missing_in_master) == 0 and len(self._missing_in_lang) == 0 and len(self._outdated_in_lang) == 0) def missing_in_master(self): return self._missing_in_master def missing_in_lang(self): return self._missing_in_lang def outdated_in_lang(self): return self._outdated_in_lang def _Reset(self): # These are maps from language to string name list self._missing_in_master = {} self._missing_in_lang = {} self._outdated_in_lang = {} def _ValidateMissingKeys(self): ''' Computes whether there are missing keys on either side. ''' master_keys = frozenset(self._master.iterkeys()) for lang, file in self._langs.iteritems(): keys = frozenset(file.iterkeys()) missing_in_master = keys - master_keys missing_in_lang = master_keys - keys if len(missing_in_master) > 0: self._missing_in_master[lang] = missing_in_master if len(missing_in_lang) > 0: self._missing_in_lang[lang] = missing_in_lang def _ValidateOutdatedKeys(self): ''' Computers whether any of the language keys are outdated with relation to the master keys. 
''' for lang, file in self._langs.iteritems(): outdated = [] for key, str in file.iteritems(): # Get all revisions that touched master and language files for this # string. master_str = self._master[key] master_revs = master_str['revs'] lang_revs = str['revs'] if not master_revs or not lang_revs: print 'WARNING: No revision for %s in %s' % (key, lang) continue master_file = os.path.join(self._language_paths['en'], 'strings.xml') lang_file = os.path.join(self._language_paths[lang], 'strings.xml') # Assume that the repository has a single head (TODO: check that), # and as such there is always one revision which superceeds all others. master_rev = reduce( lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2), master_revs) lang_rev = reduce( lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2), lang_revs) # If the master version is newer than the lang version if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev): outdated.append(key) if len(outdated) > 0: self._outdated_in_lang[lang] = outdated
[ [ 8, 0, 0.0304, 0.0522, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66, 0.25, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0783, 0.0087, 0, 0.66, ...
[ "'''\nModule which compares languague files to the master file and detects\nissues.\n\n@author: Rodrigo Damazio\n'''", "import os", "from mytracks.parser import StringsParser", "import mytracks.history", "class Validator(object):\n\n def __init__(self, languages):\n '''\n Builds a strings file valida...
''' Module for dealing with resource files (but not their contents). @author: Rodrigo Damazio ''' import os.path from glob import glob import re MYTRACKS_RES_DIR = 'MyTracks/res' ANDROID_MASTER_VALUES = 'values' ANDROID_VALUES_MASK = 'values-*' def GetMyTracksDir(): ''' Returns the directory in which the MyTracks directory is located. ''' path = os.getcwd() while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)): if path == '/': raise 'Not in My Tracks project' # Go up one level path = os.path.split(path)[0] return path def GetAllLanguageFiles(): ''' Returns a mapping from all found languages to their respective directories. ''' mytracks_path = GetMyTracksDir() res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK) language_dirs = glob(res_dir) master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES) if len(language_dirs) == 0: raise 'No languages found!' if not os.path.isdir(master_dir): raise 'Couldn\'t find master file' language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', dir)[0],dir) for dir in language_dirs] language_tuples.append(('en', master_dir)) return dict(language_tuples)
[ [ 8, 0, 0.0667, 0.1111, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1333, 0.0222, 0, 0.66, 0.125, 79, 0, 1, 0, 0, 79, 0, 0 ], [ 1, 0, 0.1556, 0.0222, 0, 0.66, ...
[ "'''\nModule for dealing with resource files (but not their contents).\n\n@author: Rodrigo Damazio\n'''", "import os.path", "from glob import glob", "import re", "MYTRACKS_RES_DIR = 'MyTracks/res'", "ANDROID_MASTER_VALUES = 'values'", "ANDROID_VALUES_MASK = 'values-*'", "def GetMyTracksDir():\n '''\n...
''' Module which parses a string XML file. @author: Rodrigo Damazio ''' from xml.parsers.expat import ParserCreate import re #import xml.etree.ElementTree as ET class StringsParser(object): ''' Parser for string XML files. This object is not thread-safe and should be used for parsing a single file at a time, only. ''' def Parse(self, file): ''' Parses the given file and returns a dictionary mapping keys to an object with attributes for that key, such as the value, start/end line and explicit revisions. In addition to the standard XML format of the strings file, this parser supports an annotation inside comments, in one of these formats: <!-- KEEP_PARENT name="bla" --> <!-- KEEP_PARENT name="bla" rev="123456789012" --> Such an annotation indicates that we're explicitly inheriting form the master file (and the optional revision says that this decision is compatible with the master file up to that revision). @param file: the name of the file to parse ''' self._Reset() # Unfortunately expat is the only parser that will give us line numbers self._xml_parser = ParserCreate() self._xml_parser.StartElementHandler = self._StartElementHandler self._xml_parser.EndElementHandler = self._EndElementHandler self._xml_parser.CharacterDataHandler = self._CharacterDataHandler self._xml_parser.CommentHandler = self._CommentHandler file_obj = open(file) self._xml_parser.ParseFile(file_obj) file_obj.close() return self._all_strings def _Reset(self): self._currentString = None self._currentStringName = None self._currentStringValue = None self._all_strings = {} def _StartElementHandler(self, name, attrs): if name != 'string': return if 'name' not in attrs: return assert not self._currentString assert not self._currentStringName self._currentString = { 'startLine' : self._xml_parser.CurrentLineNumber, } if 'rev' in attrs: self._currentString['revs'] = [attrs['rev']] self._currentStringName = attrs['name'] self._currentStringValue = '' def _EndElementHandler(self, name): if name != 
'string': return assert self._currentString assert self._currentStringName self._currentString['value'] = self._currentStringValue self._currentString['endLine'] = self._xml_parser.CurrentLineNumber self._all_strings[self._currentStringName] = self._currentString self._currentString = None self._currentStringName = None self._currentStringValue = None def _CharacterDataHandler(self, data): if not self._currentString: return self._currentStringValue += data _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+' r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?' r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*', re.MULTILINE | re.DOTALL) def _CommentHandler(self, data): keep_parent_match = self._KEEP_PARENT_REGEX.match(data) if not keep_parent_match: return name = keep_parent_match.group('name') self._all_strings[name] = { 'keepParent' : True, 'startLine' : self._xml_parser.CurrentLineNumber, 'endLine' : self._xml_parser.CurrentLineNumber } rev = keep_parent_match.group('rev') if rev: self._all_strings[name]['revs'] = [rev]
[ [ 8, 0, 0.0261, 0.0435, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0609, 0.0087, 0, 0.66, 0.3333, 573, 0, 1, 0, 0, 573, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66...
[ "'''\nModule which parses a string XML file.\n\n@author: Rodrigo Damazio\n'''", "from xml.parsers.expat import ParserCreate", "import re", "class StringsParser(object):\n '''\n Parser for string XML files.\n\n This object is not thread-safe and should be used for parsing a single file at\n a time, only.\n...
import os, re, sys, urllib from time import sleep from shutil import move # Disable traceback sys.tracebacklimit = 0 # Global for finding images htmldl = '<a href="//images.4chan.org/' htmlboardfind = '[<a href="res/[0-9]+" class="replylink">Reply</a>]' # Dynamic Print Function def dynamic_print(msg): sys.stdout.write("\r"+msg) sys.stdout.flush() # Puts html content into a variable def geturl(url): f = urllib.urlopen(url) fread = f.read() return fread def countfiles(path): number_of_files = sum(1 for item in os.listdir(path) if os.path.isfile(os.path.join(path, item))) return number_of_files # Check url to know if it's a board, a thread or an invalid link def checkurl(argurl): rthread = re.findall('http://boards.4chan.org/[a-z]+/res/', argurl) rboard = re.findall('http://boards.4chan.org/[a-z]+/$', argurl) if rthread: return 'thread' elif rboard: return 'board' else: return 'error' # Returns thread number def threadnumber(argurl): thread = 'http://boards.4chan.org/[a-z]+/res/' rthread = argurl.split('/res/') return rthread[1] # Returns board letter. def boardletter(argurl): board = 'http://boards.4chan.org/[a-z]+' rboard = re.findall(board, argurl) return rboard[0].split('.org/')[1] # Resolves the path to put the images based on "board/thread_number" def path(threadnumber): board = boardletter(argurl) if os.path.isdir(board+'/'+threadnumber): return True elif os.path.isdir(board): os.path.join(board) os.mkdir(os.path.expanduser(board+'/'+threadnumber)) else: os.mkdir(board) os.mkdir(os.path.expanduser(board+'/'+threadnumber)) # Dump the thread. 
def dump_thread(url, boardletter, threadnumber): print url fread = geturl(url) x = 1 p = 1 if fread.count(htmldl) > 0: while x <= fread.count(htmldl): p = fread.find(htmldl, p) concatenate = '' filename = '' # Set concatenate and filename for i in range(p+11, len(htmldl)+p+30): if fread[i] == '"': break concatenate = concatenate + fread[i] if fread[i] == '/': filename = '' else: filename = filename + fread[i] # Print status msg = "[%i/%i] %s" % (x,fread.count(htmldl),str(filename)) if x == fread.count(htmldl): dynamic_print(msg) dynamic_print("") else: dynamic_print(msg) # Download and handle file/folders # If already downloaded, jump if os.path.isfile(boardletter+'/'+threadnumber+'/'+filename): jump = True # If incomplete, remove it, download and move elif os.path.isfile(filename): os.remove(filename) urllib.urlretrieve('http://'+concatenate, str(filename)) move(filename, boardletter+'/'+threadnumber) # Download and move else: urllib.urlretrieve('http://'+concatenate, str(filename)) move(filename, boardletter+'/'+threadnumber) p += 1 x += 1 else: return False def dump_board(argurl): page = 1 board = str(boardletter(argurl)) result = [] x = 0 print 'Dumping /'+board+'/' while page > 0: fread = geturl(argurl+'/'+str(page)) threads = re.findall(htmlboardfind, fread) for t in threads: url = 'http://boards.4chan.org/'+board+'/res/'+t.split('res/')[1].split('"')[0] result.append(url) x += 1 if len(threads) == 0: break page += 1 return result def update(): for dirname, dirnames, filenames in os.walk('.'): if len(dirname.split('\\')) > 2: dirurl = 'http://boards.4chan.org/'+dirname.split('\\')[1]+'/res/'+dirname.split('\\')[2] if countfiles(dirname.split('\\')[1]+'/'+dirname.split('\\')[2]) != geturl(dirurl).count(htmldl): print dirurl dump_thread(geturl(dirurl), boardletter(dirurl), threadnumber(dirurl)) else: print dirurl def doupdate(continuous=False, timer=60): if continuous == False: update() while continuous != False: update() print 'Waiting '+str(timer)+' seconds 
to refresh...' sleep(timer) ###### Program execution starts here ##### # Get url from argument try: arguments = sys.argv[1:] except IndexError: print "I've failed, master." # Threat the given url/command. if len(arguments) > 0: noarg = False for argurl in arguments: if checkurl(argurl) == 'thread': path(threadnumber(argurl)) dump_thread(argurl, boardletter(argurl), threadnumber(argurl)) elif checkurl(argurl) == 'board': for i in dump_board(argurl): path(threadnumber(i)) dump_thread(i, boardletter(i), threadnumber(i)) elif argurl == '--update': doupdate() elif argurl == '-u': doupdate(True, 120) else: print "This does not seem to be valid link or option." else: noarg = True if noarg == True: print '' print ' =================================================' print ' usage: %s url | Download all threads' % sys.argv[0] print ' usage: %s -u | Update' % sys.argv[0] print ' -------------------------------------------------' print ' Note:' print ' if you use a board for the url, it will get' print ' all the threads available at the moment.' print ' =================================================' sys.exit(1)
[ [ 1, 0, 0.0054, 0.0054, 0, 0.66, 0, 688, 0, 4, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0109, 0.0054, 0, 0.66, 0.0526, 654, 0, 1, 0, 0, 654, 0, 0 ], [ 1, 0, 0.0163, 0.0054, 0, ...
[ "import os, re, sys, urllib", "from time import sleep", "from shutil import move", "sys.tracebacklimit = 0", "htmldl = '<a href=\"//images.4chan.org/'", "htmlboardfind = '[<a href=\"res/[0-9]+\" class=\"replylink\">Reply</a>]'", "def dynamic_print(msg):\t\n sys.stdout.write(\"\\r\"+msg)\n sys.stdo...