# (dataset-extraction residue, commented out so the file parses: markdown table
#  header "code stringlengths 1 1.72M | language stringclasses 1 value | |---|---|")
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Utility code for managing design documents."""
from copy import deepcopy
from inspect import getsource
from itertools import groupby
from operator import attrgetter
from textwrap import dedent
from types import FunctionType
__all__ = ['ViewDefinition']
__docformat__ = 'restructuredtext en'
class ViewDefinition(object):
    r"""Definition of a view stored in a specific design document.

    An instance of this class can be used to access the results of the view,
    as well as to keep the view definition in the design document up to date
    with the definition in the application code.

    >>> from couchdb import Server
    >>> server = Server()
    >>> db = server.create('python-tests')

    >>> view = ViewDefinition('tests', 'all', '''function(doc) {
    ...     emit(doc._id, null);
    ... }''')
    >>> view.get_doc(db)

    The view is not yet stored in the database, in fact, design doc doesn't
    even exist yet. That can be fixed using the `sync` method:

    >>> view.sync(db)
    >>> design_doc = view.get_doc(db)
    >>> design_doc                                      #doctest: +ELLIPSIS
    <Document '_design/tests'@'...' {...}>
    >>> print(design_doc['views']['all']['map'])
    function(doc) {
        emit(doc._id, null);
    }

    If you use a Python view server, you can also use Python functions instead
    of code embedded in strings:

    >>> def my_map(doc):
    ...     yield doc['somekey'], doc['somevalue']
    >>> view = ViewDefinition('test2', 'somename', my_map, language='python')
    >>> view.sync(db)
    >>> design_doc = view.get_doc(db)
    >>> design_doc                                      #doctest: +ELLIPSIS
    <Document '_design/test2'@'...' {...}>
    >>> print(design_doc['views']['somename']['map'])
    def my_map(doc):
        yield doc['somekey'], doc['somevalue']

    Use the static `sync_many()` method to create or update a collection of
    views in the database in an atomic and efficient manner, even across
    different design documents.

    >>> del server['python-tests']
    """

    def __init__(self, design, name, map_fun, reduce_fun=None,
                 language='javascript', wrapper=None, options=None,
                 **defaults):
        """Initialize the view definition.

        Note that the code in `map_fun` and `reduce_fun` is automatically
        dedented, that is, any common leading whitespace is removed from each
        line.

        :param design: the name of the design document (a leading
                       ``_design/`` prefix, if present, is stripped)
        :param name: the name of the view
        :param map_fun: the map function code (a string, or a Python
                        function object when using a Python view server)
        :param reduce_fun: the reduce function code (optional)
        :param language: the name of the language used
        :param wrapper: an optional callable that should be used to wrap the
                        result rows
        :param options: view specific options (e.g. {'collation': 'raw'})
        :param defaults: default query string parameters applied on every
                         invocation (individual calls may override them)
        """
        if design.startswith('_design/'):
            design = design[8:]
        self.design = design
        self.name = name
        if isinstance(map_fun, FunctionType):
            # Store the function's source so a Python view server can eval it;
            # decorators would not survive round-tripping, so strip them.
            map_fun = _strip_decorators(getsource(map_fun).rstrip())
        self.map_fun = dedent(map_fun.lstrip('\n'))
        if isinstance(reduce_fun, FunctionType):
            reduce_fun = _strip_decorators(getsource(reduce_fun).rstrip())
        if reduce_fun:
            reduce_fun = dedent(reduce_fun.lstrip('\n'))
        self.reduce_fun = reduce_fun
        self.language = language
        self.wrapper = wrapper
        self.options = options
        self.defaults = defaults

    def __call__(self, db, **options):
        """Execute the view in the given database.

        :param db: the `Database` instance
        :param options: optional query string parameters (override the
                        defaults given to the constructor)
        :return: the view results
        :rtype: `ViewResults`
        """
        merged_options = self.defaults.copy()
        merged_options.update(options)
        return db.view('/'.join([self.design, self.name]),
                       wrapper=self.wrapper, **merged_options)

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, '/'.join([
            '_design', self.design, '_view', self.name
        ]))

    def get_doc(self, db):
        """Retrieve and return the design document corresponding to this view
        definition from the given database.

        :param db: the `Database` instance
        :return: a `client.Document` instance, or `None` if the design
                 document does not exist in the database
        :rtype: `Document`
        """
        return db.get('_design/%s' % self.design)

    def sync(self, db):
        """Ensure that the view stored in the database matches the view
        defined by this instance.

        :param db: the `Database` instance
        """
        type(self).sync_many(db, [self])

    @staticmethod
    def sync_many(db, views, remove_missing=False, callback=None):
        """Ensure that the views stored in the database that correspond to a
        given list of `ViewDefinition` instances match the code defined in
        those instances.

        This function might update more than one design document. This is
        done using the CouchDB bulk update feature to ensure atomicity of the
        operation.

        :param db: the `Database` instance
        :param views: a sequence of `ViewDefinition` instances
        :param remove_missing: whether views found in a design document that
                               are not found in the list of `ViewDefinition`
                               instances should be removed
        :param callback: a callback function that is invoked when a design
                         document gets updated; the callback gets passed the
                         design document as only parameter, before that doc
                         has actually been saved back to the database
        :raises ValueError: if views with different languages end up in the
                            same design document
        """
        docs = []
        # groupby() only merges *adjacent* items with equal keys, so sort by
        # design document first; otherwise a scattered sequence would emit
        # duplicate docs with the same _id in one bulk update.
        views = sorted(views, key=attrgetter('design'))
        for design, view_group in groupby(views, key=attrgetter('design')):
            doc_id = '_design/%s' % design
            doc = db.get(doc_id, {'_id': doc_id})
            orig_doc = deepcopy(doc)
            languages = set()

            missing = list(doc.get('views', {}).keys())
            for view in view_group:
                funcs = {'map': view.map_fun}
                if view.reduce_fun:
                    funcs['reduce'] = view.reduce_fun
                if view.options:
                    funcs['options'] = view.options
                doc.setdefault('views', {})[view.name] = funcs
                languages.add(view.language)
                if view.name in missing:
                    missing.remove(view.name)

            if remove_missing and missing:
                for name in missing:
                    del doc['views'][name]
            elif missing and 'language' in doc:
                # Keep views we are not touching; they constrain the language.
                languages.add(doc['language'])

            if len(languages) > 1:
                # Use % interpolation: passing the list as a second argument
                # to ValueError left the message unformatted.
                raise ValueError('Found different language views in one '
                                 'design document (%r)' % list(languages))
            doc['language'] = list(languages)[0]

            if doc != orig_doc:
                if callback is not None:
                    callback(doc)
                docs.append(doc)

        db.update(docs)
def _strip_decorators(code):
retval = []
beginning = True
for line in code.splitlines():
if beginning and not line.isspace():
if line.lstrip().startswith('@'):
continue
beginning = False
retval.append(line)
return '\n'.join(retval)
# (dataset-extraction residue: language cell "| Python |" — end of the couchdb
#  view utilities; Rietveld upload.py follows)
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import ConfigParser
import cookielib
import fnmatch
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1

# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"

# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.appspot.com"

# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024

# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_UNKNOWN = "Unknown"

# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = ['application/javascript', 'application/x-javascript',
                  'application/xml', 'application/x-freemind',
                  'application/x-sh']

# Maps lowercase and abbreviated VCS names to their canonical constant above.
VCS_ABBREVIATIONS = {
    VCS_MERCURIAL.lower(): VCS_MERCURIAL,
    "hg": VCS_MERCURIAL,
    VCS_SUBVERSION.lower(): VCS_SUBVERSION,
    "svn": VCS_SUBVERSION,
    VCS_GIT.lower(): VCS_GIT,
}

# The result of parsing Subversion's [auto-props] setting.
# None until populated; presumably filled in lazily by SVN-related code
# further down the file — confirm against the full source.
svn_auto_props_map = None
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.
  """
  cache_path = os.path.expanduser("~/.last_codereview_email_address")
  previous = ""
  if os.path.exists(cache_path):
    # Best effort: an unreadable cache file just means no suggestion.
    try:
      cache = open(cache_path, "r")
      previous = cache.readline().strip("\n")
      cache.close()
      prompt += " [%s]" % previous
    except IOError:
      pass
  answer = raw_input(prompt + ": ").strip()
  if not answer:
    return previous
  # Remember the newly entered address for next time; ignore write failures.
  try:
    cache = open(cache_path, "w")
    cache.write(answer)
    cache.close()
  except IOError:
    pass
  return answer
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1."""
  sys.stderr.write("%s\n" % msg)
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    # fp=None: there is no response body stream to expose on this error.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.args = args
    # ClientLogin reports the failure category under the "Error" key
    # (e.g. "BadAuthentication", "CaptchaRequired" — see _Authenticate).
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False, account_type=AUTH_ACCOUNT_TYPE):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
      account_type: Account type used for authentication. Defaults to
        AUTH_ACCOUNT_TYPE.
    """
    self.host = host
    # Default to plain HTTP when the caller passed a bare host[:port].
    if (not self.host.startswith("http://") and
        not self.host.startswith("https://")):
      self.host = "http://" + self.host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    # NOTE(review): the mutable default {} is shared across instances;
    # harmless as long as callers never mutate the returned dict.
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    self.account_type = account_type
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email:    The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = self.account_type
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # ClientLogin's success body is newline-separated "key=value" pairs.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 carries the structured failure reason; surface it as a
        # ClientLoginError so _Authenticate can branch on e.reason.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener ignores redirects, so a successful login surfaces here
      # as an HTTPError carrying the 302 response.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response (or a 302) and
    directs us to authenticate ourselves with ClientLogin.
    """
    # Up to three attempts; only "BadAuthentication" re-prompts, every other
    # ClientLogin failure reason aborts the loop after printing a hint.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.\n"
              "If you are using a Google Apps account the URL is:\n"
              "https://www.google.com/a/yourdomain.com/UnlockCaptcha")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      # NOTE(review): the break paths above fall out of the loop without
      # raising, leaving self.authenticated False; Send() will then call
      # _Authenticate() again on the next 401/302 — confirm intended.
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           extra_headers=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      extra_headers: Dict containing additional HTTP headers that should be
        included in the request (string header names mapped to their values),
        or None to not include any additional headers.
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()

    # The timeout is process-global in urllib2, so save/restore it around
    # the request.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        if extra_headers:
          for header, value in extra_headers.items():
            req.add_header(header, value)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401 or e.code == 302:
            # Auth expired or missing: re-authenticate and retry the request.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Redirects are deliberately not followed: _GetAuthCookie relies on seeing
    the raw 302 from /_ah/login.

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface. Option callbacks store into `options` attributes
# consumed throughout the rest of the file (e.g. options.revision,
# options.download_base).
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default=DEFAULT_REVIEW_SERVER,
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
group.add_option("--account_type", action="store", dest="account_type",
                 metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
                 choices=["GOOGLE", "HOSTED"],
                 help=("Override the default account type "
                       "(defaults to '%default', "
                       "valid choices are 'GOOGLE' and 'HOSTED')."))
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
                 default=False,
                 help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
                 help="Base repository URL (listed as \"Base URL\" when "
                      "viewing issue). If omitted, will be guessed automatically "
                      "for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Base revision/branch/tree to diff against. Use "
                      "rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
group.add_option("--vcs", action="store", dest="vcs",
                 metavar="VCS", default=None,
                 help=("Version control system (optional, usually upload.py "
                       "already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
                 dest="emulate_svn_auto_props", default=False,
                 help=("Emulate Subversion's auto properties feature."))
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if host == "localhost" or host.startswith("localhost:"):
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
def GetUserCredentials():
"""Prompts the user for a username and password."""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
local_email = email
if local_email is None:
local_email = GetEmail("Email (login for uploading to %s)" % server)
password = None
if keyring:
password = keyring.get_password(host, local_email)
if password is not None:
print "Using password from system keyring."
else:
password = getpass.getpass("Password for %s: " % local_email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(host, local_email, password)
return (local_email, password)
return rpc_server_class(server,
GetUserCredentials,
host_override=host_override,
save_cookies=save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.
  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'

  def _utf8(value):
    # Payload values may be unicode; the wire format wants UTF-8 bytes.
    if isinstance(value, unicode):
      return value.encode('utf-8')
    return value

  parts = []
  for (key, value) in fields:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"' % key,
        '',
        _utf8(value),
    ])
  for (key, filename, value) in files:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
            (key, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        _utf8(value),
    ])
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  body = CRLF.join(parts)
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, body
def GetContentType(filename):
  """Helper to guess the content-type from the filename."""
  guessed, _ = mimetypes.guess_type(filename)
  if not guessed:
    return 'application/octet-stream'
  return guessed
# Use a shell for subcommands on Windows to get a PATH search.
# Passed as the `shell=` argument of every subprocess.Popen call below.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True,
                           env=os.environ):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).
    env: Environment mapping for the subprocess (defaults to this process's
      live environment).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines,
                       env=env)
  # stdout is drained completely before stderr is read; NOTE(review): a
  # command that fills the stderr pipe buffer first could presumably block
  # here — confirm the invoked tools write mostly to stdout.
  if print_output:
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
  """Runs a command and returns its stdout, exiting the program on failure.

  Wraps RunShellWithReturnCode(); calls ErrorExit() on a non-zero exit
  status, or (unless silent_ok) when the command produced no output.
  """
  output, status = RunShellWithReturnCode(command, print_output,
                                          universal_newlines, env)
  if status:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not output and not silent_ok:
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""

  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options

  def PostProcessDiff(self, diff):
    """Return the diff with any special post processing this VCS needs, e.g.
    to include an svn-style "Index:"."""
    # Default: pass the diff through unchanged; subclasses may override.
    return diff

  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")

  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty. For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple. Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files

  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""

    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      # Files over the server limit are registered with empty content and a
      # "file_too_large" flag instead of being uploaded.
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)

    # Invert patch_list into {file_id_string: filename}; the id string may
    # carry a "nobase" marker meaning no base content should be uploaded.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)

  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")

  def IsBinary(self, filename):
    """Returns true if the guessed mimetype isn't in the text group."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False  # e.g. README, "real" binaries usually have an extension
    # special case for text files which don't start with text/
    if mimetype in TEXT_MIMETYPES:
      return False
    return not mimetype.startswith("text/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    """Parses options.revision ("N" or "N:M") and caches the guessed base URL."""
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # Base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GuessBase(self, required):
    """Wrapper for _GuessBase; returns the base URL cached by __init__."""
    return self.svn_base

  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        # Strip any embedded "user@" from the host part; it is not needed
        # for the checkout-style URLs constructed below.
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting providers get hand-crafted *checkout* URLs.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          # Generic repository: just ensure a trailing slash.
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def GenerateDiff(self, args):
    """Runs "svn diff" (honoring -r REV) and returns the raw diff output.

    Exits when the output contains no "Index:" or "Property changes on:"
    lines, i.e. no usable patches.
    """
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date': ['Date', 'LastChangedDate'],
      'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
      'Author': ['Author', 'LastChangedBy'],
      'HeadURL': ['HeadURL', 'URL'],
      'Id': ['Id'],
      # Aliases
      'LastChangedDate': ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy': ['LastChangedBy', 'Author'],
      'URL': ['URL', 'HeadURL'],
    }

    def repl(m):
      # Collapse "$Keyword:: value $" (fixed-width form) to padded spaces,
      # and "$Keyword: value $" to the bare "$Keyword$" form.
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)

    # Expand each configured keyword name to all of its known aliases.
    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)

  def GetUnknownFiles(self):
    """Returns the "svn status" lines for files unknown to Subversion ('?')."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Presence in old vs. new listing decides Deleted / Modified / Added.
      # Padded to mimic "svn status" columns (GetBaseFile reads status[3]).
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status

  def GetBaseFile(self, filename):
    """Fetches base content (and current content for binaries) plus status.

    Returns:
      A 4-tuple (base_content, new_content, is_binary, status) where status
      is the first five columns of the "svn status" line.
    """
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        # Binary content must not have its newlines translated.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content, ret_code = RunShellWithReturnCode(
            ["svn", "cat", filename], universal_newlines=universal_newlines)
          if ret_code and status[0] == "R":
            # It's a replaced file without local history (see issue208).
            # The base file needs to be fetched from the server.
            url = "%s/%s" % (self.svn_base, filename)
            base_content = RunShell(["svn", "cat", url],
                                    universal_newlines=universal_newlines,
                                    silent_ok=True)
          elif ret_code:
            # NOTE(review): ErrorExit is called with two arguments here;
            # this looks like it was meant to be "... %s'" % filename — confirm.
            ErrorExit("Got error status from 'svn cat %s'", filename)
        if not is_binary:
          # svn cat expands keywords while svn diff does not; collapse them
          # so the server-side patch application does not mismatch.
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> (hash before, hash after) of base file.
    # Hashes for "no such file" are represented as None.
    self.hashes = {}
    # Map of new filename -> old filename for renames.
    self.renames = {}

  def PostProcessDiff(self, gitdiff):
    """Converts the diff output to include an svn-style "Index:" line as well
    as record the hashes of the files, so we can upload them along with our
    diff."""
    # Special used by git to indicate "no such content".
    NULL_HASH = "0"*40

    def IsFileNew(filename):
      # A file is "new" when its recorded pre-image hash is None (null hash).
      return filename in self.hashes and self.hashes[filename][0] is None

    def AddSubversionPropertyChange(filename):
      """Add svn's property change information into the patch if given file is
      new file.

      We use Subversion's auto-props setting to retrieve its property.
      See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
      Subversion's [auto-props] setting.
      """
      if self.options.emulate_svn_auto_props and IsFileNew(filename):
        svnprops = GetSubversionPropertyChanges(filename)
        if svnprops:
          svndiff.append("\n" + svnprops + "\n")

    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/(.*)$", line)
      if match:
        # Add auto property here for previously seen file.
        if filename is not None:
          AddSubversionPropertyChange(filename)
        filecount += 1
        # Intentionally use the "after" filename so we can show renames.
        filename = match.group(2)
        svndiff.append("Index: %s\n" % filename)
        if match.group(1) != match.group(2):
          self.renames[match.group(2)] = match.group(1)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.(\w+)", line)
        if match:
          before, after = (match.group(1), match.group(2))
          if before == NULL_HASH:
            before = None
          if after == NULL_HASH:
            after = None
          self.hashes[filename] = (before, after)
      # Every original diff line is preserved in the rewritten output.
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    # Add auto property for the last seen file.
    assert filename is not None
    AddSubversionPropertyChange(filename)
    return "".join(svndiff)

  def GenerateDiff(self, extra_args):
    """Runs "git diff" (optionally against options.revision) and returns it."""
    extra_args = extra_args[:]
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    # --no-ext-diff is broken in some versions of Git, so try to work around
    # this by overriding the environment (but there is still a problem if the
    # git config key "diff.external" is used).
    env = os.environ.copy()
    if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
    return RunShell(["git", "diff", "--no-ext-diff", "--full-index", "-M"]
                    + extra_args, env=env)

  def GetUnknownFiles(self):
    """Returns filenames that are untracked and not ignored by git."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetFileContent(self, file_hash, is_binary):
    """Returns the content of a file identified by its git hash."""
    data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
                                           universal_newlines=not is_binary)
    if retcode:
      ErrorExit("Got error status from 'git show %s'" % file_hash)
    return data

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for a file."""
    hash_before, hash_after = self.hashes.get(filename, (None,None))
    base_content = None
    new_content = None
    is_binary = self.IsBinary(filename)
    status = None
    if filename in self.renames:
      status = "A +"  # Match svn attribute name for renames.
      if filename not in self.hashes:
        # If a rename doesn't change the content, we never get a hash.
        base_content = RunShell(["git", "show", "HEAD:" + filename])
    elif not hash_before:
      status = "A"
      base_content = ""
    elif not hash_after:
      status = "D"
    else:
      status = "M"
    is_image = self.IsImage(filename)
    # Grab the before/after content if we need it.
    # We should include file contents if it's text or it's an image.
    if not is_binary or is_image:
      # Grab the base content if we don't have it already.
      if base_content is None and hash_before:
        base_content = self.GetFileContent(hash_before, is_binary)
      # Only include the "after" file if it's an image; otherwise it
      # it is reconstructed from the diff.
      if is_image and hash_after:
        new_content = self.GetFileContent(hash_after, is_binary)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default to the parent of the working directory ("hg parent -q"
      # prints "rev:node"; keep the node part).
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), (filename, self.subdir)
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Runs "hg diff --git" against base_rev and rewrites the headers so the
    output looks like "svn diff" output (adds "Index:" separator lines)."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []  # NOTE(review): unused; kept as-is.
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for a file."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    # A "rev:node" base revision must be reduced to its first component
    # before being passed to "hg cat".
    if ":" in self.base_rev:
      base_rev = self.base_rev.split(":", 1)[0]
    else:
      base_rev = self.base_rev
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    # Only images need their "after" content; text is rebuilt from the diff.
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
      pertaining to filename.
  """
  patches = []
  filename = None
  diff = []
  for line in data.splitlines(True):
    new_filename = None
    if line.startswith('Index:'):
      new_filename = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # Property-change paths use '\' between directories on Windows while
      # regular diff headers use '/'; normalize so the same file does not
      # show up twice.
      candidate = line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != filename:
        # Property change without a content modification: new diff section.
        new_filename = candidate
    if new_filename:
      # Flush the diff collected so far and start a fresh section.
      if filename and diff:
        patches.append((filename, ''.join(diff)))
      filename = new_filename
      diff = [line]
    else:
      diff.append(line)
  if filename and diff:
    patches.append((filename, ''.join(diff)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCSName():
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns a string indicating which VCS is detected.

  Returns:
    A pair (vcs, output). vcs is a string indicating which VCS was detected
    and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, or VCS_UNKNOWN.
    output is a string containing any interesting output from the vcs
    detection routine, or None if there is nothing interesting.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return (VCS_MERCURIAL, out.strip())
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return (VCS_SUBVERSION, None)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return (VCS_GIT, None)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise
  return (VCS_UNKNOWN, None)
def GuessVCS(options):
  """Helper to guess the version control system.

  This verifies any user-specified VersionControlSystem (by command line
  or environment variable). If the user didn't specify one, this examines
  the current directory, guesses which VersionControlSystem we're using,
  and returns an instance of the appropriate class. Exit with an error
  if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  requested = options.vcs or os.environ.get("CODEREVIEW_VCS")
  if requested:
    # The user named a VCS explicitly: translate the abbreviation or bail.
    vcs = VCS_ABBREVIATIONS.get(requested.lower())
    if vcs is None:
      ErrorExit("Unknown version control system %r specified." % requested)
    extra_output = None
  else:
    vcs, extra_output = GuessVCSName()

  if vcs == VCS_MERCURIAL:
    if extra_output is None:
      extra_output = RunShell(["hg", "root"]).strip()
    return MercurialVCS(options, extra_output)
  elif vcs == VCS_SUBVERSION:
    return SubversionVCS(options)
  elif vcs == VCS_GIT:
    return GitVCS(options)

  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def CheckReviewer(reviewer):
  """Validate a reviewer -- either a nickname or an email address.

  Args:
    reviewer: A nickname or an email address.

  Calls ErrorExit() if it is an invalid email address.
  """
  if "@" not in reviewer:
    return  # No '@': assume it is a nickname.
  local_and_domain = reviewer.split("@")
  if len(local_and_domain) > 2:
    ErrorExit("Invalid email address: %r" % reviewer)
  assert len(local_and_domain) == 2
  # Require at least one dot in the domain part.
  if "." not in local_and_domain[1]:
    ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
  """Returns the content of [auto-props] section of Subversion's config file as
  a dictionary.

  Returns:
    A dictionary whose key-value pair corresponds the [auto-props] section's
    key-value pair.
    In following cases, returns empty dictionary:
    - config file doesn't exist, or
    - 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
  """
  # TODO(hayato): Windows users might use a different configuration file path.
  subversion_config = os.path.expanduser("~/.subversion/config")
  if not os.path.exists(subversion_config):
    return {}
  config = ConfigParser.ConfigParser()
  config.read(subversion_config)
  # Auto-props only apply when enabled in [miscellany] AND the section exists.
  enabled = (config.has_section("miscellany") and
             config.has_option("miscellany", "enable-auto-props") and
             config.getboolean("miscellany", "enable-auto-props") and
             config.has_section("auto-props"))
  if not enabled:
    return {}
  props = {}
  for file_pattern in config.options("auto-props"):
    props[file_pattern] = ParseSubversionPropertyValues(
        config.get("auto-props", file_pattern))
  return props
def ParseSubversionPropertyValues(props):
  """Parse the given property value which comes from [auto-props] section and
  returns a list whose element is a (svn_prop_key, svn_prop_value) pair.

  See the following doctest for example.

  >>> ParseSubversionPropertyValues('svn:eol-style=LF')
  [('svn:eol-style', 'LF')]
  >>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
  [('svn:mime-type', 'image/jpeg')]
  >>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
  [('svn:eol-style', 'LF'), ('svn:executable', '*')]
  """
  pairs = []
  for prop in props.split(";"):
    pieces = prop.split("=")
    assert len(pieces) <= 2
    # A bare property name means "set", which Subversion spells as '*'.
    value = pieces[1] if len(pieces) == 2 else "*"
    pairs.append((pieces[0], value))
  return pairs
def GetSubversionPropertyChanges(filename):
  """Return a Subversion's 'Property changes on ...' string, which is used in
  the patch file.

  Args:
    filename: filename whose property might be set by [auto-props] config.

  Returns:
    A string like 'Property changes on |filename| ...' if given |filename|
      matches any entries in [auto-props] section. None, otherwise.
  """
  global svn_auto_props_map
  if svn_auto_props_map is None:
    # Lazily load and cache the [auto-props] configuration on first use.
    svn_auto_props_map = LoadSubversionAutoProperties()

  matched_props = []
  for file_pattern, props in svn_auto_props_map.items():
    if fnmatch.fnmatch(filename, file_pattern):
      matched_props.extend(props)
  if not matched_props:
    return None
  return FormatSubversionPropertyChanges(filename, matched_props)
def FormatSubversionPropertyChanges(filename, props):
  """Returns Subversion's 'Property changes on ...' strings using given filename
  and properties.

  Args:
    filename: filename
    props: A list whose element is a (svn_prop_key, svn_prop_value) pair.

  Returns:
    A string which can be used in the patch file for Subversion.

  See the following doctest for example.

  >>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
  Property changes on: foo.cc
  ___________________________________________________________________
  Added: svn:eol-style
     + LF
  <BLANKLINE>
  """
  output_lines = [
      "Property changes on: %s" % filename,
      "___________________________________________________________________"]
  for key, value in props:
    output_lines.append("Added: " + key)
    output_lines.append("   + " + value)
  return "\n".join(output_lines) + "\n"
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force the C locale -- presumably so VCS tool output is untranslated and
  # parseable; confirm before changing.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  base = options.base_url
  if isinstance(vcs, SubversionVCS):
    # Guessing the base field is only supported for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    guessed_base = vcs.GuessBase(options.download_base)
    if base:
      if guessed_base and base != guessed_base:
        print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
            (base, guessed_base)
    else:
      base = guessed_base
  # NOTE(review): download_base is already true inside this branch, so the
  # assignment is a no-op guard -- confirm the intended condition.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  data = vcs.PostProcessDiff(data)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options.server,
                            options.email,
                            options.host,
                            options.save_cookies,
                            options.account_type)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    # Validate each address before attaching the raw reviewer list.
    for reviewer in options.reviewers.split(','):
      CheckReviewer(reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      CheckReviewer(cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  if options.private:
    if options.issue:
      print "Warning: Private flag ignored when updating an existing issue."
    else:
      form_fields.append(("private", "1"))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Response: first line is a message, second the patchset id, then one
    # "key filename" line per patch.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the last path component of the URL in the message.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Entry point: runs RealMain and converts Ctrl-C into a clean exit."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    # Emit a newline so the status message starts on its own line after ^C.
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)
# Standard script entry point; defer to main() so the module stays importable.
if __name__ == "__main__":
  main()
| Python |
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil
# Directory names to skip while recursing (VCS metadata and build output).
ignore_pattern = re.compile('^(.svn|target|bin|classes)')
# Matches Java source files by extension.
java_pattern = re.compile('^.*\.java')
# Matches import lines of the org.apache.http.annotation package.
annot_pattern = re.compile('import org\.apache\.http\.annotation\.')
def process_dir(dir):
  """Recursively walks 'dir', rewriting annotation imports in *.java files.

  Directories matching ignore_pattern (VCS metadata / build output) are
  skipped; every matching Java source is handed to process_source().
  """
  # 'name' / 'full_path' instead of 'file' / 'f': avoid shadowing the
  # 'file' builtin and make the roles explicit.
  for name in os.listdir(dir):
    full_path = os.path.join(dir, name)
    if os.path.isdir(full_path):
      if not ignore_pattern.match(name):
        process_dir(full_path)
    elif java_pattern.match(name):
      process_source(full_path)
def process_source(filename):
    """Rewrite *filename*, replacing org.apache.http.annotation imports with
    their net.jcip.annotations equivalents.

    The rewrite goes through a temp file; the original file is replaced only
    when at least one line changed, otherwise the temp file is discarded.
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        dst = os.fdopen(tmpfd, 'w')
        try:
            src = open(filename)
            try:
                for line in src:
                    if annot_pattern.match(line):
                        changed = True
                        line = line.replace('import org.apache.http.annotation.', 'import net.jcip.annotations.')
                    dst.write(line)
            finally:
                src.close()
        finally:
            dst.close()
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except:
        # Clean up the temp file, but re-raise: the original swallowed the
        # exception here, hiding I/O failures entirely.
        if os.path.exists(tmpfile):
            os.remove(tmpfile)
        raise
process_dir('.')
| Python |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Handle playfoursquare.com requests, for testing."""

  # API path -> canned capture file served as the response body.
  CAPTURE_PATHS = {
      '/v1/venue': '../captures/api/v1/venue.xml',
      '/v1/addvenue': '../captures/api/v1/venue.xml',
      '/v1/venues': '../captures/api/v1/venues.xml',
      '/v1/user': '../captures/api/v1/user.xml',
      '/v1/checkcity': '../captures/api/v1/checkcity.xml',
      '/v1/checkins': '../captures/api/v1/checkins.xml',
      '/v1/cities': '../captures/api/v1/cities.xml',
      '/v1/switchcity': '../captures/api/v1/switchcity.xml',
      '/v1/tips': '../captures/api/v1/tips.xml',
      '/v1/checkin': '../captures/api/v1/checkin.xml',
      '/history/12345.rss': '../captures/api/v1/feed.xml',
  }

  def do_GET(self):
    logging.warn('do_GET: %s, %s', self.command, self.path)
    url = urlparse.urlparse(self.path)
    logging.warn('do_GET: %s', url)
    # The original also built `query_keys` from parse_qs, but iterating the
    # dict yields keys, so pair[0] was the key's first character -- broken
    # and unused; removed.
    response = self.handle_url(url)
    if response is not None:
      self.send_200()
      try:
        shutil.copyfileobj(response, self.wfile)
      finally:
        # Close the capture file; the original leaked the handle.
        response.close()
      self.wfile.close()

  do_POST = do_GET

  def handle_url(self, url):
    """Return an open capture file for url, or None after sending a 404."""
    path = self.CAPTURE_PATHS.get(url.path)
    if path is None:
      self.send_error(404)
      return None
    logging.warn('Using: %s' % path)
    return open(path)

  def send_200(self):
    """Send a 200 response with an XML content type."""
    self.send_response(200)
    self.send_header('Content-type', 'text/xml')
    self.end_headers()
def main():
  """Serve the fake foursquare API on the port in argv[1] (default 8080)."""
  if len(sys.argv) > 1:
    port = int(sys.argv[1])
  else:
    port = 8080
  # Bind all interfaces so phones/emulators on the LAN can reach the server.
  server_address = ('0.0.0.0', port)
  httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
  sa = httpd.socket.getsockname()
  print "Serving HTTP on", sa[0], "port", sa[1], "..."
  httpd.serve_forever()


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
import os
import subprocess
import sys
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'
captures = sys.argv[1:]
if not captures:
captures = os.listdir(TYPESDIR)
for f in captures:
basename = f.split('.')[0]
javaname = ''.join([c.capitalize() for c in basename.split('_')])
fullpath = os.path.join(TYPESDIR, f)
typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
| Python |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
# Host:port used for every foursquare API connection.
SERVER = 'api.foursquare.com:80'
# All requests are form-encoded POSTs.
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
# HMAC-SHA1 request signing, per the foursquare OAuth docs quoted above.
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
# Endpoint that swaps username/password for an OAuth access token.
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
  """Extract (oauth_token, oauth_token_secret) from an authexchange reply."""
  token_match = re.search('<oauth_token>(.*)</oauth_token>', auth_response)
  secret_match = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
                           auth_response)
  return (token_match.groups()[0], secret_match.groups()[0])
def create_signed_oauth_request(username, password, consumer):
  """Build and HMAC-SHA1-sign a POST request for the authexchange endpoint."""
  request = oauth.OAuthRequest.from_consumer_and_token(
      consumer,
      http_method='POST',
      http_url=AUTHEXCHANGE_URL,
      parameters={'fs_username': username, 'fs_password': password})
  request.sign_request(SIGNATURE_METHOD, consumer, None)
  return request
def main():
  """Fetch the OAuth-protected URL given in argv[1] and print the response."""
  url = urlparse.urlparse(sys.argv[1])
  # Nevermind that the query can have repeated keys.
  parameters = dict(urlparse.parse_qsl(url.query))
  # ~/.oget holds 4 lines (no cached token yet) or 6 lines (token cached).
  password_file = open(os.path.join(user.home, '.oget'))
  lines = [line.strip() for line in password_file.readlines()]
  if len(lines) == 4:
    cons_key, cons_key_secret, username, password = lines
    access_token = None
  else:
    cons_key, cons_key_secret, username, password, token, secret = lines
    access_token = oauth.OAuthToken(token, secret)
  consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
  if not access_token:
    # No cached token: exchange username/password for an access token.
    oauth_request = create_signed_oauth_request(username, password, consumer)
    connection = httplib.HTTPConnection(SERVER)
    headers = {'Content-Type' :'application/x-www-form-urlencoded'}
    connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
                       body=oauth_request.to_postdata(), headers=headers)
    auth_response = connection.getresponse().read()
    token = parse_auth_response(auth_response)
    access_token = oauth.OAuthToken(*token)
    # Cache the token by rewriting ~/.oget in the 6-line form.
    # NOTE(review): the handle is never closed and no trailing newline is
    # written -- confirm this round-trips with the strip() above.
    open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
        cons_key, cons_key_secret, username, password, token[0], token[1])))
  # Sign and send the real request with the access token.
  oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
      access_token, http_method='POST', http_url=url.geturl(),
      parameters=parameters)
  oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
  connection = httplib.HTTPConnection(SERVER)
  connection.request(oauth_request.http_method, oauth_request.to_url(),
                     body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
  print connection.getresponse().read()
  #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
  """Generate a parser class from the XML capture named on the command line."""
  parsed = common.WalkNodesForAttributes(sys.argv[1])
  GenerateClass(*parsed)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extranious } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
  """Build the template-interpolation dict for one field of the parser."""
  camel = lambda s: ''.join(word.capitalize() for word in s.split('_'))
  camel_name = camel(name)
  if children[0]:
    # Explicit child type, e.g. 'Tip' -> 'TipParser'.
    sub_parser = children[0] + 'Parser'
  else:
    # No child type given: assume a plural name and strip the trailing 's'.
    sub_parser = camel_name[:-1] + 'Parser'
  return {
      'type_name': camel(top_node_name),
      'name': name,
      'top_node_name': top_node_name,
      'camel_name': camel_name,
      'parser_name': typ + 'Parser',
      'attribute_name': camel_name.lower().capitalize(),
      'field_name': 'm' + camel_name,
      'typ': typ,
      'timestamp': datetime.datetime.now(),
      'sub_parser_camel_case': sub_parser,
      'sub_type': children[0]
  }


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# XML 'type' attribute values with dedicated handling in the generators;
# any unrecognized type falls back to STRING.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"

# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']

# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}

DEFAULT_CLASS_IMPORTS = [
]

CLASS_IMPORTS = {
    # 'Checkin': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Venue': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Tip': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
}

# Types whose sub-tree is handled by its own XxxParser; WalkNodesForAttributes
# skips their children instead of walking them inline.
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]
# Every recognized 'type' value.
TYPES = COMPLEX + ['boolean']
def WalkNodesForAttributes(path):
  """Parse the xml file getting all attributes.

  <venue>
    <attribute>value</attribute>
  </venue>

  Returns:
    type_name - The java-style name the top node will have. "Venue"
    top_node_name - unadultured name of the xml stanza, probably the type of
      java class we're creating. "venue"
    attributes - {'attribute': 'value'}
  """
  doc = pulldom.parse(path)
  type_name = None
  top_node_name = None
  attributes = {}

  # Depth counter: while > 0 we are inside a COMPLEX element whose children
  # must be skipped (they are parsed by that type's own parser).
  level = 0
  for event, node in doc:
    # For skipping parts of a tree.
    if level > 0:
      if event == pulldom.END_ELEMENT:
        level-=1
        logging.warn('(%s) Skip end: %s' % (str(level), node))
        continue
      elif event == pulldom.START_ELEMENT:
        logging.warn('(%s) Skipping: %s' % (str(level), node))
        level+=1
        continue
    if event == pulldom.START_ELEMENT:
      logging.warn('Parsing: ' + node.tagName)
      # Get the type name to use.
      if type_name is None:
        # First START_ELEMENT is the document root, e.g. <venue>:
        # 'checkin_response' -> 'CheckinResponse'.
        type_name = ''.join([word.capitalize()
                             for word in node.tagName.split('_')])
        top_node_name = node.tagName
        logging.warn('Found Top Node Name: ' + top_node_name)
        continue
      typ = node.getAttribute('type')
      child = node.getAttribute('child')
      # We don't want to walk complex types.
      if typ in COMPLEX:
        logging.warn('Found Complex: ' + node.tagName)
        level = 1
      elif typ not in TYPES:
        logging.warn('Found String: ' + typ)
        typ = STRING
      else:
        logging.warn('Found Type: ' + typ)
      logging.warn('Adding: ' + str((node, typ)))
      # setdefault: the first occurrence of a tag name wins.
      attributes.setdefault(node.tagName, (typ, [child]))
  logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
  return type_name, top_node_name, attributes
| Python |
#!/usr/bin/env python
import time
t = time.time()
u = time.gmtime(t)
s = time.strftime('%a, %e %b %Y %T GMT', u)
print 'Content-Type: text/javascript'
print 'Cache-Control: no-cache'
print 'Date: ' + s
print 'Expires: ' + s
print ''
print 'var timeskew = new Date().getTime() - ' + str(t*1000) + ';'
| Python |
#!/usr/bin/env python
# Standard Django management entry point (pre-1.4 execute_manager style).
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django import forms
from django.contrib.auth.models import User
class ContactForm(forms.Form):
    """Site contact form; field declaration order is the rendering order."""
    # Optional display name.
    name = forms.CharField(max_length=100,
        help_text="Full Name", widget=forms.TextInput(attrs={'size':'40'}),required=False)
    subject = forms.CharField(max_length=100,
        help_text="Subject of your message", widget=forms.TextInput(attrs={'size':'40'}))
    # Required so the site can reply (and optionally CC the sender).
    sender = forms.EmailField(
        help_text="Your email address", widget=forms.TextInput(attrs={'size':'40'}),required=True)
    message = forms.CharField(
        help_text="Please enter as much text as you would like",
        widget=forms.Textarea(attrs={'rows':'12','cols':'60'}))
    cc_myself = forms.BooleanField(required=False,
        help_text="Send yourself a copy of this message")
| Python |
from django.conf.urls.defaults import *
from views import *
urlpatterns = patterns('',
    # Most-specific pattern first: in the original order the catch-all
    # r'^' shadowed this entry, so /contact/thanks/ never reached the
    # static view and the post-submit redirect showed the form again.
    (r'^thanks/$', static, {'template':'contact_thanks.html'}),
    # Catch-all: every other path under /contact/ shows the form.
    (r'^', contact),
)
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext

from forms import *
#@cache_page(60*5)
def static(request, template):
    """Render *template* with an (empty) request context."""
    # NOTE(review): RequestContext is not among this module's visible
    # imports (it lives in django.template), so as written this raises
    # NameError at request time -- confirm the import exists.
    return render_to_response(template, RequestContext(request,{}))
def contact(request):
    """Show the contact form; on valid POST, mail the feedback and redirect."""
    if request.method == 'POST':
        form = ContactForm(request.POST)
        if form.is_valid():
            subject = form.cleaned_data['subject']
            sender = form.cleaned_data['sender']
            message = 'The following feedback was submitted from %s \n\n' % sender
            message += form.cleaned_data['message']
            cc_myself = form.cleaned_data['cc_myself']
            # Copy the settings list: the original appended the sender to
            # settings.CONTACT_EMAILS itself, permanently growing the
            # recipient list for every later request in this process.
            recipients = list(settings.CONTACT_EMAILS)
            if cc_myself:
                recipients.append(sender)
            from django.core.mail import send_mail
            send_mail(subject, message, sender, recipients)
            return HttpResponseRedirect('/contact/thanks/') # Redirect after POST
    else:
        form = ContactForm() # An unbound form
    return render_to_response('contact.html', {
        'form': form,
    })
| Python |
from django.conf import settings
from django.conf.urls.defaults import *
from django.contrib import databrowse
from django.contrib import admin
# Register each app's admin.py with the admin site.
admin.autodiscover()

#from shapeft.custom_admin import editor
#from registration.views import register

urlpatterns = patterns('',
    (r'^_admin_/', include(admin.site.urls)),
    # (r'^editor/(.*)', editor.root),
    (r'^databrowse/(.*)', databrowse.site.root),
    # Static/media served by Django directly -- development only.
    (r'^static/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.STATIC_DATA}),
    (r'^media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.MEDIA_ROOT}),
    (r'^comments/', include('django.contrib.comments.urls')),
    (r'^contact/', include('contact.urls')),
    (r'^auth/', include('ft_auth.urls')),
    # Catch-all: everything else goes to the shapeft app.
    (r'^', include('shapeft.urls')),
)
| Python |
#Please create a local_settings.py which should include, at least:
# - ADMINS
# - DEFAULT_FROM_EMAIL
# - DATABASES
# - SECRET_KEY
# - FT_DOMAIN_KEY
# - FT_DOMAIN_SECRET
# - EMAIL_HOST
# - EMAIL_HOST_USER
# - EMAIL_HOST_PASSWORD
# - EMAIL_PORT
# - EMAIL_USE_TLS
import os

# Development defaults -- local_settings.py (imported at the bottom of this
# file) is expected to override anything deployment-specific.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Filesystem locations for static data and shapefile uploads.
STATIC_DATA = os.path.join(os.path.dirname(__file__), 'static/')
SHP_UPLOAD_DIR = '/tmp/'

ADMINS = (
    ('Admin1', 'your_email_address'),
)
MANAGERS = ADMINS
DEFAULT_FROM_EMAIL = 'your_email_addressm'
EMAIL_MANAGERS = False

CACHE_BACKEND = 'file:///tmp/shapeft_cache'

# PostGIS-backed database (GeoDjango).
DATABASE_NAME = 'shapeft'
DATABASES = {
    'default': {
        'NAME': DATABASE_NAME,
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'USER': 'postgres',
        'PASSWORD': 'foo'
    }
}

TIME_ZONE = 'America/Vancouver'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False

MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media/')
MEDIA_URL = ''
ADMIN_MEDIA_PREFIX = '/admin_media/'

# Placeholder only -- the real key must come from local_settings.py.
SECRET_KEY = 'store-in-local-settings'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)

# Extend the stock context processors so templates can read `request`.
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.request',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.csrf.middleware.CsrfViewMiddleware',
    'django.contrib.csrf.middleware.CsrfResponseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

ROOT_URLCONF = 'urls'

TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'django.contrib.databrowse',
    'django.contrib.gis',
    'django.contrib.humanize',
    'django.contrib.webdesign',
    'shapeft',
    'shapes',
    'contact',
    'ft_auth',
)

# Fusion Tables OAuth consumer credentials; real secret in local_settings.
FT_DOMAIN_KEY = 'shpescape.com'
FT_DOMAIN_SECRET = 'foo'

# Deployment overrides; a missing local_settings is fine in development.
try:
    from local_settings import *
except ImportError, exp:
    pass
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
from django.contrib.gis.geos import Point
class OAuthRequestToken(models.Model):
    """OAuth Request Token."""
    # Our own session identifier (see ft_auth.views.create_session_key).
    session_key = models.CharField(max_length=250)
    ft_token = models.CharField(max_length=250)
    ft_token_secret = models.CharField(max_length=250)
    created = models.DateTimeField(auto_now_add=True)

    def is_complete(self):
        """Return True when both token and secret are present (else None)."""
        # The original tested self.md_token, a field that does not exist on
        # this model, so any call raised AttributeError.
        if self.ft_token and self.ft_token_secret:
            return True
class OAuthAccessToken(models.Model):
    """OAuth Access Token."""
    # Our own session identifier (see ft_auth.views.create_session_key).
    session_key = models.CharField(max_length=250)
    ft_token = models.CharField(max_length=250)
    ft_token_secret = models.CharField(max_length=250)
    created = models.DateTimeField(auto_now_add=True)

    def is_complete(self):
        """Return True when both token and secret are present (else None)."""
        # The original tested self.md_token, a field that does not exist on
        # this model, so any call raised AttributeError.
        if self.ft_token and self.ft_token_secret:
            return True
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import *
from views import FTVerify, FTAuthenticate
urlpatterns = patterns('',
    # Step 1 of the OAuth dance: obtain a request token and redirect out.
    (r'^FTVerify/$', FTVerify),
    # Step 2: provider callback that exchanges it for an access token.
    (r'^FTAuthenticate/$', FTAuthenticate),
)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from datetime import datetime
from django.conf import settings
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404
from django.template import RequestContext
from django.conf import settings
from ftlibrary.authorization.oauth import OAuth
from ftlibrary.ftclient import OAuthFTClient
from ftlibrary.sql.sqlbuilder import SQL
import ftlibrary.oauth2 as oauth # httplib2 is required for this to work on AppEngine
from models import *
# OAuth consumer configuration for Fusion Tables.
# NOTE(review): the visible settings.py defines FT_DOMAIN_KEY /
# FT_DOMAIN_SECRET but not FT_DOMAIN -- confirm local_settings supplies
# FT_DOMAIN, otherwise this raises AttributeError at import time.
FT_OAUTH = {
    'key': settings.FT_DOMAIN,
    'secret': settings.FT_DOMAIN_SECRET,
    'domain': settings.FT_DOMAIN
}
def get_token(request):
    """Return the caller's OAuthAccessToken, or False if missing or expired.

    The token is looked up via the session key stored in
    request.session['ft_token']; tokens older than one day are deleted.
    """
    try:
        ft_session = request.session['ft_token']
        token = OAuthAccessToken.objects.get(session_key=ft_session)
        # invalidate any token > 24 hours old
        now = datetime.now()
        diff = now - token.created
        if diff.days:
            token.delete()
            return False
        # TODO check ip address matches
        #oauthorize
        return token
    except KeyError:
        # No session key yet: the user never started the OAuth dance.
        print 'no session token..'
    except OAuthAccessToken.DoesNotExist:
        print 'no access token ...'
    return False
def create_session_key(request):
    """Derive a pseudo-random session key from the client IP, dots stripped."""
    raw = request.META['REMOTE_ADDR'] + str(random.random())
    return raw.replace('.', '')
def FTVerify(request):
    """Start the OAuth dance: get a request token, redirect to authorize."""
    session_key = create_session_key(request)
    callback_url = 'http://' + request.META['HTTP_HOST'] + '/auth/FTAuthenticate'
    url, token, secret = OAuth().generateAuthorizationURL(
        consumer_key=FT_OAUTH['key'],
        consumer_secret=FT_OAUTH['secret'],
        domain=FT_OAUTH['domain'],
        callback_url=callback_url)
    # Persist the request token so FTAuthenticate can finish the exchange,
    # and remember the session key in the Django session.
    OAuthRequestToken(
        ft_token=token,
        ft_token_secret=secret,
        session_key=session_key).save()
    request.session['ft_token'] = session_key
    return HttpResponseRedirect(url)
def FTAuthenticate(request):
    """OAuth callback: swap the stored request token for an access token."""
    #get the old token and secret
    try:
        ft_session = request.session['ft_token']
    except KeyError:
        raise Exception('should not get here ... no session key')
        # NOTE(review): unreachable -- the raise above aborts first; this
        # was probably meant to be `return HttpResponseRedirect(...)`.
        HttpResponseRedirect('/FTVerify')
    request_token = OAuthRequestToken.objects.filter(session_key=ft_session)
    if not request_token:
        raise Exception('should not get here ... no token key')
        # NOTE(review): unreachable, same as above.
        HttpResponseRedirect('/FTVerify')
    token = request_token[0].ft_token
    secret = request_token[0].ft_token_secret
    #retrieve the access token and secret, these will be used in future requests
    #so save them in the database for the user
    access_token, access_secret = OAuth().authorize(
        consumer_key=FT_OAUTH['key'],
        consumer_secret=FT_OAUTH['secret'],
        oauth_token=token,
        oauth_token_secret=secret)
    oauth_token = OAuthAccessToken(
        ft_token=access_token,
        ft_token_secret=access_secret,
        session_key=ft_session)
    oauth_token.save()
    return HttpResponseRedirect('/upload')
| Python |
import random
import md5
from django.db import models
from ft_auth.models import OAuthAccessToken
# Human-readable processing states for shapeUpload.status; state 1's
# message is %-formatted with the queue position (see get_status).
STATUS_CODES = {
    1 : 'In Queue (%s ahead of you)',
    2 : 'Initial Processing',
    3 : 'Importing into Fusion Tables',
    4 : 'Complete',
    6 : 'Error'
}
class shapeUpload(models.Model):
    """An upload -- includes location of initial shape, processing status, etc"""
    auth_token = models.ForeignKey(OAuthAccessToken)
    # Salted md5 of the pk; used in URLs instead of the raw id.
    uid = models.CharField(max_length=250)
    shapefile = models.CharField(max_length=250)
    # See STATUS_CODES above.
    status = models.IntegerField()
    status_msg = models.CharField(max_length=250,null=True)
    total_rows = models.IntegerField(null=True)
    rows_processed = models.IntegerField(null=True)
    rows_imported = models.IntegerField(null=True)
    ft_table_id = models.IntegerField(null=True)
    uploaded = models.DateTimeField(auto_now_add=True)
    create_simplify = models.BooleanField(default=True)
    create_centroid = models.BooleanField(default=True)
    create_centroid_poly = models.BooleanField(default=False)

    def get_title(self):
        """Return the bare file name of the uploaded shapefile."""
        return self.shapefile.split('/')[-1]

    def get_status(self):
        """Return the display string for the current status."""
        status = STATUS_CODES[self.status]
        if self.status == 1:
            queue_length = shapeUpload.objects.filter(status=1).count()
            status = status % (queue_length - 1)
        return status

    def save(self, *args, **kwargs):
        """Save, assigning a salted-md5 uid on first save.

        Accepts and forwards Django's save() arguments (the original
        signature dropped them, breaking force_insert/using callers).
        """
        salt = 'shapebar'
        if not self.id:
            # First save obtains the pk; a second save persists the uid.
            super(shapeUpload, self).save(*args, **kwargs)
            hash = md5.new(salt + str(self.id))
            self.uid = hash.hexdigest()
            super(shapeUpload, self).save()
        else:
            super(shapeUpload, self).save(*args, **kwargs)
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import *
from views import *
urlpatterns = patterns('',
    (r'^upload/$', generic_import),
    # upload_ids is one or more md5-hex uids joined with 'g' ('g' is safe
    # because hex digits stop at 'f').
    url(r'^uploads/(?P<upload_ids>[a-gA-F\d]*)/$', upload_detail, name="upload_detail"),
    (r'^$', static, {'template':'index.html'}),
)
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import time
from django.conf import settings
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404
from django.contrib.gis.geos import fromstr, LineString
from django.contrib.gis.models import SpatialRefSys
from django.contrib.gis.gdal import DataSource, OGRGeometry
from django.utils.datastructures import SortedDict
import simplejson
from shapes.forms import UploadForm
from ft_auth.views import *
from shapeft.models import shapeUpload
#@cache_page(60*5)
def static(request, template):
    """Render *template* (index.html when falsy) with request context."""
    return render_to_response(template or "index.html",
                              RequestContext(request, {}))
def generic_import(request):
    """Accept an uploaded shapefile archive and queue shapeUpload records.

    Requires a valid Fusion Tables token; redirects to the OAuth flow
    otherwise.  On success redirects to the combined status page.
    """
    token = get_token(request)
    if not token:
        return HttpResponseRedirect('/auth/FTVerify')
    if request.method == 'POST':
        form = UploadForm(request.POST, request.FILES)
        if form.is_valid():
            form.handle(request.FILES['file_obj'])
            # Unchecked checkboxes are simply absent from the POST data.
            create_simplify = request.POST.get('create_simplify', False)
            create_centroid = request.POST.get('create_centroid', False)
            create_centroid_poly = request.POST.get('create_centroid_poly', False)
            #save form info in a model, and run from cron
            uids = []
            for shapefile in form.shapefiles:
                upload = shapeUpload()
                upload.auth_token = token
                upload.shapefile = shapefile
                upload.status = 1
                # Set the processing flags BEFORE saving: the original
                # assigned them after save() and never saved again, so the
                # cron worker always saw the model's defaults.
                upload.create_simplify = bool(create_simplify)
                upload.create_centroid = bool(create_centroid)
                upload.create_centroid_poly = bool(create_centroid_poly)
                upload.save()
                uids.append(upload.uid)
            # Uids are md5 hex (a-f), so 'g' is a safe separator.
            url = '/uploads/%s/' % 'g'.join(uids)
            return HttpResponseRedirect(url)
    else:
        form = UploadForm()
    return render_to_response('upload.html', RequestContext(request,{
        'form': form}))
def upload_detail(request, upload_ids):
    """Show the status page for the uploads named by the 'g'-joined uids."""
    #upload = get_object_or_404(shapeUpload, id=upload_id)
    matching = shapeUpload.objects.filter(uid__in=upload_ids.split('g'))
    return render_to_response('upload_detail.html', RequestContext(request, {
        'uploads': matching.order_by('id')}))
def import_from_shape(upload,
                      start_row=0,
                      max_rows=200000,
                      create_int_style_cols=True):
    """Import one queued shapeUpload into Fusion Tables.

    upload - a shapeUpload object
    start_row - skip rows before this index (resume support)
    max_rows - any more than this is ignored
    create_int_style_cols - for STRING columns, also emit a parallel
        <name>_ft_style NUMBER column mapping each distinct value to a
        small int (for FT styling), capped at max_distinct_style_vals.
    """
    upload.status = 2 #set this right away so it doesn't get reprocessed
    upload.save()
    ds = DataSource(upload.shapefile)
    layer = ds[0]
    fields = layer.fields
    num_features = len(layer)
    #set max # of _style features
    max_distinct_style_vals = max(min(num_features / 100, 50),10)
    print 'there are %d features' % num_features
    upload.total_rows = num_features
    if not num_features:
        print 'no rows, returning'
        upload.status = 6
        upload.save()
        return
    rows = []
    #get field types
    # OGR field class name -> Fusion Tables column type.
    field_map = {
        'OFTString':'STRING',
        'OFTReal':'NUMBER',
        'OFTInteger':'NUMBER',
        'OFTDate':'DATETIME'
    }
    field_types = [field_map[f.__name__] for f in layer.field_types]
    field_layers = layer.fields
    #insert geometry layers first
    field_layers.insert(0,'geometry')
    field_types.insert(0,'LOCATION')
    field_layers.insert(1,'geometry_vertex_count')
    field_types.insert(1,'NUMBER')
    if upload.create_simplify:
        field_layers.insert(0,'geometry_simplified')
        field_types.insert(0,'LOCATION')
        field_layers.insert(1,'geometry_simplified_vertex_count')
        field_types.insert(1,'NUMBER')
    #use sorted dict so we can ensure table has geom columns upfront
    field_dict = SortedDict(zip(field_layers, field_types))
    #set up extra fields if creating int/style cols
    if create_int_style_cols:
        # int_style_dict maps field -> {string value -> small int}.
        int_style_dict = {}
        for field,field_type in field_dict.items():
            if field_type == 'STRING':
                field_dict[field + '_ft_style'] = 'NUMBER'
                int_style_dict[field] = {}
        print field_dict
    #add some custom import fields
    field_dict['import_notes'] = 'STRING'
    print 'FIELD DICT', field_dict
    print 'starting to process'
    for i, feat in enumerate(layer):
        if i > max_rows:
            continue
        if start_row and i < start_row:
            continue
        upload.rows_processed = i + 1
        # Persist progress roughly every 2% (min every 5 rows).
        if not i % ((num_features / 50) or 5):
            print upload.rows_processed,'rp'
            upload.save()
        # NOTE(review): this per-row save looks redundant with the
        # throttled save above -- confirm before removing.
        upload.save()
        rd = {}
        #geom = fromstr(feat.geom.wkt,srid=srid)
        if layer.srs:
            # Reproject into WGS84 (EPSG:4326) for Fusion Tables.
            try:
                geom = OGRGeometry(feat.geom.wkt, layer.srs.proj4)
                geom.transform(4326)
            except Exception, e:
                print 'FAIL GEOM'
                print e,
                geom = None
        else:
            # No SRS on the layer: assume coordinates are already usable.
            geom = OGRGeometry(feat.geom.wkt)
        if geom:
            geom = fromstr(geom.wkt)
            #create optional centroid for polys
            # 'oly' matches both Polygon and MultiPolygon type names.
            if upload.create_centroid and 'oly' in geom.geom_type:
                field_dict['geometry_pos'] = 'LOCATION'
                rd['geometry_pos'] = geom.point_on_surface.kml
            if upload.create_centroid_poly and 'oly' in geom.geom_type:
                # Tiny buffered polygons around the surface point, for
                # zoom-dependent styling.
                field_dict['geometry_pos_poly_2'] = 'LOCATION'
                field_dict['geometry_pos_poly_3'] = 'LOCATION'
                rd['geometry_pos_poly_2'] = geom.point_on_surface.buffer(.0001,10).kml
                rd['geometry_pos_poly_3'] = geom.point_on_surface.buffer(.0005,10).kml
            #if it's > 1M characters, we need to simplify it for FT
            simplify_tolerance = .0001
            while len(geom.kml) > 1000000:
                geom = geom.simplify(simplify_tolerance)
                print 'simplified to %f' % simplify_tolerance
                rd['import_notes'] = 'simplified to %d DD' % simplify_tolerance
                simplify_tolerance = simplify_tolerance * 1.5
            if not geom.valid:
                rd['import_notes'] = '<br>Geometry not valid'
            kml = geom.kml
            rd['geometry'] = kml
            rd['geometry_vertex_count'] = geom.num_coords
            # 'oint' matches Point/MultiPoint -- nothing to simplify there.
            if upload.create_simplify and not 'oint' in geom.geom_type:
                amt = .002
                if 'oly' in geom.geom_type:
                    # Buffer out and back in to close slivers before
                    # simplifying polygons.
                    buffer_geom = geom.buffer(amt)
                    buffer_geom = buffer_geom.buffer(amt * -1)
                    simple_geom = buffer_geom.simplify(amt)
                else:
                    simple_geom = geom.simplify(amt)
                rd['geometry_simplified'] = simple_geom.kml
                rd['geometry_simplified_vertex_count'] = simple_geom.num_coords
        for f in fields:
            val = feat.get(f)
            #make sure we have proper null type for diff fields
            if val == '<Null>':
                continue
            if not val:
                continue
            if field_dict[f] == 'DATETIME':
                # Keep just the date portion, ISO formatted.
                val = val.isoformat().split('T')[0]
            if field_dict[f] == 'STRING' \
                    and create_int_style_cols \
                    and field_dict.has_key(f + '_ft_style'):
                #check to see if we have a number for this yet
                try:
                    rd[f + '_ft_style'] = int_style_dict[f][val]
                except:
                    # New distinct value: assign the next small int.
                    int_style_dict[f][val] = len(int_style_dict[f])
                    rd[f + '_ft_style'] = int_style_dict[f][val]
                    #however if we have too many distinct vals, let's just not do this anymore
                    if len(int_style_dict[f]) > max_distinct_style_vals:
                        print 'DELETING FD %s' % f
                        del field_dict[f + '_ft_style']
                        del rd[f + '_ft_style']
                        #sucks, but now we should just remove all these fields from previous rows
                        for srow in rows:
                            try:del srow[f + '_ft_style']
                            except:
                                pass #probably this was a null value?
            rd[f] = val
        rows.append(rd)
        #let's process 10k rows at a time.. not keep everything in memory
        if len(rows) > 10000:
            uploadRows(upload, field_dict, rows)
            rows = []
    # Flush whatever remains after the loop.
    uploadRows(upload, field_dict, rows)
def uploadRows(upload, field_dict, rows):
    # Push one batch of rows to Fusion Tables, creating the table on first use.
    # upload: model instance tracking import progress; field_dict: ordered
    # mapping column name -> FT column type; rows: list of dicts to insert.
    if not upload.ft_table_id:
        # no table yet: createTable stores the new table id on the upload
        upload = createTable(upload, field_dict)
    upload.status = 3
    upload.save()
    print 'inserting %d rows' % len(rows)
    insertData(upload, field_dict, rows)
    # status codes 3/4 presumably mean inserting/inserted -- confirm against model
    upload.status = 4
    upload.save()
def insertSql(client, sql, attempt_no=0):
try:resp = client.query(sql)
except:
print 'unable to query sql %s' % sql
resp = client.query(sql)
print resp[:50]
if 'Unable' in resp:
if attempt_no > 3:
return 'Error - failed after 3 attempts' + resp
#print sql
print resp
time.sleep(1)
print 'len: %d, attempt: %d' % (len(sql), attempt_no)
insertSql(client, sql, attempt_no + 1)
return resp
def getClient(upload):
    # Build an OAuth-authenticated Fusion Tables client from the app's
    # consumer key/secret plus the token pair stored on the upload's auth_token.
    ftClient = OAuthFTClient(
        FT_OAUTH['key'],
        FT_OAUTH['secret'],
        upload.auth_token.ft_token,
        upload.auth_token.ft_token_secret)
    print 'client created'
    return ftClient
def createTable(upload, field_dict):
    # Create the Fusion Table for this upload and persist its id on the model.
    # field_dict maps column name -> FT column type.
    ftClient = getClient(upload)
    table_dictionary = {upload.get_title() : field_dict}
    results = ftClient.query(SQL().createTable(table_dictionary))
    # response format: header on the first line, new table id on the second
    table_id = results.split("\n")[1]
    print 'new table: %s' % results
    upload.ft_table_id = table_id
    upload.save()
    return upload
def insertData(upload, field_dict, rows):
    # Insert rows into the upload's Fusion Table as batched INSERT statements,
    # flushing whenever the accumulated SQL approaches the request size limit.
    ftClient = getClient(upload)
    sql = []
    sql_len = 0
    for i, row in enumerate(rows):
        upload.rows_imported = i + 1
        if sql_len > 500000 or len(sql) > 100: # max upload is 1MB?
            # flush the current batch before it exceeds the request limit
            insertSql(ftClient, ';'.join(sql))
            sql = []
            sql_len = 0
            upload.save()
        try:
            insert_statement = SQL().insert(upload.ft_table_id, row)
        except Exception, e:
            # skip rows whose values cannot be rendered into SQL
            print 'FAIL SQL', row
            print e
            continue
        sql.append(insert_statement)
        sql_len += len( insert_statement)
    # flush the final (possibly partial) batch
    insertSql(ftClient, ';'.join(sql))
    upload.save()
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
import os
import time
import urllib
# Require the Django settings directory and module name on the command line.
if len(sys.argv) < 3:
    print "Usage: %s <path to settings module> <settings module name>" % sys.argv[0]
    sys.exit()
# NOTE(review): declared but never raised/caught in this file -- confirm use
class KeyboardException: pass
# make the settings package importable and point Django at it before the
# model imports below
sys.path = [sys.argv[1]] + sys.path
os.environ['DJANGO_SETTINGS_MODULE'] = sys.argv[2]
from shapeft.models import *
from shapeft.views import *
def run():
    """Poll forever for pending shapefile uploads (status=1) and import them."""
    while True:
        uploads = shapeUpload.objects.filter(status=1)
        for upload in uploads:
            print 'working oni %d: %s' % (upload.id, upload.shapefile)
            try:
                import_from_shape(upload)
                print "Finished with %s" % upload.shapefile
            except Exception, E:
                # record the failure on the upload row; status 6 presumably
                # means "error" -- confirm against the model definition
                print "Error occurred (%s)" % E
                upload.status = 6
                upload.status_msg = str(E)
                upload.save()
        time.sleep(8)
if __name__ == "__main__":
    run()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fpu_structure import *
from datahandling import *
import math
#TODO, faltan agregar las modificaciones que se hacen a las banderas de los diferentes registros
#TODO, faltan un montón de instrucciones
uesp = None #last element popped from the stack ("ultimo elemento sacado de pila")
pila = Pila() #the FPU register stack ST(0)..ST(7)
control = ControlRegister()
status = StatusRegister()
pinout = Pinout()
statusX86 = StatusX86() #x86 EFLAGS mirror used by FCMOVcc / FCOMI
overflow = False
underflow = False
#pag 121
def F2XM1():
pila.push((2**pila.pop()[0] )-1)
#pag 123
def FABS():
pila.push(abs(pila.pop()[0]))
# Operaciones de Adición
"""
Operaciones de adición
OpcodeInstructionDescription
D8 /0 FADD m32 realAdd m32real to ST(0) and store result in ST(0)
DC /0 FADD m64real Add m64real to ST(0) and store result in ST(0)
D8 C0+i FADD ST(0), ST(i)Add ST(0) to ST(i) and store result in ST(0)
DC C0+i FADD ST(i), ST(0)Add ST(i) to ST(0) and store result in ST(i)
DE C0+i FADDP ST(i), ST(0) Add ST(0) to ST(i), store result in ST(i), and pop the
register stack
DE C1 FADDPAdd ST(0) to ST(1), store result in ST(1), and pop the
register stack
DA /0 FIADD m32int Add m32int to ST(0) and store result in ST(0)
DE /0 FIADD m16int Add m16int to ST(0) and store result in ST(0)
"""
#FADD
def FADD(self, *args):
assert 1 <= len(args) <= 2
st0 = args[0]
sti = args[1]
if len(args) == 2:
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FADD, st0"
#raise()
else:
#print st0,";", sti
pila.setI(pila.head(), pila.getI(pila.head())[0]+pila.getI(1)[0])#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
elif len(args) == 1:
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
#aux = pila.pop()[0]
#print "num=", num
pila.push(pila.pop()[0]+args[0])
else:
print "Error de argumentos", args
#FADDP
def FADDP():
pila.setI(1,pila.getI(1)[0]+ pila.getI(pila.head())[0]) #pila[1]=pila[1]+pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
return uesp
def FADDP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FADDP, st0"
#raise()
else:
pila.setI(1,pila.getI(1)[0]+ pila.getI(pila.head())[0]) #pila[1]=pila[1]+pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FIADD(num):
    """FIADD m16/m32int: integer add, ST(0) = ST(0) + num."""
    # operand-width dispatch (16/32-bit) is still unimplemented
    top_value = pila.pop()[0]
    pila.push(top_value + num)
"""
Opcode Instruction Description
D8 /4 FSUB m32real Subtract m32real from ST(0) and store result in ST(0)
DC /4 FSUB m64real Subtract m64real from ST(0) and store result in ST(0)
D8 E0+i FSUB ST(0), ST(i) Subtract ST(i) from ST(0) and store result in ST(0)
DC E8+i FSUB ST(i), ST(0) Subtract ST(0) from ST(i) and store result in ST(i)
DE E8+i FSUBP ST(i), ST(0) Subtract ST(0) from ST(i), store result in ST(i), and pop
register stack
DE E9 FSUBP Subtract ST(0) from ST(1), store result in ST(1), and pop
register stack
DA /4 FISUB m32int Subtract m32int from ST(0) and store result in ST(0)
DE /4 FISUB m16int Subtract m16int from ST(0) and store result in ST(0)
"""
#FSUB
def FSUB(num):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(pila.pop()[0]-num)
'''
def FSUB(m64real)
pass
'''
def FSUB(st0=0,sti=0):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FSUB, st0"
#raise()
else:
pila.setI(pila.head(), pila.getI(pila.head())[0]-pila.getI(1)[0])#pila[0] = pila[st0] - pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
#FSUBP
def FSUBP():
pila.setI(1,pila.getI(1)[0]- pila.getI(pila.head())[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FSUBP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBP, st0"
#raise()
else:
pila.setI(1,pila.getI(1)[0]- pila.getI(pila.head())[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FISUB(num):
    """FISUB m16/m32int: integer subtract, ST(0) = ST(0) - num."""
    # operand-width dispatch (16/32-bit) is still unimplemented
    top_value = pila.pop()[0]
    pila.push(top_value - num)
"""
Opcode Instruction Description
D8 /5 FSUBR m32real Subtract ST(0) from m32real and store result in ST(0)
DC /5 FSUBR m64real Subtract ST(0) from m64real and store result in ST(0)
D8 E8+i FSUBR ST(0), ST(i) Subtract ST(0) from ST(i) and store result in ST(0)
DC E0+i FSUBR ST(i), ST(0) Subtract ST(i) from ST(0) and store result in ST(i)
DE E0+i FSUBRP ST(i), ST(0) Subtract ST(i) from ST(0), store result in ST(i), and pop
register stack
DE E1 FSUBRP Subtract ST(1) from ST(0), store result in ST(1), and pop
register stack
DA /5 FISUBR m32int Subtract ST(0) from m32int and store result in ST(0)
DE /5 FISUBR m16int Subtract ST(0) from m16int and store result in ST(0)
"""
#FSUBR
def FSUBR(num):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(num - pila.pop()[0])
'''
def FSUBR(m64real)
pass
'''
def FSUBR(st0=0,sti=0):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FSUBR, st0"
#raise()
else:
pila.setI(pila.head(),pila.getI(1)[0]- pila.getI(pila.head())[0])#pila[0] = pila[st0] - pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
#FSUBRP
def FSUBRR():
    # NOTE(review): the name looks like a typo for the no-operand FSUBRP form
    # (see the opcode table above); renaming it would collide with the
    # two-operand FSUBRP defined just below -- confirm intent before changing.
    # Computes ST(1) = ST(0) - ST(1), then pops; returns the popped value.
    pila.setI(1, pila.getI(pila.head())[0]-pila.getI(1)[0]) #pila[1]=pila[0]-pila[1]
    uesp = pila.pop()[0] #NOTE: revisit when pop's intermediate register changes
    #status.incTOP() #TODO: check there is no failure here
    return uesp
def FSUBRP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBRP, st0"
#raise()
else:
pila.setI(1, pila.getI(pila.head())[0]-pila.getI(1)[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FISUBR(num):
    """FISUBR m16/m32int: reversed integer subtract, ST(0) = num - ST(0)."""
    # operand-width dispatch (16/32-bit) is still unimplemented
    top_value = pila.pop()[0]
    pila.push(num - top_value)
#Operaciones de BCD
def FBLD(bcd): #convertir bcd a real y hacerle push
#numreal = bcd
#acá hay que convertirlo
#acá se lo empuja
pila.push(BCD2dec(bcd))
def FBSTP(bcd):
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
#Operaciones de Signo
def FCHS():
pila.setI(pila.head(),-1* pila.getI(pila.head())[0])
#Operaciones de Registros (no de pila)
def FCLEX():
    """FCLEX: clear the FPU exception flags.

    TODO: should first check for and handle any pending unmasked
    floating-point exceptions before clearing (FNCLEX skips that check).
    """
    # consistency fix: the flag-clearing list was duplicated verbatim from
    # FNCLEX; delegate so the two cannot drift apart.
    FNCLEX()
def FNCLEX():
#clean flags without checking
status._PE=0
status._UE=0
status._OE=0
status._ZE=0
status._DE=0
status._IE=0
# status._ES=0 # pentium processors
# status._EF=0 # pentium processors
status._B=0
#Operaciones de Movimientos condicionales (pag 137)
def FCMOVB(sti):
    # FCMOVB ST(0),ST(i): move ST(i) into ST(0) when CF is set ("below").
    # NOTE(review): deleting the source slot diverges from x87 FCMOVcc,
    # which is a pure copy -- confirm this is intended for this model.
    if statusX86._CF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVE(sti):
    """FCMOVE ST(0),ST(i): move ST(i) into ST(0) when ZF is set (equal)."""
    # bug fix: `sti` was not a parameter, so any call raised NameError;
    # signature now matches FCMOVB.
    if statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVBE(sti):
    """FCMOVBE ST(0),ST(i): move ST(i) into ST(0) when CF or ZF is set."""
    # bug fixes: missing `sti` parameter (NameError on any call), and the
    # ZF test read the FPU status register instead of the x86 flags.
    if statusX86._CF or statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVU(sti):
    """FCMOVU ST(0),ST(i): move ST(i) into ST(0) when PF is set (unordered)."""
    # bug fix: `sti` was not a parameter (NameError); matches FCMOVB now.
    if statusX86._PF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNB(sti):
    """FCMOVNB ST(0),ST(i): move ST(i) into ST(0) when CF is clear."""
    # bug fix: `sti` was not a parameter (NameError); matches FCMOVB now.
    if not statusX86._CF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNE(sti):
    """FCMOVNE ST(0),ST(i): move ST(i) into ST(0) when ZF is clear."""
    # bug fix: `sti` was not a parameter (NameError); matches FCMOVB now.
    if not statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNBE(sti):
    """FCMOVNBE ST(0),ST(i): move ST(i) into ST(0) when CF and ZF are clear."""
    # bug fix: `sti` was not a parameter (NameError); matches FCMOVB now.
    if statusX86._CF == 0 and statusX86._ZF == 0:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNU(sti):
    """FCMOVNU ST(0),ST(i): move ST(i) into ST(0) when PF is clear (ordered)."""
    # bug fix: `sti` was not a parameter (NameError); matches FCMOVB now.
    if not statusX86._PF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
#Operaciones de Comparación
"""
Opcode Instruction Description
D8 /2 FCOM m32real Compare ST(0) with m32real.
DC /2 FCOM m64real Compare ST(0) with m64real.
D8 D0+i FCOM ST(i) Compare ST(0) with ST(i).
D8 D1 FCOM Compare ST(0) with ST(1).
D8 /3 FCOMP m32real Compare ST(0) with m32real and pop register stack.
DC /3 FCOMP m64real Compare ST(0) with m64real and pop register stack.
D8 D8+i FCOMP ST(i) Compare ST(0) with ST(i) and pop register stack.
D8 D9 FCOMP Compare ST(0) with ST(1) and pop register stack.
DE D9 FCOMPP Compare ST(0) with ST(1) and pop register stack twice.
"""
def FCOM():
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
FCOMST(1)
def FCOMST(sti):
    """Compare ST(0) with ST(i) and set condition flags C0/C2/C3.

    C3 C2 C0 = 0 0 0 for ST(0) > ST(i); 0 0 1 for ST(0) < ST(i);
    1 0 0 for equality; 1 1 1 when the operands are unordered.
    """
    # operand-width dispatch (32/64-bit) is still unimplemented
    flags = status.getC()
    top = pila.getI(pila.head())[0]
    other = pila.getI(pila.head() - sti)[0]
    if top > other:
        flags[0], flags[2], flags[3] = 0, 0, 0
    elif top < other:
        flags[0], flags[2], flags[3] = 1, 0, 0
    elif top == other:
        flags[0], flags[2], flags[3] = 0, 0, 1
    else:
        # unordered result (e.g. a NaN operand)
        flags[0], flags[2], flags[3] = 1, 1, 1
    status.setC(flags)
def FCOM(num):
    """FCOM m32/m64real: compare ST(0) against a memory operand.

    Deliberately bypasses the stack's encapsulation: the operand is appended
    to the raw storage lists so the comparison can run without tripping the
    stack-length checks, then removed again.
    """
    # bug fix: the internal lists are `_pst`/`_ptag` (single underscore, as
    # the unit tests access them); `pila.__pst` raised AttributeError.
    pila._pst.append(num)        # the value we need on the stack
    pila._ptag.append([0, 0])    # matching tag, just to keep the lists aligned
    # bug fix: calling FCOM() here resolved to this very (shadowing)
    # definition and failed; call the ST(i)-compare helper directly, as the
    # original zero-arg FCOM did.
    # NOTE(review): with num on top this compares num vs the old ST(0),
    # i.e. operands reversed w.r.t. the Intel spec -- confirm intent.
    FCOMST(1)
    # remove the temporary entries
    pila._pst.pop()
    pila._ptag.pop()
def FCOMP():
FCOM()
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
def FCOMPST(sti):
FCOMST(sti)
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
def FCOMP(num):
FCOM(num)
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
def FCOMPP():
    """FCOMPP: compare ST(0) with ST(1), then pop the register stack twice."""
    FCOM()
    # the instruction definition requires two pops
    for _ in range(2):
        uesp = pila.pop()[0]
        #status.incTOP() #TODO: check whether TOP must also move here
#Operaciones de Comparación de enteros
"""
Opcode Instruction Description
DB F0+i FCOMI ST, ST(i) Compare ST(0) with ST(i) and set status flags accordingly
DF F0+i FCOMIP ST, ST(i) Compare ST(0) with ST(i), set status flags accordingly, and
pop register stack
DB E8+i FUCOMI ST, ST(i) Compare ST(0) with ST(i), check for ordered values, and
set status flags accordingly
DF E8+i FUCOMIP ST, ST(i) Compare ST(0) with ST(i), check for ordered values, set
status flags accordingly, and pop register stack
"""
def FCOMI(sti):
    """FCOMI ST,ST(i): compare ST(0) with ST(i), setting x86 CF/PF/ZF.

    CF PF ZF = 0 0 0 for >; 1 0 0 for <; 0 0 1 for ==; 1 1 1 unordered.
    """
    # operand-width dispatch (32/64-bit) is still unimplemented
    top = pila.getI(pila.head())[0]
    other = pila.getI(pila.head() - sti)[0]
    if top > other:
        statusX86._CF, statusX86._PF, statusX86._ZF = 0, 0, 0
    elif top < other:
        statusX86._CF, statusX86._PF, statusX86._ZF = 1, 0, 0
    elif top == other:
        statusX86._CF, statusX86._PF, statusX86._ZF = 0, 0, 1
    else:
        # unordered operands
        statusX86._CF, statusX86._PF, statusX86._ZF = 1, 1, 1
def FCOMIP(sti):
FCOMI(sti)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
def FUCOMI(sti):
#TODO, check for ordered values
FCOMI(sti)
def FUCOMIP(sti):
#TODO, check for ordered values
FCOMIP(sti)
"""
Opcode Instruction Description
D9 E4 FTST Compare ST(0) with 0.0.
"""
def FTST():
FCOM(0.0)
"""
Opcode Instruction Description
DD E0+i FUCOM ST(i) Compare ST(0) with ST(i)
DD E1 FUCOM Compare ST(0) with ST(1)
DD E8+i FUCOMP ST(i) Compare ST(0) with ST(i) and pop register stack
DD E9 FUCOMP Compare ST(0) with ST(1) and pop register stack
DA E9 FUCOMPP Compare ST(0) with ST(1) and pop register stack twice
"""
def FUCOM(sti=1):
    """FUCOM [ST(i)]: unordered compare of ST(0) with ST(i) (default ST(1)).

    TODO: unlike FCOM, this should tolerate QNaN operands without an
    invalid-operand exception; the distinction is not modeled yet.
    """
    # bug fix: a zero-arg def and a one-arg def shared this name; the second
    # shadowed the first, so FUCOM() raised TypeError (and the zero-arg body
    # would have recursed into itself).  A default argument restores the
    # no-operand form.
    FCOM(sti)
def FUCOMP(sti=1):
    """FUCOMP [ST(i)]: unordered compare ST(0) with ST(i), then pop."""
    # bug fix: duplicate defs shadowed the zero-arg form (TypeError on
    # FUCOMP()); merged behind a default argument.
    FCOMP(sti)
def FUCOMPP():
FUCOM()
uesp= pila.pop()[0]
status.incTOP() #TODO, revisar si esto está bien
uesp= pila.pop()[0]
status.incTOP() #TODO, revisar si esto está bien
#Operaciones sobre st0
def FCOS():
    """FCOS: replace ST(0) with cos(ST(0)); sets C2 for out-of-range operands."""
    flags = status.getC()
    # the hardware instruction only accepts |ST(0)| within 2**63: flag via C2
    flags[2] = 1 if abs(pila.getI(pila.head())[0]) > (2 ** 63) else 0
    status.setC(flags)
    pila.push(math.cos(pila.pop()[0]))
"""
Opcode Instruction Description
D9 FE FSIN Replace ST(0) with its sine.
"""
def FSIN():
    """FSIN: replace ST(0) with sin(ST(0)); sets C2 for out-of-range operands."""
    flags = status.getC()
    # the hardware instruction only accepts |ST(0)| within 2**63: flag via C2
    flags[2] = 1 if abs(pila.getI(pila.head())[0]) > (2 ** 63) else 0
    status.setC(flags)
    pila.push(math.sin(pila.pop()[0]))
"""
Opcode Instruction Description
D9 FB FSINCOS Compute the sine and cosine of ST(0); replace ST(0) with
the sine, and push the cosine onto the register stack.
"""
def FSINCOS():
caux = status.getC()
aux= pila.getI(pila.head())[0]
if abs(aux) > (2**63):
caux[2]=1
status.setC(caux)
else:
caux[2]=0
status.setC(caux)
pila.push(math.sin(pila.pop()[0]))
pila.push(math.cos(aux))
status.decTOP()
"""
Opcode Instruction Description
D9 FA FSQRT Calculates square root of ST(0) and stores the result in
ST(0)
"""
def FSQRT():
pila.push(math.sqrt(pila.pop()[0]))
def FDECSTP():
    """FDECSTP: decrement the FPU top-of-stack pointer."""
    # bug fix: TOP lives in the status register (cf. FINCSTP/status.incTOP
    # and FSINCOS/status.decTOP); pila.decTOP() raised AttributeError.
    status.decTOP()
#TODO, faltan realizar las operaciones sonbre C1, el manual está incorrecto :S
#operaciones de división
"""
Opcode Instruction Description
D8 /6 FDIV m32real Divide ST(0) by m32real and store result in ST(0)
DC /6 FDIV m64real Divide ST(0) by m64real and store result in ST(0)
D8 F0+i FDIV ST(0), ST(i) Divide ST(0) by ST(i) and store result in ST(0)
DC F8+i FDIV ST(i), ST(0) Divide ST(i) by ST(0) and store result in ST(i)
DE F8+i FDIVP ST(i), ST(0) Divide ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE F9 FDIVP Divide ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /6 FIDIV m32int Divide ST(0) by m32int and store result in ST(0)
DE /6 FIDIV m16int Divide ST(0) by m64int and store result in ST(0)
"""
def FDIV(num):
pila.setI(pila.head(), pila.getI(pila.head())[0]/num)
def FDIV (sti):
pila.setI(pila.head(), pila.getI(pila.head())[0]/ pila.getI(pila.head()-sti)[0])
def FDIV(sti, st0):
    """FDIV ST(i),ST(0): ST(i) = ST(i) / ST(0)."""
    # bug fix: the destination index was the undefined name `i` (NameError);
    # ST(i)'s slot is pila.head()-sti, as in the other register operations.
    pila.setI(pila.head()-sti, pila.getI(pila.head()-sti)[0]/ pila.getI(pila.head())[0])
def FDIVP(sti=1, st0=0):
    """FDIVP [ST(i), ST(0)]: ST(i) = ST(i) / ST(0), then pop.

    Defaults give the no-operand (ST(1) / ST(0)) form.  Returns the popped
    value.
    """
    # bug fix: duplicate defs shadowed the zero-arg form (TypeError on
    # FDIVP()); merged behind default arguments.
    FDIV(sti, st0)
    uesp = pila.pop()[0]
    #status.incTOP() #TODO: check there is no failure here
    return uesp
def FIDIV(num):
FDIV(num)
#Operaciones de división inversas
"""
Opcode Instruction Description
D8 /7 FDIVR m32real Divide m32real by ST(0) and store result in ST(0)
DC /7 FDIVR m64real Divide m64real by ST(0) and store result in ST(0)
D8 F8+i FDIVR ST(0), ST(i) Divide ST(i) by ST(0) and store result in ST(0)
DC F0+i FDIVR ST(i), ST(0) Divide ST(0) by ST(i) and store result in ST(i)
DE F0+i FDIVRP ST(i), ST(0) Divide ST(0) by ST(i), store result in ST(i), and pop the
register stack
DE F1 FDIVRP Divide ST(0) by ST(1), store result in ST(1), and pop the
register stack
DA /7 FIDIVR m32int Divide m32int by ST(0) and store result in ST(0)
DE /7 FIDIVR m16int Divide m64int by ST(0) and store result in ST(0)
"""
def FDIVR(num):
pila.setI(pila.head(),num/ pila.getI(pila.head())[0])
def FDIVR(sti):
    """FDIVR ST(0),ST(i): ST(0) = ST(i) / ST(0)."""
    # bug fix: the source index was the undefined name `i` (NameError);
    # ST(i)'s slot is pila.head()-sti, as in the other register operations.
    pila.setI(pila.head(), pila.getI(pila.head()-sti)[0]/ pila.getI(pila.head())[0])
def FDIVR (sti,st0):
pila.setI(pila.head()-sti, pila.getI(pila.head())[0]/ pila.getI(pila.head()-sti)[0])
def FDIVPR(sti=1, st0=0):
    """FDIVRP [ST(i), ST(0)]: reversed divide, store in ST(i), then pop.

    Defaults give the no-operand form.  Returns the popped value.
    """
    # bug fixes: duplicate defs shadowed the zero-arg form, and the zero-arg
    # body called FDIV (plain division) instead of FDIVR (reversed).
    FDIVR(sti, st0)
    uesp = pila.pop()[0]
    #status.incTOP() #TODO: check there is no failure here
    return uesp
def FIDIVR(num):
FDIVR(num)
#Operaciones de liberación de cabeza de pila
def FFREE():
pila.setI(pila.head(),None,[1,1])
#Operaciones de comparación de enteros
"""
Opcode Instruction Description
DE /2 FICOM m16int Compare ST(0) with m16int
DA /2 FICOM m32int Compare ST(0) with m32int
DE /3 FICOMP m16int Compare ST(0) with m16int and pop stack register
DA /3 FICOMP m32int Compare ST(0) with m32int and pop stack register
"""
def FICOM(num):
FCOM(num)
def FICOMP(num):
FICOM(num)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
#Operaciones de carga de pila
"""
Opcode Instruction Description
DF /0 FILD m16int Push m16int onto the FPU register stack.
DB /0 FILD m32int Push m32int onto the FPU register stack.
DF /5 FILD m64int Push m64int onto the FPU register stack.
"""
def FILD(num):
status.decTOP()
pila.push(num)
"""
Opcode Instruction Description
D9 /0 FLD m32real Push m32real onto the FPU register stack.
DD /0 FLD m64real Push m64real onto the FPU register stack.
DB /5 FLD m80real Push m80real onto the FPU register stack.
D9 C0+i FLD ST(i) Push ST(i) onto the FPU register stack.
"""
def FLD(num):
pila.push(num)
status.decTOP()
def FLDST(sti): #¿esto es así? no es muy claro en el manual, pag 167
pila.push( pila.getI(pila.head()-sti))
status.decTOP()
"""
Opcode Instruction Description
D9 E8 FLD1 Push +1.0 onto the FPU register stack.
D9 E9 FLDL2T Push log210 onto the FPU register stack.
D9 EA FLDL2E Push log2e onto the FPU register stack.
D9 EB FLDPI Push π onto the FPU register stack.
D9 EC FLDLG2 Push log102 onto the FPU register stack.
D9 ED FLDLN2 Push loge2 onto the FPU register stack.
D9 EE FLDZ Push +0.0 onto the FPU register stack.
"""
def FLD1():
FLD(1.0)
def FLDL2T():
FLD(math.log(10,2)) #log en base 2 de 10
def FLDL2E():
FLD(math.log(math.e,2))#log en base 2 de e
def FLDPI():
FLD(math.pi)
def FLDLG2():
FLD(math.log10(2))
def FLDLN2():
FLD(math.log(2,math.e))
def FLDZ():
FLD(0.0)
"""
Opcode Instruction Description
D9 /5 FLDCW m2byte Load FPU control word from m2byte.
"""
def FLDCW(m2byte):
FLD(m2byte) #TODO, modelo de memoria, para poder cargar solo lo que hace falta
"""
Opcode Instruction Description
D9 /4 FLDENV m14/28byte Load FPU environment from m14byte or m28byte.
"""
def FLDENV(mbyte):
pass #TODO
#operaciones de extracción de stack
"""
Opcode Instruction Description
DF /2 FIST m16int Store ST(0) in m16int
DB /2 FIST m32int Store ST(0) in m32int
DF /3 FISTP m16int Store ST(0) in m16int and pop register stack
DB /3 FISTP m32int Store ST(0) in m32int and pop register stack
DF /7 FISTP m64int Store ST(0) in m64int and pop register stack
"""
def FIST(dirmem):
uesp = pila.getI(pila.head())[0]
#acá falta agregar un modelo de memoria RAM para poder cargar el valor donde corresponde
return uesp
def FISTP(dirmem):
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
#acá falta agregar un modelo de memoria RAM para poder cargar el valor donde corresponde
return uesp
"""
Opcode Instruction Description
D9 /2 FST m32real Copy ST(0) to m32real
DD /2 FST m64real Copy ST(0) to m64real
DD D0+i FST ST(i) Copy ST(0) to ST(i)
D9 /3 FSTP m32real Copy ST(0) to m32real and pop register stack
DD /3 FSTP m64real Copy ST(0) to m64real and pop register stack
DB /7 FSTP m80real Copy ST(0) to m80real and pop register stack
DD D8+i FSTP ST(i) Copy ST(0) to ST(i) and pop register stack
"""
def FST(mreal):
uesp= pila.getI(pila.head())[0]
return uesp
def FST_ST(i):
    """FST ST(i): copy ST(0)'s value and tag into ST(i)."""
    # bug fix: the destination slot was hard-coded to index 1 regardless of
    # i; ST(i) is addressed as pila.head()-i (same scheme as FXCH/FCMOVcc).
    pila.setI(pila.head()-i, pila.getI(pila.head())[0], pila.getI(pila.head())[1])
def FSTP(mreal):
uesp= pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FSTP_ST(i):
FST_ST(i)
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
#incrementa TOP de status
def FINCSTP():
status.incTOP()
#Inicialización de la FPU
def FINIT():
#TODO, check for and handles any pending unmasked floating-point exceptions
FNINIT()
pass
def FNINIT():
#TODO, poner
# control en 1101111111 #037Fh
# TAG word en FFFFh
# los demás: status, data pointer instruction pointer, last instruction opcode, en 0 (cero)
pass
#Multiplicación
"""
Opcode Instruction Description
D8 /1 FMUL m32real Multiply ST(0) by m32real and store result in ST(0)
DC /1 FMUL m64real Multiply ST(0) by m64real and store result in ST(0)
D8 C8+i FMUL ST(0), ST(i) Multiply ST(0) by ST(i) and store result in ST(0)
DC C8+i FMUL ST(i), ST(0) Multiply ST(i) by ST(0) and store result in ST(i)
DE C8+i FMULP ST(i), ST(0) Multiply ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE C9 FMULP Multiply ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /1 FIMUL m32int Multiply ST(0) by m32int and store result in ST(0)
DE /1 FIMUL m16int Multiply ST(0) by m16int and store result in ST(0)
"""
def FMUL(num):
pila.setI(pila.head(), pila.getI(pila.head())[0]*num)
def FMUL_ST (sti):
pila.setI(pila.head(), pila.getI(pila.head())[0]* pila.getI(pila.head()-sti)[0])
def FMUL (sti,st0):
pila.setI(pila.head()-sti, pila.getI(pila.head()-sti)[0]* pila.getI(pila.head())[0])
def FMULP(sti=1, st0=0):
    """FMULP [ST(i), ST(0)]: ST(i) *= ST(0), then pop; returns popped value.

    Defaults give the no-operand (ST(1) *= ST(0)) form.
    """
    # bug fix: duplicate defs shadowed the zero-arg form (TypeError on
    # FMULP()); merged behind default arguments.
    FMUL(sti, st0)
    uesp = pila.pop()[0]
    #status.incTOP() #TODO: check there is no failure here
    return uesp
def FIMUL(num):
FMUL(num)
#No Oeration
def FNOP():
pass
"""
Opcode Instruction Description
D9 F3 FPATAN Replace ST(1) with arctan(ST(1)/ST(0)) and pop the register stack
"""
def FPATAN():
pila.setI(1,math.atan(pila.getI(1)[0]/ pila.getI(pila.head())[0]))
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
"""
Opcode Instruction Description
D9 F8 FPREM Replace ST(0) with the remainder obtained from
dividing ST(0) by ST(1)
"""
def FPREM():
pila.setI(pila.head(), pila.getI(pila.head())[0]%pila.getI(1)[0])
#TODO, setear las variables status._C # pag 182
"""
Opcode Instruction Description
D9 F5 FPREM1 Replace ST(0) with the IEEE remainder obtained from
dividing ST(0) by ST(1)
"""
def FPREM1():
FPREM() #TODO, cuando se cambien los modelos, esto hay que cambiarlo para que cumpla con la IEEE que ahora no cumple
"""
Opcode Instruction Clocks Description
D9 F2 FPTAN 17-173 Replace ST(0) with its tangent and push 1
onto the FPU stack.
"""
def FPTAN():
caux=status.getC()
if pila.getI(pila.head()) < 2**63:
caux[2]=0
status.setC(caux)
pila.setI(pila.head(),math.tan( pila.getI(pila.head())))
status.decTOP()
FLD1()
else:
caux[2]=1
status.setC(caux)
print "Operando fuera de rango"
"""
Opcode Instruction Description
D9 FC FRNDINT Round ST(0) to an integer.
"""
def FRNDINT():
pila.push(int(round(pila.pop()[0])))
"""
Opcode Instruction Description
DD /4 FRSTOR m94/108byte Load FPU state from m94byte or m108byte.
Restaura el estado de la FPU desde memoria
"""
def FRSTOR():
pass #TODO, pag190
"""
Opcode Instruction Description
9B DD /6 FSAVE m94/108byte Store FPU state to m94byte or m108byte after checking for
pending unmasked floating-point exceptions. Then re-
initialize the FPU.
DD /6 FNSAVE* m94/108byte Store FPU environment to m94byte or m108byte without
checking for pending unmasked floating-point exceptions.
Then re-initialize the FPU.
Guarda el estado de la FPU en la dirección memoria dada
"""
def FSAVE(m94_108byte):
    """FSAVE: store FPU state after checking pending exceptions, then re-init."""
    pass #TODO
def FNSAVE(m94_108byte):
    """FNSAVE: store FPU state without the exception check, then re-init."""
    # bug fix: this was a second `FSAVE` def that silently shadowed the
    # first; per the opcode table above it is the no-wait variant FNSAVE.
    pass #TODO
"""
Opcode Instruction Description
9B D9 /7 FSTCW m2byte Store FPU control word to m2byte after checking for
pending unmasked floating-point exceptions.
D9 /7 FNSTCW* m2byte Store FPU control word to m2byte without checking for
pending unmasked floating-point exceptions.
"""
def FSTCW(m2byte):
pass
def FNSTCW(m2byte):
pass
"""
Opcode Instruction Description
9B D9 /6 FSTENV m14/28byte Store FPU environment to m14byte or m28byte after
checking for pending unmasked floating-point exceptions.
Then mask all floating-point exceptions.
D9 /6 FNSTENV* m14/28byte Store FPU environment to m14byte or m28byte without
checking for pending unmasked floating-point exceptions.
Then mask all floating-point exceptions.
"""
def FSTENV(m14_28byte):
pass
def FNSTENV(m14_28byte):
pass
"""
Opcode Instruction Description
9B DD /7 FSTSW m2byte Store FPU status word at m2byte after checking for
pending unmasked floating-point exceptions.
9B DF E0 FSTSW AX Store FPU status word in AX register after checking for
pending unmasked floating-point exceptions.
DD /7 FNSTSW* m2byte Store FPU status word at m2byte without checking for
pending unmasked floating-point exceptions.
DF E0 FNSTSW* AX Store FPU status word in AX register without checking for
pending unmasked floating-point exceptions.
"""
def FSTSW(m2byte=None):
    """FSTSW m2byte / FSTSW AX: store the FPU status word.

    With m2byte=None the AX-register form is selected.  Still TODO.
    """
    # bug fix: separate defs for the memory and AX forms shared one name, so
    # the second shadowed the first; merged behind a default argument.
    pass
def FNSTSW(m2byte=None):
    """FNSTSW m2byte / FNSTSW AX: store status word without the exception check."""
    # bug fix: same shadowing as FSTSW; merged behind a default argument.
    pass
"""
Opcode Instruction Description
D9 FD FSCALE Scale ST(0) by ST(1).
"""
def FSCALE():
    """FSCALE: ST(0) = ST(0) * 2**ST(1)."""
    # bug fix: getI returns (value, tag) pairs; the values were used without
    # [0], so 2**tuple raised TypeError.
    # NOTE(review): hardware truncates ST(1) toward zero first -- not modeled.
    pila.setI(pila.head(), pila.getI(pila.head())[0]*(2**pila.getI(1)[0]))
    #TODO: set the status flags
def FWAIT():
pass
"""
Opcode Instruction Description
D9 E5 FXAM Classify value or number in ST(0)
"""
#TODO
def FXAM():
"""
C1 ← sign bit of ST; (* 0 for positive, 1 for negative *)
CASE (class of value or number in ST(0)) OF
Unsupported:C3, C2, C0 ← 000;
NaN: C3, C2, C0 ← 001;
Normal: C3, C2, C0 ← 010;
Infinity: C3, C2, C0 ← 011;
Zero: C3, C2, C0 ← 100;
Empty: C3, C2, C0 ← 101;
Denormal: C3, C2, C0 ← 110;
ESAC;
"""
pass
"""
Opcode Instruction Description
D9 C8+i FXCH ST(i) Exchange the contents of ST(0) and ST(i)
D9 C9 FXCH Exchange the contents of ST(0) and ST(1)
"""
def FXCH(sti=1):
    """FXCH [ST(i)]: exchange ST(0) and ST(i), value and tag (default ST(1))."""
    # bug fix: a zero-arg def and a one-arg def shared this name; the second
    # shadowed the first, so FXCH() raised TypeError.  A default of sti=1
    # restores the no-operand form.
    aux = pila.getI(pila.head()-sti)
    pila.setI(pila.head()-sti, pila.getI(pila.head())[0], pila.getI(pila.head())[1])
    pila.setI(pila.head(), aux[0], aux[1])
"""
Opcode Instruction Description
D9 F4 FXTRACT Separate value in ST(0) into exponent and significand,
store exponent in ST(0), and push the significand onto the
register stack.
"""
def FXTRACT():
pass #TODO
"""
Opcode Instruction Description
D9 F1 FYL2X Replace ST(1) with (ST(1) ∗ log2ST(0)) and pop the
register stack
"""
def FYL2X():
    """FYL2X: ST(1) = ST(1) * log2(ST(0)), then pop; returns the popped value."""
    # bug fixes per the opcode table above: the result must be multiplied by
    # ST(1), and getI returns a (value, tag) pair so the operand needs [0]
    # (math.log on a tuple raises TypeError).
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0], 2))
    uesp = pila.pop()[0]
    #status.incTOP() #TODO: check whether this is right
    return uesp
"""
Opcode Instruction Description
D9 F9 FYL2XP1 Replace ST(1) with ST(1) ∗ log2(ST(0) + 1.0) and pop the
register stack
"""
def FYL2XP1():
    """FYL2XP1: ST(1) = ST(1) * log2(ST(0) + 1.0), then pop; returns popped value."""
    # bug fixes: this def was misnamed FYL2X (silently shadowing the real
    # FYL2X above); per its own opcode table it is FYL2XP1.  The formula
    # also needed the ST(1) multiply, the +1.0 inside the log (it was added
    # outside), and the [0] value extraction from getI's (value, tag) pair.
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0] + 1.0, 2))
    uesp = pila.pop()[0]
    #status.incTOP() #TODO: check whether this is right
    return uesp
#Si es llamado como ejecutable, entonces decir que esto es una librería del set de instrucción de la fpu 8087, mostrar la doc y salir.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de instruction_set.py
"""
import unittest
import random
from fpu_structure import Pila, StatusRegister, ControlRegister, StatusX86
"""
Test Pila
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Especificado en cada test en particular
Dependencias:
Pila
Método:
Especificado en cada test en particular
Esperado:
Test OK
"""
class TestPila(unittest.TestCase):
    """Unit tests for Pila, the FPU register stack (28/07/2008, L. M. Rocha)."""

    def test_push_1(self):
        """push(st, tag) must store exactly the (value, tag) pair given."""
        pila = Pila()
        tag = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for i in range(2):
            for t in tag:
                # randint() requires integer bounds; 2e10 is a float and is
                # rejected on modern Python, so use the equivalent int.
                st = random.randint(-2 * 10 ** 10, 2 * 10 ** 10)
                pila.push(st, t)
                # The head of the stack must hold the pair just pushed.
                self.assertEqual(
                    (pila._pst[len(pila._pst) - 1],
                     pila._ptag[len(pila._pst) - 1]),
                    (st, t))

    def test_push_2(self):
        """push(st) without an explicit tag must still store the value."""
        pila = Pila()
        for i in range(8):
            st = random.random()
            pila.push(st)
            self.assertEqual(pila._pst[len(pila._pst) - 1], st)

    def test_pop(self):
        """pop() must return the values in reverse insertion order.

        Assumes Pila.push() works correctly.
        """
        pila = Pila()
        st = []
        for i in range(8):
            st.append(random.random())
            pila.push(st[i])
        for i in range(8):
            self.assertEqual(pila.pop()[0], st[7 - i])

    def test_getI(self):
        """getI(i) must return the value stored at index i.

        Assumes Pila.push() works correctly.
        """
        pila = Pila()
        st = []
        for i in range(8):
            st.append(random.random())
            pila.push(st[i])
        for i in range(8):
            self.assertEqual(pila.getI(i)[0], st[i])

    def test_head(self):
        """head() must always index the most recently pushed element.

        Assumes Pila.push() and Pila.getI() work correctly.
        """
        pila = Pila()
        tag = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for i in range(2):
            for t in tag:
                st = random.randint(-2 * 10 ** 10, 2 * 10 ** 10)
                pila.push(st, t)
                self.assertEqual(pila.getI(pila.head()), (st, t))

    def test_length(self):
        """length() must track the number of pushed elements."""
        pila = Pila()
        self.assertEqual(0, pila.length())
        for i in range(8):
            pila.push(random.random())
            self.assertEqual(i + 1, pila.length())

    def test_delI(self):
        """delI(0) must delete the bottom element and return True.

        Assumes Pila.push() works correctly.
        """
        pila = Pila()
        for i in range(8):
            pila.push(random.random())
        for i in range(8):
            self.assertEqual(True, pila.delI(0))

    def test_setI_1(self):
        """setI(i, st, tag) must overwrite both value and tag at index i.

        Assumes Pila.push() works correctly.
        """
        pila = Pila()
        tag = [[0, 0], [0, 1], [1, 0], [1, 1]]
        # Fill the stack with known values first.
        for i in range(8):
            pila.push(0, tag[3])
        st = []
        i = 0
        for j in range(2):
            for t in tag:
                st.append(random.random())
                pila.setI(i, st[i], t)
                self.assertEqual(pila.getI(i), (st[i], t))
                i += 1

    def test_setI_2(self):
        """setI(i, st) without a tag must overwrite the value at index i.

        Assumes Pila.push() works correctly.
        """
        pila = Pila()
        # Fill the stack with known values first.
        for i in range(8):
            pila.push(0, [1, 1])
        st = []
        for i in range(8):
            st.append(random.random())
            pila.setI(i, st[i])
            self.assertEqual(pila.getI(i)[0], st[i])
#class TestStatusX86(unittest.TestCase):
# pass
class TestControlRegister(unittest.TestCase):
    """Unit tests for the ControlRegister set/get accessors."""

    def test_setPC_1(self):
        """setPC(list) must store the precision-control pair as given."""
        control = ControlRegister()
        PC = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for pc in PC:
            control.setPC(pc)
            self.assertEqual(pc, control._PC)

    def test_setPC_2(self):
        """setPC(b0, b1) must store the two precision-control bits."""
        control = ControlRegister()
        PC = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for pc in PC:
            control.setPC(pc[0], pc[1])
            self.assertEqual(pc, control._PC)

    # NOTE(review): lacks the test_ prefix, so unittest never runs it.
    # Assumes setPC() works correctly.
    def getPC(self):
        control = ControlRegister()
        PC = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for pc in PC:
            control.setPC(pc[0], pc[1])
            self.assertEqual(control.getPC(), pc)

    def test_setRC_1(self):
        """setRC(list) must store the rounding-control pair as given."""
        control = ControlRegister()
        RC = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for rc in RC:
            control.setRC(rc)
            self.assertEqual(rc, control._RC)

    def test_setRC_2(self):
        """setRC(b0, b1) must store the two rounding-control bits."""
        control = ControlRegister()
        RC = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for rc in RC:
            control.setRC(rc[0], rc[1])
            self.assertEqual(rc, control._RC)

    # NOTE(review): lacks the test_ prefix, so unittest never runs it.
    # Assumes setRC() works correctly.
    def getRC(self):
        control = ControlRegister()
        RC = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for rc in RC:
            control.setRC(rc[0], rc[1])
            self.assertEqual(control.getRC(), rc)

    def test_setIC_1(self):
        """setIC(list) must store the infinity-control pair as given."""
        control = ControlRegister()
        IC = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for ic in IC:
            control.setIC(ic)
            self.assertEqual(ic, control._IC)

    def test_setIC_2(self):
        """setIC(b0, b1) must store the two infinity-control bits."""
        control = ControlRegister()
        IC = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for ic in IC:
            control.setIC(ic[0], ic[1])
            self.assertEqual(ic, control._IC)

    # NOTE(review): lacks the test_ prefix, so unittest never runs it.
    # Assumes setIC() works correctly.
    def getIC(self):
        control = ControlRegister()
        IC = [[0, 0], [0, 1], [1, 0], [1, 1]]
        for ic in IC:
            # Was setIC(rc[0], rc[1]): 'rc' is undefined here (NameError).
            control.setIC(ic[0], ic[1])
            self.assertEqual(control.getIC(), ic)
class TestStatusRegister(unittest.TestCase):
    # TODO: no tests implemented yet; the module-level string below lists
    # the planned test cases (setTOP, setC, decTOP, incTOP, ...).
    pass
"""
def test_setTOP_1(self):
def test_setTOP_2(self):
def getTOP(self):
def test_setC_1(self):
def test_setC_2(self):
def test_getC(self):
def test_decTOP(self):
def test_incTOP(self):
"""
# Run this test module's suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#import re
import copy
#import instruction_set as iset #modificado por el momento
import reduced_instruction_set as iset
from fpu_structure import *
# Undo support: snapshots of the FPU state taken by saveState().
uesp = None  # last element popped from the stack
uesp_temp = None  # saved copy of uesp
pila_temp = None  # saved copy of the register stack
control_temp = None  # saved copy of the control register
status_temp = None  # saved copy of the status register
pinout_temp = None  # saved copy of the pinout model
statusX86_temp = None  # saved copy of the mirrored x86 flags
def parse(text):
    """Split *text* into one token list per input line.

    Each whitespace-separated token is converted to int when possible;
    anything else (e.g. the instruction mnemonic) is kept as a string.
    The FPU core only understands numbers, so for now only integer
    operands are recognized.

    Returns a list of lists, one per line of *text*.
    """
    lines = []
    for raw in text.splitlines():
        tokens = raw.split()
        for j, tok in enumerate(tokens):
            try:
                tokens[j] = int(tok)
            except ValueError:
                # Not an integer literal: leave the token untouched.
                pass
        lines.append(tokens)
    return lines
def execute_command(commlista):
    """Execute one parsed command list: ['NAME', arg1, ...] -> iset.NAME(args).

    The FPU state is snapshotted first so the GUI can undo the command.
    Unknown names or wrong parameters are reported, never raised.
    """
    saveState()
    comm = commlista[0]
    params = commlista[1:]
    try:
        # Look the instruction up on the module and call it directly
        # instead of building source text for exec(): same effect, but no
        # code-injection hole and no string-quoting issues.
        getattr(iset, comm)(*params)
    except Exception:
        # The function does not exist or the parameters are wrong.
        print("línea incorrecta: %s %s" % (comm, params))
def undo():
    """Restore the FPU state captured by the last saveState() call."""
    # 'uesp' was missing from the global statement, so the original only
    # created a useless local instead of restoring the module global.
    global uesp, uesp_temp, pila_temp, control_temp, status_temp
    uesp = uesp_temp
    iset.pila = pila_temp
    iset.control = control_temp
    iset.status = status_temp
    #iset.pinout = pinout_temp
    #iset.statusX86 = statusX86_temp
def rebootFPU():
    """Reinitialize every FPU structure to its power-on state."""
    iset.pila = None
    iset.pila = Pila()  # brand-new, empty register stack
    for unidad in (iset.control, iset.status, iset.pinout, iset.statusX86):
        unidad.iniciar()
def saveState():
    """Snapshot the current FPU state (deep copies) so undo() can restore it."""
    # pinout_temp / statusX86_temp were missing from the global statement,
    # so their assignments below only created locals and nothing was saved.
    global uesp_temp, pila_temp, control_temp, status_temp
    global pinout_temp, statusX86_temp
    uesp_temp = uesp
    # Deep copies: the live objects keep mutating after the snapshot.
    pila_temp = copy.deepcopy(iset.pila)
    control_temp = copy.deepcopy(iset.control)
    status_temp = copy.deepcopy(iset.status)
    pinout_temp = copy.deepcopy(iset.pinout)
    statusX86_temp = copy.deepcopy(iset.statusX86)
def cleanState():
    """Drop the saved undo snapshot."""
    global uesp_temp, pila_temp, control_temp, status_temp
    global pinout_temp, statusX86_temp
    uesp_temp = None
    pila_temp = None
    control_temp = None
    status_temp = None
    # These two were commented out even though saveState() fills them in.
    pinout_temp = None
    statusX86_temp = None
# When run as a script: the required modules would be instantiated here
# (not implemented yet).
if __name__ == "__main__":
    pass
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de instruction_set.py
"""
import random
import math
#módulo de Tests Unitarios
import unittest
#importa el módulo a testear:
from reduced_instruction_set import *
"""
Test FLD
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Observar que el dato que se introduce mediante FLD de instruction_set.py
se corresponda con los datos que se almacenan en la pila
Dependencias:
Pila
StatusRegister
Método:
Se crean valores enteros positivos, cero y negativos los que se introducirán
mediante FLD.
Se comprueba que el valor almacenado corresponda con el introducido
Esperado:
Test OK
"""
class TestFLD(unittest.TestCase):
    """FLD must leave the loaded value unchanged at the head of the stack."""

    def _check_fld(self, value):
        # Load the value and compare it with what the stack head reports.
        FLD(value)
        self.assertEqual(pila.getI(pila.head())[0], value)

    def testFLDpos(self):
        # A positive operand.
        self._check_fld(111111)

    def testFLDneg(self):
        # A negative operand.
        self._check_fld(-111111)

    def testFLDcero(self):
        # The zero operand.
        self._check_fld(0)
#Test ABS
#
#class TestFABS(unittest.TestCase):
# pass
#Test FADD
class TestFADD(unittest.TestCase):
    """FADD(0, 1) must leave ST(0) + ST(1) at the head of the stack."""

    def testFADD_1(self):
        lhs = random.randint(-2 ** 10, 2 ** 10)
        rhs = random.randint(-2 ** 10, 2 ** 10)
        expected = lhs + rhs
        pila.push(lhs)
        pila.push(rhs)
        FADD(0, 1)
        self.assertEqual(pila.getI(pila.head())[0], expected)
#Test FSUB
class TestFSUB(unittest.TestCase):
    """FSUB(0, 1) must leave ST(0) - ST(1) at the head of the stack."""

    def testFSUB(self):
        # Drain leftovers from previous tests (the stack is module-level).
        for _ in range(8):
            pila.pop()
        subtrahend = random.randint(-2 ** 10, 2 ** 10)
        minuend = random.randint(-2 ** 10, 2 ** 10)
        expected = minuend - subtrahend
        pila.push(subtrahend)
        pila.push(minuend)
        FSUB(0, 1)
        self.assertEqual(pila.getI(pila.head())[0], expected)
class TestFMUL(unittest.TestCase):
    """FMUL(0, 1) must leave ST(0) * ST(1) at the head of the stack."""

    def testFMUL(self):
        # Drain leftovers from previous tests (the stack is module-level).
        for i in range(8):
            pila.pop()
        a = random.randint(-2 ** 6, 2 ** 6)
        b = random.randint(-2 ** 6, 2 ** 6)
        c = a * b
        pila.push(a)
        pila.push(b)
        # The stray debug 'print' statements were removed (they also broke
        # the module on Python 3).
        FMUL(0, 1)
        self.assertEqual(pila.getI(pila.head())[0], c)
class TestFDIV(unittest.TestCase):
    """FDIV(0, 1) must leave ST(0) / ST(1) at the head of the stack."""

    def testFDIV(self):
        # Drain leftovers from previous tests (the stack is module-level).
        for i in range(8):
            pila.pop()
        a = random.randint(-2 ** 6, 2 ** 6)
        while a == 0:
            # A zero divisor would make the test itself raise, not FDIV.
            a = random.randint(-2 ** 6, 2 ** 6)
        b = random.randint(-2 ** 6, 2 ** 6)
        c = b / a
        pila.push(a)
        pila.push(b)
        # The stray debug 'print' statements were removed.
        FDIV(0, 1)
        self.assertEqual(pila.getI(pila.head())[0], c)
class TestFCOS(unittest.TestCase):
    """FCOS must replace ST(0) with its cosine."""

    def testFCOS(self):
        # Drain leftovers from previous tests (the stack is module-level).
        for _ in range(8):
            pila.pop()
        angle = random.randint(-2 ** 6, 2 ** 6)
        expected = math.cos(angle)
        pila.push(angle)
        FCOS()
        self.assertEqual(pila.getI(pila.head())[0], expected)
class TestFSIN(unittest.TestCase):
    """FSIN must replace ST(0) with its sine."""

    def testFSIN(self):
        # Drain leftovers from previous tests (the stack is module-level).
        for _ in range(8):
            pila.pop()
        angle = random.randint(-2 ** 6, 2 ** 6)
        expected = math.sin(angle)
        pila.push(angle)
        FSIN()
        self.assertEqual(pila.getI(pila.head())[0], expected)
class TestFSINCOS(unittest.TestCase):
    """FSINCOS must leave the cosine at ST(0) and the sine at ST(1)."""

    def testFSINCOS(self):
        # Drain leftovers from previous tests (the stack is module-level).
        for _ in range(8):
            pila.pop()
        angle = random.randint(-2 ** 6, 2 ** 6)
        expected_cos = math.cos(angle)
        expected_sin = math.sin(angle)
        pila.push(angle)
        FSINCOS()
        self.assertEqual(pila.getI(pila.head())[0], expected_cos)
        self.assertEqual(pila.getI(pila.head() - 1)[0], expected_sin)
class TestFSQRT(unittest.TestCase):
    """FSQRT must replace ST(0) with its square root."""

    def testFSQRT(self):
        # Drain leftovers from previous tests (the stack is module-level).
        for _ in range(8):
            pila.pop()
        value = random.randint(0, 2 ** 6)  # non-negative operand only
        pila.push(value)
        FSQRT()
        self.assertEqual(pila.getI(pila.head())[0], math.sqrt(value))
class TestFSTP(unittest.TestCase):
    """FSTP must return ST(0) and pop it, exposing the element below."""

    def testFSTP(self):
        # Drain leftovers from previous tests (the stack is module-level).
        for _ in range(8):
            pila.pop()
        top = random.randint(-2 ** 6, 2 ** 6)
        below = random.randint(-2 ** 6, 2 ** 6)
        pila.push(below)
        pila.push(top)
        self.assertEqual(FSTP(1111), top)
        self.assertEqual(pila.getI(pila.head())[0], below)
class TestFCOM(unittest.TestCase):
    """FCOM(1) must set the condition code according to ST(0) vs ST(1)."""

    def testFCOM(self):
        # Drain leftovers from previous tests (the stack is module-level).
        for _ in range(8):
            pila.pop()
        st0_values = [2, 1, 0]
        st1_values = [1, 2, 0]
        # Expected [C0, C1, C2, C3] for greater / less / equal; the fourth
        # row (unordered) is kept for completeness but not exercised.
        expected = [[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [1, 0, 1, 1]]
        for i in range(3):
            pila.push(st1_values[i])
            pila.push(st0_values[i])
            FCOM(1)
            self.assertEqual(status.getC(), expected[i])
# Run this test module's suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datahandling import *
"""
Pin Configuration
"""
"""
pin[ 1 ]='GND'
pin[ 2:16 ]= AD[14:0]
pin[ 17 ]= 'NC'
pin[ 18 ]= 'NC'
pin[ 19 ]= 'CLK'
pin[ 20 ]= 'GND'
pin[ 21 ]= 'RESET'
pin[ 22 ]= 'READY'
pin[ 23 ]= 'BUSY'
pin[ 24 ]= QS1
pin[ 25 ]= QS0
pin[ 26 ]= S0 #neg
pin[ 27 ]= S1 #neg
pin[ 28 ]= S2 #neg
pin[ 29 ]= 'NC'
pin[ 30 ]= 'NC'
pin[ 31 ]= RQ/GT0 #neg
pin[ 32 ]= INT
pin[ 33 ]= RQ/GT1 #neg
pin[ 34 ]= BHE #neg
pin[ 35 : 38 ]= S[6:3]
pin[ 39 ]= AD[15]
pin[ 40 ]= 'VCC'
"""
class Pinout:
    """Model of the 8087 chip pinout (40 pins; see the map in the string below)."""
    def __init__(self):
        self.iniciar()
    def iniciar(self): #reset to the power-on state
        self._AD =[0 for i in range(16)] #address/data lines
        self._pin=[None for i in range(40)]
        """ self.pin[ 1 ]='GND'
        self.pin[ 2:16 ]= _AD[14:0]
        self.pin[ 17 ]= 'NC'
        self.pin[ 18 ]= 'NC'
        self.pin[ 19 ]= 'CLK'
        self.pin[ 20 ]= 'GND'
        self.pin[ 21 ]= 'RESET'
        self.pin[ 22 ]= 'READY'
        self.pin[ 23 ]= 'BUSY'
        self.pin[ 24 ]= 'QS1'
        self.pin[ 25 ]= 'QS0'
        self.pin[ 26 ]= 'S0' #neg
        self.pin[ 27 ]= 'S1' #neg
        self.pin[ 28 ]= 'S2' #neg
        self.pin[ 29 ]= 'NC'
        self.pin[ 30 ]= 'NC'
        self.pin[ 31 ]= 'RQ/GT0' #neg
        self.pin[ 32 ]= 'INT'
        self.pin[ 33 ]= 'RQ/GT1' #neg
        self.pin[ 34 ]= 'BHE' #neg
        self.pin[ 35 : 38 ]= [0,0,0,0]#S[6:3]
        self.pin[ 39 ]= self._AD[15]
        self.pin[ 40 ]= 'VCC'
    """
"""
Control Unit (CU)
Recibe las instrucciones
Decodifica los operandos
Ejecuta rutinas de control
"""
"""
Numeric Execution Unit (NEU)
Ejecuta las instrucciones numéricas
"""
"""
Data Field:
Compuesto por la Pila
"""
"""
Pila
Esta está compuesta de 7 registros de 80 bits.
Cada registro consta de
64 bits mas bajos de significand
15 bits de exponente
1 bit de signo
"""
"""
Tag Field
Cada registro tiene correspondencia uno a uno con un registro del data field
"""
class Pila:
    """8087 register stack: up to 8 (value, tag) pairs, index 0 at the bottom.

    Out-of-range reads return the sentinel pair (0, [1, 1]) (tag "empty").
    """
    def __init__(self):
        self.iniciar()

    def iniciar(self):
        """Reset the stack to empty (power-on state)."""
        self._pst = []   # ST register values
        self._ptag = []  # one [b1, b0] tag pair per register

    def push(self, *args):
        """push(st[, tag]): append a value; tag defaults to [0, 0] (valid)."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            # (The original repeated this unpacking twice; once is enough.)
            st, tag = args
        else:
            st = args[0]
            tag = [0, 0]
        if len(self._pst) < 8:
            self._pst.append(st)
            self._ptag.append(tag)
        else:
            # Stack overflow: the 8087 only has 8 physical registers.
            print("fallo al empujar valor a la pila, demasiados valores")
            #raise #exception

    def pop(self):
        """Remove and return the top (value, tag) pair; sentinel when empty."""
        try:
            return (self._pst.pop(), self._ptag.pop())
        except IndexError:
            return (0, [1, 1])

    def getI(self, i):
        """Return the (value, tag) pair at index i; sentinel when out of range."""
        if len(self._pst) > 8 or i < 0:
            # Negative indices would wrap around; treat them as invalid.
            return (0, [1, 1])
        try:
            return (self._pst[i], self._ptag[i])
        except IndexError:
            return (0, [1, 1])

    def setI(self, *args):
        """setI(i, st[, tag]): overwrite slot i; tag defaults to [0, 0].

        NOTE: indices at or beyond the current length raise IndexError,
        exactly as in the original implementation.
        """
        assert 2 <= len(args) <= 3
        if len(args) == 3:
            i, st, tag = args
        else:
            i, st = args
            tag = [0, 0]
        if len(self._pst) > 8 or i < 0:
            return (0, [1, 1])
        self._pst[i] = st
        self._ptag[i] = tag

    def delI(self, i):
        """Delete slot i; return True on success, False when i is invalid."""
        try:
            del self._pst[i]
            del self._ptag[i]
            return True
        except IndexError:
            return False

    def length(self):
        """Number of occupied registers."""
        return len(self._pst)

    def head(self):
        """Index of the top of the stack (-1 when empty)."""
        return len(self._pst) - 1

    def getRegs(self):
        """Snapshot of all 8 slots as (value, tag) pairs (sentinels pad)."""
        return [self.getI(i) for i in range(8)]

    def setRegs(self, pilatemp):
        """Restore the occupied slots from a getRegs()-style list.

        The original passed each (value, tag) pair to setI as (index,
        value) -- corrupting the stack -- and printed debug junk.
        """
        for i, (st, tag) in enumerate(pilatemp):
            if i < self.length():
                self.setI(i, st, tag)
"""
Control Register (16 bits)
"""
class ControlRegister:
    """8087 control word (16 bits): exception masks plus the precision,
    rounding and infinity control fields."""
    def __init__(self):
        self.iniciar()

    def iniciar(self):
        """Reset to the power-on value 037Fh (all exceptions masked)."""
        self._IM = 1  # Invalid operation mask
        self._DM = 1  # Denormalized operand mask
        self._ZM = 1  # Zero divide mask
        self._OM = 1  # Overflow mask
        self._UM = 1  # Underflow mask
        self._PM = 1  # Precision mask
        self._X = 1   # Reserved
        self._M = 0   # Interrupt mask
        self._PC = [1, 1]  # Precision control
        self._PC0 = self._PC[0]
        self._PC1 = self._PC[1]  # was mistakenly initialized from _PC[0]
        self._RC = [0, 0]  # Rounding control
        self._RC0 = self._RC[0]
        self._RC1 = self._RC[1]
        self._IC = [0, 0]  # Infinity control (0=projective, 1=affine)
        self._IC0 = self._IC[0]
        self._IC1 = self._IC[1]
        self._XX = [0, 0]  # reserved trailing bits

    def setPC(self, *args):
        """setPC([pc0, pc1]) or setPC(pc0, pc1): set precision control."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._PC[0] = args[0]
            self._PC[1] = args[1]
        else:
            self._PC = args[0]

    def getPC(self):
        return self._PC  # was a bare _PC: NameError at call time

    def setRC(self, *args):
        """setRC([rc0, rc1]) or setRC(rc0, rc1): set rounding control."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._RC[0] = args[0]
            self._RC[1] = args[1]
        else:
            self._RC = args[0]

    def getRC(self):
        return self._RC  # was a bare _RC: NameError at call time

    def setIC(self, *args):
        """setIC([ic0, ic1]) or setIC(ic0, ic1): set infinity control."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._IC[0] = args[0]
            self._IC[1] = args[1]
        else:
            self._IC = args[0]

    def getIC(self):
        return self._IC  # was a bare _IC: NameError at call time

    def getRegs(self):
        """All 16 control-word bits, in getRegNames() order."""
        return [self._IM, self._DM, self._ZM, self._OM, self._UM, self._PM,
                self._X, self._M, self._PC[0], self._PC[1], self._RC[0],
                self._RC[1], self._IC[0], self._IC[1], self._XX[0], self._XX[1]]

    def setRegs(self, IM, DM, ZM, OM, UM, PM, X0, M, PC0, PC1, RC0, RC1,
                IC0, IC1, X1, X2):
        """Load all 16 bits at once ('self' was missing in the original)."""
        self._IM = IM
        self._DM = DM
        self._ZM = ZM
        self._OM = OM
        self._UM = UM
        self._PM = PM
        self._X = X0
        self._M = M
        self._PC[0] = PC0
        self._PC[1] = PC1
        self._RC[0] = RC0
        self._RC[1] = RC1
        self._IC[0] = IC0
        self._IC[1] = IC1
        self._XX[0] = X1
        self._XX[1] = X2

    def getRegNames(self):
        """Bit/widget names matching getRegs() order."""
        return ['IM','DM','ZM','OM','UM','PM','X0','M','PC0','PC1','RC0','RC1','IC0','IC1','X1','X2']
"""
Status Register (16 bits)
"""
class StatusRegister:
    """8087 status word (16 bits): exception flags, condition code and TOP."""
    def __init__(self):
        self.iniciar()

    def iniciar(self):
        """Reset to 0000h."""
        self._IE = 0  # Invalid operation
        self._DE = 0  # Denormalized operand
        self._ZE = 0  # Zero divide
        self._OE = 0  # Overflow
        self._UE = 0  # Underflow
        self._PE = 0  # Precision
        self._X = 0   # Reserved
        self._IR = 0  # Interrupt request
        self._C = [0, 0, 0, 0]  # Condition code C0..C3
        self._C0 = 0
        self._C1 = 0
        self._C2 = 0
        self._TOP = [0, 0, 0]  # Top-of-stack pointer (3 bits)
        self._C3 = 0
        self._B = 0  # NEU busy

    def setTOP(self, *args):
        """setTOP([t2, t1, t0]) or setTOP(t2, t1, t0)."""
        assert len(args) in (1, 3)
        if len(args) == 3:
            self._TOP[0] = args[0]
            self._TOP[1] = args[1]
            self._TOP[2] = args[2]
        else:
            self._TOP = args[0]

    def getTOP(self):
        return self._TOP

    def setC(self, *args):
        """setC([c0, c1, c2, c3]) or setC(c0, c1, c2, c3)."""
        assert len(args) in (1, 4)
        if len(args) == 4:
            self._C[0] = args[0]
            self._C[1] = args[1]
            self._C[2] = args[2]
            self._C[3] = args[3]
        else:
            self._C = args[0]

    def getC(self):
        return self._C

    def decTOP(self):
        """Decrement TOP modulo 8 (wraps 0 -> 7)."""
        aux = bin2dec(self._TOP)
        aux = 7 if aux == 0 else aux - 1
        self._TOP = dec2bin(aux)

    def incTOP(self):
        """Increment TOP modulo 8 (wraps 7 -> 0)."""
        aux = bin2dec(self._TOP)
        aux = 0 if aux == 7 else aux + 1
        self._TOP = dec2bin(aux)

    def getRegs(self):
        """All 16 status-word bits, in getRegNames() order."""
        return [self._IE, self._DE, self._ZE, self._OE, self._UE, self._PE,
                self._X, self._IR, self._C[0], self._C[1], self._C[2],
                self._TOP[0], self._TOP[1], self._TOP[2], self._C[3], self._B]

    def setRegs(self, IE, DE, ZE, OE, UE, PE, X, IR, C0, C1, C2,
                TOP0, TOP1, TOP2, C3, B):
        """Load all 16 bits at once ('self' was missing in the original)."""
        self._IE = IE
        self._DE = DE
        self._ZE = ZE
        self._OE = OE
        self._UE = UE
        self._PE = PE
        self._X = X
        self._IR = IR
        self._C[0] = C0
        self._C[1] = C1
        self._C[2] = C2
        self._TOP[0] = TOP0
        self._TOP[1] = TOP1
        self._TOP[2] = TOP2
        self._C[3] = C3
        self._B = B

    def getRegNames(self):
        """Bit/widget names matching getRegs() order."""
        return ['IE','DE','ZE','OE','UE','PE','X','IR','C0','C1','C2','TOP0','TOP1','TOP2','C3','B']
"""
Tag Word (16 bits) #listo
"""
"""
Instruction Pointer (32 bits)
"""
"""
Data Pointer (32 bits)
"""
"""
Registros necesarios del procesador 8086
"""
class StatusX86:
    """Subset of the 8086 CPU flags that the FPU simulator mirrors."""
    def __init__(self):
        self.iniciar()

    def iniciar(self):
        """Reset every flag to 0 (0000h)."""
        self._CF = 0  # carry
        self._PF = 0  # parity
        self._AF = 0  # auxiliary carry
        self._ZF = 0  # zero
        self._SF = 0  # sign
        self._TF = 0  # trap
        self._IF = 0  # interrupt enable
        self._DF = 0  # direction
        self._OF = 0  # overflow

    def getRegs(self):
        """Flag values in the order reported by getRegNames()."""
        return [self._CF, self._PF, self._AF, self._ZF, self._SF,
                self._TF, self._IF, self._DF, self._OF]

    def setRegs(self, CF, PF, AF, ZF, SF, TF, IF, DF, OF):
        """Load all 9 flags at once ('self' was missing in the original)."""
        self._CF = CF
        self._PF = PF
        self._AF = AF
        self._ZF = ZF
        self._SF = SF
        self._TF = TF
        self._IF = IF
        self._DF = DF
        self._OF = OF

    def getRegNames(self):
        # Was 10 entries with a stray 'DF'; must mirror getRegs() (9 flags).
        return ['CF', 'PF', 'AF', 'ZF', 'SF', 'TF', 'IF', 'DF', 'OF']
#Si es llamado como ejecutable, entonces decir que esto es una librería que contiene las estructuras básicas de una fpu 8087 (pilas y registros), mostrar la doc y salir.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de instruction_set.py
"""
import random
#módulo de Tests Unitarios
import unittest
#importa el módulo a testear:
from reduced_instruction_set import *
"""
Test FLD
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Observar que el dato que se introduce mediante FLD de instruction_set.py
se corresponda con los datos que se almacenan en la pila
Dependencias:
Pila
StatusRegister
Método:
Se crean valores enteros positivos, cero y negativos los que se introducirán
mediante FLD.
Se comprueba que el valor almacenado corresponda con el introducido
Esperado:
Test OK
"""
class TestFLD(unittest.TestCase):
    """FLD must leave the loaded value at the head of the FPU stack."""

    def testFLDpos(self):
        value = 111111  # positive operand
        FLD(value)
        self.assertEqual(pila.getI(pila.head())[0], value)

    def testFLDneg(self):
        value = -111111  # negative operand
        FLD(value)
        self.assertEqual(pila.getI(pila.head())[0], value)

    def testFLDcero(self):
        value = 0  # zero operand
        FLD(value)
        self.assertEqual(pila.getI(pila.head())[0], value)
#Test ABS
#
#class TestFABS(unittest.TestCase):
# pass
#Test FADD
class TestFADD(unittest.TestCase):
    """FADD must leave the sum at the head of the stack."""

    def testFADD_1(self):
        """Register form: FADD(0, 1) adds ST(0) and ST(1)."""
        a = random.random()
        b = random.random()
        c = a + b
        pila.push(a)
        pila.push(b)
        FADD(0, 1)
        self.assertEqual(pila._pst[pila.head()], c)

    def testFADD_2(self):
        """Memory-operand form: FADD(value) adds value to ST(0).

        NOTE(review): this was also named testFADD_1, shadowing the test
        above so only one of the two ever ran; renamed so both execute.
        """
        a = random.random()
        b = random.random()
        c = a + b
        pila.push(a)
        FADD(b)
        self.assertEqual(pila._pst[pila.head()], c)
# Run this test module's suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
from fpu_structure import * #mas comentarios
#from instruction_set import * #Por el momento queda así
import reduced_instruction_set as iset
from datahandling import *
import main #debe estar después de la instanciación de iset
class FPU_GUI:
    """Glade/PyGTK front end for the 8087 FPU simulator.

    Wires the console, stack view and register view to the main module,
    which parses and executes the typed instructions.
    """
    def __init__(self):
        # Take an initial snapshot so "Deshacer" (undo) works right away.
        main.saveState()
        self._last_instruction = ""
        #Set the Glade file
        self.gladefile = "gui/fpugui.glade/fpuguiglade"
        self.wTree = gtk.glade.XML(self.gladefile)
        #Get the Main Window, and connect the "destroy" event
        self.windowST = self.wTree.get_widget("ST_Stack")
        if (self.windowST):
            self.windowST.connect("destroy", gtk.main_quit)
        #self.windowReg = self.wTree.get_widget("Registers")
        #if (self.windowReg):
        #    self.windowReg.connect("destroy", gtk.main_quit)
        self.windowConsole = self.wTree.get_widget("Consola")
        if (self.windowConsole):
            self.windowConsole.connect("destroy", gtk.main_quit)
        #Create our dictionay and connect it
        dic = {
            "on_Salir_destroy" : gtk.main_quit,
            "on_Ejecutar_clicked" : self.ejecutar,
            "on_Deshacer_clicked" : self.deshacer,
            "on_Reiniciar_clicked" : self.reiniciar,
            "on_Salir_clicked" : gtk.main_quit,
        }
        self.wTree.signal_autoconnect(dic)
    def ejecutar(self, widget):
        """Handler for 'Ejecutar': run every line typed in the console."""
        lines = [] # staging area for the input lines;
        # to take them out in order use pop(0) (the 0 is required)
        # fetch the input console widget
        consola=self.wTree.get_widget("entrada_consola")
        buffConsola = consola.get_buffer()
        numlines=buffConsola.get_line_count()
        beginIter = buffConsola.get_start_iter() #buffConsola.get_iter_at_line(0)
        endIter = buffConsola.get_end_iter()
        text= buffConsola.get_text(beginIter,endIter)
        # parse the input data, validate it and hand it to main for execution
        commands = main.parse(text)
        for comm in commands:
            main.execute_command(comm)
            self._last_instruction = comm
        # refresh the register, stack and result views
        self.actualizarRegs()
        self.actualizarPila()
        self.actualizarResultados()
    def deshacer(self, widget):
        """Handler for 'Deshacer': undo the last executed command."""
        main.undo()
        self.actualizarRegs()
        self.actualizarPila()
    def reiniciar(self, widget):
        """Handler for 'Reiniciar': reset the FPU to its power-on state."""
        main.rebootFPU()
        self.actualizarRegs()
        self.actualizarPila()
    # refresh the register output widgets
    def actualizarRegs(self):
        """Copy the status and control register bits into their widgets."""
        try:
            # status register
            regs_vals = iset.status.getRegs()
            regs_noms = iset.status.getRegNames()
            for i in range (16):
                self.wTree.get_widget(regs_noms[i]).set_text(str(regs_vals[i]))
        except:
            # NOTE(review): silently ignores missing widgets/attributes.
            pass
        try:
            # control register
            regc_vals = iset.control.getRegs()
            regc_noms = iset.control.getRegNames()
            for i in range (16):
                self.wTree.get_widget(regc_noms[i]).set_text(str(regc_vals[i]))
            # the statusX86 registers are not refreshed yet
        except:
            pass
    def actualizarResultados(self):
        """Show the current ST(0) value and the last executed instruction."""
        nom_res = "resultados"
        self.wTree.get_widget(nom_res).set_text(str(iset.pila.getI(iset.pila.head())[0]))#(str(iset.res))
        nom_text = "lastInstruction"
        lastI = ""
        for el in self._last_instruction:
            lastI+=" "
            lastI+=str(el)
        self.wTree.get_widget(nom_text).set_text(lastI)
    # refresh the stack output widgets
    def actualizarPila(self):
        """Copy each stack slot (binary form, value and tag) into its widgets."""
        for i in range(8):
            reg=[None,None]
            nom_bin = "ST"+str(i)+"_bin"
            nom_rep = "ST"+str(i)+"_rep"
            nom_tag = "tag"+str(i)
            head = iset.pila.head()-i
            try:
                reg=iset.pila.getI(head)
            except:
                # fall back to an empty-register representation
                reg[0] = 00000000000000000000
                reg[1] = [1,1]
            self.wTree.get_widget(nom_bin).set_text(str(f2bin(reg[0])))
            self.wTree.get_widget(nom_rep).set_text(str(reg[0]))
            self.wTree.get_widget(nom_tag).set_text(str(reg[1]))
# Build the GUI and enter the GTK main loop.
if __name__ == "__main__":
    fpugui = FPU_GUI()
    gtk.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fpu_structure import *
from datahandling import *
import math
#TODO: flag updates for the various registers are still missing
#TODO: a lot of instructions are still missing
uesp = None  # last element popped from the stack
res = None  # result of the last executed operation
# TODO: res and uesp should be declared global inside every function
# (a script could add the 'global uesp, res' lines automatically)
pila = Pila()  # FPU register stack
control = ControlRegister()  # control word
status = StatusRegister()  # status word
pinout = Pinout()  # chip pinout model
statusX86 = StatusX86()  # mirrored x86 CPU flags
overflow = False  # pending overflow condition
underflow = False  # pending underflow condition
#pag 121
def F2XM1():
    """D9 F0 F2XM1: replace ST(0) with (2 ** ST(0)) - 1.

    Publishes the result in the module-level 'res' (the TODO at the top
    of the file asks for exactly this).
    """
    global res
    pila.push((2 ** pila.pop()[0]) - 1)
    res = pila.getI(pila.head())[0]
    return res
#pag 123
def FABS():
    """D9 E1 FABS: replace ST(0) with its absolute value.

    Publishes the result in the module-level 'res' (per the TODO at the
    top of the file) and mirrors a zero result in the x86 ZF flag.
    """
    global res
    pila.push(abs(pila.pop()[0]))
    res = pila.getI(pila.head())[0]
    if res == 0:
        statusX86._ZF = 1
    return res
# Operaciones de Adición
"""
Operaciones de adición
Opcode Instruction Description
D8 C0+i FADD ST(0), ST(i)Add ST(0) to ST(i) and store result in ST(0)
DC C0+i FADD ST(i), ST(0)Add ST(i) to ST(0) and store result in ST(i)
DE C0+i FADDP ST(i), ST(0) Add ST(0) to ST(i), store result in ST(i), and pop the
register stack
DE C1 FADDP Add ST(0) to ST(1), store result in ST(1), and pop the
"""
#FADD
def FADD(st0=0,sti=1):
    """D8 C0+i FADD: add ST(0) and ST(i); the result replaces ST(0).

    One of the two indices must be 0 and they must differ; otherwise an
    error is reported and None is returned.  Mirrors a zero result in
    the x86 ZF flag.  NOTE(review): the second operand is always taken
    from ST(1) regardless of sti -- confirm against callers.
    """
    if st0 == sti or (sti != 0 and st0 != 0):
        print "Error en FADD, st0"
        #raise()
    else:
        a = pila.getI(pila.head())[0]
        b = pila.getI(pila.head()-1)[0]
        res = a + b
        #print st0,";", sti
        pila.setI(pila.head(), res)#TODO: beware of errors here once values move to two's complement
        if res == 0 :
            statusX86._ZF=1
        return res
#FADDP
def FADDP(sti=1,st0=0):
    """DE C1 FADDP: ST(1) := ST(1) + ST(0); then pop the register stack.

    Returns the popped ST(0) value, or None when the operands are
    invalid.  Mirrors a zero result in the x86 ZF flag.
    """
    uesp = None
    if st0 == sti or (st0!= 0 and sti != 0):
        print "Error en FADDP, st0"
        #raise()
    else:
        a = pila.getI(pila.head())[0]
        b = pila.getI(pila.head()-1)[0]
        res = a + b
        pila.setI(pila.head()-1,res) #pila[1]=pila[1]+pila[0]
        uesp = pila.pop()[0] #beware here when the intermediate pop register changes
        #status.incTOP() #TODO: check there is no fault here
        if res == 0 :
            statusX86._ZF=1
        return uesp
"""
Opcode Instruction Description
D8 E0+i FSUB ST(0), ST(i) Subtract ST(i) from ST(0) and store result in ST(0)
DC E8+i FSUB ST(i), ST(0) Subtract ST(0) from ST(i) and store result in ST(i)
DE E8+i FSUBP ST(i), ST(0) Subtract ST(0) from ST(i), store result in ST(i), and pop
register stack
DE E9 FSUBP Subtract ST(0) from ST(1), store result in ST(1), and pop
register stack
"""
def FSUB(st0=0,sti=1):
    """D8 E0+i FSUB: ST(0) := ST(0) - ST(i); the result replaces ST(0).

    One of the two indices must be 0 and they must differ.  Mirrors a
    zero result in the x86 ZF flag.  NOTE(review): the subtrahend is
    always ST(1) regardless of sti -- confirm against callers.
    """
    if st0 == sti or (sti != 0 and st0 != 0):
        print "Error en FSUB, st0"
        #raise()
    else:
        a = pila.getI(pila.head())[0]
        b = pila.getI(pila.head()-1)[0]
        res = a - b
        pila.setI(pila.head(), res)#TODO: beware of errors here once values move to two's complement
        if res == 0 :
            statusX86._ZF=1
        return pila.getI(pila.head())[0]
def FSUBP(st0=0,sti=1):
    """DE E9 FSUBP: ST(1) := ST(1) - ST(0); then pop the register stack.

    Returns the popped ST(0) value (None when the operands are invalid).
    The original computed ST(0)-ST(1) and stored it in ST(0), so the
    result was immediately popped off the stack and lost.
    """
    uesp = None
    if st0 == sti or (st0 != 0 and sti != 0):
        print("Error en FSUBP, st0")
        #raise()
    else:
        a = pila.getI(pila.head())[0]      # ST(0), the subtrahend
        b = pila.getI(pila.head() - 1)[0]  # ST(1), the minuend
        res = b - a
        pila.setI(pila.head() - 1, res)  # store in ST(1): it survives the pop
        if res == 0:
            statusX86._ZF = 1
        uesp = pila.pop()[0]
        #status.incTOP() #TODO: check there is no fault here
    return uesp
#Operaciones de Signo
def FCHS():
    """D9 E0 FCHS: complement the sign of ST(0)."""
    negated = -1 * pila.getI(pila.head())[0]
    pila.setI(pila.head(), negated)
    return pila.getI(pila.head())[0]
def FNCLEX():
    """DB E2 FNCLEX: clear the exception flags without error checking."""
    # status._ES / status._EF would also be cleared on Pentium-class parts.
    for flag in ('_PE', '_UE', '_OE', '_ZE', '_DE', '_IE', '_B'):
        setattr(status, flag, 0)
#Operaciones de Comparación
"""
Opcode Instruction Description
D8 /2 FCOM m32real Compare ST(0) with m32real.
DC /2 FCOM m64real Compare ST(0) with m64real.
D8 D0+i FCOM ST(i) Compare ST(0) with ST(i).
D8 D1 FCOM Compare ST(0) with ST(1).
D8 /3 FCOMP m32real Compare ST(0) with m32real and pop register stack.
DC /3 FCOMP m64real Compare ST(0) with m64real and pop register stack.
D8 D8+i FCOMP ST(i) Compare ST(0) with ST(i) and pop register stack.
D8 D9 FCOMP Compare ST(0) with ST(1) and pop register stack.
DE D9 FCOMPP Compare ST(0) with ST(1) and pop register stack twice.
"""
def FCOM(sti):
    """D8 D0+i FCOM: compare ST(0) with ST(i) and set the condition code.

    C3/C2/C0 = 0/0/0 when ST(0) > ST(i), 0/0/1 when ST(0) < ST(i),
    1/0/0 when equal, 1/1/1 when unordered.  C1 is left untouched.
    """
    #if 32 bits => 32-bit operation
    #else if 64 bits => 64-bit operation
    #else: invalid operand
    c=status.getC()
    if pila.getI(pila.head())[0] > pila.getI(pila.head()-sti)[0]:
        c[0]= 0
        c[2]= 0
        c[3]= 0
    elif pila.getI(pila.head())[0] < pila.getI(pila.head()-sti)[0]:
        c[0]= 1
        c[2]= 0
        c[3]= 0
    elif pila.getI(pila.head())[0] == pila.getI(pila.head()-sti)[0]:
        c[0]= 0
        c[2]= 0
        c[3]= 1
    else:
        # unordered (e.g. incomparable operands)
        c[0]= 1
        c[2]= 1
        c[3]= 1
    status.setC(c)
def FCOMP(sti):
    """D8 D8+i FCOMP: FCOM(sti), then pop the register stack.

    NOTE(review): unlike most instructions in this file, this one really
    calls status.incTOP(); elsewhere that call is commented out -- verify
    which behaviour is intended.
    """
    FCOM(sti)
    uesp = pila.pop()[0]
    res = uesp
    status.incTOP() #TODO: check there is no fault here
def FCOMPP():
    """DE D9 FCOMPP: compare ST(0) with ST(1), then pop the stack twice."""
    FCOM(1)
    uesp = pila.pop()[0] #first pop
    status.incTOP() #TODO: check there is no fault here
    uesp = pila.pop()[0] #second pop, required
    res = uesp
    status.incTOP() #TODO: check there is no fault here
#Operaciones sobre st0
def FCOS():
caux = status.getC()
if abs(pila.getI(pila.head())[0]) > (2**63):
caux[2]=1
else:
caux[2]=0
pila.push(math.cos(pila.pop()[0]))
if pila.getI(pila.head())[0] == 0 :
statusX86._ZF=1
status.setC(caux)
res =pila.getI(pila.head())[0]
return res
"""
Opcode Instruction Description
D9 FE FSIN Replace ST(0) with its sine.
"""
def FSIN():
caux = status.getC()
if abs( pila.getI(pila.head())[0]) > (2**63):
caux[2]=1
else:
caux[2]=0
pila.push(math.sin(pila.pop()[0]))
if pila.getI(pila.head())[0] == 0 :
statusX86._ZF=1
status.setC(caux)
res =pila.getI(pila.head())[0]
return res
"""
Opcode Instruction Description
D9 FB FSINCOS Compute the sine and cosine of ST(0); replace ST(0) with
the sine, and push the cosine onto the register stack.
"""
def FSINCOS():
caux = status.getC()
aux= pila.getI(pila.head())[0]
if abs(aux) > (2**63):
caux[2]=1
else:
caux[2]=0
pila.push(math.sin(pila.pop()[0]))
pila.push(math.cos(aux))
status.decTOP()
if pila.getI(pila.head())[0] == 0 :
statusX86._ZF=1
status.setC(caux)
res =pila.getI(pila.head())[0]
return res
"""
Opcode Instruction Description
D9 FA FSQRT Calculates square root of ST(0) and stores the result in
ST(0)
"""
def FSQRT():
pila.push(math.sqrt(pila.pop()[0]))
if pila.getI(pila.head())[0] == 0 :
statusX86._ZF=1
res =pila.getI(pila.head())[0]
return res
"""
Opcode Instruction Description
D8 F0+i FDIV ST(0), ST(i) Divide ST(0) by ST(i) and store result in ST(0)
DC F8+i FDIV ST(i), ST(0) Divide ST(i) by ST(0) and store result in ST(i)
DE F8+i FDIVP ST(i), ST(0) Divide ST(i) by ST(0), store result in ST(i), and pop the
register stack
"""
def FDIV (st0,sti):
a = pila.getI(pila.head()-sti)[0]
b = pila.getI(pila.head())[0]
if a == 0:
status._ZE = 1
res = b / a
pila.setI(pila.head(),res)
if b == 0:
statusX86._ZF=1
return pila.getI(pila.head())[0]
def FDIVP (sti,st0):
FDIV(sti,st0)
uesp = pila.pop()[0] #primer pop
status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
#Operaciones de liberación de cabeza de pila
def FFREE():
pila.setI(pila.head(),None,[1,1])
res =pila.getI(pila.head())[0]
def FLD(num):
pila.push(num)
status.decTOP()
res =pila.getI(pila.head())[0]
"""
Opcode Instruction Description
D9 E8 FLD1 Push +1.0 onto the FPU register stack.
D9 E9 FLDL2T Push log210 onto the FPU register stack.
D9 EA FLDL2E Push log2e onto the FPU register stack.
D9 EB FLDPI Push π onto the FPU register stack.
D9 EC FLDLG2 Push log102 onto the FPU register stack.
D9 ED FLDLN2 Push loge2 onto the FPU register stack.
D9 EE FLDZ Push +0.0 onto the FPU register stack.
"""
def FLD1():
FLD(1.0)
#status.decTOP()
def FLDL2T():
FLD(math.log(10,2)) #log en base 2 de 10
#status.decTOP()
def FLDL2E():
FLD(math.log(math.e,2))#log en base 2 de e
#status.decTOP()
def FLDPI():
FLD(math.pi)
#status.decTOP()
def FLDLG2():
FLD(math.log10(2))
#status.decTOP()
def FLDLN2():
FLD(math.log(2,math.e))
#status.decTOP()
def FLDZ():
FLD(0.0)
#status.decTOP()
"""
Opcode Instruction Description
D9 /2 FST m32real Copy ST(0) to m32real
DD /2 FST m64real Copy ST(0) to m64real
DD D0+i FST ST(i) Copy ST(0) to ST(i)
D9 /3 FSTP m32real Copy ST(0) to m32real and pop register stack
DD /3 FSTP m64real Copy ST(0) to m64real and pop register stack
DB /7 FSTP m80real Copy ST(0) to m80real and pop register stack
DD D8+i FSTP ST(i) Copy ST(0) to ST(i) and pop register stack
"""
def FST(mreal):
uesp= pila.getI(pila.head())[0]
res =uesp
return uesp
def FSTP(mreal):
uesp= pila.pop()[0]
status.incTOP() #TODO, revisar si no hay fallo acá
res = uesp
return uesp
#incrementa TOP de status
def FINCSTP():
status.incTOP()
#Multiplicación
"""
Opcode Instruction Description
D8 /1 FMUL m32real Multiply ST(0) by m32real and store result in ST(0)
DC /1 FMUL m64real Multiply ST(0) by m64real and store result in ST(0)
D8 C8+i FMUL ST(0), ST(i) Multiply ST(0) by ST(i) and store result in ST(0)
DC C8+i FMUL ST(i), ST(0) Multiply ST(i) by ST(0) and store result in ST(i)
DE C8+i FMULP ST(i), ST(0) Multiply ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE C9 FMULP Multiply ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /1 FIMUL m32int Multiply ST(0) by m32int and store result in ST(0)
DE /1 FIMUL m16int Multiply ST(0) by m16int and store result in ST(0)
"""
def FMUL (st0=0,sti=1):
a = pila.getI(pila.head()-sti)[0]
b = pila.getI(pila.head())[0]
res = a * b
pila.setI(pila.head(),res)
if res == 0 :
statusX86._ZF=1
return pila.getI(pila.head())[0]
def FMULP (st0,sti):
FMUL(st0,sti)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
#No Operation
def FNOP():
pass
"""
Opcode Instruction Description
D9 F3 FPATAN Replace ST(1) with arctan(ST(1)/ST(0)) and pop the register stack
"""
def FPATAN():
pila.setI(1,math.atan(pila.getI(1)[0]/ pila.getI(pila.head())[0]))
uesp=pila.pop()[0]
status.incTOP() #TODO, revisar si no hay fallo acá
if uesp == 0 :
statusX86._ZF=1
res = uesp
return uesp
"""
Opcode Instruction Clocks Description
D9 F2 FPTAN 17-173 Replace ST(0) with its tangent and push 1
onto the FPU stack.
"""
def FPTAN():
caux=status.getC()
if pila.getI(pila.head()) < 2**63:
caux[2]=0
status.setC(caux)
pila.setI(pila.head(),math.tan( pila.getI(pila.head())))
if pila.getI(pila.head())[0] == 0 :
statusX86._ZF=1
FLD1()
status.decTOP()
else:
caux[2]=1
status.setC(caux)
print "Operando fuera de rango"
"""
Opcode Instruction Description
D9 FC FRNDINT Round ST(0) to an integer.
"""
def FRNDINT():
pila.push(int(round(pila.pop()[0])))
res =pila.getI(pila.head())[0]
def FSCALE():
    """FSCALE (D9 FD): scale ST(0) by ST(1), i.e. ST(0) <- ST(0) * 2**ST(1)."""
    # BUG FIX: getI returns a (value, tag) pair; the original multiplied and
    # exponentiated the pairs themselves, raising TypeError.  Index with [0]
    # as every other instruction in this module does.
    pila.setI(pila.head(), pila.getI(pila.head())[0] * (2 ** pila.getI(1)[0]))
    res = pila.getI(pila.head())[0]
    #TODO, set flags
"""
Opcode Instruction Description
D9 C8+i FXCH ST(i) Exchange the contents of ST(0) and ST(i)
D9 C9 FXCH Exchange the contents of ST(0) and ST(1)
"""
def FXCH(sti):
    # FXCH (D9 C8+i): exchange the contents of ST(0) and ST(sti).
    # getI returns a (value, tag) pair; both halves are swapped via setI.
    aux = pila.getI(pila.head()-sti)
    pila.setI(pila.head()-sti, pila.getI(pila.head())[0], pila.getI(pila.head())[1])
    pila.setI(pila.head(),aux[0],aux[1])
    # NOTE(review): `res` is computed but never used or returned — looks like
    # a leftover from the pattern used by neighbouring instructions; confirm
    # before removing.
    res =pila.getI(pila.head())[0]
"""
Opcode Instruction Description
D9 F1 FYL2X Replace ST(1) with (ST(1) ∗ log2ST(0)) and pop the
register stack
"""
def FYL2X():
    """FYL2X (D9 F1): ST(1) <- ST(1) * log2(ST(0)), then pop the stack."""
    # BUG FIX: take the raw value with [0] (math.log previously received a
    # (value, tag) pair) and multiply by ST(1) as the opcode table above
    # specifies (the multiply was missing).
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0], 2))
    uesp = pila.pop()[0]
    status.incTOP() #TODO, check whether this is right
    return uesp
"""
Opcode Instruction Description
D9 F9 FYL2XP1 Replace ST(1) with ST(1) * log2(ST(0) + 1.0) and pop the
register stack
"""
def FYL2XP1():
    """FYL2XP1 (D9 F9): ST(1) <- ST(1) * log2(ST(0) + 1.0), then pop.

    BUG FIX: this was previously (mis)named FYL2X, silently shadowing the
    real FYL2X above, and computed log2(ST(0)) + 1 instead of
    log2(ST(0) + 1.0); it also lacked the ST(1) multiply.
    """
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0] + 1.0, 2))
    uesp = pila.pop()[0]
    status.incTOP() #TODO, check whether this is right
    return uesp
#Si es llamado como ejecutable, entonces decir que esto es una librería del set de instrucción de la fpu 8087, mostrar la doc y salir.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fpu_structure import *
from datahandling import *
import math
#TODO, faltan agregar las modificaciones que se hacen a las banderas de los diferentes registros
#TODO, faltan un montón de instrucciones
uesp = None #last element popped off the stack ("ultimo elemento sacado de pila")
pila = Pila() #the FPU register stack
control = ControlRegister() #FPU control word
status = StatusRegister() #FPU status word (C0-C3 bits, TOP, exception flags)
pinout = Pinout() #modeled chip pinout
statusX86 = StatusX86() #x86 CPU flags mirrored by some instructions (ZF, CF, PF)
overflow = False #NOTE(review): never written in this chunk — confirm it is used elsewhere
underflow = False #NOTE(review): never written in this chunk — confirm it is used elsewhere
#pag 121
def F2XM1():
pila.push((2**pila.pop()[0] )-1)
#pag 123
def FABS():
pila.push(abs(pila.pop()[0]))
# Operaciones de Adición
"""
Operaciones de adición
Opcode Instruction Description
D8 /0 FADD m32real Add m32real to ST(0) and store result in ST(0)
DC /0 FADD m64real Add m64real to ST(0) and store result in ST(0)
D8 C0+i FADD ST(0), ST(i) Add ST(0) to ST(i) and store result in ST(0)
DC C0+i FADD ST(i), ST(0) Add ST(i) to ST(0) and store result in ST(i)
DE C0+i FADDP ST(i), ST(0) Add ST(0) to ST(i), store result in ST(i), and pop the
register stack
DE C1 FADDP Add ST(0) to ST(1), store result in ST(1), and pop the
register stack
DA /0 FIADD m32int Add m32int to ST(0) and store result in ST(0)
DE /0 FIADD m16int Add m16int to ST(0) and store result in ST(0)
"""
#FADD
def FADD(self, *args):
assert 1 <= len(args) <= 2
st0 = args[0]
sti = args[1]
if len(args) == 2:
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FADD, st0"
#raise()
else:
#print st0,";", sti
pila.setI(pila.head(), pila.getI(pila.head())[0]+pila.getI(1)[0])#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
elif len(args) == 1:
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
#aux = pila.pop()[0]
#print "num=", num
pila.push(pila.pop()[0]+args[0])
else:
print "Error de argumentos", args
#FADDP
def FADDP():
pila.setI(1,pila.getI(1)[0]+ pila.getI(pila.head())[0]) #pila[1]=pila[1]+pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
return uesp
def FADDP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FADDP, st0"
#raise()
else:
pila.setI(1,pila.getI(1)[0]+ pila.getI(pila.head())[0]) #pila[1]=pila[1]+pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FIADD(num): #operación entera
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(pila.pop()[0]+num)
"""
Opcode Instruction Description
D8 /4 FSUB m32real Subtract m32real from ST(0) and store result in ST(0)
DC /4 FSUB m64real Subtract m64real from ST(0) and store result in ST(0)
D8 E0+i FSUB ST(0), ST(i) Subtract ST(i) from ST(0) and store result in ST(0)
DC E8+i FSUB ST(i), ST(0) Subtract ST(0) from ST(i) and store result in ST(i)
DE E8+i FSUBP ST(i), ST(0) Subtract ST(0) from ST(i), store result in ST(i), and pop
register stack
DE E9 FSUBP Subtract ST(0) from ST(1), store result in ST(1), and pop
register stack
DA /4 FISUB m32int Subtract m32int from ST(0) and store result in ST(0)
DE /4 FISUB m16int Subtract m16int from ST(0) and store result in ST(0)
"""
#FSUB
def FSUB(num):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(pila.pop()[0]-num)
'''
def FSUB(m64real)
pass
'''
def FSUB(st0=0,sti=0):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FSUB, st0"
#raise()
else:
pila.setI(pila.head(), pila.getI(pila.head())[0]-pila.getI(1)[0])#pila[0] = pila[st0] - pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
#FSUBP
def FSUBP():
pila.setI(1,pila.getI(1)[0]- pila.getI(pila.head())[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FSUBP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBP, st0"
#raise()
else:
pila.setI(1,pila.getI(1)[0]- pila.getI(pila.head())[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FISUB(num): #operación entera
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(pila.pop()[0]-num)
"""
Opcode Instruction Description
D8 /5 FSUBR m32real Subtract ST(0) from m32real and store result in ST(0)
DC /5 FSUBR m64real Subtract ST(0) from m64real and store result in ST(0)
D8 E8+i FSUBR ST(0), ST(i) Subtract ST(0) from ST(i) and store result in ST(0)
DC E0+i FSUBR ST(i), ST(0) Subtract ST(i) from ST(0) and store result in ST(i)
DE E0+i FSUBRP ST(i), ST(0) Subtract ST(i) from ST(0), store result in ST(i), and pop
register stack
DE E1 FSUBRP Subtract ST(1) from ST(0), store result in ST(1), and pop
register stack
DA /5 FISUBR m32int Subtract ST(0) from m32int and store result in ST(0)
DE /5 FISUBR m16int Subtract ST(0) from m16int and store result in ST(0)
"""
#FSUBR
def FSUBR(num):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(num - pila.pop()[0])
'''
def FSUBR(m64real)
pass
'''
def FSUBR(st0=0,sti=0):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FSUBR, st0"
#raise()
else:
pila.setI(pila.head(),pila.getI(1)[0]- pila.getI(pila.head())[0])#pila[0] = pila[st0] - pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
#FSUBRP
def FSUBRR():
pila.setI(1, pila.getI(pila.head())[0]-pila.getI(1)[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FSUBRP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBRP, st0"
#raise()
else:
pila.setI(1, pila.getI(pila.head())[0]-pila.getI(1)[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FISUBR(num): #operación entera
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(num-pila.pop()[0])
#Operaciones de BCD
def FBLD(bcd): #convertir bcd a real y hacerle push
#numreal = bcd
#acá hay que convertirlo
#acá se lo empuja
pila.push(BCD2dec(bcd))
def FBSTP(bcd):
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
#Operaciones de Signo
def FCHS():
pila.setI(pila.head(),-1* pila.getI(pila.head())[0])
#Operaciones de Registros (no de pila)
def FCLEX():
#TODO check first for and handles any pending unmasked floating-point exceptions before cleaning
#clean flags
status._PE=0
status._UE=0
status._OE=0
status._ZE=0
status._DE=0
status._IE=0
# status._ES=0 # pentium processors
# status._EF=0 # pentium processors
status._B=0
def FNCLEX():
#clean flags without checking
status._PE=0
status._UE=0
status._OE=0
status._ZE=0
status._DE=0
status._IE=0
# status._ES=0 # pentium processors
# status._EF=0 # pentium processors
status._B=0
#Operaciones de Movimientos condicionales (pag 137)
def FCMOVB(sti):
if statusX86._CF:
pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
pila.delI(pila.head()-sti)
def FCMOVE(sti):
    # FCMOVE: move ST(sti) into ST(0) when ZF is set.
    # BUG FIX: `sti` was referenced without being a parameter (NameError);
    # the signature now matches FCMOVB above.
    if statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVBE(sti):
    # FCMOVBE: move ST(sti) into ST(0) when CF or ZF is set.
    # BUG FIXES: missing `sti` parameter, and ZF was read from the FPU
    # status word (`status`) instead of the x86 flags (`statusX86`).
    if statusX86._CF or statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVU(sti):
    # FCMOVU: move ST(sti) into ST(0) when PF is set.  BUG FIX: missing `sti`.
    if statusX86._PF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNB(sti):
    # FCMOVNB: move ST(sti) into ST(0) when CF is clear.  BUG FIX: missing `sti`.
    if not statusX86._CF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNE(sti):
    # FCMOVNE: move ST(sti) into ST(0) when ZF is clear.  BUG FIX: missing `sti`.
    if not statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNBE(sti):
    # FCMOVNBE: move ST(sti) into ST(0) when both CF and ZF are clear.
    # BUG FIX: missing `sti`.
    if statusX86._CF == 0 and statusX86._ZF == 0:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNU(sti):
    # FCMOVNU: move ST(sti) into ST(0) when PF is clear.  BUG FIX: missing `sti`.
    if not statusX86._PF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
#Operaciones de Comparación
"""
Opcode Instruction Description
D8 /2 FCOM m32real Compare ST(0) with m32real.
DC /2 FCOM m64real Compare ST(0) with m64real.
D8 D0+i FCOM ST(i) Compare ST(0) with ST(i).
D8 D1 FCOM Compare ST(0) with ST(1).
D8 /3 FCOMP m32real Compare ST(0) with m32real and pop register stack.
DC /3 FCOMP m64real Compare ST(0) with m64real and pop register stack.
D8 D8+i FCOMP ST(i) Compare ST(0) with ST(i) and pop register stack.
D8 D9 FCOMP Compare ST(0) with ST(1) and pop register stack.
DE D9 FCOMPP Compare ST(0) with ST(1) and pop register stack twice.
"""
def FCOM(num=None):
    """FCOM: compare ST(0) with ST(1) (no argument) or with a memory
    operand `num`, setting the C0/C2/C3 condition bits.

    BUG FIXES: the original defined FCOM() and FCOM(num) separately; the
    second def shadowed the first, making the no-argument form unreachable.
    They are merged with `num=None` meaning "compare against ST(1)".
    Also, name mangling only happens inside a class body, so `pila.__pst`
    at module level raised AttributeError; the mangled names are spelled
    out explicitly.
    """
    if num is None:
        #TODO: 32-bit vs 64-bit operand widths are not modeled
        FCOMST(1)
        return
    # Memory-operand form.  This deliberately reaches inside the stack's
    # private lists to append a scratch slot without tripping the
    # stack-depth checks.
    pila._Pila__pst.append(num)       # the value to compare against
    pila._Pila__ptag.append([0, 0])   # tag entry, just to be safe
    FCOMST(1)
    # Remove the scratch slot again.
    pila._Pila__pst.pop()
    pila._Pila__ptag.pop()
def FCOMST(sti):
    # Compare ST(0) with ST(sti) and set the C0/C2/C3 condition bits:
    # 0,0,0 for greater; C0=1 for less; C3=1 for equal; 1,1,1 unordered.
    #TODO: 32-bit vs 64-bit operand widths are not modeled
    c=status.getC()
    if pila.getI(pila.head())[0] > pila.getI(pila.head()-sti)[0]:
        c[0]= 0
        c[2]= 0
        c[3]= 0
    elif pila.getI(pila.head())[0] < pila.getI(pila.head()-sti)[0]:
        c[0]= 1
        c[2]= 0
        c[3]= 0
    elif pila.getI(pila.head())[0] == pila.getI(pila.head()-sti)[0]:
        c[0]= 0
        c[2]= 0
        c[3]= 1
    else:
        c[0]= 1
        c[2]= 1
        c[3]= 1
    status.setC(c)
def FCOMP(num=None):
    """FCOMP: like FCOM, then pop the register stack.

    BUG FIX: FCOMP() and FCOMP(num) were separate defs, so the second
    shadowed the first and FCOMP() raised TypeError; merged with
    `num=None` meaning "compare with ST(1)".
    """
    if num is None:
        FCOMST(1)
    else:
        FCOM(num)
    uesp = pila.pop()[0]
    #status.incTOP() #TODO: check for an off-by-one here
def FCOMPST(sti):
    # Compare ST(0) with ST(sti), then pop the register stack.
    FCOMST(sti)
    uesp = pila.pop()[0]
    #status.incTOP() #TODO: check for an off-by-one here
def FCOMPP():
    # Compare ST(0) with ST(1), then pop the register stack twice.
    FCOMST(1)
    uesp = pila.pop()[0]  # first pop
    #status.incTOP() #TODO: check for an off-by-one here
    uesp = pila.pop()[0]  # second pop, required
    #status.incTOP() #TODO: check for an off-by-one here
#Operaciones de Comparación de enteros
"""
Opcode Instruction Description
DB F0+i FCOMI ST, ST(i) Compare ST(0) with ST(i) and set status flags accordingly
DF F0+i FCOMIP ST, ST(i) Compare ST(0) with ST(i), set status flags accordingly, and
pop register stack
DB E8+i FUCOMI ST, ST(i) Compare ST(0) with ST(i), check for ordered values, and
set status flags accordingly
DF E8+i FUCOMIP ST, ST(i) Compare ST(0) with ST(i), check for ordered values, set
status flags accordingly, and pop register stack
"""
def FCOMI(sti):
    """FCOMI: compare ST(0) with ST(sti) and set the x86 CF/PF/ZF flags.

    CF,PF,ZF = 0,0,0 for greater; 1,0,0 for less; 0,0,1 for equal;
    1,1,1 otherwise (unordered).
    """
    #TODO: 32-bit vs 64-bit operand widths are not modeled
    top = pila.getI(pila.head())[0]
    other = pila.getI(pila.head() - sti)[0]
    if top > other:
        flags = (0, 0, 0)
    elif top < other:
        flags = (1, 0, 0)
    elif top == other:
        flags = (0, 0, 1)
    else:
        flags = (1, 1, 1)
    statusX86._CF, statusX86._PF, statusX86._ZF = flags
def FCOMIP(sti):
FCOMI(sti)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
def FUCOMI(sti):
#TODO, check for ordered values
FCOMI(sti)
def FUCOMIP(sti):
#TODO, check for ordered values
FCOMIP(sti)
"""
Opcode Instruction Description
D9 E4 FTST Compare ST(0) with 0.0.
"""
def FTST():
FCOM(0.0)
"""
Opcode Instruction Description
DD E0+i FUCOM ST(i) Compare ST(0) with ST(i)
DD E1 FUCOM Compare ST(0) with ST(1)
DD E8+i FUCOMP ST(i) Compare ST(0) with ST(i) and pop register stack
DD E9 FUCOMP Compare ST(0) with ST(1) and pop register stack
DA E9 FUCOMPP Compare ST(0) with ST(1) and pop register stack twice
"""
def FUCOM(sti=1):
    """FUCOM: unordered compare of ST(0) with ST(sti).

    BUG FIXES: FUCOM() and FUCOM(sti) were separate defs (the second
    shadowed the first), and the comparison went through FCOM, whose
    surviving definition treats its argument as a memory operand rather
    than a register index.  Merged with a default and routed through
    FCOMST, which takes a register index.
    """
    #TODO: the "check for ordered values" part of FUCOM is not modeled yet
    FCOMST(sti)
def FUCOMP(sti=1):
    # FUCOMP: unordered compare of ST(0) with ST(sti), then pop.
    # BUG FIX: the zero-argument def returned FUCOMP(1), which after the
    # shadowing def took effect would never be reached; merged with a
    # default instead.
    FCOMPST(sti)
def FUCOMPP():
    # FUCOMPP: unordered compare of ST(0) with ST(1), then pop twice.
    FUCOM()
    uesp = pila.pop()[0]
    status.incTOP() #TODO: check whether this is right
    uesp = pila.pop()[0]
    status.incTOP() #TODO: check whether this is right
#Operaciones sobre st0
def FCOS():
caux = status.getC()
if abs( pila.getI(pila.head())[0]) > (2**63):
caux[2]=1
status.setC(caux)
else:
caux[2]=0
status.setC(caux)
pila.push(math.cos(pila.pop()[0]))
"""
Opcode Instruction Description
D9 FE FSIN Replace ST(0) with its sine.
"""
def FSIN():
caux = status.getC()
if abs( pila.getI(pila.head())[0]) > (2**63):
caux[2]=1
status.setC(caux)
else:
caux[2]=0
status.setC(caux)
pila.push(math.sin(pila.pop()[0]))
"""
Opcode Instruction Description
D9 FB FSINCOS Compute the sine and cosine of ST(0); replace ST(0) with
the sine, and push the cosine onto the register stack.
"""
def FSINCOS():
caux = status.getC()
aux= pila.getI(pila.head())[0]
if abs(aux) > (2**63):
caux[2]=1
status.setC(caux)
else:
caux[2]=0
status.setC(caux)
pila.push(math.sin(pila.pop()[0]))
pila.push(math.cos(aux))
status.decTOP()
"""
Opcode Instruction Description
D9 FA FSQRT Calculates square root of ST(0) and stores the result in
ST(0)
"""
def FSQRT():
pila.push(math.sqrt(pila.pop()[0]))
def FDECSTP():
pila.decTOP()
#TODO, faltan realizar las operaciones sobre C1, el manual está incorrecto :S
#operaciones de división
"""
Opcode Instruction Description
D8 /6 FDIV m32real Divide ST(0) by m32real and store result in ST(0)
DC /6 FDIV m64real Divide ST(0) by m64real and store result in ST(0)
D8 F0+i FDIV ST(0), ST(i) Divide ST(0) by ST(i) and store result in ST(0)
DC F8+i FDIV ST(i), ST(0) Divide ST(i) by ST(0) and store result in ST(i)
DE F8+i FDIVP ST(i), ST(0) Divide ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE F9 FDIVP Divide ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /6 FIDIV m32int Divide ST(0) by m32int and store result in ST(0)
DE /6 FIDIV m16int Divide ST(0) by m64int and store result in ST(0)
"""
#NOTE(review): the three FDIV defs below shadow each other in Python; only
#the final two-argument version is actually callable.  Kept as-is pending a
#proper merge.
def FDIV(num):
    # ST(0) <- ST(0) / num  (unreachable: shadowed below)
    pila.setI(pila.head(), pila.getI(pila.head())[0]/num)
def FDIV (sti):
    # ST(0) <- ST(0) / ST(sti)  (unreachable: shadowed below)
    pila.setI(pila.head(), pila.getI(pila.head())[0]/ pila.getI(pila.head()-sti)[0])
def FDIV (sti,st0):
    # ST(sti) <- ST(sti) / ST(0)
    # BUG FIX: the destination index was the undefined name `i` (NameError);
    # it must be the stack slot of ST(sti).
    pila.setI(pila.head()-sti, pila.getI(pila.head()-sti)[0]/ pila.getI(pila.head())[0])
def FDIVP(sti=1, st0=0):
    """FDIVP: divide ST(sti) by ST(0), store in ST(sti), pop the stack.

    BUG FIX: merged the shadowed zero-argument and two-argument defs with
    defaults so FDIVP() keeps working.
    """
    FDIV(sti, st0)
    uesp = pila.pop()[0] # first pop
    #status.incTOP() #TODO: check for an off-by-one here
    return uesp
def FIDIV(num):
    # Integer divide: delegates to FDIV's memory-operand form (currently
    # shadowed -- see NOTE above).
    FDIV(num)
#Operaciones de división inversas
"""
Opcode Instruction Description
D8 /7 FDIVR m32real Divide m32real by ST(0) and store result in ST(0)
DC /7 FDIVR m64real Divide m64real by ST(0) and store result in ST(0)
D8 F8+i FDIVR ST(0), ST(i) Divide ST(i) by ST(0) and store result in ST(0)
DC F0+i FDIVR ST(i), ST(0) Divide ST(0) by ST(i) and store result in ST(i)
DE F0+i FDIVRP ST(i), ST(0) Divide ST(0) by ST(i), store result in ST(i), and pop the
register stack
DE F1 FDIVRP Divide ST(0) by ST(1), store result in ST(1), and pop the
register stack
DA /7 FIDIVR m32int Divide m32int by ST(0) and store result in ST(0)
DE /7 FIDIVR m16int Divide m64int by ST(0) and store result in ST(0)
"""
#NOTE(review): the three FDIVR defs below shadow each other in Python; only
#the final two-argument version is actually callable.
def FDIVR(num):
    # ST(0) <- num / ST(0)  (unreachable: shadowed below)
    pila.setI(pila.head(),num/ pila.getI(pila.head())[0])
def FDIVR (sti):
    # ST(0) <- ST(sti) / ST(0)  (unreachable: shadowed below)
    # BUG FIX: the source index was the undefined name `i` (NameError).
    pila.setI(pila.head(), pila.getI(pila.head()-sti)[0]/ pila.getI(pila.head())[0])
def FDIVR (sti,st0):
    # ST(sti) <- ST(0) / ST(sti)
    pila.setI(pila.head()-sti, pila.getI(pila.head())[0]/ pila.getI(pila.head()-sti)[0])
def FDIVPR(sti=1, st0=0):
    """FDIVRP: reverse divide ST(0) by ST(sti), store in ST(sti), then pop.

    BUG FIXES: the zero-argument def called FDIV instead of FDIVR, and was
    shadowed by the two-argument def; merged with defaults.
    """
    FDIVR(sti, st0)
    uesp = pila.pop()[0] # first pop
    #status.incTOP() #TODO: check for an off-by-one here
    return uesp
def FIDIVR(num):
    # Integer reverse divide: delegates to FDIVR's memory-operand form
    # (currently shadowed -- see NOTE above).
    FDIVR(num)
#Operaciones de liberación de cabeza de pila
def FFREE():
pila.setI(pila.head(),None,[1,1])
#Operaciones de comparación de enteros
"""
Opcode Instruction Description
DE /2 FICOM m16int Compare ST(0) with m16int
DA /2 FICOM m32int Compare ST(0) with m32int
DE /3 FICOMP m16int Compare ST(0) with m16int and pop stack register
DA /3 FICOMP m32int Compare ST(0) with m32int and pop stack register
"""
def FICOM(num):
FCOM(num)
def FICOMP(num):
FICOM(num)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
#Operaciones de carga de pila
"""
Opcode Instruction Description
DF /0 FILD m16int Push m16int onto the FPU register stack.
DB /0 FILD m32int Push m32int onto the FPU register stack.
DF /5 FILD m64int Push m64int onto the FPU register stack.
"""
def FILD(num):
status.decTOP()
pila.push(num)
"""
Opcode Instruction Description
D9 /0 FLD m32real Push m32real onto the FPU register stack.
DD /0 FLD m64real Push m64real onto the FPU register stack.
DB /5 FLD m80real Push m80real onto the FPU register stack.
D9 C0+i FLD ST(i) Push ST(i) onto the FPU register stack.
"""
def FLD(num):
pila.push(num)
status.decTOP()
def FLDST(sti): #¿esto es así? no es muy claro en el manual, pag 167
pila.push( pila.getI(pila.head()-sti))
status.decTOP()
"""
Opcode Instruction Description
D9 E8 FLD1 Push +1.0 onto the FPU register stack.
D9 E9 FLDL2T Push log210 onto the FPU register stack.
D9 EA FLDL2E Push log2e onto the FPU register stack.
D9 EB FLDPI Push π onto the FPU register stack.
D9 EC FLDLG2 Push log102 onto the FPU register stack.
D9 ED FLDLN2 Push loge2 onto the FPU register stack.
D9 EE FLDZ Push +0.0 onto the FPU register stack.
"""
def FLD1():
FLD(1.0)
def FLDL2T():
FLD(math.log(10,2)) #log en base 2 de 10
def FLDL2E():
FLD(math.log(math.e,2))#log en base 2 de e
def FLDPI():
FLD(math.pi)
def FLDLG2():
FLD(math.log10(2))
def FLDLN2():
FLD(math.log(2,math.e))
def FLDZ():
FLD(0.0)
"""
Opcode Instruction Description
D9 /5 FLDCW m2byte Load FPU control word from m2byte.
"""
def FLDCW(m2byte):
FLD(m2byte) #TODO, modelo de memoria, para poder cargar solo lo que hace falta
"""
Opcode Instruction Description
D9 /4 FLDENV m14/28byte Load FPU environment from m14byte or m28byte.
"""
def FLDENV(mbyte):
pass #TODO
#operaciones de extracción de stack
"""
Opcode Instruction Description
DF /2 FIST m16int Store ST(0) in m16int
DB /2 FIST m32int Store ST(0) in m32int
DF /3 FISTP m16int Store ST(0) in m16int and pop register stack
DB /3 FISTP m32int Store ST(0) in m32int and pop register stack
DF /7 FISTP m64int Store ST(0) in m64int and pop register stack
"""
def FIST(dirmem):
uesp = pila.getI(pila.head())[0]
#acá falta agregar un modelo de memoria RAM para poder cargar el valor donde corresponde
return uesp
def FISTP(dirmem):
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
#acá falta agregar un modelo de memoria RAM para poder cargar el valor donde corresponde
return uesp
"""
Opcode Instruction Description
D9 /2 FST m32real Copy ST(0) to m32real
DD /2 FST m64real Copy ST(0) to m64real
DD D0+i FST ST(i) Copy ST(0) to ST(i)
D9 /3 FSTP m32real Copy ST(0) to m32real and pop register stack
DD /3 FSTP m64real Copy ST(0) to m64real and pop register stack
DB /7 FSTP m80real Copy ST(0) to m80real and pop register stack
DD D8+i FSTP ST(i) Copy ST(0) to ST(i) and pop register stack
"""
def FST(mreal):
uesp= pila.getI(pila.head())[0]
return uesp
def FST_ST(i):
pila.setI(1, pila.getI(pila.head())[0], pila.getI(pila.head())[1])
def FSTP(mreal):
uesp= pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FSTP_ST(i):
FST_ST(i)
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
#incrementa TOP de status
def FINCSTP():
status.incTOP()
#Inicialización de la FPU
def FINIT():
#TODO, check for and handles any pending unmasked floating-point exceptions
FNINIT()
pass
def FNINIT():
#TODO, poner
# control en 1101111111 #037Fh
# TAG word en FFFFh
# los demás: status, data pointer instruction pointer, last instruction opcode, en 0 (cero)
pass
#Multiplicación
"""
Opcode Instruction Description
D8 /1 FMUL m32real Multiply ST(0) by m32real and store result in ST(0)
DC /1 FMUL m64real Multiply ST(0) by m64real and store result in ST(0)
D8 C8+i FMUL ST(0), ST(i) Multiply ST(0) by ST(i) and store result in ST(0)
DC C8+i FMUL ST(i), ST(0) Multiply ST(i) by ST(0) and store result in ST(i)
DE C8+i FMULP ST(i), ST(0) Multiply ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE C9 FMULP Multiply ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /1 FIMUL m32int Multiply ST(0) by m32int and store result in ST(0)
DE /1 FIMUL m16int Multiply ST(0) by m16int and store result in ST(0)
"""
def FMUL(num):
pila.setI(pila.head(), pila.getI(pila.head())[0]*num)
def FMUL_ST (sti):
pila.setI(pila.head(), pila.getI(pila.head())[0]* pila.getI(pila.head()-sti)[0])
def FMUL (sti,st0):
pila.setI(pila.head()-sti, pila.getI(pila.head()-sti)[0]* pila.getI(pila.head())[0])
def FMULP():
FMUL(1,0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FMULP (sti,st0):
FMUL(sti,st0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FIMUL(num):
FMUL(num)
#No Operation
def FNOP():
pass
"""
Opcode Instruction Description
D9 F3 FPATAN Replace ST(1) with arctan(ST(1)/ST(0)) and pop the register stack
"""
def FPATAN():
pila.setI(1,math.atan(pila.getI(1)[0]/ pila.getI(pila.head())[0]))
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
"""
Opcode Instruction Description
D9 F8 FPREM Replace ST(0) with the remainder obtained from
dividing ST(0) by ST(1)
"""
def FPREM():
pila.setI(pila.head(), pila.getI(pila.head())[0]%pila.getI(1)[0])
#TODO, setear las variables status._C # pag 182
"""
Opcode Instruction Description
D9 F5 FPREM1 Replace ST(0) with the IEEE remainder obtained from
dividing ST(0) by ST(1)
"""
def FPREM1():
FPREM() #TODO, cuando se cambien los modelos, esto hay que cambiarlo para que cumpla con la IEEE que ahora no cumple
"""
Opcode Instruction Clocks Description
D9 F2 FPTAN 17-173 Replace ST(0) with its tangent and push 1
onto the FPU stack.
"""
def FPTAN():
caux=status.getC()
if pila.getI(pila.head()) < 2**63:
caux[2]=0
status.setC(caux)
pila.setI(pila.head(),math.tan( pila.getI(pila.head())))
status.decTOP()
FLD1()
else:
caux[2]=1
status.setC(caux)
print "Operando fuera de rango"
"""
Opcode Instruction Description
D9 FC FRNDINT Round ST(0) to an integer.
"""
def FRNDINT():
pila.push(int(round(pila.pop()[0])))
"""
Opcode Instruction Description
DD /4 FRSTOR m94/108byte Load FPU state from m94byte or m108byte.
Restaura el estado de la FPU desde memoria
"""
def FRSTOR():
pass #TODO, pag190
"""
Opcode Instruction Description
9B DD /6 FSAVE m94/108byte Store FPU state to m94byte or m108byte after checking for
pending unmasked floating-point exceptions. Then re-
initialize the FPU.
DD /6 FNSAVE* m94/108byte Store FPU environment to m94byte or m108byte without
checking for pending unmasked floating-point exceptions.
Then re-initialize the FPU.
Guarda el estado de la FPU en la dirección memoria dada
"""
def FSAVE(m94_108byte):
    # Store the FPU state to memory after checking for pending unmasked
    # floating-point exceptions, then re-initialize the FPU.
    pass #TODO
def FNSAVE(m94_108byte):
    # BUG FIX: this second stub was also named FSAVE, shadowing the first;
    # per the opcode table above (DD /6) it is FNSAVE, the no-wait variant
    # that skips the exception check.
    pass #TODO
"""
Opcode Instruction Description
9B D9 /7 FSTCW m2byte Store FPU control word to m2byte after checking for
pending unmasked floating-point exceptions.
D9 /7 FNSTCW* m2byte Store FPU control word to m2byte without checking for
pending unmasked floating-point exceptions.
"""
def FSTCW(m2byte):
pass
def FNSTCW(m2byte):
pass
"""
Opcode Instruction Description
9B D9 /6 FSTENV m14/28byte Store FPU environment to m14byte or m28byte after
checking for pending unmasked floating-point exceptions.
Then mask all floating-point exceptions.
D9 /6 FNSTENV* m14/28byte Store FPU environment to m14byte or m28byte without
checking for pending unmasked floating-point exceptions.
Then mask all floating-point exceptions.
"""
def FSTENV(m14_28byte):
pass
def FNSTENV(m14_28byte):
pass
"""
Opcode Instruction Description
9B DD /7 FSTSW m2byte Store FPU status word at m2byte after checking for
pending unmasked floating-point exceptions.
9B DF E0 FSTSW AX Store FPU status word in AX register after checking for
pending unmasked floating-point exceptions.
DD /7 FNSTSW* m2byte Store FPU status word at m2byte without checking for
pending unmasked floating-point exceptions.
DF E0 FNSTSW* AX Store FPU status word in AX register without checking for
pending unmasked floating-point exceptions.
"""
def FSTSW(m2byte=None):
    # Store the FPU status word to m2byte, or to AX when called without an
    # argument, after checking for pending unmasked exceptions.
    # BUG FIX: the m2byte and AX variants were two defs of the same name
    # (the second shadowed the first); merged with an optional argument.
    pass
def FNSTSW(m2byte=None):
    # No-wait variant of FSTSW (no exception check).  Same merge as above.
    pass
"""
Opcode Instruction Description
D9 FD FSCALE Scale ST(0) by ST(1).
"""
def FSCALE():
pila.setI(pila.head(), pila.getI(pila.head())*(2**pila.getI(1)))
#TODO, set flags
def FWAIT():
pass
"""
Opcode Instruction Description
D9 E5 FXAM Classify value or number in ST(0)
"""
#TODO
def FXAM():
"""
C1 ← sign bit of ST; (* 0 for positive, 1 for negative *)
CASE (class of value or number in ST(0)) OF
Unsupported:C3, C2, C0 ← 000;
NaN: C3, C2, C0 ← 001;
Normal: C3, C2, C0 ← 010;
Infinity: C3, C2, C0 ← 011;
Zero: C3, C2, C0 ← 100;
Empty: C3, C2, C0 ← 101;
Denormal: C3, C2, C0 ← 110;
ESAC;
"""
pass
"""
Opcode Instruction Description
D9 C8+i FXCH ST(i) Exchange the contents of ST(0) and ST(i)
D9 C9 FXCH Exchange the contents of ST(0) and ST(1)
"""
def FXCH(sti=1):
    """FXCH: exchange the contents (value and tag) of ST(0) and ST(sti).

    BUG FIX: the zero-argument and one-argument forms were two defs of the
    same name, so FXCH() raised TypeError after the second def shadowed the
    first; merged with sti=1 (the D9 C9 no-operand encoding).
    """
    aux = pila.getI(pila.head()-sti)
    pila.setI(pila.head()-sti, pila.getI(pila.head())[0], pila.getI(pila.head())[1])
    pila.setI(pila.head(), aux[0], aux[1])
"""
Opcode Instruction Description
D9 F4 FXTRACT Separate value in ST(0) into exponent and significand,
store exponent in ST(0), and push the significand onto the
register stack.
"""
def FXTRACT():
pass #TODO
"""
Opcode Instruction Description
D9 F1 FYL2X Replace ST(1) with (ST(1) ∗ log2ST(0)) and pop the
register stack
"""
def FYL2X():
pila.setI(1,math.log( pila.getI(pila.head()),2))
uesp=pila.pop()[0]
#status.incTOP() #TODO, ver si está bien esto
return uesp
"""
Opcode Instruction Description
D9 F9 FYL2XP1 Replace ST(1) with ST(1) ∗ log2(ST(0) + 1.0) and pop the
register stack
"""
def FYL2X():
pila.setI(1,math.log( pila.getI(pila.head()),2)+1)
uesp=pila.pop()[0]
#status.incTOP() #TODO, ver si está bien esto
return uesp
#Si es llamado como ejecutable, entonces decir que esto es una librería del set de instrucción de la fpu 8087, mostrar la doc y salir.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de instruction_set.py
"""
import random
#módulo de Tests Unitarios
import unittest
#importa el módulo a testear:
from reduced_instruction_set import *
"""
Test FLD
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Observar que el dato que se introduce mediante FLD de instruction_set.py
se corresponda con los datos que se almacenan en la pila
Dependencias:
Pila
StatusRegister
Método:
Se crean valores enteros positivos, cero y negativos los que se introducirán
mediante FLD.
Se comprueba que el valor almacenado corresponda con el introducido
Esperado:
Test OK
"""
class TestFLD(unittest.TestCase):
    """FLD must leave the loaded value on top of the register stack."""
    def _load_and_check(self, value):
        # Push via FLD, then read the head of the stack back.
        FLD(value)
        self.assertEqual(pila.getI(pila.head())[0], value)
    def testFLDpos(self):
        self._load_and_check(111111)   # a positive value
    def testFLDneg(self):
        self._load_and_check(-111111)  # a negative value
    def testFLDcero(self):
        self._load_and_check(0)        # zero
#Test ABS
#
#class TestFABS(unittest.TestCase):
#    pass
#Test FADD
class TestFADD(unittest.TestCase):
    """FADD must add its operands and leave the sum on the stack head."""
    def testFADD_1(self):
        """Two-register form: FADD(0, 1) adds ST(0) and ST(1)."""
        a = random.random()
        b = random.random()
        c = a + b
        pila.push(a)
        pila.push(b)
        FADD(0,1)
        self.assertEqual(pila._pst[pila.head()],c)
    def testFADD_2(self):
        """Immediate form: FADD(b) adds b to ST(0).

        Fix: this method was also named testFADD_1, so it shadowed the
        test above and the two-register form was never exercised.
        """
        a = random.random()
        b = random.random()
        c = a + b
        pila.push(a)
        FADD(b)
        self.assertEqual(pila._pst[pila.head()],c)
if __name__ == '__main__':
    unittest.main() #run every TestCase in this module
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#import re
import copy
#import instruction_set as iset #modificado por el momento
import reduced_instruction_set as iset
from fpu_structure import *
uesp = None #last element popped from the stack
uesp_temp = None #undo snapshot of uesp
pila_temp = None #undo snapshot of the register stack
control_temp = None #undo snapshot of the control register
status_temp = None #undo snapshot of the status register
pinout_temp = None #undo snapshot of the pinout (not restored yet - see undo())
statusX86_temp = None #undo snapshot of the x86 flags (not restored yet)
def parse(text):
    """Split console text into command lists, one per line.

    Each line is whitespace-tokenised; tokens that look like decimal
    integers become ints, everything else stays a string (the FPU layer
    decides what to do with it).
    """
    def _coerce(token):
        try:
            return int(token)  # only integer literals are supported for now
        except ValueError:
            return token
    return [[_coerce(tok) for tok in line.split()] for line in text.splitlines()]
def execute_command(commlista):
    """Run one parsed command: commlista = [NAME, arg1, arg2, ...].

    Snapshots the FPU state first (so undo() can roll back), then builds
    the call "iset.NAME(arg1, ...)" as a string and exec's it.
    NOTE(review): exec on user-typed console text executes arbitrary
    Python; acceptable for a local teaching tool, but worth flagging.
    """
    saveState()
    comm = commlista[0]
    params = commlista[1:]
    #trying a new approach: build the call as a text string
    paramline = "("
    i=0
    for p in params:
        if i>0:
            paramline+=", "
        paramline+=str(p)
        i+=1
    paramline += ")"
    commline = "iset."+comm + paramline
    try:
        #iset.__getattribute__(comm)(params)
        #eval(comm)(p1,p2,p3...)
        exec commline
    except:
        #either the function 'comm' does not exist,
        #or the parameters 'params' are wrong
        print "línea incorrecta:",commline
def undo():
    """Restore the FPU state captured by the last saveState() call."""
    global uesp, uesp_temp, pila_temp, control_temp, status_temp
    # Fix: 'uesp' was missing from the global declaration, so the restored
    # value was only bound to a throwaway local.
    uesp = uesp_temp #last element popped from the stack
    iset.pila = pila_temp
    iset.control = control_temp
    iset.status = status_temp
    #iset.pinout = pinout_temp #pinout/statusX86 are not snapshotted yet
    #iset.statusX86 = statusX86_temp
def rebootFPU():
    """Reset every FPU structure to its power-on state: a fresh stack and
    re-initialised control/status/pinout/x86-status registers."""
    iset.pila = None
    iset.pila = Pila()
    iset.control.iniciar()
    iset.status.iniciar()
    iset.pinout.iniciar()
    iset.statusX86.iniciar()
def saveState():
    """Deep-copy the current FPU state so undo() can roll back one command."""
    global uesp_temp, pila_temp, control_temp, status_temp, pinout_temp, statusX86_temp
    # Fix: pinout_temp and statusX86_temp were assigned without being in the
    # global declaration, so the module-level snapshots were never updated.
    uesp_temp = uesp #last element popped from the stack
    pila_temp = copy.deepcopy(iset.pila)
    control_temp = copy.deepcopy(iset.control)
    status_temp = copy.deepcopy(iset.status)
    pinout_temp = copy.deepcopy(iset.pinout)
    statusX86_temp = copy.deepcopy(iset.statusX86)
def cleanState():
    """Drop the undo snapshot (pinout/statusX86 snapshots are currently
    not managed - they are commented out here as in save/undo)."""
    global uesp_temp, pila_temp, control_temp, status_temp
    uesp_temp = None #last element popped from the stack
    pila_temp = None
    control_temp = None
    status_temp = None
    #pinout_temp = None
    #statusX86_temp = None
#if invoked as a script:
#instantiate the required modules (nothing to do yet)
if __name__ == "__main__":
    pass
| Python |
# -*- coding: utf-8 -*-
"""
Tipos de Datos
"""
"""
BCD, será considerado como una lista ordenada de números enteros entre 0 y 9
Para convertirlo se pasará multiplicando
sea a = [1,2,3,4,5,6,7...] donde el último dígito es el más significativo y el primero el menos
sea b el número decimal, entonces
b=0
j=0
for i in a:
b+=i*(10**j)
j+=1
b ahora es el número en decimal
conversión de decimal a bcd
a = lista
b= número decimal
while c >0:
a.append(c%10)
c/=10
"""
def BCD2dec(bcd):
    """Return the integer encoded by a little-endian BCD digit list.

    bcd[0] is the least-significant digit; an empty list yields 0.
    """
    return sum(digit * 10 ** position for position, digit in enumerate(bcd))
def dec2BCD(dec):
    """Convert a non-negative integer to a little-endian BCD digit list.

    Returns [] for 0 (preserving the original behaviour).
    Fix: use floor division - the original 'dec/=10' is true division on
    Python 3, turning the digits into floats and breaking the loop;
    divmod keeps the Python 2 integer behaviour on both versions.
    """
    bcd = []
    while dec > 0:
        dec, digit = divmod(dec, 10)
        bcd.append(digit)
    return bcd
"""
representación binaria
arreglo de unos y ceros
[b0,b1,b2,b3 .... ]
"""
def dec2bin(dec):
    """Convert a non-negative integer to a little-endian bit list.

    Returns [] for 0 (preserving the original behaviour).
    Fix: use floor division - 'dec/=2' is true division on Python 3 and
    would produce float "bits"; divmod keeps integer semantics everywhere.
    """
    bits = []
    while dec > 0:
        dec, bit = divmod(dec, 2)
        bits.append(bit)
    return bits
def bin2dec(bin):
    """Return the integer encoded by a little-endian bit list (bin[0] = LSB)."""
    return sum(bit * 2 ** position for position, bit in enumerate(bin))
def f2bin(num):
    """Fixed-point view of num: scale by 1e7, then convert to a bit list.
    NOTE(review): num*10000000 must be an integer for dec2bin to behave;
    only values that are multiples of 1e-7 convert exactly - confirm callers."""
    return dec2bin(num*(10000000))
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
from fpu_structure import * #mas comentarios
#from instruction_set import * #Por el momento queda así
import reduced_instruction_set as iset
from datahandling import *
import main #debe estar después de la instanciación de iset
class FPU_GUI:
    """Glade/PyGTK front end for the 8087 simulator.

    Reads commands from a text console widget, runs them through
    main.execute_command(), and mirrors the register stack, the status and
    control words, and the last result in the GUI windows.
    """
    def __init__(self):
        main.saveState()
        self._last_instruction = ""  # last command list executed, shown in the UI
        #Set the Glade file
        # NOTE(review): this path looks odd ("...glade/fpuguiglade") - confirm
        # it matches the actual .glade file name on disk.
        self.gladefile = "gui/fpugui.glade/fpuguiglade"
        self.wTree = gtk.glade.XML(self.gladefile)
        #Get the Main Window, and connect the "destroy" event
        self.windowST = self.wTree.get_widget("ST_Stack")
        if (self.windowST):
            self.windowST.connect("destroy", gtk.main_quit)
        #self.windowReg = self.wTree.get_widget("Registers")
        #if (self.windowReg):
        #    self.windowReg.connect("destroy", gtk.main_quit)
        self.windowConsole = self.wTree.get_widget("Consola")
        if (self.windowConsole):
            self.windowConsole.connect("destroy", gtk.main_quit)
        #Create our dictionary of handlers and connect it
        dic = {
            "on_Salir_destroy" : gtk.main_quit,
            "on_Ejecutar_clicked" : self.ejecutar,
            "on_Deshacer_clicked" : self.deshacer,
            "on_Reiniciar_clicked" : self.reiniciar,
            "on_Salir_clicked" : gtk.main_quit,
        }
        self.wTree.signal_autoconnect(dic)
    def ejecutar(self, widget):
        """'Execute' button: parse the console text and run each command."""
        lines = [] #load the lines here as an auxiliary list;
        #to take them out in order use pop(0) (mind the 0, it must be there)
        #get the input console widget
        consola=self.wTree.get_widget("entrada_consola")
        buffConsola = consola.get_buffer()
        numlines=buffConsola.get_line_count()
        beginIter = buffConsola.get_start_iter() #buffConsola.get_iter_at_line(0)
        endIter = buffConsola.get_end_iter()
        text= buffConsola.get_text(beginIter,endIter)
        #parse the input data
        #validate it
        #hand it over to main for execution
        commands = main.parse(text)
        for comm in commands:
            main.execute_command(comm)
            self._last_instruction = comm
        #refresh the register, stack and result views
        self.actualizarRegs()
        self.actualizarPila()
        self.actualizarResultados()
    def deshacer(self, widget):
        """'Undo' button: roll back one command and refresh the views."""
        main.undo()
        self.actualizarRegs()
        self.actualizarPila()
    def reiniciar(self, widget):
        """'Restart' button: reset the FPU to power-on state and refresh."""
        main.rebootFPU()
        self.actualizarRegs()
        self.actualizarPila()
    #refresh the register output widgets
    def actualizarRegs(self):
        """Refresh the 16 status-word and 16 control-word bit widgets.

        Widgets are looked up by register name; any failure (missing widget
        or register) is silently ignored so a partial GUI keeps working.
        """
        try:
            #update the status registers
            regs_vals = iset.status.getRegs()
            regs_noms = iset.status.getRegNames()
            for i in range (16):
                self.wTree.get_widget(regs_noms[i]).set_text(str(regs_vals[i]))
        except:
            pass
        try:
            #update the control registers
            regc_vals = iset.control.getRegs()
            regc_noms = iset.control.getRegNames()
            for i in range (16):
                self.wTree.get_widget(regc_noms[i]).set_text(str(regc_vals[i]))
            #update the statusX86 registers
        except:
            pass
    def actualizarResultados(self):
        """Show the current ST(0) value and the last executed instruction."""
        nom_res = "resultados"
        self.wTree.get_widget(nom_res).set_text(str(iset.pila.getI(iset.pila.head())[0]))#(str(iset.res))
        nom_text = "lastInstruction"
        lastI = ""
        for el in self._last_instruction:
            lastI+=" "
            lastI+=str(el)
        self.wTree.get_widget(nom_text).set_text(lastI)
    #refresh the stack output widgets
    def actualizarPila(self):
        """Refresh the 8 ST rows (binary view, value, tag); rows below the
        current stack depth show the "empty" sentinel."""
        for i in range(8):
            reg=[None,None]
            nom_bin = "ST"+str(i)+"_bin"
            nom_rep = "ST"+str(i)+"_rep"
            nom_tag = "tag"+str(i)
            head = iset.pila.head()-i
            try:
                reg=iset.pila.getI(head)
            except:
                reg[0] = 00000000000000000000 #Python 2 octal literal: value 0
                reg[1] = [1,1] #"empty" tag
            self.wTree.get_widget(nom_bin).set_text(str(f2bin(reg[0])))
            self.wTree.get_widget(nom_rep).set_text(str(reg[0]))
            self.wTree.get_widget(nom_tag).set_text(str(reg[1]))
if __name__ == "__main__":
fpugui = FPU_GUI()
gtk.main()
| Python |
# Demo script (Python 2): exercises the C/assembly helpers in libtest.so
# via ctypes - add, hello_asm, sum, find_primes and sum_values.
from ctypes import *
# load the shared object
libtest = cdll.LoadLibrary('./libtest.so.1.0')
# call the function, yes it is as simple as that!
print libtest.add(10, 20)
# call the assembly "hello" routine
libtest.hello_asm()
n = int(raw_input(u'ingrese limite de sumantoria: '))
print "La sumatoria de 1 a %i es %i" % (n, libtest.sum(c_uint(n)))
# returns the first 'max' primes
max = int(raw_input(u'cuantos primos queres encontrar?'))
a = c_int * max  # a C array type of 'max' ints
primes_result = a()
ok = libtest.find_primes(primes_result,c_uint(max))
for i in primes_result: print i
# call the sum_values() function
# we have to create a c int array for this
array_of_5_ints = c_int * 5
nums = array_of_5_ints()
# fill up array with values
for i in xrange(5): nums[i] = i
# since the function expects an array pointer, we pass is byref (provided by ctypes)
print libtest.sum_values(byref(nums), 5)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datahandling import *
"""
Pin Configuration
"""
"""
pin[ 1 ]='GND'
pin[ 2:16 ]= AD[14:0]
pin[ 17 ]= 'NC'
pin[ 18 ]= 'NC'
pin[ 19 ]= 'CLK'
pin[ 20 ]= 'GND'
pin[ 21 ]= 'RESET'
pin[ 22 ]= 'READY'
pin[ 23 ]= 'BUSY'
pin[ 24 ]= QS1
pin[ 25 ]= QS0
pin[ 26 ]= S0 #neg
pin[ 27 ]= S1 #neg
pin[ 28 ]= S2 #neg
pin[ 29 ]= 'NC'
pin[ 30 ]= 'NC'
pin[ 31 ]= RQ/GT0 #neg
pin[ 32 ]= INT
pin[ 33 ]= RQ/GT1 #neg
pin[ 34 ]= BHE #neg
pin[ 35 : 38 ]= S[6:3]
pin[ 39 ]= AD[15]
pin[ 40 ]= 'VCC'
"""
class Pinout:
    """Storage for the 8087's 40-pin interface; no signal logic is modelled yet."""
    def __init__(self):
        self.iniciar()
    def iniciar(self): #reset the pin state
        self._AD =[0 for i in range(16)] #address/data lines AD0..AD15
        self._pin=[None for i in range(40)] #one slot per physical pin (unassigned for now)
        """ self.pin[ 1 ]='GND'
        self.pin[ 2:16 ]= _AD[14:0]
        self.pin[ 17 ]= 'NC'
        self.pin[ 18 ]= 'NC'
        self.pin[ 19 ]= 'CLK'
        self.pin[ 20 ]= 'GND'
        self.pin[ 21 ]= 'RESET'
        self.pin[ 22 ]= 'READY'
        self.pin[ 23 ]= 'BUSY'
        self.pin[ 24 ]= 'QS1'
        self.pin[ 25 ]= 'QS0'
        self.pin[ 26 ]= 'S0' #neg
        self.pin[ 27 ]= 'S1' #neg
        self.pin[ 28 ]= 'S2' #neg
        self.pin[ 29 ]= 'NC'
        self.pin[ 30 ]= 'NC'
        self.pin[ 31 ]= 'RQ/GT0' #neg
        self.pin[ 32 ]= 'INT'
        self.pin[ 33 ]= 'RQ/GT1' #neg
        self.pin[ 34 ]= 'BHE' #neg
        self.pin[ 35 : 38 ]= [0,0,0,0]#S[6:3]
        self.pin[ 39 ]= self._AD[15]
        self.pin[ 40 ]= 'VCC'
        """
"""
Control Unit (CU)
Recibe las instrucciones
Decodifica los operandos
Ejecuta rutinas de control
"""
"""
Numeric Execution Unit (NEU)
Ejecuta las instrucciones numéricas
"""
"""
Data Field:
Compuesto por la Pila
"""
"""
Pila
Esta está compuesta de 7 registros de 80 bits.
Cada registro consta de
64 bits mas bajos de significand
15 bits de exponente
1 bit de signo
"""
"""
Tag Field
Cada registro tiene correspondencia uno a uno con un registro del data field
"""
class Pila:
    """The 8087 register stack: up to 8 slots, each a (value, tag) pair.

    _pst holds the ST values and _ptag the matching tag pairs; index
    head() is ST(0). Out-of-range reads return the sentinel (0, [1, 1])
    ("empty") instead of raising.
    """
    def __init__(self):
        self.iniciar()
    def iniciar(self): #reset to the empty power-on state
        self._pst=[] #ST value stack
        self._ptag=[] #tag stack, parallel to _pst
    def push(self, *args):
        """push(st[, tag]): append a value (default tag [0, 0]) unless full."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:  # Fix: this unpack was duplicated in the original
            st, tag = args
        else:
            st = args[0]
            tag = [0, 0]
        if len(self._pst) < 8:
            self._pst.append(st)
            self._ptag.append(tag)
        else:
            print("fallo al empujar valor a la pila, demasiados valores")
            #raise #exception
    def pop(self):
        """Remove and return the head (value, tag); (0, [1, 1]) when empty."""
        try:
            return (self._pst.pop(), self._ptag.pop())
        except IndexError:
            return (0, [1, 1])
    def getI(self, i):
        """Return (value, tag) at index i; sentinel (0, [1, 1]) out of range."""
        if i < 0 or i >= len(self._pst):
            return (0, [1, 1])
        return (self._pst[i], self._ptag[i])
    def setI(self, *args):
        """setI(i, st[, tag]): overwrite slot i (default tag [0, 0]).

        Fix: the original only guarded i < 0 and raised IndexError for
        indices past the end; mirror getI's sentinel behaviour instead.
        """
        assert 2 <= len(args) <= 3
        if len(args) == 3:
            i, st, tag = args
        else:
            i, st = args
            tag = [0, 0]
        if i < 0 or i >= len(self._pst):
            return (0, [1, 1])
        self._pst[i] = st
        self._ptag[i] = tag
    def delI(self, i):
        """Delete slot i; True on success, False when i is out of range."""
        try:
            del self._pst[i]
            del self._ptag[i]
            return True
        except IndexError:
            return False
    def length(self):
        """Number of occupied slots."""
        return len(self._pst)
    def head(self):
        """Index of ST(0), the most recently pushed slot; -1 when empty."""
        return len(self._pst) - 1
    def getRegs(self):
        """All 8 slots as (value, tag) pairs, padded with the empty sentinel."""
        return [self.getI(i) for i in range(8)]
    def setRegs(self, pilatemp):
        """Restore slots from a getRegs()-style list of (value, tag) pairs.

        Fix: the original passed the value as the index (setI(st[0], st[1]))
        and printed debug output, so it could never restore the stack.
        """
        for i, reg in enumerate(pilatemp):
            self.setI(i, reg[0], reg[1])
"""
Control Register (16 bits)
"""
class ControlRegister:
    """8087 control word: exception masks plus the precision (PC), rounding
    (RC) and infinity (IC) control fields. Power-on value is 037Fh."""
    def __init__(self):
        self.iniciar()
    def iniciar(self): #reset to 037Fh
        self._IM=1 #Invalid operation mask
        self._DM=1 #Denormalized operand mask
        self._ZM=1 #Zero divide mask
        self._OM=1 #Overflow mask
        self._UM=1 #Underflow mask
        self._PM=1 #Precision mask
        self._X=1 #Reserved
        self._M=0 #Interrupt mask
        self._PC = [1, 1] #Precision control
        self._PC0= self._PC[0]
        self._PC1= self._PC[1] # Fix: originally mirrored _PC[0] instead of _PC[1]
        self._RC=[0, 0] #Rounding control
        self._RC0=self._RC[0]
        self._RC1=self._RC[1]
        self._IC =[0, 0] #Infinity control (0=projective, 1=affine)
        self._IC0 =self._IC[0]
        self._IC1 =self._IC[1]
        self._XX=[0,0] #last reserved bits
    def setPC(self, *args):
        """setPC(b0, b1) or setPC([b0, b1])."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._PC[0] = args[0]
            self._PC[1] = args[1]
        else:
            self._PC = args[0]
    def getPC(self):
        return self._PC # Fix: was 'return _PC' (NameError)
    def setRC(self, *args):
        """setRC(b0, b1) or setRC([b0, b1])."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._RC[0] = args[0]
            self._RC[1] = args[1]
        else:
            self._RC = args[0]
    def getRC(self):
        return self._RC # Fix: was 'return _RC' (NameError)
    def setIC(self, *args):
        """setIC(b0, b1) or setIC([b0, b1])."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._IC[0] = args[0]
            self._IC[1] = args[1]
        else:
            self._IC = args[0]
    def getIC(self):
        return self._IC # Fix: was 'return _IC' (NameError)
    def getRegs(self):
        """All 16 control bits in hardware order."""
        return [self._IM, self._DM, self._ZM,self._OM,self._UM, self._PM,self._X,self._M,self._PC[0], self._PC[1],self._RC[0],self._RC[1],self._IC[0],self._IC[1],self._XX[0],self._XX[1]]
    def setRegs(self, IM,DM,ZM,OM,UM,PM,X0,M,PC0,PC1,RC0,RC1,IC0,IC1,X1,X2):
        """Bulk-set all 16 control bits.

        Fix: 'self' was missing from the signature, so calling this as a
        method raised TypeError (and the body's self.* would have failed).
        """
        self._IM=IM
        self._DM=DM
        self._ZM=ZM
        self._OM=OM
        self._UM=UM
        self._PM=PM
        self._X=X0
        self._M=M
        self._PC[0]=PC0
        self._PC[1]=PC1
        self._RC[0]=RC0
        self._RC[1]=RC1
        self._IC[0]=IC0
        self._IC[1]=IC1
        self._XX[0]=X1
        self._XX[1]=X2
    def getRegNames(self):
        return ['IM','DM','ZM','OM','UM','PM','X0','M','PC0','PC1','RC0','RC1','IC0','IC1','X1','X2']
"""
Status Register (16 bits)
"""
class StatusRegister:
    """8087 status word: exception flags, condition codes C0..C3, the TOP
    stack pointer (3 bits, stored LSB-first as a list) and the busy bit."""
    def __init__(self):
        self.iniciar()
    def iniciar(self): #reset to 0000h
        self._IE=0 #Invalid operation
        self._DE=0 #Denormalized operand
        self._ZE=0 #Zero divide
        self._OE=0 #Overflow
        self._UE=0 #Underflow
        self._PE=0 #Precision
        self._X=0 #Reserved
        self._IR=0 #Interrupt request
        self._C=[0, 0, 0, 0 ] #Condition code C0..C3
        self._C0=0 #legacy per-bit mirrors (kept for compatibility)
        self._C1=0
        self._C2=0
        self._TOP=[0, 0, 0] #Top-of-stack pointer, bits LSB first
        self._C3=0
        self._B=0 #NEU busy
    def setTOP(self,*args):
        """setTOP(b0, b1, b2) or setTOP([b0, b1, b2])."""
        assert 1 <= len(args) <= 3
        if len(args) == 3:
            self._TOP[0] = args[0]
            self._TOP[1] = args[1]
            self._TOP[2] = args[2]
        elif len(args) == 1:
            self._TOP = args[0]
        else:
            print("Error de argumentos %s" % (args,))
    def getTOP(self):
        return self._TOP
    def setC(self,*args):
        """setC(c0, c1, c2, c3) or setC([c0, c1, c2, c3])."""
        assert 1 <= len(args) <= 4
        if len(args) == 4:
            self._C[0] = args[0]
            self._C[1] = args[1]
            self._C[2] = args[2]
            self._C[3] = args[3]
        elif len(args) == 1:
            self._C = args[0]
        else:
            print("Error de argumentos %s" % (args,))
    def getC(self):
        return self._C
    def decTOP(self):
        """TOP <- (TOP - 1) mod 8."""
        aux = bin2dec(self._TOP)
        aux = 7 if aux == 0 else aux - 1
        # Fix: dec2bin returns a variable-length list ([] for 0), which broke
        # getRegs()'s fixed _TOP[0..2] indexing - pad back to exactly 3 bits.
        self._TOP = (dec2bin(aux) + [0, 0, 0])[:3]
    def incTOP(self):
        """TOP <- (TOP + 1) mod 8."""
        aux = bin2dec(self._TOP)
        aux = 0 if aux == 7 else aux + 1
        self._TOP = (dec2bin(aux) + [0, 0, 0])[:3] # pad: see decTOP
    def getRegs(self):
        """All 16 status bits in hardware order."""
        return [self._IE, self._DE, self._ZE, self._OE, self._UE, self._PE, self._X, self._IR, self._C[0], self._C[1],self._C[2], self._TOP[0], self._TOP[1], self._TOP[2], self._C[3], self._B]
    def setRegs(self, IE,DE,ZE,OE,UE,PE,X,IR,C0,C1,C2,TOP0,TOP1,TOP2,C3,B):
        """Bulk-set all 16 status bits.

        Fix: 'self' was missing from the signature, so calling this as a
        method raised TypeError (and the body's self.* would have failed).
        """
        self._IE=IE
        self._DE=DE
        self._ZE=ZE
        self._OE=OE
        self._UE=UE
        self._PE=PE
        self._X = X
        self._IR=IR
        self._C[0]=C0
        self._C[1]=C1
        self._C[2]=C2
        self._TOP[0]=TOP0
        self._TOP[1]=TOP1
        self._TOP[2]=TOP2
        self._C[3]=C3
        self._B=B
    def getRegNames(self):
        return ['IE','DE','ZE','OE','UE','PE','X','IR','C0','C1','C2','TOP0','TOP1','TOP2','C3','B']
"""
Tag Word (16 bits) #listo
"""
"""
Instruction Pointer (32 bits)
"""
"""
Data Pointer (32 bits)
"""
"""
Registros necesarios del procesador 8086
"""
class StatusX86:
    """The subset of 8086 CPU flags the simulator mirrors (CF..OF)."""
    def __init__(self):
        self.iniciar()
    def iniciar(self): #reset to 0000h
        self._CF=0 #Carry
        self._PF=0 #Parity
        self._AF=0 #Auxiliary carry
        self._ZF=0 #Zero
        self._SF=0 #Sign
        self._TF=0 #Trap
        self._IF=0 #Interrupt enable
        self._DF=0 #Direction
        self._OF=0 #Overflow
    def getRegs(self):
        """The 9 flags, in the same order as getRegNames()."""
        return [self._CF,self._PF,self._AF,self._ZF,self._SF,self._TF,self._IF,self._DF,self._OF ]
    def setRegs(self, CF,PF,AF,ZF,SF,TF,IF,DF,OF):
        """Bulk-set all 9 flags.

        Fix: 'self' was missing from the signature, so calling this as a
        method raised TypeError (and the body's self.* would have failed).
        """
        self._CF= CF
        self._PF= PF
        self._AF= AF
        self._ZF= ZF
        self._SF= SF
        self._TF= TF
        self._IF= IF
        self._DF= DF
        self._OF= OF
    def getRegNames(self):
        # Fix: the original listed a stray 'DF' after 'PF', yielding 10 names
        # for 9 values and desynchronising name/value pairing in the GUI.
        return ['CF','PF','AF','ZF','SF','TF','IF','DF','OF']
#Si es llamado como ejecutable, entonces decir que esto es una librería que contiene las estructuras básicas de una fpu 8087 (pilas y registros), mostrar la doc y salir.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de fpu_structure.py
"""
import unittest
import random
from fpu_structure import Pila, StatusRegister, ControlRegister, StatusX86
"""
Test Pila
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Especificado en cada test en particular
Dependencias:
Pila
Método:
Especificado en cada test en particular
Esperado:
Test OK
"""
class TestPila(unittest.TestCase):
    """Unit tests for fpu_structure.Pila (the ST register stack).

    Each test builds its own Pila, fills it with random values and all
    possible tag pairs, and checks that what comes back out matches what
    went in (author: Leonardo Manuel Rocha, 28/07/2008).
    """
    _TAGS = [[0, 0], [0, 1], [1, 0], [1, 1]]

    def test_push_1(self):
        """push(st, tag): the pushed pair must become the new head."""
        pila = Pila()
        for _ in range(2):
            for tag in self._TAGS:
                value = random.randint(-2e10, 2e10)
                pila.push(value, tag)
                top = len(pila._pst) - 1
                self.assertEqual((pila._pst[top], pila._ptag[top]), (value, tag))

    def test_push_2(self):
        """push(st): the single-argument form must also land on the head."""
        pila = Pila()
        for _ in range(8):
            value = random.random()
            pila.push(value)
            self.assertEqual(pila._pst[len(pila._pst) - 1], value)

    def test_pop(self):
        """pop() must return the pushed values in LIFO order.

        Assumes Pila.push(*args) works correctly.
        """
        pila = Pila()
        values = []
        for i in range(8):
            values.append(random.random())
            pila.push(values[i])
        for i in range(8):
            self.assertEqual(pila.pop()[0], values[7 - i])

    def test_getI(self):
        """getI(i) must read back the value stored at index i."""
        pila = Pila()
        values = []
        for i in range(8):
            values.append(random.random())
            pila.push(values[i])
        for i in range(8):
            self.assertEqual(pila.getI(i)[0], values[i])

    def test_head(self):
        """head() must always index the most recently pushed element."""
        pila = Pila()
        for _ in range(2):
            for tag in self._TAGS:
                value = random.randint(-2e10, 2e10)
                pila.push(value, tag)
                self.assertEqual(pila.getI(pila.head()), (value, tag))

    def test_length(self):
        """length() must track the number of pushed elements."""
        pila = Pila()
        self.assertEqual(0, pila.length())
        for i in range(8):
            pila.push(random.random())
            self.assertEqual(i + 1, pila.length())

    def test_delI(self):
        """delI(0) must succeed for every element of a full stack."""
        pila = Pila()
        for _ in range(8):
            pila.push(random.random())
        for _ in range(8):
            self.assertEqual(True, pila.delI(0))

    def test_setI_1(self):
        """setI(i, st, tag): three-argument form overwrites value and tag."""
        pila = Pila()
        for _ in range(8):
            pila.push(0, self._TAGS[3])  # fill the stack with known values
        index = 0
        for _ in range(2):
            for tag in self._TAGS:
                value = random.random()
                pila.setI(index, value, tag)
                self.assertEqual(pila.getI(index), (value, tag))
                index += 1

    def test_setI_2(self):
        """setI(i, st): two-argument form overwrites the value only."""
        pila = Pila()
        for _ in range(8):
            pila.push(0, [1, 1])  # fill the stack with known values
        for i in range(8):
            value = random.random()
            pila.setI(i, value)
            self.assertEqual(pila.getI(i)[0], value)
#class TestStatusX86(unittest.TestCase):
#    pass
class TestControlRegister(unittest.TestCase):
    """Exercises ControlRegister's PC/RC/IC setters and getters."""
    def test_setPC_1(self):
        """setPC([b0, b1]) list form."""
        control = ControlRegister()
        PC =[[0,0],[0,1],[1,0],[1,1]]
        for pc in PC:
            control.setPC(pc)
            self.assertEqual(pc,control._PC)
    def test_setPC_2(self):
        """setPC(b0, b1) two-argument form."""
        control = ControlRegister()
        PC =[[0,0],[0,1],[1,0],[1,1]]
        for pc in PC:
            control.setPC(pc[0],pc[1])
            self.assertEqual(pc,control._PC)
    #assumes setPC() works correctly
    def test_getPC(self):
        """Fix: was named getPC, so unittest discovery never ran it."""
        control = ControlRegister()
        PC =[[0,0],[0,1],[1,0],[1,1]]
        for pc in PC:
            control.setPC(pc[0],pc[1])
            self.assertEqual(control.getPC(),pc)
    def test_setRC_1(self):
        control = ControlRegister()
        RC =[[0,0],[0,1],[1,0],[1,1]]
        for rc in RC:
            control.setRC(rc)
            self.assertEqual(rc,control._RC)
    def test_setRC_2(self):
        control = ControlRegister()
        RC =[[0,0],[0,1],[1,0],[1,1]]
        for rc in RC:
            control.setRC(rc[0],rc[1])
            self.assertEqual(rc,control._RC)
    #assumes setRC() works correctly
    def test_getRC(self):
        """Fix: was named getRC, so unittest discovery never ran it."""
        control = ControlRegister()
        RC =[[0,0],[0,1],[1,0],[1,1]]
        for rc in RC:
            control.setRC(rc[0],rc[1])
            self.assertEqual(control.getRC(),rc)
    def test_setIC_1(self):
        control = ControlRegister()
        IC =[[0,0],[0,1],[1,0],[1,1]]
        for ic in IC:
            control.setIC(ic)
            self.assertEqual(ic,control._IC)
    def test_setIC_2(self):
        control = ControlRegister()
        IC =[[0,0],[0,1],[1,0],[1,1]]
        for ic in IC:
            control.setIC(ic[0],ic[1])
            self.assertEqual(ic,control._IC)
    #assumes setIC() works correctly
    def test_getIC(self):
        """Fix: was named getIC (never ran) and used the undefined name 'rc'."""
        control = ControlRegister()
        IC =[[0,0],[0,1],[1,0],[1,1]]
        for ic in IC:
            control.setIC(ic[0],ic[1])
            self.assertEqual(control.getIC(),ic)
class TestStatusRegister(unittest.TestCase):
    """Placeholder: the StatusRegister tests listed below are not written yet."""
    pass
"""
def test_setTOP_1(self):
def test_setTOP_2(self):
def getTOP(self):
def test_setC_1(self):
def test_setC_2(self):
def test_getC(self):
def test_decTOP(self):
def test_incTOP(self):
"""
if __name__ == '__main__':
    unittest.main() #run every TestCase in this module
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de instruction_set.py
"""
import random
import math
#módulo de Tests Unitarios
import unittest
#importa el módulo a testear:
from reduced_instruction_set import *
"""
Test FLD
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Observar que el dato que se introduce mediante FLD de instruction_set.py
se corresponda con los datos que se almacenan en la pila
Dependencias:
Pila
StatusRegister
Método:
Se crean valores enteros positivos, cero y negativos los que se introducirán
mediante FLD.
Se comprueba que el valor almacenado corresponda con el introducido
Esperado:
Test OK
"""
class TestFLD(unittest.TestCase):
    """FLD must leave the loaded value on top of the register stack."""
    def _load_and_check(self, value):
        # Push via FLD, then read the head of the stack back.
        FLD(value)
        self.assertEqual(pila.getI(pila.head())[0], value)
    def testFLDpos(self):
        self._load_and_check(111111)   # a positive value
    def testFLDneg(self):
        self._load_and_check(-111111)  # a negative value
    def testFLDcero(self):
        self._load_and_check(0)        # zero
#Test ABS
#
#class TestFABS(unittest.TestCase):
#    pass
#Test FADD
class TestFADD(unittest.TestCase):
    """FADD(0, 1) must replace ST(0) with ST(0) + ST(1)."""
    def testFADD_1(self):
        first = random.randint(-2**10, 2**10)
        second = random.randint(-2**10, 2**10)
        pila.push(first)
        pila.push(second)
        FADD(0, 1)
        self.assertEqual(pila.getI(pila.head())[0], first + second)
#Test FSUB
class TestFSUB(unittest.TestCase):
    """FSUB(0, 1) must replace ST(0) with ST(0) - ST(1)."""
    def testFSUB(self):
        for _ in range(8):
            pila.pop()  # drain leftovers from earlier tests
        subtrahend = random.randint(-2**10, 2**10)
        minuend = random.randint(-2**10, 2**10)
        pila.push(subtrahend)
        pila.push(minuend)
        FSUB(0, 1)
        self.assertEqual(pila.getI(pila.head())[0], minuend - subtrahend)
class TestFMUL(unittest.TestCase):
    """FMUL(0, 1) must replace ST(0) with ST(0) * ST(1)."""
    def testFMUL(self):
        for i in range(8):
            pila.pop() #drain leftovers from earlier tests
        a = random.randint(-2**6,2**6)
        b = random.randint(-2**6,2**6)
        c = a * b
        pila.push(a)
        pila.push(b)
        # Fix: these debug prints polluted the test output; commented out to
        # match the sibling tests, which keep theirs commented.
        #print pila._pst
        FMUL(0,1)
        #print pila._pst
        self.assertEqual(pila.getI(pila.head())[0],c)
class TestFDIV(unittest.TestCase):
    """FDIV(0, 1) must replace ST(0) with ST(0) / ST(1)."""
    def testFDIV(self):
        for i in range(8):
            pila.pop() #drain leftovers from earlier tests
        # Fix: a == 0 made the expected-value computation itself raise
        # ZeroDivisionError, so this test failed at random (~1 run in 129).
        a = 0
        while a == 0:
            a = random.randint(-2**6,2**6)
        b = random.randint(-2**6,2**6)
        c = b / a
        pila.push(a)
        pila.push(b)
        #print pila._pst  (debug prints commented out, as in the sibling tests)
        FDIV(0,1)
        #print pila._pst
        self.assertEqual(pila.getI(pila.head())[0],c)
class TestFCOS(unittest.TestCase):
    """FCOS must replace ST(0) with math.cos(ST(0))."""
    def testFCOS(self):
        for _ in range(8):
            pila.pop()  # drain leftovers from earlier tests
        value = random.randint(-2**6, 2**6)
        expected = math.cos(value)
        pila.push(value)
        FCOS()
        self.assertEqual(pila.getI(pila.head())[0], expected)
class TestFSIN(unittest.TestCase):
    """FSIN must replace ST(0) with math.sin(ST(0))."""
    def testFSIN(self):
        for i in range(8): #drain leftovers from earlier tests
            pila.pop()
        a = random.randint(-2**6,2**6)
        b = math.sin(a) #expected result
        pila.push(a)
        FSIN()
        self.assertEqual(pila.getI(pila.head())[0],b)
class TestFSINCOS(unittest.TestCase):
    """FSINCOS must leave both sin and cos of ST(0) on the stack.

    NOTE(review): asserts cos at ST(0) and sin at ST(1) - confirm this
    matches FSINCOS's actual push order.
    """
    def testFSINCOS(self):
        for i in range(8): #drain leftovers from earlier tests
            pila.pop()
        a = random.randint(-2**6,2**6)
        b = math.cos(a)
        c = math.sin(a)
        pila.push(a)
        FSINCOS()
        self.assertEqual(pila.getI(pila.head())[0],b)
        self.assertEqual(pila.getI(pila.head()-1)[0],c)
class TestFSQRT(unittest.TestCase):
    """FSQRT must replace ST(0) with math.sqrt(ST(0))."""
    def testFSQRT(self):
        for _ in range(8):
            pila.pop()  # drain leftovers from earlier tests
        value = random.randint(0, 2**6)  # non-negative: sqrt domain
        expected = math.sqrt(value)
        pila.push(value)
        FSQRT()
        self.assertEqual(pila.getI(pila.head())[0], expected)
class TestFSTP(unittest.TestCase):
    """FSTP must return (store) ST(0) and pop it, exposing the old ST(1)."""
    def testFSTP(self):
        for i in range(8): #drain leftovers from earlier tests
            pila.pop()
        a = random.randint(-2**6,2**6)
        b = random.randint(-2**6,2**6)
        pila.push(b)
        pila.push(a)
        self.assertEqual(FSTP(1111),a) #1111 is a dummy destination address
        self.assertEqual(pila.getI(pila.head())[0],b)
class TestFCOM(unittest.TestCase):
    """FCOM(1) must set the condition codes for >, < and == comparisons.

    Pairs (a[i], b[i]) cover ST(0) > ST(1), ST(0) < ST(1) and equality;
    c[i] is the expected condition-code vector (c[3] is unused - the loop
    only runs 3 cases). NOTE(review): confirm c's bit ordering matches
    StatusRegister.getC().
    """
    def testFCOM(self):
        for i in range(8): #drain leftovers from earlier tests
            pila.pop()
        a = [2,1,0]
        b = [1,2,0]
        c= [[0,0,0,0],[1,0,0,0],[0,0,0,1],[1,0,1,1]]
        for i in range(3):
            pila.push(b[i])
            pila.push(a[i])
            FCOM(1)
            self.assertEqual(status.getC(),c[i])
if __name__ == '__main__':
    unittest.main() #run every TestCase in this module
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fpu_structure import *
from datahandling import *
import math
#TODO: flag updates for the various registers are still missing
#TODO: many instructions are missing
uesp = None #last element popped from the stack (read by main.py as iset.uesp)
res = None #result of the last operation (read by main.py as iset.res)
#TODO: res and uesp must be declared global in each instruction that assigns them
pila = Pila() #the ST register stack
control = ControlRegister()
status = StatusRegister()
pinout = Pinout()
statusX86 = StatusX86()
overflow = False
underflow = False
#pag 121
def F2XM1():
    """ST(0) <- 2**ST(0) - 1; returns the new ST(0)."""
    global res
    # Fix: 'res' was assigned as a local, so the module-level res read by
    # the GUI/main module was never updated (per the file's own TODO).
    pila.push((2 ** pila.pop()[0]) - 1)
    res = pila.getI(pila.head())[0]
    return res
#pag 123
def FABS():
    """ST(0) <- |ST(0)|; mirrors ZF into statusX86 when the result is 0.

    Returns the new ST(0).
    """
    global res # Fix: res was a local, so module-level res never updated
    pila.push(abs(pila.pop()[0]))
    res = pila.getI(pila.head())[0]
    if res == 0 :
        statusX86._ZF=1
    return res
# Addition operations
"""
Operaciones de adición
Opcode Instruction Description
D8 C0+i FADD ST(0), ST(i)Add ST(0) to ST(i) and store result in ST(0)
DC C0+i FADD ST(i), ST(0)Add ST(i) to ST(0) and store result in ST(i)
DE C0+i FADDP ST(i), ST(0) Add ST(0) to ST(i), store result in ST(i), and pop the
register stack
DE C1 FADDP Add ST(0) to ST(1), store result in ST(1), and pop the
"""
#FADD
def FADD(st0=0,sti=1):
    """FADD ST(st0),ST(sti): ST(st0) <- ST(st0) + ST(sti).

    One of st0/sti must be 0 (they are offsets from the stack head).
    Returns the sum, or None on an argument error.
    """
    global res # Fix: res was a local, so module-level res never updated
    if st0 == sti or (sti != 0 and st0 != 0):
        print("Error en FADD, st0")
    else:
        # Fix: the operand indices were ignored - the original always added
        # ST(0) and ST(1) regardless of st0/sti. ST(k) lives at head-k.
        a = pila.getI(pila.head()-st0)[0]
        b = pila.getI(pila.head()-sti)[0]
        res = a + b
        pila.setI(pila.head()-st0, res) #the first operand is the destination
        if res == 0 :
            statusX86._ZF=1
        return res
#FADDP
def FADDP(sti=1,st0=0):
    """FADDP: add ST(0) and ST(1), store the sum in ST(1), pop the stack.

    Returns the popped (old) ST(0) value, or None when the operand check
    fails.  NOTE(review): sti/st0 only feed the validity check; the addition
    always uses head() and head()-1.
    """
    uesp = None
    if st0 == sti or (st0!= 0 and sti != 0):
        print "Error en FADDP, st0"
        #raise()
    else:
        a = pila.getI(pila.head())[0]
        b = pila.getI(pila.head()-1)[0]
        res = a + b
        pila.setI(pila.head()-1,res) #pila[1]=pila[1]+pila[0]
        uesp = pila.pop()[0] #careful here when the intermediate pop register changes
        #status.incTOP() #TODO: check for an off-by-one here
        if res == 0 :
            statusX86._ZF=1
    return uesp
"""
Opcode Instruction Description
D8 E0+i FSUB ST(0), ST(i) Subtract ST(i) from ST(0) and store result in ST(0)
DC E8+i FSUB ST(i), ST(0) Subtract ST(0) from ST(i) and store result in ST(i)
DE E8+i FSUBP ST(i), ST(0) Subtract ST(0) from ST(i), store result in ST(i), and pop
register stack
DE E9 FSUBP Subtract ST(0) from ST(1), store result in ST(1), and pop
register stack
"""
def FSUB(st0=0,sti=1):
    """FSUB ST(0), ST(i): ST(0) <- ST(0) - ST(1) for the default operands.

    Returns the (possibly unchanged) top of stack.
    NOTE(review): st0/sti only feed the validity check.
    """
    if st0 == sti or (sti != 0 and st0 != 0):
        print "Error en FSUB, st0"
        #raise()
    else:
        a = pila.getI(pila.head())[0]
        b = pila.getI(pila.head()-1)[0]
        res = a - b
        pila.setI(pila.head(), res) #TODO: may break once two's-complement representation arrives
        if res == 0 :
            statusX86._ZF=1
    return pila.getI(pila.head())[0]
def FSUBP(st0=0,sti=1):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBP, st0"
#raise()
else:
a = pila.getI(pila.head())[0]
b = pila.getI(pila.head()-1)[0]
res = a - b
pila.setI(pila.head(), res)#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
if res == 0 :
statusX86._ZF=1
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
#Operaciones de Signo
def FCHS():
    """FCHS (D9 E0): negate ST(0) in place and return the new value."""
    pila.setI(pila.head(),-1* pila.getI(pila.head())[0])
    res = pila.getI(pila.head())[0]
    return res
def FNCLEX():
    """FNCLEX: clear the FPU exception flags without first checking for
    pending unmasked exceptions."""
    status._PE=0
    status._UE=0
    status._OE=0
    status._ZE=0
    status._DE=0
    status._IE=0
    # status._ES=0 # pentium processors
    # status._EF=0 # pentium processors
    status._B=0
#Operaciones de Comparación
"""
Opcode Instruction Description
D8 /2 FCOM m32real Compare ST(0) with m32real.
DC /2 FCOM m64real Compare ST(0) with m64real.
D8 D0+i FCOM ST(i) Compare ST(0) with ST(i).
D8 D1 FCOM Compare ST(0) with ST(1).
D8 /3 FCOMP m32real Compare ST(0) with m32real and pop register stack.
DC /3 FCOMP m64real Compare ST(0) with m64real and pop register stack.
D8 D8+i FCOMP ST(i) Compare ST(0) with ST(i) and pop register stack.
D8 D9 FCOMP Compare ST(0) with ST(1) and pop register stack.
DE D9 FCOMPP Compare ST(0) with ST(1) and pop register stack twice.
"""
def FCOM(sti):
    """FCOM ST(i): compare ST(0) with ST(i) and encode the outcome in the
    C0/C2/C3 condition bits of the status word.

    C3 C2 C0 = 000 -> ST(0) > ST(i); 001 -> ST(0) < ST(i); 100 -> equal;
    111 -> unordered (unreachable with ordinary Python numbers).
    """
    #if 32 bits => 32-bit op
    #else if 64 bits => 64-bit op
    #else, error
    c=status.getC()
    if pila.getI(pila.head())[0] > pila.getI(pila.head()-sti)[0]:
        c[0]= 0
        c[2]= 0
        c[3]= 0
    elif pila.getI(pila.head())[0] < pila.getI(pila.head()-sti)[0]:
        c[0]= 1
        c[2]= 0
        c[3]= 0
    elif pila.getI(pila.head())[0] == pila.getI(pila.head()-sti)[0]:
        c[0]= 0
        c[2]= 0
        c[3]= 1
    else:  # unordered
        c[0]= 1
        c[2]= 1
        c[3]= 1
    status.setC(c)
def FCOMP(sti):
    """FCOMP ST(i): compare ST(0) with ST(i), then pop the register stack."""
    FCOM(sti)
    uesp = pila.pop()[0]
    res = uesp
    status.incTOP() #TODO: check for an off-by-one here
def FCOMPP():
    """FCOMPP (DE D9): compare ST(0) with ST(1), then pop the stack twice."""
    FCOM(1)
    uesp = pila.pop()[0] #first pop
    status.incTOP() #TODO: check for an off-by-one here
    uesp = pila.pop()[0] #second pop, required
    res = uesp
    status.incTOP() #TODO: check for an off-by-one here
#Operaciones sobre st0
def FCOS():
    """FCOS (D9 FF): replace ST(0) with its cosine.

    C2 flags an out-of-range operand (|ST(0)| > 2**63).  NOTE(review): the
    cosine is computed even in that case, unlike real hardware, which leaves
    the operand unchanged.
    """
    caux = status.getC()
    if abs(pila.getI(pila.head())[0]) > (2**63):
        caux[2]=1
    else:
        caux[2]=0
    pila.push(math.cos(pila.pop()[0]))
    if pila.getI(pila.head())[0] == 0 :
        statusX86._ZF=1
    status.setC(caux)
    res =pila.getI(pila.head())[0]
    return res
"""
Opcode Instruction Description
D9 FE FSIN Replace ST(0) with its sine.
"""
def FSIN():
    """FSIN (D9 FE): replace ST(0) with its sine.

    C2 flags an out-of-range operand (|ST(0)| > 2**63).  NOTE(review): the
    sine is computed even in that case, unlike real hardware.
    """
    caux = status.getC()
    if abs( pila.getI(pila.head())[0]) > (2**63):
        caux[2]=1
    else:
        caux[2]=0
    pila.push(math.sin(pila.pop()[0]))
    if pila.getI(pila.head())[0] == 0 :
        statusX86._ZF=1
    status.setC(caux)
    res =pila.getI(pila.head())[0]
    return res
"""
Opcode Instruction Description
D9 FB FSINCOS Compute the sine and cosine of ST(0); replace ST(0) with
the sine, and push the cosine onto the register stack.
"""
def FSINCOS():
    """FSINCOS (D9 FB): replace ST(0) with sin(ST(0)) and push cos(ST(0))."""
    caux = status.getC()
    aux= pila.getI(pila.head())[0]
    if abs(aux) > (2**63):  # out-of-range operand -> C2
        caux[2]=1
    else:
        caux[2]=0
    pila.push(math.sin(pila.pop()[0]))
    pila.push(math.cos(aux))
    status.decTOP()  # a net element was pushed
    if pila.getI(pila.head())[0] == 0 :
        statusX86._ZF=1
    status.setC(caux)
    res =pila.getI(pila.head())[0]
    return res
"""
Opcode Instruction Description
D9 FA FSQRT Calculates square root of ST(0) and stores the result in
ST(0)
"""
def FSQRT():
    """FSQRT (D9 FA): replace ST(0) with its square root; ZF tracks a zero result."""
    pila.push(math.sqrt(pila.pop()[0]))
    root = pila.getI(pila.head())[0]
    if root == 0:
        statusX86._ZF = 1
    return root
"""
Opcode Instruction Description
D8 F0+i FDIV ST(0), ST(i) Divide ST(0) by ST(i) and store result in ST(0)
DC F8+i FDIV ST(i), ST(0) Divide ST(i) by ST(0) and store result in ST(i)
DE F8+i FDIVP ST(i), ST(0) Divide ST(i) by ST(0), store result in ST(i), and pop the
register stack
"""
def FDIV (st0,sti):
    """FDIV: ST(0) <- ST(0) / ST(sti).

    Sets the zero-divide exception flag when ST(sti) is 0; NOTE(review): the
    division is still performed afterwards, so ZeroDivisionError propagates
    to the caller in that case.
    """
    a = pila.getI(pila.head()-sti)[0]
    b = pila.getI(pila.head())[0]
    if a == 0:
        status._ZE = 1
    res = b / a
    pila.setI(pila.head(),res)
    if b == 0:  # zero numerator -> zero result
        statusX86._ZF=1
    return pila.getI(pila.head())[0]
def FDIVP (sti,st0):
    """FDIVP: divide, then pop; returns the popped old ST(0) value."""
    FDIV(sti,st0)
    uesp = pila.pop()[0] #first pop
    status.incTOP() #TODO: check for an off-by-one here
    return uesp
#Operaciones de liberación de cabeza de pila
def FFREE():
    """FFREE: mark the top stack slot empty (value None, tag bits [1,1])."""
    pila.setI(pila.head(),None,[1,1])
    res =pila.getI(pila.head())[0]  # NOTE(review): local and unused
def FLD(num):
    """FLD: push num onto the register stack and decrement TOP."""
    pila.push(num)
    status.decTOP()
    res =pila.getI(pila.head())[0]  # NOTE(review): local and unused
"""
Opcode Instruction Description
D9 E8 FLD1 Push +1.0 onto the FPU register stack.
D9 E9 FLDL2T Push log210 onto the FPU register stack.
D9 EA FLDL2E Push log2e onto the FPU register stack.
D9 EB FLDPI Push π onto the FPU register stack.
D9 EC FLDLG2 Push log102 onto the FPU register stack.
D9 ED FLDLN2 Push loge2 onto the FPU register stack.
D9 EE FLDZ Push +0.0 onto the FPU register stack.
"""
# Constant loaders; TOP is already decremented inside FLD.
def FLD1():
    """Push +1.0 (D9 E8)."""
    FLD(1.0)
def FLDL2T():
    """Push log2(10) (D9 E9)."""
    FLD(math.log(10,2))
def FLDL2E():
    """Push log2(e) (D9 EA)."""
    FLD(math.log(math.e,2))
def FLDPI():
    """Push pi (D9 EB)."""
    FLD(math.pi)
def FLDLG2():
    """Push log10(2) (D9 EC)."""
    FLD(math.log10(2))
def FLDLN2():
    """Push ln(2) (D9 ED)."""
    FLD(math.log(2,math.e))
def FLDZ():
    """Push +0.0 (D9 EE)."""
    FLD(0.0)
"""
Opcode Instruction Description
D9 /2 FST m32real Copy ST(0) to m32real
DD /2 FST m64real Copy ST(0) to m64real
DD D0+i FST ST(i) Copy ST(0) to ST(i)
D9 /3 FSTP m32real Copy ST(0) to m32real and pop register stack
DD /3 FSTP m64real Copy ST(0) to m64real and pop register stack
DB /7 FSTP m80real Copy ST(0) to m80real and pop register stack
DD D8+i FSTP ST(i) Copy ST(0) to ST(i) and pop register stack
"""
def FST(mreal):
    """FST: return ST(0) without popping (RAM store model is still TODO)."""
    uesp= pila.getI(pila.head())[0]
    res =uesp
    return uesp
def FSTP(mreal):
    """FSTP: pop and return ST(0) (RAM store model is still TODO)."""
    uesp= pila.pop()[0]
    status.incTOP() #TODO: check for an off-by-one here
    res = uesp
    return uesp
#incrementa TOP de status
def FINCSTP():
    """FINCSTP: increment the TOP field of the FPU status word."""
    status.incTOP()
#Multiplicación
"""
Opcode Instruction Description
D8 /1 FMUL m32real Multiply ST(0) by m32real and store result in ST(0)
DC /1 FMUL m64real Multiply ST(0) by m64real and store result in ST(0)
D8 C8+i FMUL ST(0), ST(i) Multiply ST(0) by ST(i) and store result in ST(0)
DC C8+i FMUL ST(i), ST(0) Multiply ST(i) by ST(0) and store result in ST(i)
DE C8+i FMULP ST(i), ST(0) Multiply ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE C9 FMULP Multiply ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /1 FIMUL m32int Multiply ST(0) by m32int and store result in ST(0)
DE /1 FIMUL m16int Multiply ST(0) by m16int and store result in ST(0)
"""
def FMUL (st0=0,sti=1):
    """FMUL: ST(0) <- ST(0) * ST(sti); sets x86 ZF when the product is zero.

    NOTE(review): st0 is unused.
    """
    a = pila.getI(pila.head()-sti)[0]
    b = pila.getI(pila.head())[0]
    res = a * b
    pila.setI(pila.head(),res)
    if res == 0 :
        statusX86._ZF=1
    return pila.getI(pila.head())[0]
def FMULP (st0,sti):
    """FMULP: multiply, then pop; returns the popped old ST(0) value."""
    FMUL(st0,sti)
    uesp = pila.pop()[0] #first pop
    #status.incTOP() #TODO: check for an off-by-one here
    return uesp
#No Operation
def FNOP():
    """FNOP (D9 D0): no operation."""
    pass
"""
Opcode Instruction Description
D9 F3 FPATAN Replace ST(1) with arctan(ST(1)/ST(0)) and pop the register stack
"""
def FPATAN():
    """FPATAN (D9 F3): ST(1) <- arctan(ST(1)/ST(0)); pop the register stack.

    NOTE(review): reads/writes absolute slot 1 via pila.getI(1)/setI(1); the
    other ST(i) operations address pila.head()-i -- confirm against Pila's
    layout.
    """
    pila.setI(1,math.atan(pila.getI(1)[0]/ pila.getI(pila.head())[0]))
    uesp=pila.pop()[0]
    status.incTOP() #TODO: check for an off-by-one here
    if uesp == 0 :
        statusX86._ZF=1
    res = uesp
    return uesp
"""
Opcode Instruction Clocks Description
D9 F2 FPTAN 17-173 Replace ST(0) with its tangent and push 1
onto the FPU stack.
"""
def FPTAN():
caux=status.getC()
if pila.getI(pila.head()) < 2**63:
caux[2]=0
status.setC(caux)
pila.setI(pila.head(),math.tan( pila.getI(pila.head())))
if pila.getI(pila.head())[0] == 0 :
statusX86._ZF=1
FLD1()
status.decTOP()
else:
caux[2]=1
status.setC(caux)
print "Operando fuera de rango"
"""
Opcode Instruction Description
D9 FC FRNDINT Round ST(0) to an integer.
"""
def FRNDINT():
    """FRNDINT (D9 FC): round ST(0) to the nearest integer (Python round())."""
    pila.push(int(round(pila.pop()[0])))
    res =pila.getI(pila.head())[0]  # NOTE(review): local and unused
def FSCALE():
    """FSCALE: ST(0) <- ST(0) * 2**ST(1).

    NOTE(review): index 1 is an absolute slot, not head()-1 -- confirm
    against Pila's layout.
    """
    # getI returns a (value, tags) pair; the original multiplied the pairs
    # themselves, which raises TypeError.  Index [0] extracts the values.
    pila.setI(pila.head(), pila.getI(pila.head())[0]*(2**pila.getI(1)[0]))
    res =pila.getI(pila.head())[0]
    #TODO, set flags
"""
Opcode Instruction Description
D9 C8+i FXCH ST(i) Exchange the contents of ST(0) and ST(i)
D9 C9 FXCH Exchange the contents of ST(0) and ST(1)
"""
def FXCH(sti):
    """FXCH ST(i): swap the (value, tags) of ST(0) and ST(i)."""
    aux = pila.getI(pila.head()-sti)
    pila.setI(pila.head()-sti, pila.getI(pila.head())[0], pila.getI(pila.head())[1])
    pila.setI(pila.head(),aux[0],aux[1])
    res =pila.getI(pila.head())[0]  # NOTE(review): local and unused
"""
Opcode Instruction Description
D9 F1 FYL2X Replace ST(1) with (ST(1) ∗ log2ST(0)) and pop the
register stack
"""
def FYL2X():
    """FYL2X (D9 F1): ST(1) <- ST(1) * log2(ST(0)); pop the register stack."""
    # The original passed the whole (value, tags) pair to math.log (TypeError)
    # and omitted the ST(1) factor required by the opcode table above.
    # NOTE(review): index 1 is an absolute slot, not head()-1 -- confirm.
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0], 2))
    uesp=pila.pop()[0]
    status.incTOP() #TODO: check this is right
    res = uesp
    return uesp
"""
Opcode Instruction Description
D9 F9 FYL2XP1 Replace ST(1) with ST(1) ∗ log2(ST(0) + 1.0) and pop the
register stack
"""
def FYL2XP1():
    """FYL2XP1 (D9 F9): ST(1) <- ST(1) * log2(ST(0) + 1.0); pop the stack.

    The original redefined FYL2X here (clobbering it) and computed
    log2(ST(0)) + 1 instead of log2(ST(0) + 1.0).
    """
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0] + 1.0, 2))
    uesp=pila.pop()[0]
    status.incTOP() #TODO: check this is right
    res = uesp
    return uesp
#If run as an executable, report that this is a library implementing the 8087 FPU instruction set, show the documentation, and exit.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fpu_structure import *
from datahandling import *
import math
#TODO, faltan agregar las modificaciones que se hacen a las banderas de los diferentes registros
#TODO, faltan un montón de instrucciones
uesp = None #ultimo_elemento_sacado_de_pila
pila = Pila()
control = ControlRegister()
status = StatusRegister()
pinout = Pinout()
statusX86 = StatusX86()
overflow = False
underflow = False
#pag 121
def F2XM1():
pila.push((2**pila.pop()[0] )-1)
#pag 123
def FABS():
pila.push(abs(pila.pop()[0]))
# Operaciones de Adición
"""
Operaciones de adición
OpcodeInstructionDescription
D8 /0 FADD m32 realAdd m32real to ST(0) and store result in ST(0)
DC /0 FADD m64real Add m64real to ST(0) and store result in ST(0)
D8 C0+i FADD ST(0), ST(i)Add ST(0) to ST(i) and store result in ST(0)
DC C0+i FADD ST(i), ST(0)Add ST(i) to ST(0) and store result in ST(i)
DE C0+i FADDP ST(i), ST(0) Add ST(0) to ST(i), store result in ST(i), and pop the
register stack
DE C1 FADDPAdd ST(0) to ST(1), store result in ST(1), and pop the
register stack
DA /0 FIADD m32int Add m32int to ST(0) and store result in ST(0)
DE /0 FIADD m16int Add m16int to ST(0) and store result in ST(0)
"""
#FADD
def FADD(self, *args):
assert 1 <= len(args) <= 2
st0 = args[0]
sti = args[1]
if len(args) == 2:
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FADD, st0"
#raise()
else:
#print st0,";", sti
pila.setI(pila.head(), pila.getI(pila.head())[0]+pila.getI(1)[0])#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
elif len(args) == 1:
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
#aux = pila.pop()[0]
#print "num=", num
pila.push(pila.pop()[0]+args[0])
else:
print "Error de argumentos", args
#FADDP
def FADDP():
    """FADDP (no operand): ST(1) <- ST(1) + ST(0), then pop.

    NOTE(review): immediately rebound by the two-operand FADDP below, so
    this zero-argument form is unreachable.
    """
    pila.setI(1,pila.getI(1)[0]+ pila.getI(pila.head())[0]) #pila[1]=pila[1]+pila[0]
    uesp = pila.pop()[0] #careful here when the intermediate pop register changes
    return uesp
def FADDP(sti,st0):
    """FADDP ST(i), ST(0): add into ST(1) and pop; returns the popped value
    or None when the operand check fails."""
    uesp = None
    if st0 == sti or (st0!= 0 and sti != 0):
        print "Error en FADDP, st0"
        #raise()
    else:
        pila.setI(1,pila.getI(1)[0]+ pila.getI(pila.head())[0]) #pila[1]=pila[1]+pila[0]
        uesp = pila.pop()[0] #careful here when the intermediate pop register changes
        #status.incTOP() #TODO: check for an off-by-one here
    return uesp
def FIADD(num): #operación entera
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(pila.pop()[0]+num)
"""
Opcode Instruction Description
D8 /4 FSUB m32real Subtract m32real from ST(0) and store result in ST(0)
DC /4 FSUB m64real Subtract m64real from ST(0) and store result in ST(0)
D8 E0+i FSUB ST(0), ST(i) Subtract ST(i) from ST(0) and store result in ST(0)
DC E8+i FSUB ST(i), ST(0) Subtract ST(0) from ST(i) and store result in ST(i)
DE E8+i FSUBP ST(i), ST(0) Subtract ST(0) from ST(i), store result in ST(i), and pop
register stack
DE E9 FSUBP Subtract ST(0) from ST(1), store result in ST(1), and pop
register stack
DA /4 FISUB m32int Subtract m32int from ST(0) and store result in ST(0)
DE /4 FISUB m16int Subtract m16int from ST(0) and store result in ST(0)
"""
#FSUB
def FSUB(num):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(pila.pop()[0]-num)
'''
def FSUB(m64real)
pass
'''
def FSUB(st0=0,sti=0):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FSUB, st0"
#raise()
else:
pila.setI(pila.head(), pila.getI(pila.head())[0]-pila.getI(1)[0])#pila[0] = pila[st0] - pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
#FSUBP
def FSUBP():
pila.setI(1,pila.getI(1)[0]- pila.getI(pila.head())[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FSUBP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBP, st0"
#raise()
else:
pila.setI(1,pila.getI(1)[0]- pila.getI(pila.head())[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FISUB(num): #operación entera
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(pila.pop()[0]-num)
"""
Opcode Instruction Description
D8 /5 FSUBR m32real Subtract ST(0) from m32real and store result in ST(0)
DC /5 FSUBR m64real Subtract ST(0) from m64real and store result in ST(0)
D8 E8+i FSUBR ST(0), ST(i) Subtract ST(0) from ST(i) and store result in ST(0)
DC E0+i FSUBR ST(i), ST(0) Subtract ST(i) from ST(0) and store result in ST(i)
DE E0+i FSUBRP ST(i), ST(0) Subtract ST(i) from ST(0), store result in ST(i), and pop
register stack
DE E1 FSUBRP Subtract ST(1) from ST(0), store result in ST(1), and pop
register stack
DA /5 FISUBR m32int Subtract ST(0) from m32int and store result in ST(0)
DE /5 FISUBR m16int Subtract ST(0) from m16int and store result in ST(0)
"""
#FSUBR
def FSUBR(num):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(num - pila.pop()[0])
'''
def FSUBR(m64real)
pass
'''
def FSUBR(st0=0,sti=0):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FSUBR, st0"
#raise()
else:
pila.setI(pila.head(),pila.getI(1)[0]- pila.getI(pila.head())[0])#pila[0] = pila[st0] - pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
#FSUBRP
def FSUBRP():
    """FSUBRP (no operand): ST(1) <- ST(0) - ST(1), then pop.

    The original was misspelled FSUBRR; the opcode table above and the
    sibling FADDP/FSUBP pairs show FSUBRP was intended.  (A two-operand
    FSUBRP defined later in the file rebinds the name.)
    """
    pila.setI(1, pila.getI(pila.head())[0]-pila.getI(1)[0]) #pila[1]=pila[0]-pila[1]
    uesp = pila.pop()[0] #careful here when the intermediate pop register changes
    #status.incTOP() #TODO: check for an off-by-one here
    return uesp
def FSUBRP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBRP, st0"
#raise()
else:
pila.setI(1, pila.getI(pila.head())[0]-pila.getI(1)[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FISUBR(num): #operación entera
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(num-pila.pop()[0])
#Operaciones de BCD
def FBLD(bcd): #convertir bcd a real y hacerle push
#numreal = bcd
#acá hay que convertirlo
#acá se lo empuja
pila.push(BCD2dec(bcd))
def FBSTP(bcd):
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
#Operaciones de Signo
def FCHS():
pila.setI(pila.head(),-1* pila.getI(pila.head())[0])
#Operaciones de Registros (no de pila)
def FCLEX():
#TODO check first for and handles any pending unmasked floating-point exceptions before cleaning
#clean flags
status._PE=0
status._UE=0
status._OE=0
status._ZE=0
status._DE=0
status._IE=0
# status._ES=0 # pentium processors
# status._EF=0 # pentium processors
status._B=0
def FNCLEX():
#clean flags without checking
status._PE=0
status._UE=0
status._OE=0
status._ZE=0
status._DE=0
status._IE=0
# status._ES=0 # pentium processors
# status._EF=0 # pentium processors
status._B=0
#Operaciones de Movimientos condicionales (pag 137)
# Conditional moves (page 137): copy ST(sti) into ST(0) when the x86 flag
# condition holds, then free the source slot.
# Fix: every variant after FCMOVB used `sti` without declaring a parameter
# (NameError as soon as its condition fired); FCMOVBE also tested
# status._ZF instead of statusX86._ZF.  All now take sti=1 by default.
def FCMOVB(sti=1):   # move if below (CF=1)
    if statusX86._CF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVE(sti=1):   # move if equal (ZF=1)
    if statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVBE(sti=1):  # move if below or equal (CF=1 or ZF=1)
    if statusX86._CF or statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVU(sti=1):   # move if unordered (PF=1)
    if statusX86._PF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNB(sti=1):  # move if not below (CF=0)
    if not statusX86._CF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNE(sti=1):  # move if not equal (ZF=0)
    if not statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNBE(sti=1): # move if not below or equal (CF=0 and ZF=0)
    if statusX86._CF == 0 and statusX86._ZF == 0:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNU(sti=1):  # move if not unordered (PF=0)
    if not statusX86._PF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
#Operaciones de Comparación
"""
Opcode Instruction Description
D8 /2 FCOM m32real Compare ST(0) with m32real.
DC /2 FCOM m64real Compare ST(0) with m64real.
D8 D0+i FCOM ST(i) Compare ST(0) with ST(i).
D8 D1 FCOM Compare ST(0) with ST(1).
D8 /3 FCOMP m32real Compare ST(0) with m32real and pop register stack.
DC /3 FCOMP m64real Compare ST(0) with m64real and pop register stack.
D8 D8+i FCOMP ST(i) Compare ST(0) with ST(i) and pop register stack.
D8 D9 FCOMP Compare ST(0) with ST(1) and pop register stack.
DE D9 FCOMPP Compare ST(0) with ST(1) and pop register stack twice.
"""
def FCOM():
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
FCOMST(1)
def FCOMST(sti):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
c=status.getC()
if pila.getI(pila.head())[0] > pila.getI(pila.head()-sti)[0]:
c[0]= 0
c[2]= 0
c[3]= 0
elif pila.getI(pila.head())[0] < pila.getI(pila.head()-sti)[0]:
c[0]= 1
c[2]= 0
c[3]= 0
elif pila.getI(pila.head())[0] == pila.getI(pila.head()-sti)[0]:
c[0]= 0
c[2]= 0
c[3]= 1
else:
c[0]= 1
c[2]= 1
c[3]= 1
status.setC(c)
def FCOM(num):
    """Compare ST(0) against a literal number by temporarily extending the
    stack.

    Deliberately bypasses Pila's encapsulation so the extra slot does not
    trip the stack-length checks.
    NOTE(review): `pila.__pst` at module level is NOT name-mangled; if Pila
    assigns self.__pst inside its class body, the real attribute is
    `_Pila__pst` and this access raises AttributeError -- confirm.
    """
    pila.__pst.append(num) #the value to compare against
    pila.__ptag.append([0,0]) #tag entry kept in step, just in case
    # The original called FCOM() here; by the time this runs, the name FCOM
    # is bound to THIS one-argument function, so the call recursed with the
    # wrong arity.  Call the ST(i) comparator directly instead.
    FCOMST(1)
    #remove the temporary slot
    pila.__pst.pop()
    pila.__ptag.pop()
def FCOMP():
    """FCOMP (no operand): compare ST(0) with ST(1) and pop the stack.

    NOTE(review): shadowed later in this file by FCOMP(num).
    """
    # The original called FCOM(), which by redefinition resolves to the
    # one-argument FCOM(num) and raised TypeError; compare via FCOMST(1).
    FCOMST(1)
    uesp = pila.pop()[0]
    #status.incTOP() #TODO: check for an off-by-one here
def FCOMPST(sti):
FCOMST(sti)
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
def FCOMP(num):
FCOM(num)
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
def FCOMPP():
    """FCOMPP (DE D9): compare ST(0) with ST(1), then pop the stack twice."""
    # The original called FCOM(), which resolves to the one-argument
    # FCOM(num) rebinding and raised TypeError; compare via FCOMST(1).
    FCOMST(1)
    uesp = pila.pop()[0] #first pop
    #status.incTOP() #TODO: check for an off-by-one here
    uesp = pila.pop()[0] #second pop, required
    #status.incTOP() #TODO: check for an off-by-one here
#Operaciones de Comparación de enteros
"""
Opcode Instruction Description
DB F0+i FCOMI ST, ST(i) Compare ST(0) with ST(i) and set status flags accordingly
DF F0+i FCOMIP ST, ST(i) Compare ST(0) with ST(i), set status flags accordingly, and
pop register stack
DB E8+i FUCOMI ST, ST(i) Compare ST(0) with ST(i), check for ordered values, and
set status flags accordingly
DF E8+i FUCOMIP ST, ST(i) Compare ST(0) with ST(i), check for ordered values, set
status flags accordingly, and pop register stack
"""
def FCOMI(sti):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
if pila.getI(pila.head())[0] > pila.getI(pila.head()-sti)[0]:
statusX86._CF= 0
statusX86._PF= 0
statusX86._ZF= 0
elif pila.getI(pila.head())[0] < pila.getI(pila.head()-sti)[0]:
statusX86._CF= 1
statusX86._PF= 0
statusX86._ZF= 0
elif pila.getI(pila.head())[0] == pila.getI(pila.head()-sti)[0]:
statusX86._CF= 0
statusX86._PF= 0
statusX86._ZF= 1
else:
statusX86._CF= 1
statusX86._PF= 1
statusX86._ZF= 1
def FCOMIP(sti):
FCOMI(sti)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
def FUCOMI(sti):
#TODO, check for ordered values
FCOMI(sti)
def FUCOMIP(sti):
#TODO, check for ordered values
FCOMIP(sti)
"""
Opcode Instruction Description
D9 E4 FTST Compare ST(0) with 0.0.
"""
def FTST():
FCOM(0.0)
"""
Opcode Instruction Description
DD E0+i FUCOM ST(i) Compare ST(0) with ST(i)
DD E1 FUCOM Compare ST(0) with ST(1)
DD E8+i FUCOMP ST(i) Compare ST(0) with ST(i) and pop register stack
DD E9 FUCOMP Compare ST(0) with ST(1) and pop register stack
DA E9 FUCOMPP Compare ST(0) with ST(1) and pop register stack twice
"""
def FUCOM():
FUCOM(1)
def FUCOM(sti):
FCOM(sti)
def FUCOMP():
return FUCOMP(1)
def FUCOMP(sti):
FCOMP(sti)
def FUCOMPP():
    """FUCOMPP (DA E9): unordered-compare ST(0) with ST(1); pop twice."""
    # The surviving FUCOM binding requires sti; the original zero-argument
    # call raised TypeError.
    FUCOM(1)
    uesp= pila.pop()[0]
    status.incTOP() #TODO: check this is right
    uesp= pila.pop()[0]
    status.incTOP() #TODO: check this is right
#Operaciones sobre st0
def FCOS():
caux = status.getC()
if abs( pila.getI(pila.head())[0]) > (2**63):
caux[2]=1
status.setC(caux)
else:
caux[2]=0
status.setC(caux)
pila.push(math.cos(pila.pop()[0]))
"""
Opcode Instruction Description
D9 FE FSIN Replace ST(0) with its sine.
"""
def FSIN():
caux = status.getC()
if abs( pila.getI(pila.head())[0]) > (2**63):
caux[2]=1
status.setC(caux)
else:
caux[2]=0
status.setC(caux)
pila.push(math.sin(pila.pop()[0]))
"""
Opcode Instruction Description
D9 FB FSINCOS Compute the sine and cosine of ST(0); replace ST(0) with
the sine, and push the cosine onto the register stack.
"""
def FSINCOS():
caux = status.getC()
aux= pila.getI(pila.head())[0]
if abs(aux) > (2**63):
caux[2]=1
status.setC(caux)
else:
caux[2]=0
status.setC(caux)
pila.push(math.sin(pila.pop()[0]))
pila.push(math.cos(aux))
status.decTOP()
"""
Opcode Instruction Description
D9 FA FSQRT Calculates square root of ST(0) and stores the result in
ST(0)
"""
def FSQRT():
pila.push(math.sqrt(pila.pop()[0]))
def FDECSTP():
    """FDECSTP: decrement the TOP field of the FPU status word."""
    # TOP lives in the status word, not the stack object; the original
    # called pila.decTOP() (cf. FINCSTP, which uses status.incTOP()).
    status.decTOP()
#TODO, faltan realizar las operaciones sonbre C1, el manual está incorrecto :S
#operaciones de división
"""
Opcode Instruction Description
D8 /6 FDIV m32real Divide ST(0) by m32real and store result in ST(0)
DC /6 FDIV m64real Divide ST(0) by m64real and store result in ST(0)
D8 F0+i FDIV ST(0), ST(i) Divide ST(0) by ST(i) and store result in ST(0)
DC F8+i FDIV ST(i), ST(0) Divide ST(i) by ST(0) and store result in ST(i)
DE F8+i FDIVP ST(i), ST(0) Divide ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE F9 FDIVP Divide ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /6 FIDIV m32int Divide ST(0) by m32int and store result in ST(0)
DE /6 FIDIV m16int Divide ST(0) by m64int and store result in ST(0)
"""
def FDIV(num):
pila.setI(pila.head(), pila.getI(pila.head())[0]/num)
def FDIV (sti):
pila.setI(pila.head(), pila.getI(pila.head())[0]/ pila.getI(pila.head()-sti)[0])
def FDIV (sti,st0):
    """FDIV ST(i), ST(0): ST(i) <- ST(i) / ST(0)."""
    # The original wrote the quotient via pila.setI(i, ...) where `i` was
    # never defined (NameError); the destination slot is ST(sti).
    pila.setI(pila.head()-sti, pila.getI(pila.head()-sti)[0]/ pila.getI(pila.head())[0])
def FDIVP():
FDIV(1,0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FDIVP (sti,st0):
FDIV(sti,st0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FIDIV(num):
FDIV(num)
#Operaciones de división inversas
"""
Opcode Instruction Description
D8 /7 FDIVR m32real Divide m32real by ST(0) and store result in ST(0)
DC /7 FDIVR m64real Divide m64real by ST(0) and store result in ST(0)
D8 F8+i FDIVR ST(0), ST(i) Divide ST(i) by ST(0) and store result in ST(0)
DC F0+i FDIVR ST(i), ST(0) Divide ST(0) by ST(i) and store result in ST(i)
DE F0+i FDIVRP ST(i), ST(0) Divide ST(0) by ST(i), store result in ST(i), and pop the
register stack
DE F1 FDIVRP Divide ST(0) by ST(1), store result in ST(1), and pop the
register stack
DA /7 FIDIVR m32int Divide m32int by ST(0) and store result in ST(0)
DE /7 FIDIVR m16int Divide m64int by ST(0) and store result in ST(0)
"""
def FDIVR(num):
pila.setI(pila.head(),num/ pila.getI(pila.head())[0])
def FDIVR (sti):
    """FDIVR ST(0), ST(i): ST(0) <- ST(i) / ST(0).

    NOTE(review): shadowed by the two-operand FDIVR defined right after it.
    """
    # The original read pila.getI(i) with `i` undefined (NameError);
    # ST(i) is addressed relative to the stack head.
    pila.setI(pila.head(), pila.getI(pila.head()-sti)[0]/ pila.getI(pila.head())[0])
def FDIVR (sti,st0):
pila.setI(pila.head()-sti, pila.getI(pila.head())[0]/ pila.getI(pila.head()-sti)[0])
def FDIVPR():
    """FDIVRP (no operand): reverse-divide into ST(1) and pop.

    NOTE(review): shadowed by the two-operand FDIVPR defined below.
    """
    # The original delegated to FDIV(1,0); a reverse divide must go through
    # FDIVR (cf. the two-operand FDIVPR, which does call FDIVR).
    FDIVR(1,0)
    uesp = pila.pop()[0] #first pop
    #status.incTOP() #TODO: check for an off-by-one here
    return uesp
def FDIVPR (sti,st0):
FDIVR(sti,st0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FIDIVR(num):
FDIVR(num)
#Operaciones de liberación de cabeza de pila
def FFREE():
pila.setI(pila.head(),None,[1,1])
#Operaciones de comparación de enteros
"""
Opcode Instruction Description
DE /2 FICOM m16int Compare ST(0) with m16int
DA /2 FICOM m32int Compare ST(0) with m32int
DE /3 FICOMP m16int Compare ST(0) with m16int and pop stack register
DA /3 FICOMP m32int Compare ST(0) with m32int and pop stack register
"""
def FICOM(num):
FCOM(num)
def FICOMP(num):
FICOM(num)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
#Operaciones de carga de pila
"""
Opcode Instruction Description
DF /0 FILD m16int Push m16int onto the FPU register stack.
DB /0 FILD m32int Push m32int onto the FPU register stack.
DF /5 FILD m64int Push m64int onto the FPU register stack.
"""
def FILD(num):
status.decTOP()
pila.push(num)
"""
Opcode Instruction Description
D9 /0 FLD m32real Push m32real onto the FPU register stack.
DD /0 FLD m64real Push m64real onto the FPU register stack.
DB /5 FLD m80real Push m80real onto the FPU register stack.
D9 C0+i FLD ST(i) Push ST(i) onto the FPU register stack.
"""
def FLD(num):
pila.push(num)
status.decTOP()
def FLDST(sti): #¿esto es así? no es muy claro en el manual, pag 167
pila.push( pila.getI(pila.head()-sti))
status.decTOP()
"""
Opcode Instruction Description
D9 E8 FLD1 Push +1.0 onto the FPU register stack.
D9 E9 FLDL2T Push log210 onto the FPU register stack.
D9 EA FLDL2E Push log2e onto the FPU register stack.
D9 EB FLDPI Push π onto the FPU register stack.
D9 EC FLDLG2 Push log102 onto the FPU register stack.
D9 ED FLDLN2 Push loge2 onto the FPU register stack.
D9 EE FLDZ Push +0.0 onto the FPU register stack.
"""
def FLD1():
FLD(1.0)
def FLDL2T():
FLD(math.log(10,2)) #log en base 2 de 10
def FLDL2E():
FLD(math.log(math.e,2))#log en base 2 de e
def FLDPI():
FLD(math.pi)
def FLDLG2():
FLD(math.log10(2))
def FLDLN2():
FLD(math.log(2,math.e))
def FLDZ():
FLD(0.0)
"""
Opcode Instruction Description
D9 /5 FLDCW m2byte Load FPU control word from m2byte.
"""
def FLDCW(m2byte):
FLD(m2byte) #TODO, modelo de memoria, para poder cargar solo lo que hace falta
"""
Opcode Instruction Description
D9 /4 FLDENV m14/28byte Load FPU environment from m14byte or m28byte.
"""
def FLDENV(mbyte):
pass #TODO
#operaciones de extracción de stack
"""
Opcode Instruction Description
DF /2 FIST m16int Store ST(0) in m16int
DB /2 FIST m32int Store ST(0) in m32int
DF /3 FISTP m16int Store ST(0) in m16int and pop register stack
DB /3 FISTP m32int Store ST(0) in m32int and pop register stack
DF /7 FISTP m64int Store ST(0) in m64int and pop register stack
"""
def FIST(dirmem):
uesp = pila.getI(pila.head())[0]
#acá falta agregar un modelo de memoria RAM para poder cargar el valor donde corresponde
return uesp
def FISTP(dirmem):
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
#acá falta agregar un modelo de memoria RAM para poder cargar el valor donde corresponde
return uesp
"""
Opcode Instruction Description
D9 /2 FST m32real Copy ST(0) to m32real
DD /2 FST m64real Copy ST(0) to m64real
DD D0+i FST ST(i) Copy ST(0) to ST(i)
D9 /3 FSTP m32real Copy ST(0) to m32real and pop register stack
DD /3 FSTP m64real Copy ST(0) to m64real and pop register stack
DB /7 FSTP m80real Copy ST(0) to m80real and pop register stack
DD D8+i FSTP ST(i) Copy ST(0) to ST(i) and pop register stack
"""
def FST(mreal):
uesp= pila.getI(pila.head())[0]
return uesp
def FST_ST(i):
    """FST ST(i): copy ST(0) (value and tags) into ST(i)."""
    # The original wrote to absolute slot 1 regardless of i; address ST(i)
    # relative to the stack head, as the other ST(i) operations do.
    pila.setI(pila.head()-i, pila.getI(pila.head())[0], pila.getI(pila.head())[1])
def FSTP(mreal):
uesp= pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FSTP_ST(i):
FST_ST(i)
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
#incrementa TOP de status
def FINCSTP():
status.incTOP()
#Inicialización de la FPU
def FINIT():
#TODO, check for and handles any pending unmasked floating-point exceptions
FNINIT()
pass
def FNINIT():
#TODO, poner
# control en 1101111111 #037Fh
# TAG word en FFFFh
# los demás: status, data pointer instruction pointer, last instruction opcode, en 0 (cero)
pass
#Multiplicación
"""
Opcode Instruction Description
D8 /1 FMUL m32real Multiply ST(0) by m32real and store result in ST(0)
DC /1 FMUL m64real Multiply ST(0) by m64real and store result in ST(0)
D8 C8+i FMUL ST(0), ST(i) Multiply ST(0) by ST(i) and store result in ST(0)
DC C8+i FMUL ST(i), ST(0) Multiply ST(i) by ST(0) and store result in ST(i)
DE C8+i FMULP ST(i), ST(0) Multiply ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE C9 FMULP Multiply ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /1 FIMUL m32int Multiply ST(0) by m32int and store result in ST(0)
DE /1 FIMUL m16int Multiply ST(0) by m16int and store result in ST(0)
"""
def FMUL(num):
pila.setI(pila.head(), pila.getI(pila.head())[0]*num)
def FMUL_ST (sti):
pila.setI(pila.head(), pila.getI(pila.head())[0]* pila.getI(pila.head()-sti)[0])
def FMUL (sti,st0):
pila.setI(pila.head()-sti, pila.getI(pila.head()-sti)[0]* pila.getI(pila.head())[0])
def FMULP():
FMUL(1,0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FMULP (sti,st0):
FMUL(sti,st0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FIMUL(num):
FMUL(num)
#No Oeration
def FNOP():
pass
"""
Opcode Instruction Description
D9 F3 FPATAN Replace ST(1) with arctan(ST(1)/ST(0)) and pop the register stack
"""
def FPATAN():
pila.setI(1,math.atan(pila.getI(1)[0]/ pila.getI(pila.head())[0]))
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
"""
Opcode Instruction Description
D9 F8 FPREM Replace ST(0) with the remainder obtained from
dividing ST(0) by ST(1)
"""
def FPREM():
pila.setI(pila.head(), pila.getI(pila.head())[0]%pila.getI(1)[0])
#TODO, setear las variables status._C # pag 182
"""
Opcode Instruction Description
D9 F5 FPREM1 Replace ST(0) with the IEEE remainder obtained from
dividing ST(0) by ST(1)
"""
def FPREM1():
FPREM() #TODO, cuando se cambien los modelos, esto hay que cambiarlo para que cumpla con la IEEE que ahora no cumple
"""
Opcode Instruction Clocks Description
D9 F2 FPTAN 17-173 Replace ST(0) with its tangent and push 1
onto the FPU stack.
"""
def FPTAN():
    """FPTAN (D9 F2): replace ST(0) with its tangent and push 1.0.

    Only operands with ST(0) < 2**63 are in range: C2 is cleared and the
    tangent computed; otherwise C2 is set and a diagnostic is printed.

    Bug fixed: the original compared and passed the whole (value, tag)
    tuple returned by pila.getI() instead of the numeric value, so the
    range check was meaningless and math.tan() always failed.
    """
    caux=status.getC()
    if pila.getI(pila.head())[0] < 2**63:
        caux[2]=0
        status.setC(caux)
        pila.setI(pila.head(),math.tan( pila.getI(pila.head())[0]))
        status.decTOP()
        FLD1()
    else:
        caux[2]=1
        status.setC(caux)
        print("Operando fuera de rango")
"""
Opcode Instruction Description
D9 FC FRNDINT Round ST(0) to an integer.
"""
def FRNDINT():
    """FRNDINT (D9 FC): round ST(0) to the nearest integer."""
    value, tag = pila.pop()
    pila.push(int(round(value)))
"""
Opcode Instruction Description
DD /4 FRSTOR m94/108byte Load FPU state from m94byte or m108byte.
Restaura el estado de la FPU desde memoria
"""
def FRSTOR():
    """FRSTOR (DD /4): reload the full FPU state from a 94/108-byte
    memory image.  Not implemented yet (see manual p.190)."""
    pass #TODO
"""
Opcode Instruction Description
9B DD /6 FSAVE m94/108byte Store FPU state to m94byte or m108byte after checking for
pending unmasked floating-point exceptions. Then re-
initialize the FPU.
DD /6 FNSAVE* m94/108byte Store FPU environment to m94byte or m108byte without
checking for pending unmasked floating-point exceptions.
Then re-initialize the FPU.
Guarda el estado de la FPU en la dirección memoria dada
"""
def FSAVE(m94_108byte):
    """FSAVE (9B DD /6): store the FPU state to m94/108byte after
    checking for pending unmasked exceptions, then re-initialize the
    FPU.  Not implemented yet."""
    pass #TODO
def FNSAVE(m94_108byte):
    """FNSAVE (DD /6): as FSAVE but without the pending-exception check.

    Bug fixed: the original defined FSAVE twice; per the opcode table
    above, the second definition was meant to be FNSAVE (the first was
    simply shadowed).  Not implemented yet."""
    pass #TODO
"""
Opcode Instruction Description
9B D9 /7 FSTCW m2byte Store FPU control word to m2byte after checking for
pending unmasked floating-point exceptions.
D9 /7 FNSTCW* m2byte Store FPU control word to m2byte without checking for
pending unmasked floating-point exceptions.
"""
def FSTCW(m2byte):
    """FSTCW (9B D9 /7): store the FPU control word to m2byte after
    checking for pending unmasked exceptions.  Not implemented yet."""
    pass
def FNSTCW(m2byte):
    """FNSTCW (D9 /7): store the control word without the
    pending-exception check.  Not implemented yet."""
    pass
"""
Opcode Instruction Description
9B D9 /6 FSTENV m14/28byte Store FPU environment to m14byte or m28byte after
checking for pending unmasked floating-point exceptions.
Then mask all floating-point exceptions.
D9 /6 FNSTENV* m14/28byte Store FPU environment to m14byte or m28byte without
checking for pending unmasked floating-point exceptions.
Then mask all floating-point exceptions.
"""
def FSTENV(m14_28byte):
    """FSTENV (9B D9 /6): store the FPU environment to m14/28byte after
    checking for pending unmasked exceptions, then mask all exceptions.
    Not implemented yet."""
    pass
def FNSTENV(m14_28byte):
    """FNSTENV (D9 /6): as FSTENV but without the pending-exception
    check.  Not implemented yet."""
    pass
"""
Opcode Instruction Description
9B DD /7 FSTSW m2byte Store FPU status word at m2byte after checking for
pending unmasked floating-point exceptions.
9B DF E0 FSTSW AX Store FPU status word in AX register after checking for
pending unmasked floating-point exceptions.
DD /7 FNSTSW* m2byte Store FPU status word at m2byte without checking for
pending unmasked floating-point exceptions.
DF E0 FNSTSW* AX Store FPU status word in AX register without checking for
pending unmasked floating-point exceptions.
"""
def FSTSW(m2byte=None):
    """FSTSW: store the FPU status word.

    With an argument: store to m2byte (9B DD /7); with no argument:
    store to AX (9B DF E0).  Bug fixed: the original defined FSTSW once
    per addressing form, so the second def shadowed the first and the
    m2byte form raised a TypeError; merged behind an optional parameter.
    Not implemented yet."""
    pass
def FNSTSW(m2byte=None):
    """FNSTSW: as FSTSW but without checking for pending unmasked
    floating-point exceptions (DD /7 and DF E0 forms -- same duplicate-
    definition bug fixed the same way).  Not implemented yet."""
    pass
"""
Opcode Instruction Description
D9 FD FSCALE Scale ST(0) by ST(1).
"""
def FSCALE():
    """FSCALE (D9 FD): ST(0) = ST(0) * 2**ST(1).

    Bug fixed: pila.getI() returns a (value, tag) tuple; the original
    used both tuples directly, so 2**tuple raised a TypeError.
    NOTE(review): the scale operand is read with pila.getI(1) -- the
    absolute slot 1, not pila.head()-1 -- confirm the intended ST(1).
    """
    pila.setI(pila.head(), pila.getI(pila.head())[0]*(2**pila.getI(1)[0]))
    #TODO: set the status flags
def FWAIT():
    """FWAIT/WAIT: CPU-FPU synchronisation point; nothing to do in this
    simulator."""
    return None
"""
Opcode Instruction Description
D9 E5 FXAM Classify value or number in ST(0)
"""
#TODO
def FXAM():
    """FXAM (D9 E5): classify the value in ST(0).

    Intended semantics (from the opcode table above, still TODO):
    C1 gets the sign of ST(0) (0 positive, 1 negative) and C3,C2,C0
    encode the class -- 000 unsupported, 001 NaN, 010 normal,
    011 infinity, 100 zero, 101 empty, 110 denormal.
    """
    pass
"""
Opcode Instruction Description
D9 C8+i FXCH ST(i) Exchange the contents of ST(0) and ST(i)
D9 C9 FXCH Exchange the contents of ST(0) and ST(1)
"""
def FXCH(sti=1):
    """FXCH (D9 C8+i / D9 C9): exchange the contents of ST(0) and ST(i).

    The no-operand form exchanges ST(0) and ST(1).  Bug fixed: the
    original defined a zero-argument FXCH that was immediately shadowed
    by the one-argument def, so calling FXCH() raised a TypeError;
    merged via a default argument.
    """
    aux = pila.getI(pila.head()-sti)  # (value, tag) of ST(i)
    pila.setI(pila.head()-sti, pila.getI(pila.head())[0], pila.getI(pila.head())[1])
    pila.setI(pila.head(),aux[0],aux[1])
"""
Opcode Instruction Description
D9 F4 FXTRACT Separate value in ST(0) into exponent and significand,
store exponent in ST(0), and push the significand onto the
register stack.
"""
def FXTRACT():
    """FXTRACT (D9 F4): split ST(0) into exponent (left in ST(0)) and
    significand (pushed onto the stack).  Not implemented yet."""
    pass #TODO
"""
Opcode Instruction Description
D9 F1 FYL2X Replace ST(1) with (ST(1) ∗ log2ST(0)) and pop the
register stack
"""
def FYL2X():
    """FYL2X (D9 F1): ST(1) = ST(1) * log2(ST(0)), then pop.

    Bugs fixed: the original passed the whole (value, tag) tuple to
    math.log (TypeError) and never multiplied by ST(1) as the opcode
    table above requires.
    NOTE(review): the result slot is the absolute index 1, matching the
    file's other two-operand instructions -- confirm intended.
    Returns the popped value.
    """
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0], 2))
    uesp=pila.pop()[0]
    #status.incTOP() #TODO: check whether this is right
    return uesp
"""
Opcode Instruction Description
D9 F9 FYL2XP1 Replace ST(1) with ST(1) ∗ log2(ST(0) + 1.0) and pop the
register stack
"""
def FYL2XP1():
    """FYL2XP1 (D9 F9): ST(1) = ST(1) * log2(ST(0) + 1.0), then pop.

    Bugs fixed: the original block redefined FYL2X (clobbering the
    D9 F1 handler defined just above), passed the (value, tag) tuple to
    math.log, computed log2(ST(0)) + 1 instead of log2(ST(0) + 1.0),
    and never multiplied by ST(1).  Renamed and corrected per the
    opcode table above.  Returns the popped value.
    """
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0] + 1.0, 2))
    uesp=pila.pop()[0]
    #status.incTOP() #TODO: check whether this is right
    return uesp
#Si es llamado como ejecutable, entonces decir que esto es una librería del set de instrucción de la fpu 8087, mostrar la doc y salir.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de fpu_structure.py
"""
import unittest
import random
from fpu_structure import Pila, StatusRegister, ControlRegister, StatusX86
"""
Test Pila
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Especificado en cada test en particular
Dependencias:
Pila
Método:
Especificado en cada test en particular
Esperado:
Test OK
"""
class TestPila(unittest.TestCase):
    """
    Purpose:
        Check that data pushed onto the stack with the first form of
        push() (value plus explicit tag) matches what is read back
    Dependencies:
        Pila
    Method:
        Random integer values combined with every possible TAG pair are
        pushed onto the stack
        Each stored value is compared against the one inserted
    """
    def test_push_1(self):
        pila = Pila()
        st = 11111111
        tag = [[0,0],[0,1],[1,0],[1,1]]
        for i in range(2):
            for t in tag:
                st = random.randint(-2e10,2e10)
                pila.push(st,t)
                self.assertEqual((pila._pst[len(pila._pst)-1],pila._ptag[len(pila._pst)-1]),(st,t)) #compare the head with what was just pushed
    """
    Purpose:
        Check that data pushed onto the stack with the second form of
        push() (value only, default tag) matches what is read back
    Dependencies:
        Pila
    Method:
        Eight random float values are pushed onto the stack
        Each stored value is compared against the one inserted
    """
    def test_push_2(self):
        pila = Pila()
        st = 11111111
        for i in range (8):
            st = random.random()
            pila.push(st)
            self.assertEqual(pila._pst[len(pila._pst)-1],st) #compare the head with what was just pushed
    """
    Purpose:
        Check that pop() returns the stored values in LIFO order
    Dependencies:
        Pila
        Assumes Pila.push(*args) works correctly
    Method:
        Eight random values are pushed onto the stack
        The values are then popped and compared, in reverse order,
        with the ones inserted
    """
    def test_pop(self):
        #first, elements must be pushed onto the stack
        pila = Pila()
        st =[]
        for i in range (8):
            st.append(random.random())
            pila.push(st[i])
        #then the values are popped and compared
        #print st
        for i in range (8):
            self.assertEqual(pila.pop()[0],st[7-i])
    """
    Purpose:
        Check that getI(i) returns the value stored at index i
    Dependencies:
        Pila
        Assumes Pila.push(*args) works correctly
    Method:
        Eight random values are pushed onto the stack
        Each index is read back and compared with the value that was
        inserted there
    """
    def test_getI(self):
        #first, elements must be pushed onto the stack
        pila = Pila()
        st =[]
        for i in range (8):
            st.append(random.random())
            pila.push(st[i])
        #then the values are read back and compared
        #print st
        #print pila._pst
        #print pila._ptag
        for i in range (8):
            self.assertEqual(pila.getI(i)[0],st[i])
    """
    Purpose:
        Check that the index of the head of the stack is returned
        correctly
    Dependencies:
        Pila
        Assumes Pila.push(*args) works correctly
        Assumes Pila.getI(*args) works correctly
    Method:
        Random integer values with every possible TAG pair are pushed
        onto the stack.
        After each insertion, the value at head() is compared with the
        value just inserted (which confirms it is the head of the stack)
    """
    def test_head(self):
        pila = Pila()
        st = 11111111
        tag = [[0,0],[0,1],[1,0],[1,1]]
        for i in range(2):
            for t in tag:
                st = random.randint(-2e10,2e10)
                pila.push(st,t)
                self.assertEqual(pila.getI(pila.head()),(st,t))
    """
    Purpose:
        Check that the length of the stack is reported correctly
    Dependencies:
        Pila
        Assumes Pila.push(*args) works correctly
    Method:
        Random values are pushed onto the stack.
        After each insertion, the current index (i+1, since it starts
        at zero) is compared with pila.length()
    """
    def test_length(self):
        pila = Pila()
        st = 11111111
        self.assertEqual(0,pila.length())
        for i in range (8):
            st = random.random()
            pila.push(st)
            self.assertEqual(i+1,pila.length())
    """
    Purpose:
        Check that stack values are deleted correctly
    Dependencies:
        Pila
        Assumes Pila.push(*args) works correctly
    Method:
        Eight random values are pushed onto the stack.
        The first value of the stack is then deleted repeatedly with
        pila.delI(0), expecting a return value of True each time
    """
    def test_delI(self):
        #first, elements must be pushed onto the stack
        pila = Pila()
        st =[]
        for i in range (8):
            st.append(random.random())
            pila.push(st[i])
        #then they are deleted one by one from index 0
        for i in range (8):
            #print pila.delI(0)
            self.assertEqual(True,pila.delI(0))
    """
    Purpose:
        Check that data written with the 3-argument form of Pila.setI()
        (index, value and tag) matches what is read back
    Dependencies:
        Pila
        Assumes Pila.push(*args) works correctly
    Method:
        The stack is filled with known values; every slot is then
        overwritten with a random value/tag pair, read back and compared
    """
    def test_setI_1(self):
        pila = Pila()
        st = 0000000
        tag = [[0,0],[0,1],[1,0],[1,1]]
        #fill the stack with known values
        for i in range(8):
            pila.push(st,tag[3])
        #overwrite every value and check that the changes took
        #effect
        st = []
        i=0
        for j in range (2):
            for t in tag:
                st.append(random.random())
                pila.setI(i,st[i],t)
                self.assertEqual(pila.getI(i),(st[i],t))
                i+=1
    """
    Purpose:
        Check that data written with the 2-argument form of Pila.setI()
        (index and value, default tag) matches what is read back
    Dependencies:
        Pila
        Assumes Pila.push(*args) works correctly
    Method:
        The stack is filled with known values; every slot is then
        overwritten with a random value, read back and compared
    """
    def test_setI_2(self):
        pila = Pila()
        st = 0000000
        tag = [1,1]
        #fill the stack with known values
        for i in range(8):
            pila.push(st,tag)
        #overwrite every value and check that the changes took
        #effect
        st = []
        for i in range (8):
            st.append(random.random())
            pila.setI(i,st[i])
            self.assertEqual(pila.getI(i)[0],st[i])
#class TestStatusX86(unittest.TestCase):
# pass
class TestControlRegister(unittest.TestCase):
    """Unit tests for ControlRegister's PC, RC and IC bit-pair accessors."""
    def test_setPC_1(self):
        #set the Precision Control pair in a single call
        control = ControlRegister()
        PC =[[0,0],[0,1],[1,0],[1,1]]
        for pc in PC:
            control.setPC(pc)
            self.assertEqual(pc,control._PC)
    def test_setPC_2(self):
        #set the Precision Control bits one at a time
        control = ControlRegister()
        PC =[[0,0],[0,1],[1,0],[1,1]]
        for pc in PC:
            control.setPC(pc[0],pc[1])
            self.assertEqual(pc,control._PC)
    #assumes setPC() works correctly
    #NOTE(review): not named test_*, so unittest never runs it -- confirm intent
    def getPC(self):
        control = ControlRegister()
        PC =[[0,0],[0,1],[1,0],[1,1]]
        for pc in PC:
            control.setPC(pc[0],pc[1])
            self.assertEqual(control.getPC(),pc)
    def test_setRC_1(self):
        #set the Rounding Control pair in a single call
        control = ControlRegister()
        RC =[[0,0],[0,1],[1,0],[1,1]]
        for rc in RC:
            control.setRC(rc)
            self.assertEqual(rc,control._RC)
    def test_setRC_2(self):
        #set the Rounding Control bits one at a time
        control = ControlRegister()
        RC =[[0,0],[0,1],[1,0],[1,1]]
        for rc in RC:
            control.setRC(rc[0],rc[1])
            self.assertEqual(rc,control._RC)
    #assumes setRC() works correctly
    #NOTE(review): not named test_*, so unittest never runs it -- confirm intent
    def getRC(self):
        control = ControlRegister()
        RC =[[0,0],[0,1],[1,0],[1,1]]
        for rc in RC:
            control.setRC(rc[0],rc[1])
            self.assertEqual(control.getRC(),rc)
    def test_setIC_1(self):
        #set the Infinity Control pair in a single call
        control = ControlRegister()
        IC =[[0,0],[0,1],[1,0],[1,1]]
        for ic in IC:
            control.setIC(ic)
            self.assertEqual(ic,control._IC)
    def test_setIC_2(self):
        #set the Infinity Control bits one at a time
        control = ControlRegister()
        IC =[[0,0],[0,1],[1,0],[1,1]]
        for ic in IC:
            control.setIC(ic[0],ic[1])
            self.assertEqual(ic,control._IC)
    #assumes setIC() works correctly
    #NOTE(review): not named test_*, so unittest never runs it -- confirm intent
    def getIC(self):
        control = ControlRegister()
        IC =[[0,0],[0,1],[1,0],[1,1]]
        for ic in IC:
            #bug fix: the original called setIC(rc[0], rc[1]) here, a
            #NameError -- the loop variable is ic
            control.setIC(ic[0],ic[1])
            self.assertEqual(control.getIC(),ic)
class TestStatusRegister(unittest.TestCase):
    # TODO: StatusRegister is still untested; the bare string below
    # lists the planned test methods (TOP and condition-code accessors).
    pass
"""
def test_setTOP_1(self):
def test_setTOP_2(self):
def getTOP(self):
def test_setC_1(self):
def test_setC_2(self):
def test_getC(self):
def test_decTOP(self):
def test_incTOP(self):
"""
# Run this test module's suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#import re
import copy
#import instruction_set as iset #modificado por el momento
import reduced_instruction_set as iset
from fpu_structure import *
uesp = None #ultimo_elemento_sacado_de_pila
uesp_temp = None #ultimo_elemento_sacado_de_pila
pila_temp = None
control_temp = None
status_temp = None
pinout_temp = None
statusX86_temp = None
def parse(text):
    """Split console input into command token lists.

    Each line of *text* becomes a list of whitespace-separated tokens;
    tokens that parse as decimal integers are converted to int (for
    now only integers are supported -- the FPU does not care whether
    the input was decimal, binary, octal or hex), every other token
    (mnemonics, non-integer numbers) is kept as a string.
    """
    def _token(raw):
        try:
            return int(raw)
        except Exception:
            return raw
    return [[_token(raw) for raw in line.split()] for line in text.splitlines()]
def execute_command(commlista):
    """Execute one parsed command: commlista = [mnemonic, arg1, ...].

    Saves the FPU state first (so undo() can roll back), then builds the
    call "iset.<mnemonic>(<args>)" as a text string and exec's it.
    NOTE(review): exec of user-typed console input -- acceptable for a
    local simulator, but must never be exposed to untrusted input.
    """
    saveState()
    comm = commlista[0]
    params = commlista[1:]
    #trying a new way of doing things, with a text string
    #build the argument list as text: "(p1, p2, ...)"
    paramline = "("
    i=0
    for p in params:
        if i>0:
            paramline+=", "
        paramline+=str(p)
        i+=1
    paramline += ")"
    commline = "iset."+comm + paramline
    try:
        #iset.__getattribute__(comm)(params)
        #eval(comm)(p1,p2,p3...)
        exec commline
        #print "uesp", iset.uesp
        #print "res", iset.res
    except:
        #the function does not exist, or its parameters are wrong
        print "línea incorrecta:",commline
def undo():
    """Restore the FPU to the state captured by the last saveState().

    Bug fixed: ``uesp`` was assigned without being listed in the global
    statement, so it became a local and the module-level value was
    never actually restored.
    """
    global uesp, uesp_temp, pila_temp, control_temp, status_temp
    uesp = uesp_temp #last element popped from the stack
    iset.pila = pila_temp#copy.copy(pila_temp) #copy.deepcopy(pila_temp)
    iset.control = control_temp#copy.copy(control_temp) # copy.deepcopy(control_temp)
    iset.status = status_temp#copy.copy(status_temp) #copy.deepcopy(status_temp)
    #iset.pinout = #copy.copy(pinout_temp) #copy.deepcopy(pinout_temp)   TODO: pinout/statusX86 restore still disabled
    #iset.statusX86 = #copy.copy(statusX86_temp) #copy.deepcopy(statusX86_temp)
def rebootFPU():
    """Re-create the register stack and re-initialise every FPU
    register bank to its power-on state."""
    iset.pila = Pila()
    iset.control.iniciar()
    iset.status.iniciar()
    iset.pinout.iniciar()
    iset.statusX86.iniciar()
def saveState():
    """Snapshot the complete FPU state so undo() can roll back one step.

    Bug fixed: pinout_temp and statusX86_temp were assigned without
    being listed in the global statement, so they became locals and
    those two snapshots were silently dropped.
    """
    global uesp_temp, pila_temp, control_temp, status_temp, pinout_temp, statusX86_temp
    uesp_temp = uesp #last element popped from the stack
    pila_temp = copy.deepcopy(iset.pila) #deep copies: the undo snapshot must not alias live state
    control_temp = copy.deepcopy(iset.control)
    status_temp = copy.deepcopy(iset.status)
    pinout_temp = copy.deepcopy(iset.pinout)
    statusX86_temp = copy.deepcopy(iset.statusX86)
def cleanState():
    """Drop the saved undo snapshot (stack and register copies)."""
    global uesp_temp, pila_temp, control_temp, status_temp
    uesp_temp = None #last element popped from the stack
    pila_temp = None
    control_temp = None
    status_temp = None
    #pinout_temp / statusX86_temp are intentionally left untouched here
#When run as an executable:
#instantiate the required modules (nothing to do yet)
if __name__ == "__main__":
    pass
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de instruction_set.py
"""
import random
import math
#módulo de Tests Unitarios
import unittest
#importa el módulo a testear:
from reduced_instruction_set import *
"""
Test FLD
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Observar que el dato que se introduce mediante FLD de instruction_set.py
se corresponda con los datos que se almacenan en la pila
Dependencias:
Pila
StatusRegister
Método:
Se crean valores enteros positivos, cero y negativos los que se introducirán
mediante FLD.
Se comprueba que el valor almacenado corresponda con el introducido
Esperado:
Test OK
"""
class TestFLD(unittest.TestCase):
    # FLD must leave the loaded operand on top of the shared
    # module-level stack `pila` (see the module docstring above).
    def testFLDpos(self):
        pos = 111111 #use a positive value
        #apply FLD
        FLD(pos)
        #check that the value on top of the stack is the one loaded
        self.assertEqual(pila.getI(pila.head())[0],pos)
    def testFLDneg(self):
        neg = -111111 #use a negative value
        #apply FLD
        FLD(neg)
        #check that the value on top of the stack is the one loaded
        self.assertEqual(pila.getI(pila.head())[0],neg)
    def testFLDcero(self):
        cero = 0 #use zero
        #apply FLD
        FLD(cero)
        #check that the value on top of the stack is the one loaded
        self.assertEqual(pila.getI(pila.head())[0],cero)
#Test ABS
#
#class TestFABS(unittest.TestCase):
# pass
#Test FADD
class TestFADD(unittest.TestCase):
    # FADD(0,1) must leave a+b on top of the shared stack.
    # NOTE(review): unlike the later tests, this one does not drain the
    # shared module-level stack first -- confirm that earlier tests
    # always leave enough room for two pushes.
    def testFADD_1(self):
        a = random.randint(-2**10,2**10)
        b = random.randint(-2**10,2**10)
        c = a + b
        pila.push(a)
        pila.push(b)
        #print pila._pst
        FADD(0,1)
        #print pila._pst
        self.assertEqual(pila.getI(pila.head())[0],c)
#Test FSUB
class TestFSUB(unittest.TestCase):
    # FSUB(0,1) must leave b-a on top of the stack, where a was pushed
    # first (ST(1)) and b second (ST(0)).
    def testFSUB(self):
        # drain the shared module-level stack so earlier tests cannot
        # interfere
        for i in range(8):
            pila.pop()
        a = random.randint(-2**10,2**10)
        b = random.randint(-2**10,2**10)
        c = b - a
        pila.push(a)
        pila.push(b)
        #print pila._pst
        FSUB(0,1)
        #print pila._pst
        self.assertEqual(pila.getI(pila.head())[0],c)
class TestFMUL(unittest.TestCase):
    """FMUL(0,1) must leave a*b on top of the stack."""
    def testFMUL(self):
        # drain the shared module-level stack so earlier tests cannot
        # interfere
        for i in range(8):
            pila.pop()
        a = random.randint(-2**6,2**6)
        b = random.randint(-2**6,2**6)
        c = a * b
        pila.push(a)
        pila.push(b)
        # fixed: removed leftover Python-2 debug prints of pila._pst
        # that polluted the test output
        FMUL(0,1)
        self.assertEqual(pila.getI(pila.head())[0],c)
class TestFDIV(unittest.TestCase):
    """FDIV(0,1) must leave b/a on top of the stack."""
    def testFDIV(self):
        # drain the shared module-level stack so earlier tests cannot
        # interfere
        for i in range(8):
            pila.pop()
        a = random.randint(-2**6,2**6)
        # bug fix: the original could draw a == 0 and crash on b / a
        # with an intermittent ZeroDivisionError
        while a == 0:
            a = random.randint(-2**6,2**6)
        b = random.randint(-2**6,2**6)
        c = b / a
        pila.push(a)
        pila.push(b)
        # fixed: removed leftover Python-2 debug prints of pila._pst
        FDIV(0,1)
        self.assertEqual(pila.getI(pila.head())[0],c)
class TestFCOS(unittest.TestCase):
    """FCOS must replace ST(0) with math.cos(ST(0))."""
    def testFCOS(self):
        # start from an empty shared stack
        for _ in range(8):
            pila.pop()
        operand = random.randint(-2**6,2**6)
        expected = math.cos(operand)
        pila.push(operand)
        FCOS()
        self.assertEqual(pila.getI(pila.head())[0], expected)
class TestFSIN(unittest.TestCase):
    """FSIN must replace ST(0) with math.sin(ST(0))."""
    def testFSIN(self):
        # start from an empty shared stack
        for _ in range(8):
            pila.pop()
        operand = random.randint(-2**6,2**6)
        expected = math.sin(operand)
        pila.push(operand)
        FSIN()
        self.assertEqual(pila.getI(pila.head())[0], expected)
class TestFSINCOS(unittest.TestCase):
    """FSINCOS must leave cos(x) in ST(0) and sin(x) in ST(1)."""
    def testFSINCOS(self):
        # start from an empty shared stack
        for _ in range(8):
            pila.pop()
        operand = random.randint(-2**6,2**6)
        cos_x = math.cos(operand)
        sin_x = math.sin(operand)
        pila.push(operand)
        FSINCOS()
        self.assertEqual(pila.getI(pila.head())[0], cos_x)
        self.assertEqual(pila.getI(pila.head()-1)[0], sin_x)
class TestFSQRT(unittest.TestCase):
    """FSQRT must replace ST(0) with math.sqrt(ST(0))."""
    def testFSQRT(self):
        # start from an empty shared stack
        for _ in range(8):
            pila.pop()
        operand = random.randint(0,2**6)  # sqrt needs a non-negative operand
        expected = math.sqrt(operand)
        pila.push(operand)
        FSQRT()
        self.assertEqual(pila.getI(pila.head())[0], expected)
class TestFSTP(unittest.TestCase):
    """FSTP(dest) must return ST(0), pop it, and expose the old ST(1)."""
    def testFSTP(self):
        # start from an empty shared stack
        for _ in range(8):
            pila.pop()
        top = random.randint(-2**6,2**6)
        below = random.randint(-2**6,2**6)
        pila.push(below)
        pila.push(top)
        self.assertEqual(FSTP(1111), top)  # 1111 is a dummy destination
        self.assertEqual(pila.getI(pila.head())[0], below)
class TestFCOM(unittest.TestCase):
    # FCOM compares ST(0) with ST(i) and encodes the outcome in the
    # condition-code bits returned by status.getC().
    def testFCOM(self):
        # drain the shared module-level stack first
        for i in range(8):
            pila.pop()
        a = [2,1,0]   #values loaded as ST(0) per case
        b = [1,2,0]   #values loaded as ST(1) per case
        # expected condition codes: ST0>ST1, ST0<ST1, ST0==ST1, unordered
        # NOTE(review): 4 expected entries but only 3 iterations -- the
        # unordered case [1,0,1,1] is never exercised
        c= [[0,0,0,0],[1,0,0,0],[0,0,0,1],[1,0,1,1]]
        for i in range(3):
            pila.push(b[i])
            pila.push(a[i])
            FCOM(1)
            self.assertEqual(status.getC(),c[i])
# Run this test module's suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datahandling import *
"""
Pin Configuration
"""
"""
pin[ 1 ]='GND'
pin[ 2:16 ]= AD[14:0]
pin[ 17 ]= 'NC'
pin[ 18 ]= 'NC'
pin[ 19 ]= 'CLK'
pin[ 20 ]= 'GND'
pin[ 21 ]= 'RESET'
pin[ 22 ]= 'READY'
pin[ 23 ]= 'BUSY'
pin[ 24 ]= QS1
pin[ 25 ]= QS0
pin[ 26 ]= S0 #neg
pin[ 27 ]= S1 #neg
pin[ 28 ]= S2 #neg
pin[ 29 ]= 'NC'
pin[ 30 ]= 'NC'
pin[ 31 ]= RQ/GT0 #neg
pin[ 32 ]= INT
pin[ 33 ]= RQ/GT1 #neg
pin[ 34 ]= BHE #neg
pin[ 35 : 38 ]= S[6:3]
pin[ 39 ]= AD[15]
pin[ 40 ]= 'VCC'
"""
class Pinout:
    """Model of the 8087's 40-pin package.

    Only the 16 address/data lines (_AD) carry live state so far; the
    full pin assignment (the no-op string below, mirroring the pin
    table at the top of this file) is still TODO.
    """
    def __init__(self):
        self.iniciar()
    def iniciar(self): #reset to the power-on state
        self._AD =[0 for i in range(16)] #address/data lines AD0..AD15
        self._pin=[None for i in range(40)] #one slot per physical pin, still unassigned
        """ self.pin[ 1 ]='GND'
        self.pin[ 2:16 ]= _AD[14:0]
        self.pin[ 17 ]= 'NC'
        self.pin[ 18 ]= 'NC'
        self.pin[ 19 ]= 'CLK'
        self.pin[ 20 ]= 'GND'
        self.pin[ 21 ]= 'RESET'
        self.pin[ 22 ]= 'READY'
        self.pin[ 23 ]= 'BUSY'
        self.pin[ 24 ]= 'QS1'
        self.pin[ 25 ]= 'QS0'
        self.pin[ 26 ]= 'S0' #neg
        self.pin[ 27 ]= 'S1' #neg
        self.pin[ 28 ]= 'S2' #neg
        self.pin[ 29 ]= 'NC'
        self.pin[ 30 ]= 'NC'
        self.pin[ 31 ]= 'RQ/GT0' #neg
        self.pin[ 32 ]= 'INT'
        self.pin[ 33 ]= 'RQ/GT1' #neg
        self.pin[ 34 ]= 'BHE' #neg
        self.pin[ 35 : 38 ]= [0,0,0,0]#S[6:3]
        self.pin[ 39 ]= self._AD[15]
        self.pin[ 40 ]= 'VCC'
        """
"""
Control Unit (CU)
Recibe las instrucciones
Decodifica los operandos
Ejecuta rutinas de control
"""
"""
Numeric Execution Unit (NEU)
Ejecuta las instrucciones numéricas
"""
"""
Data Field:
Compuesto por la Pila
"""
"""
Pila
Esta está compuesta de 7 registros de 80 bits.
Cada registro consta de
64 bits mas bajos de significand
15 bits de exponente
1 bit de signo
"""
"""
Tag Field
Cada registro tiene correspondencia uno a uno con un registro del data field
"""
class Pila:
    """The 8087 register stack.

    Two parallel lists: _pst holds the ST values and _ptag the 2-bit
    tag pairs; at most 8 entries, head() is the last element.  Out-of-
    range reads return the sentinel (0, [1,1]) ("empty" tag).

    Fixes over the original: push() repeated its len(args)==2 branch
    twice; setI() carried an unreachable 1-argument branch; setRegs()
    called setI(st[0], st[1]) -- using the stored value as the index --
    and left debug prints behind.
    """
    def __init__(self):
        self.iniciar()
    def iniciar(self):
        """Reset the stack to empty (power-on state)."""
        self._pst=[]   #ST value stack
        self._ptag=[]  #tag stack: one [b1, b0] pair per value
    def push(self, *args):
        """push(st[, tag]): append a value; tag defaults to [0,0].

        At most 8 registers are held; extra pushes are rejected with a
        diagnostic message."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            st, tag = args
        else:
            st = args[0]
            tag = [0,0]
        if len(self._pst) < 8 :
            self._pst.append(st)
            self._ptag.append(tag)
        else:
            print("fallo al empujar valor a la pila, demasiados valores")
            #raise #exception
    def pop(self):
        """Remove and return (value, tag); an empty stack yields (0, [1,1])."""
        try:
            return(self._pst.pop(),self._ptag.pop())
        except:
            return(0,[1,1])
    def getI(self,i):
        """Return (value, tag) at absolute index i, or (0, [1,1]) when out of range."""
        if len(self._pst) > 8 or i<0:
            return(0,[1,1])
        try:
            return(self._pst[i],self._ptag[i])
        except:
            return(0,[1,1])
    def setI(self,*args):
        """setI(i, st[, tag]): overwrite slot i; tag defaults to [0,0].

        Returns the sentinel (0, [1,1]) for a negative index."""
        assert 2 <= len(args) <= 3
        if len(args) == 3:
            i,st,tag = args
        else:
            i,st = args
            tag = [0,0]
        if len(self._pst) > 8 or i <0:
            return(0,[1,1])
        self._pst[i]=st
        self._ptag[i]=tag
    def delI(self,i):
        """Delete slot i; True on success, False when i is out of range."""
        try:
            del(self._pst[i])
            del(self._ptag[i])
            return True
        except:
            return False
    def length(self):
        """Number of occupied registers."""
        return len(self._pst)
    def head(self):
        """Index of the top of the stack (-1 when empty)."""
        return (len(self._pst)-1)
    def getRegs(self):
        """All 8 slots as (value, tag) pairs; absent slots read as (0, [1,1])."""
        return [ self.getI(i) for i in range (8)]
    def setRegs(self,pilatemp):
        """Load slots produced by getRegs() back into the stack.

        Bug fixed: the original called setI(st[0], st[1]) -- passing
        the stored value as the index -- and printed debug output."""
        for i, (st, tag) in enumerate(pilatemp):
            self.setI(i, st, tag)
"""
Control Register (16 bits)
"""
class ControlRegister:
    """8087 control word (16 bits).

    Exception masks (IM..PM), the interrupt mask M, and the PC
    (precision), RC (rounding) and IC (infinity) control bit pairs.

    Fixes over the original: getPC/getRC/getIC returned the bare names
    _PC/_RC/_IC (NameError at first call); setRegs() was missing
    ``self``; _PC1 mirrored _PC[0] instead of _PC[1].
    """
    def __init__(self):
        self.iniciar()
    def iniciar(self): #reset to 037Fh
        self._IM=1 #Invalid operation mask
        self._DM=1 #Denormalized Operand mask
        self._ZM=1 #Zero Divide mask
        self._OM=1 #Overflow mask
        self._UM=1 #Underflow mask
        self._PM=1 #Precision mask
        self._X=1 #Reserved
        self._M=0 #Interrupt Mask
        self._PC = [1, 1] #Precision Control
        self._PC0= self._PC[0]
        self._PC1= self._PC[1] #bug fix: the original copied _PC[0] here
        self._RC=[0, 0] #Rounding Control
        self._RC0=self._RC[0]
        self._RC1=self._RC[1]
        self._IC =[0, 0] #Infinity Control (0=projective, 1=affine)
        self._IC0 =self._IC[0]
        self._IC1 =self._IC[1]
        self._XX=[0,0] #last reserved bits
    def setPC(self, *args):
        """setPC(b0, b1) or setPC([b0, b1]): set the Precision Control pair."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._PC[0] =args[0]
            self._PC[1] =args[1]
        else:
            self._PC = args[0]
    def getPC(self):
        """Return the Precision Control pair."""
        return self._PC
    def setRC(self, *args):
        """setRC(b0, b1) or setRC([b0, b1]): set the Rounding Control pair."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._RC[0] =args[0]
            self._RC[1] =args[1]
        else:
            self._RC = args[0]
    def getRC(self):
        """Return the Rounding Control pair."""
        return self._RC
    def setIC(self, *args):
        """setIC(b0, b1) or setIC([b0, b1]): set the Infinity Control pair."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._IC[0] =args[0]
            self._IC[1] =args[1]
        else:
            self._IC = args[0]
    def getIC(self):
        """Return the Infinity Control pair."""
        return self._IC
    def getRegs(self):
        """All 16 control-word bits in the display order of getRegNames()."""
        return [self._IM, self._DM, self._ZM,self._OM,self._UM, self._PM,self._X,self._M,self._PC[0], self._PC[1],self._RC[0],self._RC[1],self._IC[0],self._IC[1],self._XX[0],self._XX[1]]
    def setRegs(self, IM,DM,ZM,OM,UM,PM,X0,M,PC0,PC1,RC0,RC1,IC0,IC1,X1,X2):
        """Set all 16 bits at once (bug fix: ``self`` was missing)."""
        self._IM=IM
        self._DM=DM
        self._ZM=ZM
        self._OM=OM
        self._UM=UM
        self._PM=PM
        self._X=X0
        self._M=M
        self._PC[0]=PC0
        self._PC[1]=PC1
        self._RC[0]=RC0
        self._RC[1]=RC1
        self._IC[0]=IC0
        self._IC[1]=IC1
        self._XX[0]=X1
        self._XX[1]=X2
    def getRegNames(self):
        """Widget/bit names matching getRegs() order."""
        return ['IM','DM','ZM','OM','UM','PM','X0','M','PC0','PC1','RC0','RC1','IC0','IC1','X1','X2']
"""
Status Register (16 bits)
"""
class StatusRegister:
    """8087 status word (16 bits).

    Exception flags (IE..PE), interrupt request, condition codes
    C0..C3 and the 3-bit TOP-of-stack pointer.

    Fix over the original: setRegs() was missing ``self``.
    """
    def __init__(self):
        self.iniciar()
    def iniciar(self): #reset to 0000h
        self._IE=0 #Invalid operation
        self._DE=0 #Denormalized Operand
        self._ZE=0 #Zero Divide
        self._OE=0 #Overflow
        self._UE=0 #Underflow
        self._PE=0 #Precision
        self._X=0 #Reserved
        self._IR=0 #Interrupt Request
        self._C=[0, 0, 0, 0 ] #Condition Code bits [C0, C1, C2, C3]
        self._C0=0
        self._C1=0
        self._C2=0
        self._TOP=[0, 0, 0] #Top Of Stack pointer (3 bits)
        self._C3=0
        self._B=0 #NEU busy
    def setTOP(self,*args):
        """setTOP(b0, b1, b2) or setTOP([b0, b1, b2]); any other arity
        is reported and ignored."""
        assert 1 <= len(args) <= 3
        if len(args) == 3:
            self._TOP[0] = args[0]
            self._TOP[1] = args[1]
            self._TOP[2] = args[2]
        elif len(args) == 1:
            self._TOP = args[0]
        else:
            print("Error de argumentos %s" % (args,))
    def getTOP(self):
        """Return the 3-bit TOP pointer."""
        return self._TOP
    def setC(self,*args):
        """setC(c0, c1, c2, c3) or setC([c0, c1, c2, c3]); any other
        arity is reported and ignored."""
        assert 1 <= len(args) <= 4
        if len(args) == 4:
            self._C[0] = args[0]
            self._C[1] = args[1]
            self._C[2] = args[2]
            self._C[3] = args[3]
        elif len(args) == 1:
            self._C = args[0]
        else:
            print("Error de argumentos %s" % (args,))
    def getC(self):
        """Return the condition-code list [C0, C1, C2, C3]."""
        return self._C
    def decTOP(self):
        """Decrement TOP modulo 8 (wraps 0 -> 7); uses bin2dec/dec2bin
        from datahandling."""
        aux=bin2dec(self._TOP)
        if aux== 0:
            aux=7
        else:
            aux-=1
        self._TOP=dec2bin(aux)
    def incTOP(self):
        """Increment TOP modulo 8 (wraps 7 -> 0)."""
        aux=bin2dec(self._TOP)
        if aux== 7:
            aux=0
        else:
            aux+=1
        self._TOP=dec2bin(aux)
    def getRegs(self):
        """All 16 status-word bits in the display order of getRegNames()."""
        return [self._IE, self._DE, self._ZE, self._OE, self._UE, self._PE, self._X, self._IR, self._C[0], self._C[1],self._C[2], self._TOP[0], self._TOP[1], self._TOP[2], self._C[3], self._B]
    def setRegs(self, IE,DE,ZE,OE,UE,PE,X,IR,C0,C1,C2,TOP0,TOP1,TOP2,C3,B):
        """Set all 16 bits at once (bug fix: ``self`` was missing)."""
        self._IE=IE
        self._DE=DE
        self._ZE=ZE
        self._OE=OE
        self._UE=UE
        self._PE=PE
        self._X = X
        self._IR=IR
        self._C[0]=C0
        self._C[1]=C1
        self._C[2]=C2
        self._TOP[0]=TOP0
        self._TOP[1]=TOP1
        self._TOP[2]=TOP2
        self._C[3]=C3
        self._B=B
    def getRegNames(self):
        """Widget/bit names matching getRegs() order."""
        return ['IE','DE','ZE','OE','UE','PE','X','IR','C0','C1','C2','TOP0','TOP1','TOP2','C3','B']
"""
Tag Word (16 bits) #listo
"""
"""
Instruction Pointer (32 bits)
"""
"""
Data Pointer (32 bits)
"""
"""
Registros necesarios del procesador 8086
"""
class StatusX86:
    """The 8086 status flags that the FPU simulator mirrors (9 flags).

    Fixes over the original: setRegs() was missing ``self``, and
    getRegNames() listed 'DF' twice (10 names for 9 flags), which
    desynchronised it from getRegs().
    """
    def __init__(self):
        self.iniciar()
    def iniciar(self): #reset to 0000h
        self._CF=0 #carry
        self._PF=0 #parity
        self._AF=0 #auxiliary carry
        self._ZF=0 #zero
        self._SF=0 #sign
        self._TF=0 #trap
        self._IF=0 #interrupt enable
        self._DF=0 #direction
        self._OF=0 #overflow
    def getRegs(self):
        """Flag values in the same order as getRegNames()."""
        return [self._CF,self._PF,self._AF,self._ZF,self._SF,self._TF,self._IF,self._DF,self._OF ]
    def setRegs(self, CF,PF,AF,ZF,SF,TF,IF,DF,OF):
        """Set all 9 flags at once (bug fix: ``self`` was missing)."""
        self._CF= CF
        self._PF= PF
        self._AF= AF
        self._ZF= ZF
        self._SF= SF
        self._TF= TF
        self._IF= IF
        self._DF= DF
        self._OF= OF
    def getRegNames(self):
        """Flag names matching getRegs() order (bug fix: duplicate 'DF'
        removed)."""
        return ['CF','PF','AF','ZF','SF','TF','IF','DF','OF']
#Si es llamado como ejecutable, entonces decir que esto es una librería que contiene las estructuras básicas de una fpu 8087 (pilas y registros), mostrar la doc y salir.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de reduced_instruction_set.py
"""
import random
#módulo de Tests Unitarios
import unittest
#importa el módulo a testear:
from reduced_instruction_set import *
"""
Test FLD
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Observar que el dato que se introduce mediante FLD de instruction_set.py
se corresponda con los datos que se almacenan en la pila
Dependencias:
Pila
StatusRegister
Método:
Se crean valores enteros positivos, cero y negativos los que se introducirán
mediante FLD.
Se comprueba que el valor almacenado corresponda con el introducido
Esperado:
Test OK
"""
class TestFLD(unittest.TestCase):
    # FLD must leave the loaded operand on top of the shared
    # module-level stack `pila` (see the module docstring above).
    def testFLDpos(self):
        pos = 111111 #use a positive value
        #apply FLD
        FLD(pos)
        #check that the value on top of the stack is the one loaded
        self.assertEqual(pila.getI(pila.head())[0],pos)
    def testFLDneg(self):
        neg = -111111 #use a negative value
        #apply FLD
        FLD(neg)
        #check that the value on top of the stack is the one loaded
        self.assertEqual(pila.getI(pila.head())[0],neg)
    def testFLDcero(self):
        cero = 0 #use zero
        #apply FLD
        FLD(cero)
        #check that the value on top of the stack is the one loaded
        self.assertEqual(pila.getI(pila.head())[0],cero)
#Test ABS
#
#class TestFABS(unittest.TestCase):
# pass
#Test FADD
class TestFADD(unittest.TestCase):
    """FADD in both its register form and its one-operand form."""
    def testFADD_1(self):
        # FADD(0,1): both operands come from the stack
        a = random.random()
        b = random.random()
        c = a + b
        pila.push(a)
        pila.push(b)
        FADD(0,1)
        self.assertEqual(pila._pst[pila.head()],c)
    def testFADD_2(self):
        # FADD(b): single operand added to ST(0).
        # Bug fixed: this method was also named testFADD_1, so it
        # silently shadowed the register-form test above and unittest
        # only ever ran one of the two.
        a = random.random()
        b = random.random()
        c = a + b
        pila.push(a)
        FADD(b)
        self.assertEqual(pila._pst[pila.head()],c)
# Run this test module's suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
from fpu_structure import * #mas comentarios
#from instruction_set import * #Por el momento queda así
import reduced_instruction_set as iset
from datahandling import *
import main #debe estar después de la instanciación de iset
class FPU_GUI:
    """GTK/Glade front-end for the FPU simulator.

    Wires the Glade widgets to main.py: the input console feeds parsed
    commands to main.execute_command(), and the ST-stack, register and
    result widgets are refreshed after every action.
    """
    def __init__(self):
        main.saveState()
        self._last_instruction = ""
        #Set the Glade file
        self.gladefile = "gui/fpugui.glade/fpuguiglade"
        self.wTree = gtk.glade.XML(self.gladefile)
        #Get the Main Window, and connect the "destroy" event
        self.windowST = self.wTree.get_widget("ST_Stack")
        if (self.windowST):
            self.windowST.connect("destroy", gtk.main_quit)
        #self.windowReg = self.wTree.get_widget("Registers")
        #if (self.windowReg):
        #    self.windowReg.connect("destroy", gtk.main_quit)
        self.windowConsole = self.wTree.get_widget("Consola")
        if (self.windowConsole):
            self.windowConsole.connect("destroy", gtk.main_quit)
        #Create our dictionary of signal handlers and connect it
        dic = {
            "on_Salir_destroy" : gtk.main_quit,
            "on_Ejecutar_clicked" : self.ejecutar,
            "on_Deshacer_clicked" : self.deshacer,
            "on_Reiniciar_clicked" : self.reiniciar,
            "on_Salir_clicked" : gtk.main_quit,
            }
        self.wTree.signal_autoconnect(dic)
    def ejecutar(self, widget):
        """'Execute' button: parse the console text and run each command."""
        lines = [] #load the lines here as an aid;
        #to take them out in order, pop(0) must be used (mind the 0, it must be there)
        #get the input console widget
        consola=self.wTree.get_widget("entrada_consola")
        buffConsola = consola.get_buffer()
        numlines=buffConsola.get_line_count()
        beginIter = buffConsola.get_start_iter() #buffConsola.get_iter_at_line(0)
        endIter = buffConsola.get_end_iter()
        text= buffConsola.get_text(beginIter,endIter)
        #parse the input data
        #check that it is valid
        #hand it to main for execution
        commands = main.parse(text)
        for comm in commands:
            main.execute_command(comm)
            self._last_instruction = comm
        #refresh the register, stack and result displays
        self.actualizarRegs()
        self.actualizarPila()
        self.actualizarResultados()
    def deshacer(self, widget):
        """'Undo' button: roll back to the last saved FPU state."""
        main.undo()
        self.actualizarRegs()
        self.actualizarPila()
    def reiniciar(self, widget):
        """'Restart' button: re-initialise the whole FPU."""
        main.rebootFPU()
        self.actualizarRegs()
        self.actualizarPila()
    #refresh the register output widgets
    def actualizarRegs(self):
        """Push the status- and control-register bits into their widgets."""
        try:
            #update the status registers
            regs_vals = iset.status.getRegs()
            regs_noms = iset.status.getRegNames()
            for i in range (16):
                self.wTree.get_widget(regs_noms[i]).set_text(str(regs_vals[i]))
        except:
            pass #NOTE(review): silently ignores missing widgets or values
        try:
            #update the control registers
            regc_vals = iset.control.getRegs()
            regc_noms = iset.control.getRegNames()
            for i in range (16):
                self.wTree.get_widget(regc_noms[i]).set_text(str(regc_vals[i]))
            #update the statusX86 registers: still TODO
        except:
            pass #NOTE(review): silently ignores missing widgets or values
    def actualizarResultados(self):
        """Show ST(0) as the last result plus the last executed instruction."""
        nom_res = "resultados"
        self.wTree.get_widget(nom_res).set_text(str(iset.pila.getI(iset.pila.head())[0]))#(str(iset.res))
        nom_text = "lastInstruction"
        lastI = ""
        for el in self._last_instruction:
            lastI+=" "
            lastI+=str(el)
        self.wTree.get_widget(nom_text).set_text(lastI)
    #refresh the stack output widgets
    def actualizarPila(self):
        """Render the 8 ST slots: binary form, repr and tag pair."""
        for i in range(8):
            reg=[None,None]
            nom_bin = "ST"+str(i)+"_bin"
            nom_rep = "ST"+str(i)+"_rep"
            nom_tag = "tag"+str(i)
            head = iset.pila.head()-i
            try:
                reg=iset.pila.getI(head)
            except:
                #fall back to an empty register (value 0, "empty" tag)
                reg[0] = 00000000000000000000
                reg[1] = [1,1]
            self.wTree.get_widget(nom_bin).set_text(str(f2bin(reg[0])))
            self.wTree.get_widget(nom_rep).set_text(str(reg[0]))
            self.wTree.get_widget(nom_tag).set_text(str(reg[1]))
if __name__ == "__main__":
    # Launch the GUI and enter the GTK main loop when run as a script.
    fpugui = FPU_GUI()
    gtk.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fpu_structure import *
from datahandling import *
import math
#TODO, faltan agregar las modificaciones que se hacen a las banderas de los diferentes registros
#TODO, faltan un montón de instrucciones
uesp = None #ultimo_elemento_sacado_de_pila
res = None #resultado de la última operación
#hay que poner a res y uesp como global en cada una de las funciones, debo escribir un script que lo haga :P: global uesp,res
pila = Pila()
control = ControlRegister()
status = StatusRegister()
pinout = Pinout()
statusX86 = StatusX86()
overflow = False
underflow = False
#pag 121
def F2XM1():
    """Replace ST(0) with 2**ST(0) - 1 and return the new value."""
    operand = pila.pop()[0]
    pila.push(2**operand - 1)
    res = pila.getI(pila.head())[0]
    return res
#pag 123
def FABS():
    """Replace ST(0) with its absolute value; sets x86 ZF when the result is 0."""
    value = pila.pop()[0]
    pila.push(abs(value))
    res = pila.getI(pila.head())[0]
    if res == 0:
        statusX86._ZF = 1
    return res
# Operaciones de Adición
"""
Operaciones de adición
Opcode Instruction Description
D8 C0+i FADD ST(0), ST(i)Add ST(0) to ST(i) and store result in ST(0)
DC C0+i FADD ST(i), ST(0)Add ST(i) to ST(0) and store result in ST(i)
DE C0+i FADDP ST(i), ST(0) Add ST(0) to ST(i), store result in ST(i), and pop the
register stack
DE C1 FADDP Add ST(0) to ST(1), store result in ST(1), and pop the
"""
#FADD
def FADD(st0=0,sti=1):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FADD, st0"
#raise()
else:
a = pila.getI(pila.head())[0]
b = pila.getI(pila.head()-1)[0]
res = a + b
#print st0,";", sti
pila.setI(pila.head(), res)#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
if res == 0 :
statusX86._ZF=1
return res
#FADDP
def FADDP(sti=1,st0=0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FADDP, st0"
#raise()
else:
a = pila.getI(pila.head())[0]
b = pila.getI(pila.head()-1)[0]
res = a + b
pila.setI(pila.head()-1,res) #pila[1]=pila[1]+pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
if res == 0 :
statusX86._ZF=1
return uesp
"""
Opcode Instruction Description
D8 E0+i FSUB ST(0), ST(i) Subtract ST(i) from ST(0) and store result in ST(0)
DC E8+i FSUB ST(i), ST(0) Subtract ST(0) from ST(i) and store result in ST(i)
DE E8+i FSUBP ST(i), ST(0) Subtract ST(0) from ST(i), store result in ST(i), and pop
register stack
DE E9 FSUBP Subtract ST(0) from ST(1), store result in ST(1), and pop
register stack
"""
def FSUB(st0=0,sti=1):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FSUB, st0"
#raise()
else:
a = pila.getI(pila.head())[0]
b = pila.getI(pila.head()-1)[0]
res = a - b
pila.setI(pila.head(), res)#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
if res == 0 :
statusX86._ZF=1
return pila.getI(pila.head())[0]
def FSUBP(st0=0,sti=1):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBP, st0"
#raise()
else:
a = pila.getI(pila.head())[0]
b = pila.getI(pila.head()-1)[0]
res = a - b
pila.setI(pila.head(), res)#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
if res == 0 :
statusX86._ZF=1
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
#Operaciones de Signo
def FCHS():
    """Negate ST(0) in place and return the new value."""
    negated = -1 * pila.getI(pila.head())[0]
    pila.setI(pila.head(), negated)
    return pila.getI(pila.head())[0]
def FNCLEX():
    """Clear the FPU exception flags without checking for pending exceptions."""
    for flag in ('_PE', '_UE', '_OE', '_ZE', '_DE', '_IE', '_B'):
        setattr(status, flag, 0)
    # status._ES / status._EF are pentium-only and intentionally left alone
#Operaciones de Comparación
"""
Opcode Instruction Description
D8 /2 FCOM m32real Compare ST(0) with m32real.
DC /2 FCOM m64real Compare ST(0) with m64real.
D8 D0+i FCOM ST(i) Compare ST(0) with ST(i).
D8 D1 FCOM Compare ST(0) with ST(1).
D8 /3 FCOMP m32real Compare ST(0) with m32real and pop register stack.
DC /3 FCOMP m64real Compare ST(0) with m64real and pop register stack.
D8 D8+i FCOMP ST(i) Compare ST(0) with ST(i) and pop register stack.
D8 D9 FCOMP Compare ST(0) with ST(1) and pop register stack.
DE D9 FCOMPP Compare ST(0) with ST(1) and pop register stack twice.
"""
def FCOM(sti):
    """Compare ST(0) with ST(sti) and set the C0/C2/C3 condition bits.

    C0 C2 C3 = 0,0,0 if ST(0) > ST(sti); 1,0,0 if less; 0,0,1 if equal;
    1,1,1 otherwise (unordered case, e.g. NaN).
    """
    #if 32 bits => 32-bit op; else if 64 bits => 64-bit op; else error (TODO)
    top = pila.getI(pila.head())[0]
    other = pila.getI(pila.head() - sti)[0]
    c = status.getC()
    if top > other:
        c[0], c[2], c[3] = 0, 0, 0
    elif top < other:
        c[0], c[2], c[3] = 1, 0, 0
    elif top == other:
        c[0], c[2], c[3] = 0, 0, 1
    else:
        c[0], c[2], c[3] = 1, 1, 1
    status.setC(c)
def FCOMP(sti):
    """Compare ST(0) with ST(sti), then pop the register stack."""
    FCOM(sti)
    popped = pila.pop()[0]
    res = popped
    status.incTOP()  #TODO: check TOP bookkeeping
def FCOMPP():
    """Compare ST(0) with ST(1), then pop the register stack twice."""
    FCOM(1)
    for _ in range(2):
        popped = pila.pop()[0]
        status.incTOP()  #TODO: check TOP bookkeeping
    res = popped
#Operaciones sobre st0
def FCOS():
    """Replace ST(0) with its cosine; returns the new ST(0).

    C2 is set when |ST(0)| > 2**63 (operand out of range), but note the
    cosine is still computed unconditionally.  Sets the x86 ZF when the
    result is zero.
    """
    caux = status.getC()
    if abs(pila.getI(pila.head())[0]) > (2**63):
        caux[2]=1
    else:
        caux[2]=0
    pila.push(math.cos(pila.pop()[0]))
    if pila.getI(pila.head())[0] == 0 :
        statusX86._ZF=1
    status.setC(caux)
    res =pila.getI(pila.head())[0]
    return res
"""
Opcode Instruction Description
D9 FE FSIN Replace ST(0) with its sine.
"""
def FSIN():
    """Replace ST(0) with its sine; returns the new ST(0).

    C2 is set when |ST(0)| > 2**63 (operand out of range), but note the
    sine is still computed unconditionally.  Sets the x86 ZF when the
    result is zero.
    """
    caux = status.getC()
    if abs( pila.getI(pila.head())[0]) > (2**63):
        caux[2]=1
    else:
        caux[2]=0
    pila.push(math.sin(pila.pop()[0]))
    if pila.getI(pila.head())[0] == 0 :
        statusX86._ZF=1
    status.setC(caux)
    res =pila.getI(pila.head())[0]
    return res
"""
Opcode Instruction Description
D9 FB FSINCOS Compute the sine and cosine of ST(0); replace ST(0) with
the sine, and push the cosine onto the register stack.
"""
def FSINCOS():
    """Replace ST(0) with sin(ST(0)) and push cos(ST(0)) on top.

    C2 flags an out-of-range operand (|ST(0)| > 2**63); the computation
    still proceeds either way.  The extra decTOP accounts for the second
    push.  Sets the x86 ZF when the new ST(0) (the cosine) is zero.
    """
    caux = status.getC()
    aux= pila.getI(pila.head())[0]
    if abs(aux) > (2**63):
        caux[2]=1
    else:
        caux[2]=0
    pila.push(math.sin(pila.pop()[0]))
    pila.push(math.cos(aux))
    status.decTOP()
    if pila.getI(pila.head())[0] == 0 :
        statusX86._ZF=1
    status.setC(caux)
    res =pila.getI(pila.head())[0]
    return res
"""
Opcode Instruction Description
D9 FA FSQRT Calculates square root of ST(0) and stores the result in
ST(0)
"""
def FSQRT():
    """Replace ST(0) with its square root; sets x86 ZF when the result is 0."""
    root = math.sqrt(pila.pop()[0])
    pila.push(root)
    new_top = pila.getI(pila.head())[0]
    if new_top == 0:
        statusX86._ZF = 1
    return pila.getI(pila.head())[0]
"""
Opcode Instruction Description
D8 F0+i FDIV ST(0), ST(i) Divide ST(0) by ST(i) and store result in ST(0)
DC F8+i FDIV ST(i), ST(0) Divide ST(i) by ST(0) and store result in ST(i)
DE F8+i FDIVP ST(i), ST(0) Divide ST(i) by ST(0), store result in ST(i), and pop the
register stack
"""
def FDIV (st0,sti):
    """FDIV register form: ST(0) <- ST(0) / ST(sti); returns the new ST(0).

    Sets status ZE when the divisor is zero, and x86 ZF when the dividend
    (hence the quotient) is zero.
    NOTE(review): when the divisor is zero the division is still
    performed, so ZeroDivisionError propagates after setting ZE — confirm
    whether callers rely on that exception.
    """
    a = pila.getI(pila.head()-sti)[0]  # divisor ST(sti)
    b = pila.getI(pila.head())[0]  # dividend ST(0)
    if a == 0:
        status._ZE = 1  # zero-divide exception flag
    res = b / a
    pila.setI(pila.head(),res)
    if b == 0:
        statusX86._ZF=1
    return pila.getI(pila.head())[0]
def FDIVP (sti,st0):
    """FDIV followed by a pop of the register stack; returns the popped value.

    NOTE(review): forwards (sti, st0) into FDIV's (st0, sti) positional
    slots — the operand order looks swapped; verify against the parser.
    """
    FDIV(sti,st0)
    uesp = pila.pop()[0] #first pop
    status.incTOP() #TODO: check TOP bookkeeping
    return uesp
#Free the top-of-stack entry
def FFREE():
    # Mark the top slot empty: value None with the "empty" tag [1,1].
    pila.setI(pila.head(),None,[1,1])
    res =pila.getI(pila.head())[0]
def FLD(num):
    # Push a value and move TOP down, mirroring the hardware push.
    pila.push(num)
    status.decTOP()
    res =pila.getI(pila.head())[0]
"""
Opcode Instruction Description
D9 E8 FLD1 Push +1.0 onto the FPU register stack.
D9 E9 FLDL2T Push log210 onto the FPU register stack.
D9 EA FLDL2E Push log2e onto the FPU register stack.
D9 EB FLDPI Push π onto the FPU register stack.
D9 EC FLDLG2 Push log102 onto the FPU register stack.
D9 ED FLDLN2 Push loge2 onto the FPU register stack.
D9 EE FLDZ Push +0.0 onto the FPU register stack.
"""
def FLD1():
    # Push +1.0 onto the FPU register stack.
    FLD(1.0)
    #status.decTOP()
def FLDL2T():
    # Push log2(10).
    FLD(math.log(10,2))
    #status.decTOP()
def FLDL2E():
    # Push log2(e).
    FLD(math.log(math.e,2))
    #status.decTOP()
def FLDPI():
    # Push pi.
    FLD(math.pi)
    #status.decTOP()
def FLDLG2():
    # Push log10(2).
    FLD(math.log10(2))
    #status.decTOP()
def FLDLN2():
    # Push ln(2).
    FLD(math.log(2,math.e))
    #status.decTOP()
def FLDZ():
    # Push +0.0.  (TOP adjustment is handled inside FLD, hence the
    # commented-out decTOP calls in this whole family.)
    FLD(0.0)
    #status.decTOP()
"""
Opcode Instruction Description
D9 /2 FST m32real Copy ST(0) to m32real
DD /2 FST m64real Copy ST(0) to m64real
DD D0+i FST ST(i) Copy ST(0) to ST(i)
D9 /3 FSTP m32real Copy ST(0) to m32real and pop register stack
DD /3 FSTP m64real Copy ST(0) to m64real and pop register stack
DB /7 FSTP m80real Copy ST(0) to m80real and pop register stack
DD D8+i FSTP ST(i) Copy ST(0) to ST(i) and pop register stack
"""
def FST(mreal):
    # Copy ST(0) without popping.  The memory model is not implemented,
    # so the destination `mreal` is ignored and the value is returned.
    uesp= pila.getI(pila.head())[0]
    res =uesp
    return uesp
def FSTP(mreal):
    # Copy ST(0) and pop the register stack; returns the popped value.
    # `mreal` is ignored for the same reason as in FST.
    uesp= pila.pop()[0]
    status.incTOP() #TODO: check TOP bookkeeping
    res = uesp
    return uesp
#increment the TOP field of the status word
def FINCSTP():
    status.incTOP()
#Multiplicación
"""
Opcode Instruction Description
D8 /1 FMUL m32real Multiply ST(0) by m32real and store result in ST(0)
DC /1 FMUL m64real Multiply ST(0) by m64real and store result in ST(0)
D8 C8+i FMUL ST(0), ST(i) Multiply ST(0) by ST(i) and store result in ST(0)
DC C8+i FMUL ST(i), ST(0) Multiply ST(i) by ST(0) and store result in ST(i)
DE C8+i FMULP ST(i), ST(0) Multiply ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE C9 FMULP Multiply ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /1 FIMUL m32int Multiply ST(0) by m32int and store result in ST(0)
DE /1 FIMUL m16int Multiply ST(0) by m16int and store result in ST(0)
"""
def FMUL (st0=0,sti=1):
    """Multiply ST(0) by ST(sti), store the product in ST(0); sets x86 ZF on zero."""
    factor_other = pila.getI(pila.head()-sti)[0]
    factor_top = pila.getI(pila.head())[0]
    product = factor_other * factor_top
    pila.setI(pila.head(), product)
    if product == 0:
        statusX86._ZF = 1
    return pila.getI(pila.head())[0]
def FMULP (st0,sti):
    # FMUL then pop; returns the popped value.
    FMUL(st0,sti)
    uesp = pila.pop()[0] #first pop
    #status.incTOP() #TODO: check TOP bookkeeping
    return uesp
#No Operation
def FNOP():
    # FNOP performs no operation on the FPU state.
    pass
"""
Opcode Instruction Description
D9 F3 FPATAN Replace ST(1) with arctan(ST(1)/ST(0)) and pop the register stack
"""
def FPATAN():
    """Replace ST(1) with arctan(ST(1)/ST(0)), pop, and return the popped value.

    Sets the x86 ZF when the popped value is zero.
    NOTE(review): ST(1) is addressed as absolute index 1 via getI(1),
    unlike most other ops here which address relative to head() — confirm
    the stack indexing convention.
    """
    pila.setI(1,math.atan(pila.getI(1)[0]/ pila.getI(pila.head())[0]))
    uesp=pila.pop()[0]
    status.incTOP() #TODO: check TOP bookkeeping
    if uesp == 0 :
        statusX86._ZF=1
    res = uesp
    return uesp
"""
Opcode Instruction Clocks Description
D9 F2 FPTAN 17-173 Replace ST(0) with its tangent and push 1
onto the FPU stack.
"""
def FPTAN():
caux=status.getC()
if pila.getI(pila.head()) < 2**63:
caux[2]=0
status.setC(caux)
pila.setI(pila.head(),math.tan( pila.getI(pila.head())))
if pila.getI(pila.head())[0] == 0 :
statusX86._ZF=1
FLD1()
status.decTOP()
else:
caux[2]=1
status.setC(caux)
print "Operando fuera de rango"
"""
Opcode Instruction Description
D9 FC FRNDINT Round ST(0) to an integer.
"""
def FRNDINT():
    """Round ST(0) to the nearest integer."""
    rounded = int(round(pila.pop()[0]))
    pila.push(rounded)
    res = pila.getI(pila.head())[0]
def FSCALE():
    """Scale ST(0) by a power of two: ST(0) <- ST(0) * 2**ST(1).

    BUG FIX: getI() returns a [value, tag] pair; the arithmetic needs the
    value component (`[0]`), otherwise this raised TypeError on every call.
    NOTE(review): ST(1) is addressed via getI(1) as in FPATAN — confirm
    the stack indexing convention.
    """
    res = pila.getI(pila.head())[0] * (2**pila.getI(1)[0])
    pila.setI(pila.head(), res)
    #TODO, set flags
"""
Opcode Instruction Description
D9 C8+i FXCH ST(i) Exchange the contents of ST(0) and ST(i)
D9 C9 FXCH Exchange the contents of ST(0) and ST(1)
"""
def FXCH(sti):
    """Exchange the contents (value and tag) of ST(0) and ST(sti)."""
    top_idx = pila.head()
    other_idx = top_idx - sti
    other = pila.getI(other_idx)
    top = pila.getI(top_idx)
    pila.setI(other_idx, top[0], top[1])
    pila.setI(top_idx, other[0], other[1])
    res = pila.getI(top_idx)[0]
"""
Opcode Instruction Description
D9 F1 FYL2X Replace ST(1) with (ST(1) ∗ log2ST(0)) and pop the
register stack
"""
def FYL2X():
    """Replace ST(1) with log2(ST(0)), pop, and return the popped value.

    BUG FIX: getI() returns a [value, tag] pair; math.log() was handed
    the pair (TypeError) — the value component is now extracted.
    NOTE(review): Intel's FYL2X computes ST(1) * log2(ST(0)); the ST(1)
    factor is not applied here — confirm against the instruction parser.
    """
    pila.setI(1, math.log(pila.getI(pila.head())[0], 2))
    uesp = pila.pop()[0]
    status.incTOP() #TODO: verify this matches the TOP bookkeeping
    res = uesp
    return uesp
"""
Opcode Instruction Description
D9 F9 FYL2XP1 Replace ST(1) with ST(1) ∗ log2(ST(0) + 1.0) and pop the
register stack
"""
def FYL2XP1():
    """Replace ST(1) with log2(ST(0) + 1.0), pop, and return the popped value.

    BUG FIX: this was a duplicate `def FYL2X` that silently shadowed the
    real FYL2X above while leaving FYL2XP1 — the opcode its own docstring
    describes (D9 F9) — undefined; renamed accordingly.  The +1.0 is also
    moved inside the logarithm per the documented semantics, and the
    value component is extracted from the [value, tag] pair.
    NOTE(review): Intel's FYL2XP1 also multiplies by ST(1); kept
    consistent with FYL2X above, which omits that factor — confirm.
    """
    pila.setI(1, math.log(pila.getI(pila.head())[0] + 1.0, 2))
    uesp = pila.pop()[0]
    status.incTOP() #TODO: verify TOP bookkeeping
    res = uesp
    return uesp
#Si es llamado como ejecutable, entonces decir que esto es una librería del set de instrucción de la fpu 8087, mostrar la doc y salir.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fpu_structure import *
from datahandling import *
import math
#TODO, faltan agregar las modificaciones que se hacen a las banderas de los diferentes registros
#TODO, faltan un montón de instrucciones
uesp = None #ultimo_elemento_sacado_de_pila
pila = Pila()
control = ControlRegister()
status = StatusRegister()
pinout = Pinout()
statusX86 = StatusX86()
overflow = False
underflow = False
#pag 121
def F2XM1():
    """Replace ST(0) with 2**ST(0) - 1."""
    operand = pila.pop()[0]
    pila.push(2**operand - 1)
#pag 123
def FABS():
    """Replace ST(0) with its absolute value."""
    value = pila.pop()[0]
    pila.push(abs(value))
# Operaciones de Adición
"""
Operaciones de adición
OpcodeInstructionDescription
D8 /0 FADD m32 realAdd m32real to ST(0) and store result in ST(0)
DC /0 FADD m64real Add m64real to ST(0) and store result in ST(0)
D8 C0+i FADD ST(0), ST(i)Add ST(0) to ST(i) and store result in ST(0)
DC C0+i FADD ST(i), ST(0)Add ST(i) to ST(0) and store result in ST(i)
DE C0+i FADDP ST(i), ST(0) Add ST(0) to ST(i), store result in ST(i), and pop the
register stack
DE C1 FADDPAdd ST(0) to ST(1), store result in ST(1), and pop the
register stack
DA /0 FIADD m32int Add m32int to ST(0) and store result in ST(0)
DE /0 FIADD m16int Add m16int to ST(0) and store result in ST(0)
"""
#FADD
def FADD(self, *args):
assert 1 <= len(args) <= 2
st0 = args[0]
sti = args[1]
if len(args) == 2:
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FADD, st0"
#raise()
else:
#print st0,";", sti
pila.setI(pila.head(), pila.getI(pila.head())[0]+pila.getI(1)[0])#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
elif len(args) == 1:
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
#aux = pila.pop()[0]
#print "num=", num
pila.push(pila.pop()[0]+args[0])
else:
print "Error de argumentos", args
#FADDP
def FADDP():
    # No-argument FADDP: ST(1) <- ST(1) + ST(0), then pop and return the
    # popped value.
    # NOTE(review): this definition is immediately shadowed by the
    # two-argument FADDP below, so it is unreachable as written.
    pila.setI(1,pila.getI(1)[0]+ pila.getI(pila.head())[0]) #pila[1]=pila[1]+pila[0]
    uesp = pila.pop()[0] # beware when the intermediate pop register changes
    return uesp
def FADDP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FADDP, st0"
#raise()
else:
pila.setI(1,pila.getI(1)[0]+ pila.getI(pila.head())[0]) #pila[1]=pila[1]+pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FIADD(num): #operación entera
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(pila.pop()[0]+num)
"""
Opcode Instruction Description
D8 /4 FSUB m32real Subtract m32real from ST(0) and store result in ST(0)
DC /4 FSUB m64real Subtract m64real from ST(0) and store result in ST(0)
D8 E0+i FSUB ST(0), ST(i) Subtract ST(i) from ST(0) and store result in ST(0)
DC E8+i FSUB ST(i), ST(0) Subtract ST(0) from ST(i) and store result in ST(i)
DE E8+i FSUBP ST(i), ST(0) Subtract ST(0) from ST(i), store result in ST(i), and pop
register stack
DE E9 FSUBP Subtract ST(0) from ST(1), store result in ST(1), and pop
register stack
DA /4 FISUB m32int Subtract m32int from ST(0) and store result in ST(0)
DE /4 FISUB m16int Subtract m16int from ST(0) and store result in ST(0)
"""
#FSUB
def FSUB(num):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(pila.pop()[0]-num)
'''
def FSUB(m64real)
pass
'''
def FSUB(st0=0,sti=0):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FSUB, st0"
#raise()
else:
pila.setI(pila.head(), pila.getI(pila.head())[0]-pila.getI(1)[0])#pila[0] = pila[st0] - pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
#FSUBP
def FSUBP():
pila.setI(1,pila.getI(1)[0]- pila.getI(pila.head())[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FSUBP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBP, st0"
#raise()
else:
pila.setI(1,pila.getI(1)[0]- pila.getI(pila.head())[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FISUB(num): #operación entera
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(pila.pop()[0]-num)
"""
Opcode Instruction Description
D8 /5 FSUBR m32real Subtract ST(0) from m32real and store result in ST(0)
DC /5 FSUBR m64real Subtract ST(0) from m64real and store result in ST(0)
D8 E8+i FSUBR ST(0), ST(i) Subtract ST(0) from ST(i) and store result in ST(0)
DC E0+i FSUBR ST(i), ST(0) Subtract ST(i) from ST(0) and store result in ST(i)
DE E0+i FSUBRP ST(i), ST(0) Subtract ST(i) from ST(0), store result in ST(i), and pop
register stack
DE E1 FSUBRP Subtract ST(1) from ST(0), store result in ST(1), and pop
register stack
DA /5 FISUBR m32int Subtract ST(0) from m32int and store result in ST(0)
DE /5 FISUBR m16int Subtract ST(0) from m16int and store result in ST(0)
"""
#FSUBR
def FSUBR(num):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(num - pila.pop()[0])
'''
def FSUBR(m64real)
pass
'''
def FSUBR(st0=0,sti=0):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FSUBR, st0"
#raise()
else:
pila.setI(pila.head(),pila.getI(1)[0]- pila.getI(pila.head())[0])#pila[0] = pila[st0] - pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
#FSUBRP
def FSUBRR():
pila.setI(1, pila.getI(pila.head())[0]-pila.getI(1)[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FSUBRP(sti,st0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBRP, st0"
#raise()
else:
pila.setI(1, pila.getI(pila.head())[0]-pila.getI(1)[0]) #pila[1]=pila[1]-pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FISUBR(num): #operación entera
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
pila.push(num-pila.pop()[0])
#Operaciones de BCD
def FBLD(bcd): #convertir bcd a real y hacerle push
#numreal = bcd
#acá hay que convertirlo
#acá se lo empuja
pila.push(BCD2dec(bcd))
def FBSTP(bcd):
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
#Operaciones de Signo
def FCHS():
pila.setI(pila.head(),-1* pila.getI(pila.head())[0])
#Operaciones de Registros (no de pila)
def FCLEX():
#TODO check first for and handles any pending unmasked floating-point exceptions before cleaning
#clean flags
status._PE=0
status._UE=0
status._OE=0
status._ZE=0
status._DE=0
status._IE=0
# status._ES=0 # pentium processors
# status._EF=0 # pentium processors
status._B=0
def FNCLEX():
#clean flags without checking
status._PE=0
status._UE=0
status._OE=0
status._ZE=0
status._DE=0
status._IE=0
# status._ES=0 # pentium processors
# status._EF=0 # pentium processors
status._B=0
#Operaciones de Movimientos condicionales (pag 137)
def FCMOVB(sti):
    # Move ST(sti) into ST(0) when the x86 carry flag is set.
    if statusX86._CF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVE(sti):
    # Move ST(sti) into ST(0) when the x86 zero flag is set.
    # BUG FIX: `sti` was used without being declared as a parameter
    # (NameError on every call); the same fix applies to every variant
    # below, matching FCMOVB's signature.
    if statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVBE(sti):
    # Move ST(sti) into ST(0) when CF or ZF is set.
    # BUG FIX: ZF was read from `status` (the FPU status word); the x86
    # flags live on `statusX86`, as in every other FCMOV variant.
    if statusX86._CF or statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVU(sti):
    # Move ST(sti) into ST(0) when the parity flag is set (unordered).
    if statusX86._PF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNB(sti):
    # Move ST(sti) into ST(0) when the carry flag is clear.
    if not statusX86._CF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNE(sti):
    # Move ST(sti) into ST(0) when the zero flag is clear.
    if not statusX86._ZF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNBE(sti):
    # Move ST(sti) into ST(0) when both CF and ZF are clear.
    if statusX86._CF == 0 and statusX86._ZF == 0:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
def FCMOVNU(sti):
    # Move ST(sti) into ST(0) when the parity flag is clear.
    if not statusX86._PF:
        pila.setI(pila.head(), pila.getI(pila.head()-sti)[0])
        pila.delI(pila.head()-sti)
#Operaciones de Comparación
"""
Opcode Instruction Description
D8 /2 FCOM m32real Compare ST(0) with m32real.
DC /2 FCOM m64real Compare ST(0) with m64real.
D8 D0+i FCOM ST(i) Compare ST(0) with ST(i).
D8 D1 FCOM Compare ST(0) with ST(1).
D8 /3 FCOMP m32real Compare ST(0) with m32real and pop register stack.
DC /3 FCOMP m64real Compare ST(0) with m64real and pop register stack.
D8 D8+i FCOMP ST(i) Compare ST(0) with ST(i) and pop register stack.
D8 D9 FCOMP Compare ST(0) with ST(1) and pop register stack.
DE D9 FCOMPP Compare ST(0) with ST(1) and pop register stack twice.
"""
def FCOM():
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
FCOMST(1)
def FCOMST(sti):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
c=status.getC()
if pila.getI(pila.head())[0] > pila.getI(pila.head()-sti)[0]:
c[0]= 0
c[2]= 0
c[3]= 0
elif pila.getI(pila.head())[0] < pila.getI(pila.head()-sti)[0]:
c[0]= 1
c[2]= 0
c[3]= 0
elif pila.getI(pila.head())[0] == pila.getI(pila.head()-sti)[0]:
c[0]= 0
c[2]= 0
c[3]= 1
else:
c[0]= 1
c[2]= 1
c[3]= 1
status.setC(c)
def FCOM(num):
    """Compare ST(0) against the immediate value *num*.

    Deliberately bypasses the stack encapsulation: the value is appended
    directly to the internal lists so the comparison cannot trip the
    stack-length checks, then removed again.

    BUG FIX: `pila.__pst` only resolves inside class Pila's own body
    (Python private name mangling); from module level the mangled names
    must be spelled out, otherwise every call raised AttributeError.
    BUG FIX: the internal compare now calls FCOMST(1) directly — the
    zero-argument FCOM() is shadowed by this very definition, so calling
    FCOM() here recursed into itself with the wrong arity.
    """
    pila._Pila__pst.append(num)  #the value being compared
    pila._Pila__ptag.append([0,0])  #tag added only as a safeguard
    FCOMST(1)
    #remove the temporary entry
    pila._Pila__pst.pop()
    pila._Pila__ptag.pop()
def FCOMP():
FCOM()
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
def FCOMPST(sti):
FCOMST(sti)
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
def FCOMP(num):
FCOM(num)
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
def FCOMPP():
FCOM()
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
uesp = pila.pop()[0] #segundo pop, necesario
#status.incTOP() #TODO, revisar si no hay fallo acá
#Operaciones de Comparación de enteros
"""
Opcode Instruction Description
DB F0+i FCOMI ST, ST(i) Compare ST(0) with ST(i) and set status flags accordingly
DF F0+i FCOMIP ST, ST(i) Compare ST(0) with ST(i), set status flags accordingly, and
pop register stack
DB E8+i FUCOMI ST, ST(i) Compare ST(0) with ST(i), check for ordered values, and
set status flags accordingly
DF E8+i FUCOMIP ST, ST(i) Compare ST(0) with ST(i), check for ordered values, set
status flags accordingly, and pop register stack
"""
def FCOMI(sti):
#if 32 bits => op de 32 bits
#else if 64 bits => op de 64 bits
#else, todo mal
if pila.getI(pila.head())[0] > pila.getI(pila.head()-sti)[0]:
statusX86._CF= 0
statusX86._PF= 0
statusX86._ZF= 0
elif pila.getI(pila.head())[0] < pila.getI(pila.head()-sti)[0]:
statusX86._CF= 1
statusX86._PF= 0
statusX86._ZF= 0
elif pila.getI(pila.head())[0] == pila.getI(pila.head()-sti)[0]:
statusX86._CF= 0
statusX86._PF= 0
statusX86._ZF= 1
else:
statusX86._CF= 1
statusX86._PF= 1
statusX86._ZF= 1
def FCOMIP(sti):
FCOMI(sti)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
def FUCOMI(sti):
#TODO, check for ordered values
FCOMI(sti)
def FUCOMIP(sti):
#TODO, check for ordered values
FCOMIP(sti)
"""
Opcode Instruction Description
D9 E4 FTST Compare ST(0) with 0.0.
"""
def FTST():
FCOM(0.0)
"""
Opcode Instruction Description
DD E0+i FUCOM ST(i) Compare ST(0) with ST(i)
DD E1 FUCOM Compare ST(0) with ST(1)
DD E8+i FUCOMP ST(i) Compare ST(0) with ST(i) and pop register stack
DD E9 FUCOMP Compare ST(0) with ST(1) and pop register stack
DA E9 FUCOMPP Compare ST(0) with ST(1) and pop register stack twice
"""
def FUCOM():
    # NOTE(review): shadowed by the one-argument FUCOM below — unreachable.
    FUCOM(1)
def FUCOM(sti):
    # Unordered compare of ST(0) with ST(sti).
    # NOTE(review): FCOM(sti) resolves to the immediate-compare FCOM(num)
    # defined above (the register form was shadowed) — verify intent.
    FCOM(sti)
def FUCOMP():
    # NOTE(review): shadowed by the one-argument FUCOMP below — unreachable.
    return FUCOMP(1)
def FUCOMP(sti):
    # Unordered compare then pop.
    FCOMP(sti)
def FUCOMPP():
    # Unordered compare of ST(0) with ST(1), then pop twice.
    # NOTE(review): calls FUCOM() with no argument, but FUCOM (above) now
    # requires `sti` — this raises TypeError as written.
    FUCOM()
    uesp= pila.pop()[0]
    status.incTOP() #TODO: check this is right
    uesp= pila.pop()[0]
    status.incTOP() #TODO: check this is right
#Operaciones sobre st0
def FCOS():
caux = status.getC()
if abs( pila.getI(pila.head())[0]) > (2**63):
caux[2]=1
status.setC(caux)
else:
caux[2]=0
status.setC(caux)
pila.push(math.cos(pila.pop()[0]))
"""
Opcode Instruction Description
D9 FE FSIN Replace ST(0) with its sine.
"""
def FSIN():
caux = status.getC()
if abs( pila.getI(pila.head())[0]) > (2**63):
caux[2]=1
status.setC(caux)
else:
caux[2]=0
status.setC(caux)
pila.push(math.sin(pila.pop()[0]))
"""
Opcode Instruction Description
D9 FB FSINCOS Compute the sine and cosine of ST(0); replace ST(0) with
the sine, and push the cosine onto the register stack.
"""
def FSINCOS():
caux = status.getC()
aux= pila.getI(pila.head())[0]
if abs(aux) > (2**63):
caux[2]=1
status.setC(caux)
else:
caux[2]=0
status.setC(caux)
pila.push(math.sin(pila.pop()[0]))
pila.push(math.cos(aux))
status.decTOP()
"""
Opcode Instruction Description
D9 FA FSQRT Calculates square root of ST(0) and stores the result in
ST(0)
"""
def FSQRT():
pila.push(math.sqrt(pila.pop()[0]))
def FDECSTP():
    """Decrement the FPU TOP-of-stack pointer.

    BUG FIX: decTOP() was called on the stack object `pila`; everywhere
    else in this module the TOP field is maintained on the status word
    (status.decTOP()/incTOP()), so the call is redirected there.
    """
    status.decTOP()
#TODO, faltan realizar las operaciones sonbre C1, el manual está incorrecto :S
#operaciones de división
"""
Opcode Instruction Description
D8 /6 FDIV m32real Divide ST(0) by m32real and store result in ST(0)
DC /6 FDIV m64real Divide ST(0) by m64real and store result in ST(0)
D8 F0+i FDIV ST(0), ST(i) Divide ST(0) by ST(i) and store result in ST(0)
DC F8+i FDIV ST(i), ST(0) Divide ST(i) by ST(0) and store result in ST(i)
DE F8+i FDIVP ST(i), ST(0) Divide ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE F9 FDIVP Divide ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /6 FIDIV m32int Divide ST(0) by m32int and store result in ST(0)
DE /6 FIDIV m16int Divide ST(0) by m64int and store result in ST(0)
"""
def FDIV(num):
pila.setI(pila.head(), pila.getI(pila.head())[0]/num)
def FDIV (sti):
pila.setI(pila.head(), pila.getI(pila.head())[0]/ pila.getI(pila.head()-sti)[0])
def FDIV (sti,st0):
    """Register form FDIV ST(i), ST(0): ST(sti) <- ST(sti) / ST(0).

    BUG FIX: the destination index was written as the undefined name `i`
    (NameError on every call); ST(sti) is addressed relative to TOP, as
    in the single-argument form above.
    """
    pila.setI(pila.head()-sti, pila.getI(pila.head()-sti)[0]/ pila.getI(pila.head())[0])
def FDIVP():
FDIV(1,0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FDIVP (sti,st0):
FDIV(sti,st0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FIDIV(num):
FDIV(num)
#Operaciones de división inversas
"""
Opcode Instruction Description
D8 /7 FDIVR m32real Divide m32real by ST(0) and store result in ST(0)
DC /7 FDIVR m64real Divide m64real by ST(0) and store result in ST(0)
D8 F8+i FDIVR ST(0), ST(i) Divide ST(i) by ST(0) and store result in ST(0)
DC F0+i FDIVR ST(i), ST(0) Divide ST(0) by ST(i) and store result in ST(i)
DE F0+i FDIVRP ST(i), ST(0) Divide ST(0) by ST(i), store result in ST(i), and pop the
register stack
DE F1 FDIVRP Divide ST(0) by ST(1), store result in ST(1), and pop the
register stack
DA /7 FIDIVR m32int Divide m32int by ST(0) and store result in ST(0)
DE /7 FIDIVR m16int Divide m64int by ST(0) and store result in ST(0)
"""
def FDIVR(num):
pila.setI(pila.head(),num/ pila.getI(pila.head())[0])
def FDIVR (sti):
    """FDIVR ST(0), ST(i): ST(0) <- ST(sti) / ST(0).

    BUG FIX: the source was read with the undefined name `i` (NameError
    on every call); ST(sti) is addressed relative to TOP, as in the
    two-operand FDIVR form below.
    """
    pila.setI(pila.head(), pila.getI(pila.head()-sti)[0]/ pila.getI(pila.head())[0])
def FDIVR (sti,st0):
pila.setI(pila.head()-sti, pila.getI(pila.head())[0]/ pila.getI(pila.head()-sti)[0])
def FDIVPR():
FDIV(1,0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FDIVPR (sti,st0):
FDIVR(sti,st0)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FIDIVR(num):
FDIVR(num)
#Operaciones de liberación de cabeza de pila
def FFREE():
pila.setI(pila.head(),None,[1,1])
#Operaciones de comparación de enteros
"""
Opcode Instruction Description
DE /2 FICOM m16int Compare ST(0) with m16int
DA /2 FICOM m32int Compare ST(0) with m32int
DE /3 FICOMP m16int Compare ST(0) with m16int and pop stack register
DA /3 FICOMP m32int Compare ST(0) with m32int and pop stack register
"""
def FICOM(num):
FCOM(num)
def FICOMP(num):
FICOM(num)
uesp = pila.pop()[0] #primer pop
#status.incTOP() #TODO, revisar si no hay fallo acá
#Operaciones de carga de pila
"""
Opcode Instruction Description
DF /0 FILD m16int Push m16int onto the FPU register stack.
DB /0 FILD m32int Push m32int onto the FPU register stack.
DF /5 FILD m64int Push m64int onto the FPU register stack.
"""
def FILD(num):
status.decTOP()
pila.push(num)
"""
Opcode Instruction Description
D9 /0 FLD m32real Push m32real onto the FPU register stack.
DD /0 FLD m64real Push m64real onto the FPU register stack.
DB /5 FLD m80real Push m80real onto the FPU register stack.
D9 C0+i FLD ST(i) Push ST(i) onto the FPU register stack.
"""
def FLD(num):
pila.push(num)
status.decTOP()
def FLDST(sti): #¿esto es así? no es muy claro en el manual, pag 167
pila.push( pila.getI(pila.head()-sti))
status.decTOP()
"""
Opcode Instruction Description
D9 E8 FLD1 Push +1.0 onto the FPU register stack.
D9 E9 FLDL2T Push log210 onto the FPU register stack.
D9 EA FLDL2E Push log2e onto the FPU register stack.
D9 EB FLDPI Push π onto the FPU register stack.
D9 EC FLDLG2 Push log102 onto the FPU register stack.
D9 ED FLDLN2 Push loge2 onto the FPU register stack.
D9 EE FLDZ Push +0.0 onto the FPU register stack.
"""
def FLD1():
FLD(1.0)
def FLDL2T():
FLD(math.log(10,2)) #log en base 2 de 10
def FLDL2E():
FLD(math.log(math.e,2))#log en base 2 de e
def FLDPI():
FLD(math.pi)
def FLDLG2():
FLD(math.log10(2))
def FLDLN2():
FLD(math.log(2,math.e))
def FLDZ():
FLD(0.0)
"""
Opcode Instruction Description
D9 /5 FLDCW m2byte Load FPU control word from m2byte.
"""
def FLDCW(m2byte):
FLD(m2byte) #TODO, modelo de memoria, para poder cargar solo lo que hace falta
"""
Opcode Instruction Description
D9 /4 FLDENV m14/28byte Load FPU environment from m14byte or m28byte.
"""
def FLDENV(mbyte):
pass #TODO
#operaciones de extracción de stack
"""
Opcode Instruction Description
DF /2 FIST m16int Store ST(0) in m16int
DB /2 FIST m32int Store ST(0) in m32int
DF /3 FISTP m16int Store ST(0) in m16int and pop register stack
DB /3 FISTP m32int Store ST(0) in m32int and pop register stack
DF /7 FISTP m64int Store ST(0) in m64int and pop register stack
"""
def FIST(dirmem):
uesp = pila.getI(pila.head())[0]
#acá falta agregar un modelo de memoria RAM para poder cargar el valor donde corresponde
return uesp
def FISTP(dirmem):
uesp = pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
#acá falta agregar un modelo de memoria RAM para poder cargar el valor donde corresponde
return uesp
"""
Opcode Instruction Description
D9 /2 FST m32real Copy ST(0) to m32real
DD /2 FST m64real Copy ST(0) to m64real
DD D0+i FST ST(i) Copy ST(0) to ST(i)
D9 /3 FSTP m32real Copy ST(0) to m32real and pop register stack
DD /3 FSTP m64real Copy ST(0) to m64real and pop register stack
DB /7 FSTP m80real Copy ST(0) to m80real and pop register stack
DD D8+i FSTP ST(i) Copy ST(0) to ST(i) and pop register stack
"""
def FST(mreal):
uesp= pila.getI(pila.head())[0]
return uesp
def FST_ST(i):
pila.setI(1, pila.getI(pila.head())[0], pila.getI(pila.head())[1])
def FSTP(mreal):
uesp= pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
def FSTP_ST(i):
FST_ST(i)
uesp=pila.pop()[0]
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
#incrementa TOP de status
def FINCSTP():
status.incTOP()
#Inicialización de la FPU
def FINIT():
#TODO, check for and handles any pending unmasked floating-point exceptions
FNINIT()
pass
def FNINIT():
#TODO, poner
# control en 1101111111 #037Fh
# TAG word en FFFFh
# los demás: status, data pointer instruction pointer, last instruction opcode, en 0 (cero)
pass
#Multiplicación
"""
Opcode Instruction Description
D8 /1 FMUL m32real Multiply ST(0) by m32real and store result in ST(0)
DC /1 FMUL m64real Multiply ST(0) by m64real and store result in ST(0)
D8 C8+i FMUL ST(0), ST(i) Multiply ST(0) by ST(i) and store result in ST(0)
DC C8+i FMUL ST(i), ST(0) Multiply ST(i) by ST(0) and store result in ST(i)
DE C8+i FMULP ST(i), ST(0) Multiply ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE C9 FMULP Multiply ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /1 FIMUL m32int Multiply ST(0) by m32int and store result in ST(0)
DE /1 FIMUL m16int Multiply ST(0) by m16int and store result in ST(0)
"""
def FMUL(*args):
    """Multiply into the FPU stack (D8 /1, DC /1, D8 C8+i, DC C8+i).

    FMUL(num):      ST(0) <- ST(0) * num       (memory-operand form)
    FMUL(sti, st0): ST(i) <- ST(i) * ST(0)     (register form)

    Bug fix: FMUL used to be defined twice, so the one-argument form was
    shadowed by the two-argument one and calls such as FIMUL(num) failed.
    Both forms are now dispatched on the argument count.
    """
    if len(args) == 1:
        num = args[0]
        pila.setI(pila.head(), pila.getI(pila.head())[0] * num)
    elif len(args) == 2:
        sti = args[0]
        pila.setI(pila.head() - sti,
                  pila.getI(pila.head() - sti)[0] * pila.getI(pila.head())[0])
    else:
        raise TypeError("FMUL takes 1 or 2 arguments")
def FMUL_ST(sti):
    """D8 C8+i: ST(0) <- ST(0) * ST(i)."""
    pila.setI(pila.head(),
              pila.getI(pila.head())[0] * pila.getI(pila.head() - sti)[0])
def FMULP(sti=1, st0=0):
    """DE C8+i / DE C9: multiply ST(i) by ST(0), store in ST(i), then pop.

    Bug fix: FMULP was defined twice, so the zero-argument form (DE C9,
    which operates on ST(1) and ST(0)) was shadowed.  Defaulting the
    parameters to (1, 0) preserves both call forms.
    """
    FMUL(sti, st0)
    uesp = pila.pop()[0]  # first pop
    #status.incTOP() # TODO: check whether this is needed here
    return uesp
def FIMUL(num):
    """DA /1, DE /1: integer multiply, ST(0) <- ST(0) * num.

    NOTE(review): delegates to FMUL(num); while FMUL is defined twice in
    this module the one-argument form is shadowed and this call fails --
    confirm once the FMUL overloads are consolidated.
    """
    FMUL(num)
#No Operation
def FNOP():
    """D9 D0: FNOP -- performs no operation."""
    pass
"""
Opcode Instruction Description
D9 F3 FPATAN Replace ST(1) with arctan(ST(1)/ST(0)) and pop the register stack
"""
def FPATAN():
    """D9 F3: replace ST(1) with arctan(ST(1)/ST(0)) and pop the stack."""
    ratio = pila.getI(1)[0] / pila.getI(pila.head())[0]
    pila.setI(1, math.atan(ratio))
    popped = pila.pop()[0]
    #status.incTOP() # TODO: check whether this is needed here
"""
Opcode Instruction Description
D9 F8 FPREM Replace ST(0) with the remainder obtained from
dividing ST(0) by ST(1)
"""
def FPREM():
    """D9 F8: replace ST(0) with the remainder of ST(0) divided by ST(1)."""
    remainder = pila.getI(pila.head())[0] % pila.getI(1)[0]
    pila.setI(pila.head(), remainder)
    #TODO: set the status._C condition bits # p. 182
"""
Opcode Instruction Description
D9 F5 FPREM1 Replace ST(0) with the IEEE remainder obtained from
dividing ST(0) by ST(1)
"""
def FPREM1():
    """D9 F5: IEEE remainder of ST(0)/ST(1) -- currently delegates to FPREM.

    NOTE(review): FPREM does not round to nearest, so this does not yet
    implement the IEEE 754 remainder.
    """
    FPREM() #TODO: when the models change, update this so it actually complies with IEEE (it does not now)
"""
Opcode Instruction Clocks Description
D9 F2 FPTAN 17-173 Replace ST(0) with its tangent and push 1
onto the FPU stack.
"""
def FPTAN():
    """D9 F2: replace ST(0) with tan(ST(0)) and push 1.0; sets C2 on range error.

    Bug fix: both the range check and math.tan() were given the
    (value, tag) tuple returned by pila.getI() instead of the value, so
    the comparison was meaningless and the in-range branch would raise.
    """
    caux = status.getC()
    if pila.getI(pila.head())[0] < 2**63:
        caux[2] = 0          # C2 cleared: operand in range
        status.setC(caux)
        pila.setI(pila.head(), math.tan(pila.getI(pila.head())[0]))
        status.decTOP()
        FLD1()
    else:
        caux[2] = 1          # C2 set: operand out of range
        status.setC(caux)
        print("Operando fuera de rango")
"""
Opcode Instruction Description
D9 FC FRNDINT Round ST(0) to an integer.
"""
def FRNDINT():
    """D9 FC: round ST(0) to the nearest integer."""
    top = pila.pop()[0]
    pila.push(int(round(top)))
"""
Opcode Instruction Description
DD /4 FRSTOR m94/108byte Load FPU state from m94byte or m108byte.
Restaura el estado de la FPU desde memoria
"""
def FRSTOR():
    """DD /4: restore the full FPU state from memory. Not implemented."""
    pass #TODO, pag190
"""
Opcode Instruction Description
9B DD /6 FSAVE m94/108byte Store FPU state to m94byte or m108byte after checking for
pending unmasked floating-point exceptions. Then re-
initialize the FPU.
DD /6 FNSAVE* m94/108byte Store FPU environment to m94byte or m108byte without
checking for pending unmasked floating-point exceptions.
Then re-initialize the FPU.
Guarda el estado de la FPU en la dirección memoria dada
"""
def FSAVE(m94_108byte):
    """9B DD /6: store the FPU state to memory after checking pending
    unmasked exceptions, then re-initialize the FPU. Not implemented."""
    pass #TODO
def FNSAVE(m94_108byte):
    """DD /6: store the FPU state to memory without checking exceptions,
    then re-initialize the FPU. Not implemented.

    Bug fix: this was a second `def FSAVE` that silently shadowed the
    first; per the opcode table above it is the FNSAVE variant.
    """
    pass #TODO
"""
Opcode Instruction Description
9B D9 /7 FSTCW m2byte Store FPU control word to m2byte after checking for
pending unmasked floating-point exceptions.
D9 /7 FNSTCW* m2byte Store FPU control word to m2byte without checking for
pending unmasked floating-point exceptions.
"""
def FSTCW(m2byte):
    """9B D9 /7: store the control word to memory after checking pending
    exceptions. Not implemented."""
    pass
def FNSTCW(m2byte):
    """D9 /7: store the control word to memory without checking pending
    exceptions. Not implemented."""
    pass
"""
Opcode Instruction Description
9B D9 /6 FSTENV m14/28byte Store FPU environment to m14byte or m28byte after
checking for pending unmasked floating-point exceptions.
Then mask all floating-point exceptions.
D9 /6 FNSTENV* m14/28byte Store FPU environment to m14byte or m28byte without
checking for pending unmasked floating-point exceptions.
Then mask all floating-point exceptions.
"""
def FSTENV(m14_28byte):
    """9B D9 /6: store the FPU environment to memory after checking pending
    exceptions, then mask all exceptions. Not implemented."""
    pass
def FNSTENV(m14_28byte):
    """D9 /6: store the FPU environment to memory without checking pending
    exceptions, then mask all exceptions. Not implemented."""
    pass
"""
Opcode Instruction Description
9B DD /7 FSTSW m2byte Store FPU status word at m2byte after checking for
pending unmasked floating-point exceptions.
9B DF E0 FSTSW AX Store FPU status word in AX register after checking for
pending unmasked floating-point exceptions.
DD /7 FNSTSW* m2byte Store FPU status word at m2byte without checking for
pending unmasked floating-point exceptions.
DF E0 FNSTSW* AX Store FPU status word in AX register without checking for
pending unmasked floating-point exceptions.
"""
def FSTSW(m2byte=None):
    """Store the FPU status word (9B DD /7 to memory; 9B DF E0 to AX when
    *m2byte* is None). Not implemented.

    Bug fix: FSTSW was defined twice (memory and AX forms), so the memory
    form was shadowed; both are now one function with an optional operand.
    """
    pass #TODO
def FNSTSW(m2byte=None):
    """Store the FPU status word without checking exceptions (DD /7 to
    memory; DF E0 to AX when *m2byte* is None). Not implemented.

    Bug fix: duplicate definitions merged, mirroring FSTSW.
    """
    pass #TODO
"""
Opcode Instruction Description
D9 FD FSCALE Scale ST(0) by ST(1).
"""
def FSCALE():
    """D9 FD: scale ST(0) by powers of two, ST(0) <- ST(0) * 2**ST(1).

    Bug fix: both stack reads used the whole (value, tag) tuple returned
    by pila.getI() instead of the value, which raised at runtime.
    """
    pila.setI(pila.head(),
              pila.getI(pila.head())[0] * (2 ** pila.getI(1)[0]))
    #TODO, set flags
def FWAIT():
    """9B: wait until the FPU finishes its current instruction. No-op here."""
    pass
"""
Opcode Instruction Description
D9 E5 FXAM Classify value or number in ST(0)
"""
#TODO
def FXAM():
    """D9 E5: classify the value in ST(0) via C3/C2/C0. Not implemented.

    Intel manual pseudocode for the intended behaviour:

    C1 ← sign bit of ST; (* 0 for positive, 1 for negative *)
    CASE (class of value or number in ST(0)) OF
    Unsupported:C3, C2, C0 ← 000;
    NaN: C3, C2, C0 ← 001;
    Normal: C3, C2, C0 ← 010;
    Infinity: C3, C2, C0 ← 011;
    Zero: C3, C2, C0 ← 100;
    Empty: C3, C2, C0 ← 101;
    Denormal: C3, C2, C0 ← 110;
    ESAC;
    """
    pass
"""
Opcode Instruction Description
D9 C8+i FXCH ST(i) Exchange the contents of ST(0) and ST(i)
D9 C9 FXCH Exchange the contents of ST(0) and ST(1)
"""
def FXCH(sti=1):
    """D9 C8+i: exchange ST(0) with ST(i); defaults to ST(1) (D9 C9).

    Bug fix: FXCH was defined twice, so the zero-argument form was
    shadowed by the one-argument form; a default argument covers both.
    """
    aux = pila.getI(pila.head() - sti)
    pila.setI(pila.head() - sti,
              pila.getI(pila.head())[0], pila.getI(pila.head())[1])
    pila.setI(pila.head(), aux[0], aux[1])
"""
Opcode Instruction Description
D9 F4 FXTRACT Separate value in ST(0) into exponent and significand,
store exponent in ST(0), and push the significand onto the
register stack.
"""
def FXTRACT():
    """D9 F4: split ST(0) into exponent (left in ST(0)) and significand
    (pushed onto the stack). Not implemented."""
    pass #TODO
"""
Opcode Instruction Description
D9 F1 FYL2X Replace ST(1) with (ST(1) ∗ log2ST(0)) and pop the
register stack
"""
def FYL2X():
    """D9 F1: replace ST(1) with ST(1) * log2(ST(0)) and pop the stack.

    Bug fixes: math.log() was given the (value, tag) tuple instead of the
    value, and the ST(1) factor required by the instruction was missing.
    """
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0], 2))
    uesp = pila.pop()[0]
    #status.incTOP() # TODO: check whether this is correct
    return uesp
"""
Opcode Instruction Description
D9 F9 FYL2XP1 Replace ST(1) with ST(1) ∗ log2(ST(0) + 1.0) and pop the
register stack
"""
def FYL2XP1():
    """D9 F9: replace ST(1) with ST(1) * log2(ST(0) + 1.0) and pop the stack.

    Bug fixes: this duplicate was named FYL2X (shadowing the real FYL2X
    above); the formula computed log2(ST(0)) + 1 instead of
    log2(ST(0) + 1.0); math.log() was given the (value, tag) tuple; and
    the ST(1) factor was missing.
    """
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0] + 1.0, 2))
    uesp = pila.pop()[0]
    #status.incTOP() # TODO: check whether this is correct
    return uesp
#Si es llamado como ejecutable, entonces decir que esto es una librería del set de instrucción de la fpu 8087, mostrar la doc y salir.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de instruction_set.py
"""
import random
#módulo de Tests Unitarios
import unittest
#importa el módulo a testear:
from reduced_instruction_set import *
"""
Test FLD
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Observar que el dato que se introduce mediante FLD de instruction_set.py
se corresponda con los datos que se almacenan en la pila
Dependencias:
Pila
StatusRegister
Método:
Se crean valores enteros positivos, cero y negativos los que se introducirán
mediante FLD.
Se comprueba que el valor almacenado corresponda con el introducido
Esperado:
Test OK
"""
class TestFLD(unittest.TestCase):
    """FLD must leave the loaded value on top of the FPU stack."""

    def _load_and_check(self, value):
        # Push with FLD, then read the stack head back.
        FLD(value)
        self.assertEqual(pila.getI(pila.head())[0], value)

    def testFLDpos(self):
        self._load_and_check(111111)

    def testFLDneg(self):
        self._load_and_check(-111111)

    def testFLDcero(self):
        self._load_and_check(0)
#Test ABS
#
#class TestFABS(unittest.TestCase):
# pass
#Test FADD
class TestFADD(unittest.TestCase):
    """FADD must leave the sum on top of the FPU stack.

    Bug fix: both methods were named testFADD_1, so the first one (the
    two-operand register form) was shadowed and never ran.
    """

    def testFADD_1(self):
        # register form: FADD(0, 1) adds the two top stack values
        a = random.random()
        b = random.random()
        c = a + b
        pila.push(a)
        pila.push(b)
        FADD(0, 1)
        self.assertEqual(pila._pst[pila.head()], c)

    def testFADD_2(self):
        # memory-operand form: FADD(b) adds b to ST(0)
        a = random.random()
        b = random.random()
        c = a + b
        pila.push(a)
        FADD(b)
        self.assertEqual(pila._pst[pila.head()], c)
if __name__ == '__main__':
unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#import re
import copy
#import instruction_set as iset #modificado por el momento
import reduced_instruction_set as iset
from fpu_structure import *
uesp = None #ultimo_elemento_sacado_de_pila
uesp_temp = None #ultimo_elemento_sacado_de_pila
pila_temp = None
control_temp = None
status_temp = None
pinout_temp = None
statusX86_temp = None
def parse(text):
    """Split *text* into one token list per input line.

    Each line is whitespace-split; tokens that look like decimal integers
    are converted to int, everything else stays a string (the FPU just
    receives numbers and does not care about the source base).

    Bug fix: the bare `except:` around int() swallowed every exception,
    not just conversion failures; it now catches only the expected ones.
    """
    lines = text.splitlines()
    for i, line in enumerate(lines):
        tokens = line.split()
        for j, token in enumerate(tokens):
            try:
                # only integers are supported for now
                tokens[j] = int(token)
            except (ValueError, TypeError):
                pass
        lines[i] = tokens
    return lines
def execute_command(commlista):
    """Run one parsed command, where commlista = [name, arg1, arg2, ...].

    Saves the current FPU state first (so undo() can roll back), builds
    the textual call "iset.NAME(arg1, arg2, ...)" and executes it.

    NOTE(review): exec on user-assembled text is unsafe, and the bare
    except reports *any* failure -- including bugs inside the instruction
    itself -- as a bad input line; consider getattr dispatch instead.
    """
    saveState()
    comm = commlista[0]
    params = commlista[1:]
    # build the call as a text line: "(p1, p2, ...)"
    paramline = "("
    i=0
    for p in params:
        if i>0:
            paramline+=", "
        paramline+=str(p)
        i+=1
    paramline += ")"
    commline = "iset."+comm + paramline
    try:
        #iset.__getattribute__(comm)(params)
        #eval(comm)(p1,p2,p3...)
        exec commline
        #print "uesp", iset.uesp
        #print "res", iset.res
    except:
        # the function does not exist or the parameters are wrong
        print "línea incorrecta:",commline
def undo():
    """Restore the FPU state captured by the last saveState() call.

    Bug fix: `uesp` was missing from the global statement, so the restore
    assigned a useless local instead of the module-level variable.
    """
    global uesp, uesp_temp, pila_temp, control_temp, status_temp
    uesp = uesp_temp  # last element popped from the stack
    iset.pila = pila_temp
    iset.control = control_temp
    iset.status = status_temp
    #iset.pinout = pinout_temp       # TODO: pin state is not restored yet
    #iset.statusX86 = statusX86_temp # TODO: x86 flags are not restored yet
def rebootFPU():
    """Re-create the stack and reset every register bank to power-on state."""
    iset.pila = None
    iset.pila = Pila()
    iset.control.iniciar()
    iset.status.iniciar()
    iset.pinout.iniciar()
    iset.statusX86.iniciar()
def saveState():
    """Deep-copy the current FPU state so undo() can roll back one step.

    Bug fix: pinout_temp and statusX86_temp were missing from the global
    statement, so their snapshots landed in locals and were lost.
    """
    global uesp_temp, pila_temp, control_temp, status_temp
    global pinout_temp, statusX86_temp
    uesp_temp = uesp  # last element popped from the stack
    # deepcopy: the live objects keep mutating after the snapshot
    pila_temp = copy.deepcopy(iset.pila)
    control_temp = copy.deepcopy(iset.control)
    status_temp = copy.deepcopy(iset.status)
    pinout_temp = copy.deepcopy(iset.pinout)
    statusX86_temp = copy.deepcopy(iset.statusX86)
def cleanState():
    """Drop the saved undo snapshot (state can no longer be rolled back)."""
    global uesp_temp, pila_temp, control_temp, status_temp
    uesp_temp = None  # last element popped from the stack
    pila_temp = None
    control_temp = None
    status_temp = None
    #pinout_temp = None
    #statusX86_temp = None
#si es llamado como ejecutable
#Realizar la instanciación de los módulos necesarios
if __name__ == "__main__":
pass
| Python |
# -*- coding: utf-8 -*-
"""
Tipos de Datos
"""
"""
BCD, será considerado como una lista ordenada de números enteros entre 0 y 9
Para convertirlo se pasará multiplicando
sea a = [1,2,3,4,5,6,7...] donde el último dígito es el más significativo y el primero el menos
sea b el número decimal, entonces
b=0
j=0
for i in a:
b+=i*(10**j)
j+=1
b ahora es el número en decimal
conversión de decimal a bcd
a = lista
b= número decimal
while c >0:
a.append(c%10)
c/=10
"""
def BCD2dec(bcd):
    """Convert a little-endian BCD digit list to a decimal integer.

    The first element is the least significant digit; [] yields 0.
    """
    return sum(digit * 10 ** place for place, digit in enumerate(bcd))
def dec2BCD(dec):
    """Convert a non-negative integer to a little-endian BCD digit list.

    Returns [] for 0, matching the original behaviour.

    Bug fix: uses floor division so digit extraction also works under
    Python 3, where `/=` is true division and the loop drifts into floats.
    """
    bcd = []
    while dec > 0:
        bcd.append(dec % 10)
        dec //= 10
    return bcd
"""
representación binaria
arreglo de unos y ceros
[b0,b1,b2,b3 .... ]
"""
def dec2bin(dec):
    """Convert a non-negative integer to a little-endian bit list [b0, b1, ...].

    Returns [] for 0, matching the original behaviour.

    Bug fix: uses floor division so bit extraction also works under
    Python 3, where `/=` is true division and the loop drifts into floats.
    """
    bin = []
    while dec > 0:
        bin.append(dec % 2)
        dec //= 2
    return bin
def bin2dec(bin):
    """Convert a little-endian bit list [b0, b1, ...] to an integer."""
    return sum(bit * 2 ** place for place, bit in enumerate(bin))
def f2bin(num, scale=10000000):
    """Return the little-endian bit list of *num* as a fixed-point integer.

    *num* is multiplied by *scale* before conversion; the previously
    hard-coded constant 10000000 is now a backward-compatible parameter
    that sets the fixed-point precision.
    """
    return dec2bin(num * scale)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
from fpu_structure import * #mas comentarios
#from instruction_set import * #Por el momento queda así
import reduced_instruction_set as iset
from datahandling import *
import main #debe estar después de la instanciación de iset
class FPU_GUI:
    """Glade/PyGTK front-end for the 8087 emulator.

    Wires the Glade widgets to main.py: runs console input through the
    parser/dispatcher, supports one-step undo and reboot, and refreshes
    the register, stack and result displays after each action.
    """
    def __init__(self):
        # Snapshot the initial state so the first "undo" has a target.
        main.saveState()
        self._last_instruction = ""
        #Set the Glade file
        self.gladefile = "gui/fpugui.glade/fpuguiglade"
        self.wTree = gtk.glade.XML(self.gladefile)
        #Get the Main Window, and connect the "destroy" event
        self.windowST = self.wTree.get_widget("ST_Stack")
        if (self.windowST):
            self.windowST.connect("destroy", gtk.main_quit)
        #self.windowReg = self.wTree.get_widget("Registers")
        #if (self.windowReg):
        #    self.windowReg.connect("destroy", gtk.main_quit)
        self.windowConsole = self.wTree.get_widget("Consola")
        if (self.windowConsole):
            self.windowConsole.connect("destroy", gtk.main_quit)
        #Create our handler dictionary and connect it
        dic = {
            "on_Salir_destroy" : gtk.main_quit,
            "on_Ejecutar_clicked" : self.ejecutar,
            "on_Deshacer_clicked" : self.deshacer,
            "on_Reiniciar_clicked" : self.reiniciar,
            "on_Salir_clicked" : gtk.main_quit,
            }
        self.wTree.signal_autoconnect(dic)
    def ejecutar(self, widget):
        """'Run' button: parse the console text and execute each command."""
        lines = [] #load the lines into an auxiliary list here,
        #to take them out in order use pop(0) (careful: the 0 must be there)
        #get the input console widget
        consola=self.wTree.get_widget("entrada_consola")
        buffConsola = consola.get_buffer()
        numlines=buffConsola.get_line_count()
        beginIter = buffConsola.get_start_iter() #buffConsola.get_iter_at_line(0)
        endIter = buffConsola.get_end_iter()
        text= buffConsola.get_text(beginIter,endIter)
        #parse the input data
        #check that it is valid
        #hand it to main for execution
        commands = main.parse(text)
        for comm in commands:
            main.execute_command(comm)
            self._last_instruction = comm
        #refresh the displays
        self.actualizarRegs()
        self.actualizarPila()
        self.actualizarResultados()
    def deshacer(self, widget):
        """'Undo' button: roll back one instruction and refresh the displays."""
        main.undo()
        self.actualizarRegs()
        self.actualizarPila()
    def reiniciar(self, widget):
        """'Restart' button: reboot the FPU model and refresh the displays."""
        main.rebootFPU()
        self.actualizarRegs()
        self.actualizarPila()
    #refreshes the register output widgets
    def actualizarRegs(self):
        """Copy the status and control register bits into their text widgets.

        NOTE(review): the broad try/except also hides missing-widget and
        programming errors -- confirm this is intentional best-effort UI.
        """
        try:
            #update the status registers
            #print "actualizando registros de status"
            regs_vals = iset.status.getRegs()
            regs_noms = iset.status.getRegNames()
            #print regs_vals
            #print regs_noms
            for i in range (16):
                self.wTree.get_widget(regs_noms[i]).set_text(str(regs_vals[i]))
        except:
            pass
        try:
            #update the control registers
            #print "actualizando registros de control"
            regc_vals = iset.control.getRegs()
            regc_noms = iset.control.getRegNames()
            #print regc_vals
            #print regc_noms
            for i in range (16):
                self.wTree.get_widget(regc_noms[i]).set_text(str(regc_vals[i]))
            #update the statusX86 registers
        except:
            pass
    def actualizarResultados(self):
        """Show the current stack head value and the last executed instruction."""
        nom_res = "resultados"
        self.wTree.get_widget(nom_res).set_text(str(iset.pila.getI(iset.pila.head())[0]))#(str(iset.res))
        nom_text = "lastInstruction"
        lastI = ""
        for el in self._last_instruction:
            lastI+=" "
            lastI+=str(el)
        self.wTree.get_widget(nom_text).set_text(lastI)
    #refreshes the stack output widgets
    def actualizarPila(self):
        """Render the 8 ST slots: binary form, value and tag per register."""
        for i in range(8):
            reg=[None,None]
            nom_bin = "ST"+str(i)+"_bin"
            nom_rep = "ST"+str(i)+"_rep"
            nom_tag = "tag"+str(i)
            #print nom_bin
            #print nom_rep
            #print nom_tag
            head = iset.pila.head()-i
            #print head
            try:
                #print "pila.head()= ", pila.head()
                reg=iset.pila.getI(head)
            except:
                # empty slot: zero value, tag [1,1] (empty)
                reg[0] = 00000000000000000000
                reg[1] = [1,1]
            #print reg
            #print i
            self.wTree.get_widget(nom_bin).set_text(str(f2bin(reg[0])))
            self.wTree.get_widget(nom_rep).set_text(str(reg[0]))
            self.wTree.get_widget(nom_tag).set_text(str(reg[1]))
if __name__ == "__main__":
fpugui = FPU_GUI()
gtk.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datahandling import *
"""
Pin Configuration
"""
"""
pin[ 1 ]='GND'
pin[ 2:16 ]= AD[14:0]
pin[ 17 ]= 'NC'
pin[ 18 ]= 'NC'
pin[ 19 ]= 'CLK'
pin[ 20 ]= 'GND'
pin[ 21 ]= 'RESET'
pin[ 22 ]= 'READY'
pin[ 23 ]= 'BUSY'
pin[ 24 ]= QS1
pin[ 25 ]= QS0
pin[ 26 ]= S0 #neg
pin[ 27 ]= S1 #neg
pin[ 28 ]= S2 #neg
pin[ 29 ]= 'NC'
pin[ 30 ]= 'NC'
pin[ 31 ]= RQ/GT0 #neg
pin[ 32 ]= INT
pin[ 33 ]= RQ/GT1 #neg
pin[ 34 ]= BHE #neg
pin[ 35 : 38 ]= S[6:3]
pin[ 39 ]= AD[15]
pin[ 40 ]= 'VCC'
"""
class Pinout:
    """Pin state of the 8087 package (40 pins, 16 address/data lines).

    The intended pin map (GND, AD0..AD15, CLK, RESET, READY, BUSY,
    QS0/QS1, S0..S6, INT, RQ/GT0, RQ/GT1, BHE, VCC, ...) is listed in the
    module-level comment above; wiring it up is still pending.
    """

    def __init__(self):
        self.iniciar()

    def iniciar(self):
        """Reset all pins: address/data lines to 0, pins unassigned."""
        self._AD = [0] * 16    # address/data lines AD0..AD15
        self._pin = [None] * 40
"""
Control Unit (CU)
Recibe las instrucciones
Decodifica los operandos
Ejecuta rutinas de control
"""
"""
Numeric Execution Unit (NEU)
Ejecuta las instrucciones numéricas
"""
"""
Data Field:
Compuesto por la Pila
"""
"""
Pila
Esta está compuesta de 7 registros de 80 bits.
Cada registro consta de
64 bits mas bajos de significand
15 bits de exponente
1 bit de signo
"""
"""
Tag Field
Cada registro tiene correspondencia uno a uno con un registro del data field
"""
class Pila:
    """The 8087 register stack: parallel lists of values and 2-bit tags.

    Each slot models one 80-bit register; `_pst` holds the value and
    `_ptag` its tag ([0, 0] by default, [1, 1] meaning empty/invalid).
    At most 8 registers are held, mirroring the hardware.
    """

    def __init__(self):
        self.iniciar()

    def iniciar(self):
        """Reset the stack to empty."""
        self._pst = []   # value stack (ST registers)
        self._ptag = []  # parallel tag stack

    def push(self, *args):
        """push(st[, tag]): append a value; tag defaults to [0, 0].

        Overflow past 8 slots is reported, not raised (original behaviour).
        Bug fix: the two-argument unpacking was written twice in a row.
        """
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            st, tag = args
        else:
            st = args[0]
            tag = [0, 0]
        if len(self._pst) < 8:
            self._pst.append(st)
            self._ptag.append(tag)
        else:
            print("fallo al empujar valor a la pila, demasiados valores")
            #raise # exception

    def pop(self):
        """Remove and return (value, tag) from the top; (0, [1,1]) if empty."""
        try:
            return (self._pst.pop(), self._ptag.pop())
        except IndexError:  # empty stack
            return (0, [1, 1])

    def getI(self, i):
        """Return (value, tag) at slot *i*; (0, [1,1]) when out of range."""
        if len(self._pst) > 8 or i < 0:
            return (0, [1, 1])
        try:
            return (self._pst[i], self._ptag[i])
        except IndexError:
            return (0, [1, 1])

    def setI(self, *args):
        """setI(i, st[, tag]): overwrite slot *i*; tag defaults to [0, 0].

        Returns (0, [1,1]) without writing when *i* is invalid.
        Bug fix: removed an unreachable one-argument branch that the
        assert above it already ruled out.
        """
        assert 2 <= len(args) <= 3
        if len(args) == 3:
            i, st, tag = args
        else:
            i, st = args
            tag = [0, 0]
        if len(self._pst) > 8 or i < 0:
            return (0, [1, 1])
        self._pst[i] = st
        self._ptag[i] = tag

    def delI(self, i):
        """Delete slot *i* (value and tag); True on success, False otherwise."""
        try:
            del (self._pst[i])
            del (self._ptag[i])
            return True
        except IndexError:
            return False

    def length(self):
        """Number of occupied registers."""
        return len(self._pst)

    def head(self):
        """Index of the top of the stack (-1 when empty)."""
        return (len(self._pst) - 1)

    def getRegs(self):
        """Return all 8 (value, tag) pairs; empty slots read as (0, [1,1])."""
        return [self.getI(i) for i in range(8)]

    def setRegs(self, pilatemp):
        """Load registers from a list of (value, tag) pairs.

        Bug fixes: removed leftover debug prints, and pass the slot index
        to setI -- the old code called setI(value, tag), which used the
        value itself as the index.
        """
        for i, st in enumerate(pilatemp):
            self.setI(i, st[0], st[1])
"""
Control Register (16 bits)
"""
class ControlRegister:
    """The 8087 control word (16 bits): exception masks and mode fields."""

    def __init__(self):
        self.iniciar()

    def iniciar(self):
        """Reset to 037Fh: all exceptions masked, default precision/rounding."""
        self._IM = 1  # invalid operation mask
        self._DM = 1  # denormalized operand mask
        self._ZM = 1  # zero divide mask
        self._OM = 1  # overflow mask
        self._UM = 1  # underflow mask
        self._PM = 1  # precision mask
        self._X = 1   # reserved
        self._M = 0   # interrupt mask
        self._PC = [1, 1]  # precision control
        self._PC0 = self._PC[0]
        self._PC1 = self._PC[0]  # NOTE(review): uses _PC[0]; probably meant _PC[1] (same value at reset)
        self._RC = [0, 0]  # rounding control
        self._RC0 = self._RC[0]
        self._RC1 = self._RC[1]
        self._IC = [0, 0]  # infinity control (0=projective, 1=affine)
        self._IC0 = self._IC[0]
        self._IC1 = self._IC[1]
        self._XX = [0, 0]  # last reserved bits

    def setPC(self, *args):
        """setPC(pc_list) or setPC(bit0, bit1): set precision control."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._PC[0] = args[0]
            self._PC[1] = args[1]
        else:
            self._PC = args[0]

    def getPC(self):
        """Return the precision-control bits.

        Bug fix: returned the bare name _PC (a NameError); now self._PC.
        """
        return self._PC

    def setRC(self, *args):
        """setRC(rc_list) or setRC(bit0, bit1): set rounding control."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._RC[0] = args[0]
            self._RC[1] = args[1]
        else:
            self._RC = args[0]

    def getRC(self):
        """Return the rounding-control bits.

        Bug fix: returned the bare name _RC (a NameError); now self._RC.
        """
        return self._RC

    def setIC(self, *args):
        """setIC(ic_list) or setIC(bit0, bit1): set infinity control."""
        assert 1 <= len(args) <= 2
        if len(args) == 2:
            self._IC[0] = args[0]
            self._IC[1] = args[1]
        else:
            self._IC = args[0]

    def getIC(self):
        """Return the infinity-control bits.

        Bug fix: returned the bare name _IC (a NameError); now self._IC.
        """
        return self._IC

    def getRegs(self):
        """Return the 16 control bits in display order."""
        return [self._IM, self._DM, self._ZM, self._OM, self._UM, self._PM,
                self._X, self._M, self._PC[0], self._PC[1], self._RC[0],
                self._RC[1], self._IC[0], self._IC[1], self._XX[0], self._XX[1]]

    def setRegs(self, IM, DM, ZM, OM, UM, PM, X0, M, PC0, PC1,
                RC0, RC1, IC0, IC1, X1, X2):
        """Set all 16 control bits.

        Bug fix: `self` was missing from the signature, so the first bit
        landed in the instance slot and every assignment raised NameError.
        """
        self._IM = IM
        self._DM = DM
        self._ZM = ZM
        self._OM = OM
        self._UM = UM
        self._PM = PM
        self._X = X0
        self._M = M
        self._PC[0] = PC0
        self._PC[1] = PC1
        self._RC[0] = RC0
        self._RC[1] = RC1
        self._IC[0] = IC0
        self._IC[1] = IC1
        self._XX[0] = X1
        self._XX[1] = X2

    def getRegNames(self):
        """Bit names aligned with getRegs()."""
        return ['IM', 'DM', 'ZM', 'OM', 'UM', 'PM', 'X0', 'M',
                'PC0', 'PC1', 'RC0', 'RC1', 'IC0', 'IC1', 'X1', 'X2']
"""
Status Register (16 bits)
"""
class StatusRegister:
    """The 8087 status word (16 bits): exception flags, condition codes, TOP."""

    def __init__(self):
        self.iniciar()

    def iniciar(self):
        """Reset to 0000h: no exceptions, condition codes and TOP cleared."""
        self._IE = 0  # invalid operation
        self._DE = 0  # denormalized operand
        self._ZE = 0  # zero divide
        self._OE = 0  # overflow
        self._UE = 0  # underflow
        self._PE = 0  # precision
        self._X = 0   # reserved
        self._IR = 0  # interrupt request
        self._C = [0, 0, 0, 0]  # condition code C0..C3
        self._C0 = 0
        self._C1 = 0
        self._C2 = 0
        self._TOP = [0, 0, 0]  # top-of-stack pointer (3 bits, LSB first)
        self._C3 = 0
        self._B = 0   # NEU busy

    def setTOP(self, *args):
        """setTOP(bits_list) or setTOP(b0, b1, b2): set the TOP pointer."""
        assert 1 <= len(args) <= 3
        if len(args) == 3:
            self._TOP[0] = args[0]
            self._TOP[1] = args[1]
            self._TOP[2] = args[2]
        elif len(args) == 1:
            self._TOP = args[0]
        else:
            print("Error de argumentos", args)

    def getTOP(self):
        """Return the TOP bit list."""
        return self._TOP

    def setC(self, *args):
        """setC(c_list) or setC(c0, c1, c2, c3): set the condition codes."""
        assert 1 <= len(args) <= 4
        if len(args) == 4:
            self._C[0] = args[0]
            self._C[1] = args[1]
            self._C[2] = args[2]
            self._C[3] = args[3]
        elif len(args) == 1:
            self._C = args[0]
        else:
            print("Error de argumentos", args)

    def getC(self):
        """Return the condition-code bit list."""
        return self._C

    def decTOP(self):
        """Decrement TOP modulo 8 (wraps 0 -> 7).

        NOTE(review): relies on datahandling's bin2dec/dec2bin; dec2bin
        returns a variable-length bit list ([] for 0), so getRegs() can
        IndexError afterwards -- latent bug kept as-is.
        """
        aux = bin2dec(self._TOP)
        if aux == 0:
            aux = 7
        else:
            aux -= 1
        self._TOP = dec2bin(aux)

    def incTOP(self):
        """Increment TOP modulo 8 (wraps 7 -> 0). Same caveat as decTOP()."""
        aux = bin2dec(self._TOP)
        if aux == 7:
            aux = 0
        else:
            aux += 1
        self._TOP = dec2bin(aux)

    def getRegs(self):
        """Return the 16 status bits in display order."""
        return [self._IE, self._DE, self._ZE, self._OE, self._UE, self._PE,
                self._X, self._IR, self._C[0], self._C[1], self._C[2],
                self._TOP[0], self._TOP[1], self._TOP[2], self._C[3], self._B]

    def setRegs(self, IE, DE, ZE, OE, UE, PE, X, IR, C0, C1, C2,
                TOP0, TOP1, TOP2, C3, B):
        """Set all 16 status bits.

        Bug fix: `self` was missing from the signature, so the first bit
        landed in the instance slot and every assignment raised NameError.
        """
        self._IE = IE
        self._DE = DE
        self._ZE = ZE
        self._OE = OE
        self._UE = UE
        self._PE = PE
        self._X = X
        self._IR = IR
        self._C[0] = C0
        self._C[1] = C1
        self._C[2] = C2
        self._TOP[0] = TOP0
        self._TOP[1] = TOP1
        self._TOP[2] = TOP2
        self._C[3] = C3
        self._B = B

    def getRegNames(self):
        """Bit names aligned with getRegs()."""
        return ['IE', 'DE', 'ZE', 'OE', 'UE', 'PE', 'X', 'IR',
                'C0', 'C1', 'C2', 'TOP0', 'TOP1', 'TOP2', 'C3', 'B']
"""
Tag Word (16 bits) #listo
"""
"""
Instruction Pointer (32 bits)
"""
"""
Data Pointer (32 bits)
"""
"""
Registros necesarios del procesador 8086
"""
class StatusX86:
    """The x86 CPU flags relevant to the 8087 pairing (9 flags)."""

    def __init__(self):
        self.iniciar()

    def iniciar(self):
        """Reset every flag to 0."""
        self._CF = 0  # carry
        self._PF = 0  # parity
        self._AF = 0  # auxiliary carry
        self._ZF = 0  # zero
        self._SF = 0  # sign
        self._TF = 0  # trap
        self._IF = 0  # interrupt enable
        self._DF = 0  # direction
        self._OF = 0  # overflow

    def getRegs(self):
        """Return the 9 flags in CF, PF, AF, ZF, SF, TF, IF, DF, OF order."""
        return [self._CF, self._PF, self._AF, self._ZF, self._SF,
                self._TF, self._IF, self._DF, self._OF]

    def setRegs(self, CF, PF, AF, ZF, SF, TF, IF, DF, OF):
        """Set all 9 flags.

        Bug fix: `self` was missing from the signature, so the first flag
        landed in the instance slot and every assignment raised NameError.
        """
        self._CF = CF
        self._PF = PF
        self._AF = AF
        self._ZF = ZF
        self._SF = SF
        self._TF = TF
        self._IF = IF
        self._DF = DF
        self._OF = OF

    def getRegNames(self):
        """Flag names aligned with getRegs().

        Bug fix: the list had 10 entries with 'DF' duplicated and did not
        match the order returned by getRegs().
        """
        return ['CF', 'PF', 'AF', 'ZF', 'SF', 'TF', 'IF', 'DF', 'OF']
#Si es llamado como ejecutable, entonces decir que esto es una librería que contiene las estructuras básicas de una fpu 8087 (pilas y registros), mostrar la doc y salir.
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de instruction_set.py
"""
import unittest
import random
from fpu_structure import Pila, StatusRegister, ControlRegister, StatusX86
"""
Test Pila
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Especificado en cada test en particular
Dependencias:
Pila
Método:
Especificado en cada test en particular
Esperado:
Test OK
"""
class TestPila(unittest.TestCase):
    """Unit tests for Pila, the 8087 register-stack model.

    Bug fix: random.randint() was called with float bounds (-2e10, 2e10),
    which raises under Python 3; the bounds are now plain integers.
    """

    # every possible 2-bit tag
    TAGS = [[0, 0], [0, 1], [1, 0], [1, 1]]

    def test_push_1(self):
        """push(st, tag) stores both the value and the tag at the head."""
        pila = Pila()
        for i in range(2):
            for t in self.TAGS:
                st = random.randint(-2 * 10 ** 10, 2 * 10 ** 10)
                pila.push(st, t)
                last = len(pila._pst) - 1
                self.assertEqual((pila._pst[last], pila._ptag[last]), (st, t))

    def test_push_2(self):
        """push(st) with no tag still stores the value at the head."""
        pila = Pila()
        for i in range(8):
            st = random.random()
            pila.push(st)
            self.assertEqual(pila._pst[len(pila._pst) - 1], st)

    def test_pop(self):
        """pop() returns the pushed values in reverse (LIFO) order."""
        pila = Pila()
        st = []
        for i in range(8):
            st.append(random.random())
            pila.push(st[i])
        for i in range(8):
            self.assertEqual(pila.pop()[0], st[7 - i])

    def test_getI(self):
        """getI(i) reads back the value stored at slot i."""
        pila = Pila()
        st = []
        for i in range(8):
            st.append(random.random())
            pila.push(st[i])
        for i in range(8):
            self.assertEqual(pila.getI(i)[0], st[i])

    def test_head(self):
        """After each push, getI(head()) is the value just pushed."""
        pila = Pila()
        for i in range(2):
            for t in self.TAGS:
                st = random.randint(-2 * 10 ** 10, 2 * 10 ** 10)
                pila.push(st, t)
                self.assertEqual(pila.getI(pila.head()), (st, t))

    def test_length(self):
        """length() tracks the number of pushed elements."""
        pila = Pila()
        self.assertEqual(0, pila.length())
        for i in range(8):
            pila.push(random.random())
            self.assertEqual(i + 1, pila.length())

    def test_delI(self):
        """delI(0) succeeds once per stored element."""
        pila = Pila()
        for i in range(8):
            pila.push(random.random())
        for i in range(8):
            self.assertEqual(True, pila.delI(0))

    def test_setI_1(self):
        """setI(i, st, tag) overwrites both the value and the tag."""
        pila = Pila()
        for i in range(8):
            pila.push(0, self.TAGS[3])
        st = []
        i = 0
        for j in range(2):
            for t in self.TAGS:
                st.append(random.random())
                pila.setI(i, st[i], t)
                self.assertEqual(pila.getI(i), (st[i], t))
                i += 1

    def test_setI_2(self):
        """setI(i, st) overwrites the value, leaving the default tag."""
        pila = Pila()
        for i in range(8):
            pila.push(0, [1, 1])
        st = []
        for i in range(8):
            st.append(random.random())
            pila.setI(i, st[i])
            self.assertEqual(pila.getI(i)[0], st[i])
#class TestStatusX86(unittest.TestCase):
# pass
class TestControlRegister(unittest.TestCase):
    """Unit tests for ControlRegister's PC/RC/IC accessors.

    Bug fixes: the three getter tests were named getPC/getRC/getIC, so
    unittest never discovered or ran them; they are now test_-prefixed.
    The old getIC body also used `rc` instead of `ic` (a NameError).
    """

    # every possible 2-bit field value
    BITS = [[0, 0], [0, 1], [1, 0], [1, 1]]

    def test_setPC_1(self):
        control = ControlRegister()
        for pc in self.BITS:
            control.setPC(pc)
            self.assertEqual(pc, control._PC)

    def test_setPC_2(self):
        control = ControlRegister()
        for pc in self.BITS:
            control.setPC(pc[0], pc[1])
            self.assertEqual(pc, control._PC)

    def test_getPC(self):
        # assumes setPC() works correctly
        control = ControlRegister()
        for pc in self.BITS:
            control.setPC(pc[0], pc[1])
            self.assertEqual(control.getPC(), pc)

    def test_setRC_1(self):
        control = ControlRegister()
        for rc in self.BITS:
            control.setRC(rc)
            self.assertEqual(rc, control._RC)

    def test_setRC_2(self):
        control = ControlRegister()
        for rc in self.BITS:
            control.setRC(rc[0], rc[1])
            self.assertEqual(rc, control._RC)

    def test_getRC(self):
        # assumes setRC() works correctly
        control = ControlRegister()
        for rc in self.BITS:
            control.setRC(rc[0], rc[1])
            self.assertEqual(control.getRC(), rc)

    def test_setIC_1(self):
        control = ControlRegister()
        for ic in self.BITS:
            control.setIC(ic)
            self.assertEqual(ic, control._IC)

    def test_setIC_2(self):
        control = ControlRegister()
        for ic in self.BITS:
            control.setIC(ic[0], ic[1])
            self.assertEqual(ic, control._IC)

    def test_getIC(self):
        # assumes setIC() works correctly
        control = ControlRegister()
        for ic in self.BITS:
            control.setIC(ic[0], ic[1])
            self.assertEqual(control.getIC(), ic)
class TestStatusRegister(unittest.TestCase):
    """Placeholder: StatusRegister tests are not written yet (TODO list below)."""
    pass
"""
def test_setTOP_1(self):
def test_setTOP_2(self):
def getTOP(self):
def test_setC_1(self):
def test_setC_2(self):
def test_getC(self):
def test_decTOP(self):
def test_incTOP(self):
"""
if __name__ == '__main__':
unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Casos de prueba de instruction_set.py
"""
import random
import math
#módulo de Tests Unitarios
import unittest
#importa el módulo a testear:
from reduced_instruction_set import *
"""
Test FLD
Fecha: 28/07/2008
Leonardo Manuel Rocha
Propósito:
Observar que el dato que se introduce mediante FLD de instruction_set.py
se corresponda con los datos que se almacenan en la pila
Dependencias:
Pila
StatusRegister
Método:
Se crean valores enteros positivos, cero y negativos los que se introducirán
mediante FLD.
Se comprueba que el valor almacenado corresponda con el introducido
Esperado:
Test OK
"""
class TestFLD(unittest.TestCase):
    """Check that FLD pushes its operand onto the FPU stack unchanged."""

    def _load_and_check(self, value):
        # Push via FLD and verify the value stored at the top of the stack.
        FLD(value)
        self.assertEqual(pila.getI(pila.head())[0], value)

    def testFLDpos(self):
        # A positive operand.
        self._load_and_check(111111)

    def testFLDneg(self):
        # A negative operand.
        self._load_and_check(-111111)

    def testFLDcero(self):
        # Zero.
        self._load_and_check(0)
#Test ABS
#
#class TestFABS(unittest.TestCase):
# pass
#Test FADD
class TestFADD(unittest.TestCase):
    """Check FADD: the top two stack values are replaced by their sum."""

    def testFADD_1(self):
        # Drain values left behind by other test cases, as the sibling
        # FSUB/FMUL/FDIV tests do, so the operands land on a clean top.
        for i in range(8):
            pila.pop()
        a = random.randint(-2 ** 10, 2 ** 10)
        b = random.randint(-2 ** 10, 2 ** 10)
        c = a + b
        pila.push(a)
        pila.push(b)
        FADD(0, 1)
        self.assertEqual(pila.getI(pila.head())[0], c)
#Test FSUB
class TestFSUB(unittest.TestCase):
    """Check FSUB: the result is ST(0) - ST(1), i.e. last pushed minus previous."""
    def testFSUB(self):
        # Drain leftover stack slots from previously-run test cases.
        for i in range(8):
            pila.pop()
        a = random.randint(-2**10,2**10)
        b = random.randint(-2**10,2**10)
        # b is pushed last, so it becomes ST(0) and the expected result is b - a.
        c = b - a
        pila.push(a)
        pila.push(b)
        FSUB(0,1)
        self.assertEqual(pila.getI(pila.head())[0],c)
class TestFMUL(unittest.TestCase):
    """Check FMUL: the top two stack values are replaced by their product."""

    def testFMUL(self):
        # Drain leftover stack slots from previously-run test cases.
        for i in range(8):
            pila.pop()
        a = random.randint(-2 ** 6, 2 ** 6)
        b = random.randint(-2 ** 6, 2 ** 6)
        c = a * b
        pila.push(a)
        pila.push(b)
        # Stray debug prints of pila._pst removed; they polluted the
        # test runner's output.
        FMUL(0, 1)
        self.assertEqual(pila.getI(pila.head())[0], c)
class TestFDIV(unittest.TestCase):
    """Check FDIV: ST(0) divided by ST(1) replaces the top of the stack."""

    def testFDIV(self):
        # Drain leftover stack slots from previously-run test cases.
        for i in range(8):
            pila.pop()
        # The divisor (pushed first) must not be zero, otherwise FDIV
        # raises ZeroDivisionError and the test fails spuriously.
        a = 0
        while a == 0:
            a = random.randint(-2 ** 6, 2 ** 6)
        b = random.randint(-2 ** 6, 2 ** 6)
        c = b / a
        pila.push(a)
        pila.push(b)
        # Stray debug prints of pila._pst removed.
        FDIV(0, 1)
        self.assertEqual(pila.getI(pila.head())[0], c)
class TestFCOS(unittest.TestCase):
    """Check FCOS replaces ST(0) with its cosine."""
    def testFCOS(self):
        # Drain leftover stack slots from previously-run test cases.
        for i in range(8):
            pila.pop()
        a = random.randint(-2**6,2**6)
        b = math.cos(a)
        pila.push(a)
        FCOS()
        self.assertEqual(pila.getI(pila.head())[0],b)
class TestFSIN(unittest.TestCase):
    """Check FSIN replaces ST(0) with its sine."""
    def testFSIN(self):
        # Drain leftover stack slots from previously-run test cases.
        for i in range(8):
            pila.pop()
        a = random.randint(-2**6,2**6)
        b = math.sin(a)
        pila.push(a)
        FSIN()
        self.assertEqual(pila.getI(pila.head())[0],b)
class TestFSINCOS(unittest.TestCase):
    """Check FSINCOS leaves cos(a) on top of the stack with sin(a) below it."""
    def testFSINCOS(self):
        # Drain leftover stack slots from previously-run test cases.
        for i in range(8):
            pila.pop()
        a = random.randint(-2**6,2**6)
        b = math.cos(a)
        c = math.sin(a)
        pila.push(a)
        FSINCOS()
        # Top of stack holds the cosine, the slot below holds the sine.
        self.assertEqual(pila.getI(pila.head())[0],b)
        self.assertEqual(pila.getI(pila.head()-1)[0],c)
class TestFSQRT(unittest.TestCase):
    """Check FSQRT replaces ST(0) with its square root."""
    def testFSQRT(self):
        # Drain leftover stack slots from previously-run test cases.
        for i in range(8):
            pila.pop()
        # The operand is drawn non-negative so math.sqrt is defined.
        a = random.randint(0,2**6)
        b = math.sqrt(a)
        pila.push(a)
        FSQRT()
        self.assertEqual(pila.getI(pila.head())[0],b)
class TestFSTP(unittest.TestCase):
    """Check FSTP: returns ST(0), pops it, and exposes the value below."""
    def testFSTP(self):
        # Drain leftover stack slots from previously-run test cases.
        for i in range(8):
            pila.pop()
        a = random.randint(-2**6,2**6)
        b = random.randint(-2**6,2**6)
        pila.push(b)
        pila.push(a)
        # 1111 is a dummy memory-destination argument (FSTP ignores it).
        self.assertEqual(FSTP(1111),a)
        self.assertEqual(pila.getI(pila.head())[0],b)
class TestFCOM(unittest.TestCase):
    """Check FCOM sets the condition-code bits for >, < and == comparisons."""
    def testFCOM(self):
        # Drain leftover stack slots from previously-run test cases.
        for i in range(8):
            pila.pop()
        # Pairs (a[i], b[i]) exercise a>b, a<b and a==b; c[i] is the
        # expected condition-code vector for each case (the fourth entry,
        # the unordered result, is not exercised here).
        a = [2,1,0]
        b = [1,2,0]
        c= [[0,0,0,0],[1,0,0,0],[0,0,0,1],[1,0,1,1]]
        for i in range(3):
            pila.push(b[i])
            pila.push(a[i])
            FCOM(1)
            self.assertEqual(status.getC(),c[i])
# Run the instruction-set test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fpu_structure import *
from datahandling import *
import math
#TODO, faltan agregar las modificaciones que se hacen a las banderas de los diferentes registros
#TODO, faltan un montón de instrucciones
# --- Module-level FPU state shared by every instruction below ---
uesp = None #last element popped from the stack ("ultimo elemento sacado de pila")
res = None #result of the last operation
# NOTE(review): res and uesp would need a `global` declaration inside each
# function to actually update these module attributes; as written, the
# functions only bind local names with the same spelling.
pila = Pila()
control = ControlRegister()
status = StatusRegister()
pinout = Pinout()
statusX86 = StatusX86()
overflow = False
underflow = False
#pag 121
def F2XM1():
    """Replace ST(0) with 2**ST(0) - 1 and return the new value."""
    pila.push((2**pila.pop()[0] )-1)
    res = pila.getI(pila.head())[0]  # local only; module-level `res` is unchanged
    return res
#pag 123
def FABS():
    """Replace ST(0) with its absolute value; set x86 ZF when it is zero."""
    value = abs(pila.pop()[0])
    pila.push(value)
    res = pila.getI(pila.head())[0]
    if res == 0:
        statusX86._ZF = 1
    return res
# Operaciones de Adición
"""
Operaciones de adición
Opcode Instruction Description
D8 C0+i FADD ST(0), ST(i)Add ST(0) to ST(i) and store result in ST(0)
DC C0+i FADD ST(i), ST(0)Add ST(i) to ST(0) and store result in ST(i)
DE C0+i FADDP ST(i), ST(0) Add ST(0) to ST(i), store result in ST(i), and pop the
register stack
DE C1 FADDP Add ST(0) to ST(1), store result in ST(1), and pop the
"""
#FADD
def FADD(st0=0,sti=1):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FADD, st0"
#raise()
else:
a = pila.getI(pila.head())[0]
b = pila.getI(pila.head()-1)[0]
res = a + b
#print st0,";", sti
pila.setI(pila.head(), res)#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
if res == 0 :
statusX86._ZF=1
return res
#FADDP
def FADDP(sti=1,st0=0):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FADDP, st0"
#raise()
else:
a = pila.getI(pila.head())[0]
b = pila.getI(pila.head()-1)[0]
res = a + b
pila.setI(pila.head()-1,res) #pila[1]=pila[1]+pila[0]
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
if res == 0 :
statusX86._ZF=1
return uesp
"""
Opcode Instruction Description
D8 E0+i FSUB ST(0), ST(i) Subtract ST(i) from ST(0) and store result in ST(0)
DC E8+i FSUB ST(i), ST(0) Subtract ST(0) from ST(i) and store result in ST(i)
DE E8+i FSUBP ST(i), ST(0) Subtract ST(0) from ST(i), store result in ST(i), and pop
register stack
DE E9 FSUBP Subtract ST(0) from ST(1), store result in ST(1), and pop
register stack
"""
def FSUB(st0=0,sti=1):
if st0 == sti or (sti != 0 and st0 != 0):
print "Error en FSUB, st0"
#raise()
else:
a = pila.getI(pila.head())[0]
b = pila.getI(pila.head()-1)[0]
res = a - b
pila.setI(pila.head(), res)#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
if res == 0 :
statusX86._ZF=1
return pila.getI(pila.head())[0]
def FSUBP(st0=0,sti=1):
uesp = None
if st0 == sti or (st0!= 0 and sti != 0):
print "Error en FSUBP, st0"
#raise()
else:
a = pila.getI(pila.head())[0]
b = pila.getI(pila.head()-1)[0]
res = a - b
pila.setI(pila.head(), res)#pila[0] = pila[st0] + pila[sti] #TODO, OJO, acá puede haber errores cuando cambie el tema a complemento a 2
if res == 0 :
statusX86._ZF=1
uesp = pila.pop()[0] #OJO acá cuando cambie el registro intermedio de pop
#status.incTOP() #TODO, revisar si no hay fallo acá
return uesp
#Operaciones de Signo
def FCHS():
    """Negate ST(0) in place and return the new value."""
    pila.setI(pila.head(),-1* pila.getI(pila.head())[0])
    res = pila.getI(pila.head())[0]
    return res
def FNCLEX():
    """Clear the FPU exception flags without checking for pending exceptions."""
    status._PE=0
    status._UE=0
    status._OE=0
    status._ZE=0
    status._DE=0
    status._IE=0
    # status._ES=0 # pentium processors
    # status._EF=0 # pentium processors
    status._B=0
#Operaciones de Comparación
"""
Opcode Instruction Description
D8 /2 FCOM m32real Compare ST(0) with m32real.
DC /2 FCOM m64real Compare ST(0) with m64real.
D8 D0+i FCOM ST(i) Compare ST(0) with ST(i).
D8 D1 FCOM Compare ST(0) with ST(1).
D8 /3 FCOMP m32real Compare ST(0) with m32real and pop register stack.
DC /3 FCOMP m64real Compare ST(0) with m64real and pop register stack.
D8 D8+i FCOMP ST(i) Compare ST(0) with ST(i) and pop register stack.
D8 D9 FCOMP Compare ST(0) with ST(1) and pop register stack.
DE D9 FCOMPP Compare ST(0) with ST(1) and pop register stack twice.
"""
def FCOM(sti):
    """Compare ST(0) with ST(i) and set the condition-code bits.

    Writes into the status condition vector (c[0], c[2], c[3]):
    greater -> 0,0,0; less -> 1,0,0; equal -> 0,0,1; otherwise
    (unordered operands) -> 1,1,1.
    """
    # TODO: 32-bit operand => 32-bit compare; 64-bit operand => 64-bit
    # compare; anything else is an error (memory-operand forms missing).
    c=status.getC()
    if pila.getI(pila.head())[0] > pila.getI(pila.head()-sti)[0]:
        c[0]= 0
        c[2]= 0
        c[3]= 0
    elif pila.getI(pila.head())[0] < pila.getI(pila.head()-sti)[0]:
        c[0]= 1
        c[2]= 0
        c[3]= 0
    elif pila.getI(pila.head())[0] == pila.getI(pila.head()-sti)[0]:
        c[0]= 0
        c[2]= 0
        c[3]= 1
    else:
        # Reached only when none of >, <, == hold (unordered operands).
        c[0]= 1
        c[2]= 1
        c[3]= 1
    status.setC(c)
def FCOMP(sti):
    """Compare ST(0) with ST(i) via FCOM, then pop the register stack."""
    FCOM(sti)
    uesp = pila.pop()[0]
    res = uesp  # local only; module-level `res` is unchanged
    status.incTOP() #TODO: re-check TOP bookkeeping here
def FCOMPP():
    """Compare ST(0) with ST(1) via FCOM, then pop the register stack twice."""
    FCOM(1)
    uesp = pila.pop()[0] # first pop
    status.incTOP() #TODO: re-check TOP bookkeeping here
    uesp = pila.pop()[0] # second pop, required by FCOMPP semantics
    res = uesp  # local only; module-level `res` is unchanged
    status.incTOP() #TODO: re-check TOP bookkeeping here
#Operaciones sobre st0
def FCOS():
    """Replace ST(0) with cos(ST(0)); set C2 when the operand is out of range.

    NOTE(review): C2 is set for |ST(0)| > 2**63 but the cosine is still
    computed -- confirm whether out-of-range operands should leave ST(0)
    untouched instead.
    """
    caux = status.getC()
    if abs(pila.getI(pila.head())[0]) > (2**63):
        caux[2]=1
    else:
        caux[2]=0
    pila.push(math.cos(pila.pop()[0]))
    if pila.getI(pila.head())[0] == 0 :
        statusX86._ZF=1
    status.setC(caux)
    res =pila.getI(pila.head())[0]
    return res
"""
Opcode Instruction Description
D9 FE FSIN Replace ST(0) with its sine.
"""
def FSIN():
    """Replace ST(0) with sin(ST(0)); set C2 when the operand is out of range.

    NOTE(review): as in FCOS, the sine is computed even when C2 is set.
    """
    caux = status.getC()
    if abs( pila.getI(pila.head())[0]) > (2**63):
        caux[2]=1
    else:
        caux[2]=0
    pila.push(math.sin(pila.pop()[0]))
    if pila.getI(pila.head())[0] == 0 :
        statusX86._ZF=1
    status.setC(caux)
    res =pila.getI(pila.head())[0]
    return res
"""
Opcode Instruction Description
D9 FB FSINCOS Compute the sine and cosine of ST(0); replace ST(0) with
the sine, and push the cosine onto the register stack.
"""
def FSINCOS():
    """Replace ST(0) with its sine and push its cosine onto the stack.

    After the call the cosine is on top and the sine is one slot below.
    Sets C2 when |ST(0)| > 2**63; sets x86 ZF when the top value is zero.
    """
    caux = status.getC()
    aux= pila.getI(pila.head())[0]
    if abs(aux) > (2**63):
        caux[2]=1
    else:
        caux[2]=0
    pila.push(math.sin(pila.pop()[0]))
    pila.push(math.cos(aux))
    status.decTOP()
    if pila.getI(pila.head())[0] == 0 :
        statusX86._ZF=1
    status.setC(caux)
    res =pila.getI(pila.head())[0]
    return res
"""
Opcode Instruction Description
D9 FA FSQRT Calculates square root of ST(0) and stores the result in
ST(0)
"""
def FSQRT():
    """Replace ST(0) with its square root; set x86 ZF when it is zero.

    NOTE(review): a negative operand raises ValueError from math.sqrt;
    no invalid-operation flag is set.
    """
    pila.push(math.sqrt(pila.pop()[0]))
    if pila.getI(pila.head())[0] == 0 :
        statusX86._ZF=1
    res =pila.getI(pila.head())[0]
    return res
"""
Opcode Instruction Description
D8 F0+i FDIV ST(0), ST(i) Divide ST(0) by ST(i) and store result in ST(0)
DC F8+i FDIV ST(i), ST(0) Divide ST(i) by ST(0) and store result in ST(i)
DE F8+i FDIVP ST(i), ST(0) Divide ST(i) by ST(0), store result in ST(i), and pop the
register stack
"""
def FDIV (st0,sti):
    """Divide ST(0) by ST(i) and store the quotient in ST(0).

    Sets the zero-divide flag (ZE) when ST(i) is zero.
    NOTE(review): the division is still executed afterwards, so a zero
    divisor also raises ZeroDivisionError -- confirm intended behavior.
    NOTE(review): the st0 argument is currently unused.
    """
    a = pila.getI(pila.head()-sti)[0]
    b = pila.getI(pila.head())[0]
    if a == 0:
        status._ZE = 1
    res = b / a
    pila.setI(pila.head(),res)
    # b == 0 implies res == 0 (for a != 0), matching the res-based ZF
    # checks used by the other instructions.
    if b == 0:
        statusX86._ZF=1
    return pila.getI(pila.head())[0]
def FDIVP(sti, st0):
    """Divide ST(0) by ST(i), store the quotient, then pop the stack.

    Returns the popped value.  Bug fix: the original forwarded its
    arguments to FDIV swapped -- FDIV(sti, st0) -- which made FDIV read
    its `sti` operand from the wrong slot (for the usual FDIVP(1, 0)
    call it divided ST(0) by itself).  Arguments are now forwarded in
    FDIV's declared (st0, sti) order, matching how FMULP calls FMUL.
    """
    FDIV(st0, sti)
    uesp = pila.pop()[0]  # pop the old ST(0)
    status.incTOP()  # TODO: re-check TOP bookkeeping here
    return uesp
#Operaciones de liberación de cabeza de pila
def FFREE():
    """Tag ST(0) as free (value None, tag bits [1,1]) without popping."""
    pila.setI(pila.head(),None,[1,1])
    res =pila.getI(pila.head())[0]  # local only; module-level `res` is unchanged
def FLD(num):
    """Push num onto the FPU register stack and decrement TOP."""
    pila.push(num)
    status.decTOP()
    res =pila.getI(pila.head())[0]  # local only; module-level `res` is unchanged
"""
Opcode Instruction Description
D9 E8 FLD1 Push +1.0 onto the FPU register stack.
D9 E9 FLDL2T Push log210 onto the FPU register stack.
D9 EA FLDL2E Push log2e onto the FPU register stack.
D9 EB FLDPI Push π onto the FPU register stack.
D9 EC FLDLG2 Push log102 onto the FPU register stack.
D9 ED FLDLN2 Push loge2 onto the FPU register stack.
D9 EE FLDZ Push +0.0 onto the FPU register stack.
"""
def FLD1():
    """Push +1.0 onto the FPU register stack (FLD already adjusts TOP)."""
    FLD(1.0)
def FLDL2T():
    """Push log2(10) onto the FPU register stack."""
    FLD(math.log(10,2))
def FLDL2E():
    """Push log2(e) onto the FPU register stack."""
    FLD(math.log(math.e,2))
def FLDPI():
    """Push pi onto the FPU register stack."""
    FLD(math.pi)
def FLDLG2():
    """Push log10(2) onto the FPU register stack."""
    FLD(math.log10(2))
def FLDLN2():
    """Push ln(2) onto the FPU register stack."""
    FLD(math.log(2,math.e))
def FLDZ():
    """Push +0.0 onto the FPU register stack."""
    FLD(0.0)
"""
Opcode Instruction Description
D9 /2 FST m32real Copy ST(0) to m32real
DD /2 FST m64real Copy ST(0) to m64real
DD D0+i FST ST(i) Copy ST(0) to ST(i)
D9 /3 FSTP m32real Copy ST(0) to m32real and pop register stack
DD /3 FSTP m64real Copy ST(0) to m64real and pop register stack
DB /7 FSTP m80real Copy ST(0) to m80real and pop register stack
DD D8+i FSTP ST(i) Copy ST(0) to ST(i) and pop register stack
"""
def FST(mreal):
    """Return a copy of ST(0) without popping.

    NOTE(review): the mreal destination argument is currently ignored --
    nothing is stored to memory; the value is only returned.
    """
    uesp= pila.getI(pila.head())[0]
    res =uesp  # local only; module-level `res` is unchanged
    return uesp
def FSTP(mreal):
    """Pop ST(0) and return it.

    NOTE(review): the mreal destination argument is ignored; the popped
    value is only returned, not stored to memory.
    """
    uesp= pila.pop()[0]
    status.incTOP() #TODO: re-check TOP bookkeeping here
    res = uesp  # local only; module-level `res` is unchanged
    return uesp
#incrementa TOP de status
def FINCSTP():
    """Increment the TOP-of-stack field in the status register."""
    status.incTOP()
#Multiplicación
"""
Opcode Instruction Description
D8 /1 FMUL m32real Multiply ST(0) by m32real and store result in ST(0)
DC /1 FMUL m64real Multiply ST(0) by m64real and store result in ST(0)
D8 C8+i FMUL ST(0), ST(i) Multiply ST(0) by ST(i) and store result in ST(0)
DC C8+i FMUL ST(i), ST(0) Multiply ST(i) by ST(0) and store result in ST(i)
DE C8+i FMULP ST(i), ST(0) Multiply ST(i) by ST(0), store result in ST(i), and pop the
register stack
DE C9 FMULP Multiply ST(1) by ST(0), store result in ST(1), and pop the
register stack
DA /1 FIMUL m32int Multiply ST(0) by m32int and store result in ST(0)
DE /1 FIMUL m16int Multiply ST(0) by m16int and store result in ST(0)
"""
def FMUL (st0=0,sti=1):
    """Multiply ST(0) by ST(i) and store the product in ST(0).

    Sets the x86 ZF flag when the product is zero.
    NOTE(review): the st0 argument is currently unused.
    """
    a = pila.getI(pila.head()-sti)[0]
    b = pila.getI(pila.head())[0]
    res = a * b
    pila.setI(pila.head(),res)
    if res == 0 :
        statusX86._ZF=1
    return pila.getI(pila.head())[0]
def FMULP (st0,sti):
    """Multiply ST(0) by ST(i) via FMUL, then pop; returns the popped value."""
    FMUL(st0,sti)
    uesp = pila.pop()[0] # pop the old ST(0)
    #status.incTOP() #TODO: re-check TOP bookkeeping here
    return uesp
#No Operation
def FNOP():
    """Do nothing; the FPU no-operation instruction."""
    pass
"""
Opcode Instruction Description
D9 F3 FPATAN Replace ST(1) with arctan(ST(1)/ST(0)) and pop the register stack
"""
def FPATAN():
    """Replace ST(1) with arctan(ST(1)/ST(0)) and pop the register stack.

    Returns the popped value.  NOTE(review): slot index 1 is assumed to
    be ST(1) here -- confirm against the Pila implementation.
    """
    pila.setI(1,math.atan(pila.getI(1)[0]/ pila.getI(pila.head())[0]))
    uesp=pila.pop()[0]
    status.incTOP() #TODO: re-check TOP bookkeeping here
    if uesp == 0 :
        statusX86._ZF=1
    res = uesp  # local only; module-level `res` is unchanged
    return uesp
"""
Opcode Instruction Clocks Description
D9 F2 FPTAN 17-173 Replace ST(0) with its tangent and push 1
onto the FPU stack.
"""
def FPTAN():
caux=status.getC()
if pila.getI(pila.head()) < 2**63:
caux[2]=0
status.setC(caux)
pila.setI(pila.head(),math.tan( pila.getI(pila.head())))
if pila.getI(pila.head())[0] == 0 :
statusX86._ZF=1
FLD1()
status.decTOP()
else:
caux[2]=1
status.setC(caux)
print "Operando fuera de rango"
"""
Opcode Instruction Description
D9 FC FRNDINT Round ST(0) to an integer.
"""
def FRNDINT():
    """Round ST(0) to the nearest integer in place."""
    pila.push(int(round(pila.pop()[0])))
    res =pila.getI(pila.head())[0]  # local only; module-level `res` is unchanged
def FSCALE():
    """Scale ST(0) by 2**ST(1) and store the result back in ST(0)."""
    # Bug fix: the original multiplied the raw stack cells (lists)
    # instead of the stored values; [0] extracts the numeric value as
    # every other instruction here does.
    pila.setI(pila.head(), pila.getI(pila.head())[0] * (2 ** pila.getI(1)[0]))
    res = pila.getI(pila.head())[0]
    # TODO: set status flags (overflow/underflow)
"""
Opcode Instruction Description
D9 C8+i FXCH ST(i) Exchange the contents of ST(0) and ST(i)
D9 C9 FXCH Exchange the contents of ST(0) and ST(1)
"""
def FXCH(sti):
    """Exchange the contents (value and tag bits) of ST(0) and ST(i)."""
    aux = pila.getI(pila.head()-sti)
    pila.setI(pila.head()-sti, pila.getI(pila.head())[0], pila.getI(pila.head())[1])
    pila.setI(pila.head(),aux[0],aux[1])
    res =pila.getI(pila.head())[0]  # local only; module-level `res` is unchanged
"""
Opcode Instruction Description
D9 F1 FYL2X Replace ST(1) with (ST(1) ∗ log2ST(0)) and pop the
register stack
"""
def FYL2X():
    """Replace ST(1) with ST(1) * log2(ST(0)) and pop the register stack.

    Returns the popped value (the old ST(0)).
    """
    # Bug fix: the original passed the raw stack cell (a list) to
    # math.log and dropped the ST(1) factor required by the opcode table
    # above; both are corrected here.
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0], 2))
    uesp = pila.pop()[0]
    status.incTOP()  # TODO: re-check TOP bookkeeping here
    res = uesp
    return uesp
"""
Opcode Instruction Description
D9 F9 FYL2XP1 Replace ST(1) with ST(1) ∗ log2(ST(0) + 1.0) and pop the
register stack
"""
def FYL2XP1():
    """Replace ST(1) with ST(1) * log2(ST(0) + 1.0) and pop the register stack.

    Returns the popped value (the old ST(0)).
    """
    # Bug fix: the original defined a second FYL2X here, silently
    # shadowing the real FYL2X above; per the opcode table above this is
    # FYL2XP1.  It also computed log2(ST(0)) + 1 on a raw stack cell
    # instead of ST(1) * log2(ST(0) + 1.0).
    pila.setI(1, pila.getI(1)[0] * math.log(pila.getI(pila.head())[0] + 1.0, 2))
    uesp = pila.pop()[0]
    status.incTOP()  # TODO: re-check TOP bookkeeping here
    res = uesp
    return uesp
#Si es llamado como ejecutable, entonces decir que esto es una librería del set de instrucción de la fpu 8087, mostrar la doc y salir.
| Python |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''The setup and build script for the python-twitter library.'''
__author__ = 'python-twitter@googlegroups.com'
__version__ = '0.8.5'
# The base package metadata to be used by both distutils and setuptools
METADATA = dict(
  name = "python-twitter",
  version = __version__,
  py_modules = ['twitter'],
  author='The Python-Twitter Developers',
  author_email='python-twitter@googlegroups.com',
  description='A python wrapper around the Twitter API',
  license='Apache License 2.0',
  url='https://github.com/bear/python-twitter',
  keywords='twitter api',
)
# Extra package metadata to be used only if setuptools is installed
SETUPTOOLS_METADATA = dict(
  install_requires = ['setuptools', 'simplejson', 'oauth2'],
  include_package_data = True,
  classifiers = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: Apache Software License',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Communications :: Chat',
    'Topic :: Internet',
  ],
  test_suite = 'twitter_test.suite',
)
def Read(file):
    """Return the entire contents of *file* as a string.

    Uses a context manager so the file handle is closed promptly instead
    of leaking until garbage collection.
    """
    with open(file) as f:
        return f.read()
def BuildLongDescription():
    """Concatenate README.md and CHANGES into one long_description string."""
    parts = [Read('README.md'), Read('CHANGES')]
    return '\n'.join(parts)
def Main():
    """Assemble the package metadata and invoke the setup machinery."""
    # Build the long_description from the README and CHANGES
    METADATA['long_description'] = BuildLongDescription()
    # Use setuptools if available, otherwise fallback and use distutils
    try:
        import setuptools
        METADATA.update(SETUPTOOLS_METADATA)
        setuptools.setup(**METADATA)
    except ImportError:
        import distutils.core
        distutils.core.setup(**METADATA)
# Run the build when executed as a script.
if __name__ == '__main__':
    Main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# parse_qsl moved to urlparse module in v2.6
try:
from urlparse import parse_qsl
except:
from cgi import parse_qsl
import oauth2 as oauth
# Twitter OAuth endpoints for the PIN-based (out-of-band) authorization flow.
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
# Fill these in with your registered application's credentials before running.
consumer_key = None
consumer_secret = None
if consumer_key is None or consumer_secret is None:
    print 'You need to edit this script and provide values for the'
    print 'consumer_key and also consumer_secret.'
    print ''
    print 'The values you need come from Twitter - you need to register'
    print 'as a developer your "application". This is needed only until'
    print 'Twitter finishes the idea they have of a way to allow open-source'
    print 'based libraries to have a token that can be used to generate a'
    print 'one-time use key that will allow the library to make the request'
    print 'on your behalf.'
    print ''
    sys.exit(1)
# Step 1: obtain a temporary request token signed with the consumer secret.
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
print 'Requesting temp token from Twitter'
resp, content = oauth_client.request(REQUEST_TOKEN_URL, 'GET')
if resp['status'] != '200':
    print 'Invalid respond from Twitter requesting temp token: %s' % resp['status']
else:
    # Step 2: send the user to the authorization page to fetch a PIN.
    request_token = dict(parse_qsl(content))
    print ''
    print 'Please visit this Twitter page and retrieve the pincode to be used'
    print 'in the next step to obtaining an Authentication Token:'
    print ''
    print '%s?oauth_token=%s' % (AUTHORIZATION_URL, request_token['oauth_token'])
    print ''
    pincode = raw_input('Pincode? ')
    # Step 3: exchange the verified request token for an access token.
    token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
    token.set_verifier(pincode)
    print ''
    print 'Generating and signing request for an access token'
    print ''
    oauth_client = oauth.Client(oauth_consumer, token)
    resp, content = oauth_client.request(ACCESS_TOKEN_URL, method='POST', body='oauth_callback=oob&oauth_verifier=%s' % pincode)
    access_token = dict(parse_qsl(content))
    if resp['status'] != '200':
        print 'The request for a Token did not succeed: %s' % resp['status']
        print access_token
    else:
        print 'Your Twitter Access Token key: %s' % access_token['oauth_token']
        print '          Access Token secret: %s' % access_token['oauth_token_secret']
        print ''
| Python |
"""Implementation of JSONEncoder
"""
import re
try:
from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from simplejson._speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
# Characters that must be escaped inside a JSON string (controls, '"', '\').
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# Same, plus everything outside printable ASCII (for ensure_ascii output).
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Single-character escape shortcuts; remaining control chars fall back to
# the \uXXXX form filled in by the loop below.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def encode_basestring(s):
    """Return a JSON representation of a Python string:
    the string quoted, with the characters matched by ESCAPE escaped.
    """
    def replace(match):
        return ESCAPE_DCT[match.group(0)]
    return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string.

    Non-ASCII characters are emitted as \\uXXXX escapes (a surrogate
    pair for code points above U+FFFF).  Pure-Python fallback for the C
    implementation in simplejson._speedups.
    """
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        # Byte strings containing non-ASCII bytes are decoded as UTF-8.
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C implementation when the _speedups extension is available.
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Default separators; overridden via the `separators` constructor arg.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is False, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is True, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is True, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is True, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is True, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.

        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            # A supplied callable shadows the class-level default() below.
            self.default = default
        self.encoding = encoding
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError("%r is not JSON serializable" % (o,))
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return ''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the chosen encoder so byte strings are decoded with the
            # configured encoding first.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)
        def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
            # Check for specials.  Note that this type of test is processor- and/or
            # platform-specific, so do tests which don't depend on the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError("Out of range float values are not JSON compliant: %r"
                    % (o,))
            return text
        # Use the C-accelerated one-shot encoder when possible; otherwise
        # fall back to the pure-Python generator factory below.
        if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Build and return the pure-Python _iterencode generator, closed over
    the encoder configuration (markers dict for circular-reference checks,
    encoders for strings/floats, separators, indent and sort options).
    """
    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON array representation of lst, chunk by chunk.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # buf carries only the separator after the first element.
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON object representation of dct, chunk by chunk.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif isinstance(key, (int, long)):
                key = str(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif _skipkeys:
                continue
            else:
                raise TypeError("key %r is not a string" % (key,))
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        # Top-level dispatch on the type of o.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            # Unknown type: let the user-supplied default() convert it.
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
try:
from simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
# Regex flags shared by every decoder pattern; DOTALL lets '.' match
# newlines inside strings.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
# Module-level singletons for the non-standard NaN/Infinity/-Infinity
# literals accepted by the decoder (see _CONSTANTS below).
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
    """Map absolute index *pos* in *doc* to a ``(line, column)`` pair.

    Lines are 1-based.  On the first line the column equals *pos*
    (0-based); on later lines it is the 1-based offset from the last
    newline.  This asymmetry is preserved because the error-message
    format (and its doctests) depend on it.
    """
    lineno = doc.count('\n', 0, pos) + 1
    if lineno > 1:
        colno = pos - doc.rindex('\n', 0, pos)
    else:
        colno = pos
    return lineno, colno

def errmsg(msg, doc, pos, end=None):
    """Format a decoder error message with line/column context.

    Also invoked from the _speedups C extension, so the signature and
    output format must stay stable.
    """
    # Note that this function is called from _speedups
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
    return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
# Literal tokens that are outside the JSON spec but accepted by default,
# mapped to their float values.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}
# Matches a run of ordinary characters followed by the first character
# needing special handling: closing quote, backslash, or a control char.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character escape sequences, e.g. 'n' -> '\n'.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
# Encoding assumed for str input when the caller does not specify one.
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                # BUG FIX: format the message through errmsg() like every
                # other decoder error; previously the raw (msg, s, end)
                # tuple was passed to ValueError, producing an unreadable
                # error with no line/column information.
                raise ValueError(errmsg(msg, s, end))
            else:
                _append(terminator)
            continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                raise ValueError(
                    errmsg("Invalid \\escape: %r" % (esc,), s, end))
            end += 1
        else:
            # Unicode escape sequence: 'u' at s[end], 4 hex digits after it.
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise ValueError(errmsg(msg, s, end))
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems: a high surrogate
            # must be followed by a '\uXXXX' low surrogate.
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise ValueError(errmsg(msg, s, end))
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise ValueError(errmsg(msg, s, end))
                uni2 = int(esc2, 16)
                # Combine the pair into a single code point above U+FFFF.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring

# Matches a (possibly empty) run of insignificant JSON whitespace.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
# Same characters as a string, for cheap single-character membership tests.
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object starting just after its '{'.

    Python-2-only tuple parameter: called as ``parse_object((s, idx), ...)``
    from the scanner.  Returns ``(dict_or_hook_result, end_index)``.
    """
    pairs = {}
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    while True:
        # end points just past the opening quote of the key.
        key, end = scanstring(s, end, encoding, strict)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting : delimiter", s, end))
        end += 1
        # Skip whitespace after ':' (fast path for zero/one space).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs[key] = value
        # Skip whitespace, then expect ',' (more pairs) or '}' (done).
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
        # Skip whitespace before the next key's opening quote.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array starting just after its '['.

    Python-2-only tuple parameter: called as ``parse_array((s, idx), ...)``
    from the scanner.  Returns ``(list, end_index)``.
    """
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        _append(value)
        # Skip whitespace, then expect ',' (more values) or ']' (done).
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))
        # Skip whitespace after ',' (fast path for zero/one space).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Decoder for JSON <http://json.org> documents.

    Default translations:

        object        -> dict
        array         -> list
        string        -> unicode
        number (int)  -> int, long
        number (real) -> float
        true / false  -> True / False
        null          -> None

    The non-standard literals ``NaN``, ``Infinity`` and ``-Infinity`` are
    also accepted and mapped to the corresponding ``float`` values.
    """
    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True):
        """Create a new decoder.

        ``encoding`` -- encoding used to interpret ``str`` input (utf-8 by
        default); ignored for ``unicode`` input.  Only ASCII-superset
        encodings work; decode other encodings to ``unicode`` first.

        ``object_hook`` -- called with every decoded JSON object (a
        ``dict``); its return value replaces the dict (e.g. for JSON-RPC
        class hinting).

        ``parse_float`` / ``parse_int`` -- called with the literal text of
        every JSON real/int; default to ``float`` / ``int``.

        ``parse_constant`` -- called with '-Infinity', 'Infinity' or 'NaN';
        may raise to reject these non-standard values.

        ``strict`` -- when True, literal control characters inside strings
        are an error.
        """
        self.encoding = encoding
        self.strict = strict
        self.object_hook = object_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        # Parser entry points; the scanner closure captures the attributes
        # above, so it must be created last.
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        self.scan_once = make_scanner(self)
    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python object represented by the JSON document ``s``.

        Raises ValueError if anything but whitespace trails the document.
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj
    def raw_decode(self, s, idx=0):
        """Decode the JSON document beginning at ``s[idx]``.

        Returns ``(object, end_index)``, which allows decoding a document
        that has extraneous data after it.
        """
        try:
            return self.scan_once(s, idx)
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
...     raise TypeError("%r is not JSON serializable" % (obj,))
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.7'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
# Shared encoder reused by dump()/dumps() whenever the caller sticks to
# all-default arguments (the common case); avoids re-constructing a
# JSONEncoder per call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    ``skipkeys`` -- when True, dict keys that are not of a basic type
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``,
    ``None``) are skipped instead of raising ``TypeError``.

    ``ensure_ascii`` -- when False, some chunks written to ``fp`` may be
    ``unicode`` instances; unless ``fp.write()`` understands ``unicode``
    (as in ``codecs.getwriter()``) this is likely to cause an error.

    ``check_circular`` -- when False, the circular reference check for
    containers is skipped; a cycle then causes ``OverflowError`` or worse.

    ``allow_nan`` -- when False, serializing out-of-range floats
    (``nan``, ``inf``, ``-inf``) raises ``ValueError`` instead of using
    the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    ``indent`` -- non-negative integer for pretty-printing; 0 inserts
    newlines only; ``None`` (the default) is the most compact form.

    ``separators`` -- ``(item_separator, dict_separator)`` tuple
    overriding the default ``(', ', ': ')``; ``(',', ':')`` is the most
    compact JSON representation.

    ``encoding`` -- character encoding for ``str`` instances (UTF-8).

    ``default(obj)`` -- function returning a serializable version of
    ``obj`` or raising ``TypeError``; by default ``TypeError`` is raised.

    ``cls`` -- custom ``JSONEncoder`` subclass (e.g. one overriding
    ``.default()``).
    """
    all_defaults = (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw)
    if all_defaults:
        # Fast path: reuse the shared module-level encoder.
        chunks = _default_encoder.iterencode(obj)
    else:
        encoder = (cls or JSONEncoder)(skipkeys=skipkeys,
            ensure_ascii=ensure_ascii, check_circular=check_circular,
            allow_nan=allow_nan, indent=indent, separators=separators,
            encoding=encoding, default=default, **kw)
        chunks = encoder.iterencode(obj)
    # Writing chunk-by-chunk keeps tracebacks debuggable; writelines would
    # be marginally faster on some Pythons.
    for chunk in chunks:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    ``skipkeys`` -- when True, dict keys that are not of a basic type
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``,
    ``None``) are skipped instead of raising ``TypeError``.

    ``ensure_ascii`` -- when False, the return value may be a ``unicode``
    instance (subject to normal ``str``/``unicode`` coercion rules)
    instead of an ASCII-escaped ``str``.

    ``check_circular`` -- when False, the circular reference check for
    containers is skipped; a cycle then causes ``OverflowError`` or worse.

    ``allow_nan`` -- when False, serializing out-of-range floats
    (``nan``, ``inf``, ``-inf``) raises ``ValueError`` instead of using
    the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    ``indent`` -- non-negative integer for pretty-printing; 0 inserts
    newlines only; ``None`` (the default) is the most compact form.

    ``separators`` -- ``(item_separator, dict_separator)`` tuple
    overriding the default ``(', ', ': ')``; ``(',', ':')`` is the most
    compact JSON representation.

    ``encoding`` -- character encoding for ``str`` instances (UTF-8).

    ``default(obj)`` -- function returning a serializable version of
    ``obj`` or raising ``TypeError``; by default ``TypeError`` is raised.

    ``cls`` -- custom ``JSONEncoder`` subclass (e.g. one overriding
    ``.default()``).
    """
    if (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw):
        # Fast path: reuse the shared module-level encoder.
        return _default_encoder.encode(obj)
    encoder = (cls or JSONEncoder)(skipkeys=skipkeys,
        ensure_ascii=ensure_ascii, check_circular=check_circular,
        allow_nan=allow_nan, indent=indent, separators=separators,
        encoding=encoding, default=default, **kw)
    return encoder.encode(obj)
# Shared decoder reused by load()/loads() on the all-defaults fast path.
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize the JSON document read from ``fp`` (a
    ``.read()``-supporting file-like object) to a Python object.

    ``encoding`` -- required when the contents of ``fp`` use an
    ASCII-based encoding other than utf-8 (e.g. latin-1).  Non-ASCII-based
    encodings (such as UCS-2) are not allowed; wrap the stream with
    ``codecs.getreader(fp)(encoding)`` or decode to ``unicode`` and use
    ``loads()`` instead.

    ``object_hook`` -- called with every decoded object literal (a
    ``dict``); its return value replaces the dict (e.g. for JSON-RPC
    class hinting).

    ``cls`` -- custom ``JSONDecoder`` subclass to use.

    All arguments are forwarded verbatim to ``loads()``.
    """
    return loads(fp.read(), encoding=encoding, cls=cls,
        object_hook=object_hook, parse_float=parse_float,
        parse_int=parse_int, parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    ``encoding`` -- required when ``s`` is a ``str`` encoded with an
    ASCII-based encoding other than utf-8 (e.g. latin-1).  Non-ASCII-based
    encodings (such as UCS-2) are not allowed; decode to ``unicode`` first.

    ``object_hook`` -- called with every decoded object literal (a
    ``dict``); its return value replaces the dict (e.g. for JSON-RPC
    class hinting).

    ``parse_float`` / ``parse_int`` -- called with the literal text of
    every JSON real/int; default to ``float`` / ``int`` (e.g. pass
    ``decimal.Decimal`` for exact reals).

    ``parse_constant`` -- called with one of the strings '-Infinity',
    'Infinity' or 'NaN'; may raise to reject these non-standard values.
    (Doc fix: it is NOT called for null/true/false as previously stated --
    the scanner only consults it for the three float constants.)

    ``cls`` -- custom ``JSONDecoder`` subclass to use.
    """
    if (cls is None and encoding is None and object_hook is None and
        parse_int is None and parse_float is None and
        parse_constant is None and not kw):
        # Fast path: all defaults, reuse the shared decoder.
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Only forward hooks the caller actually supplied, so the decoder
    # class's own defaults still apply to the rest.
    for name, hook in (('object_hook', object_hook),
                       ('parse_float', parse_float),
                       ('parse_int', parse_int),
                       ('parse_constant', parse_constant)):
        if hook is not None:
            kw[name] = hook
    return cls(encoding=encoding, **kw).decode(s)
| Python |
r"""Using simplejson from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import simplejson
def main():
    """Read a JSON document from stdin or a file, validate it, and
    pretty-print it (sorted keys, indent 4) to stdout or a file."""
    import sys
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit("%s [infile [outfile]]" % (sys.argv[0],))
    try:
        obj = simplejson.load(infile)
    except ValueError, e:
        # Invalid JSON: exit non-zero with the decoder's message.
        raise SystemExit(e)
    simplejson.dump(obj, outfile, sort_keys=True, indent=4)
    outfile.write('\n')
if __name__ == '__main__':
main()
| Python |
"""JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
# Matches a JSON number: integer part, optional fraction, optional
# exponent.  The three groups are kept separate so ints and reals can be
# routed to different constructors.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))

def py_make_scanner(context):
    """Build a ``scan_once(string, idx)`` callable from *context*.

    *context* is a JSONDecoder-like object; its parser callbacks and
    options are hoisted into locals here so the returned closure avoids
    attribute lookups on the hot path.  ``scan_once`` returns a
    ``(value, end_index)`` pair, or raises StopIteration when no JSON
    value starts at ``idx``.
    """
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook

    def _scan_once(string, idx):
        try:
            nextchar = string[idx]
        except IndexError:
            # Ran off the end of the input.
            raise StopIteration
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        if nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict,
                                _scan_once, object_hook)
        if nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        if nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        if nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        if nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        number = match_number(string, idx)
        if number is not None:
            integer, frac, exp = number.groups()
            if frac or exp:
                value = parse_float(integer + (frac or '') + (exp or ''))
            else:
                value = parse_int(integer)
            return value, number.end()
        if nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        if nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        if nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        raise StopIteration

    return _scan_once
# Prefer the C scanner from _speedups when it imported successfully.
make_scanner = c_make_scanner or py_make_scanner
| Python |
#!/usr/bin/python2.4
'''Load the latest update for a Twitter user and leave it in an XHTML fragment'''
__author__ = 'dewitt@google.com'
import codecs
import getopt
import sys
import twitter
TEMPLATE = """
<div class="twitter">
<span class="twitter-user"><a href="http://twitter.com/%s">Twitter</a>: </span>
<span class="twitter-text">%s</span>
<span class="twitter-relative-created-at"><a href="http://twitter.com/%s/statuses/%s">Posted %s</a></span>
</div>
"""
def Usage():
  """Print command-line help for this script to stdout."""
  print 'Usage: %s [options] twitterid' % __file__
  print
  print '  This script fetches a users latest twitter update and stores'
  print '  the result in a file as an XHTML fragment'
  print
  print '  Options:'
  print '    --help -h : print this help'
  print '    --output : the output file [default: stdout]'
def FetchTwitter(user, output):
  """Fetch *user*'s most recent status and render it through TEMPLATE.

  Args:
    user: twitter screen name; must be non-empty.
    output: output file path, or a falsy value to print to stdout.
  """
  assert user
  # Only the single most recent status is requested.
  statuses = twitter.Api().GetUserTimeline(user=user, count=1)
  s = statuses[0]
  xhtml = TEMPLATE % (s.user.screen_name, s.text, s.user.screen_name, s.id, s.relative_created_at)
  if output:
    Save(xhtml, output)
  else:
    print xhtml
def Save(xhtml, output):
  """Write *xhtml* to the file *output* as pure ASCII.

  Non-ASCII characters are written as XML character references
  (errors='xmlcharrefreplace'), so the fragment stays ASCII-safe.
  The handle is now closed in a finally block so it is not leaked when
  the write itself raises (the original leaked it on error).
  """
  out = codecs.open(output, mode='w', encoding='ascii',
                    errors='xmlcharrefreplace')
  try:
    out.write(xhtml)
  finally:
    out.close()
def main():
  """Parse command-line options and fetch the user's latest status."""
  try:
    # BUG FIX: '-o' takes an argument, so the short-option spec must be
    # 'ho:'; with the original 'ho', getopt treated '-o' as a flag and
    # its value was silently lost.
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'ho:', ['help', 'output='])
  except getopt.GetoptError:
    Usage()
    sys.exit(2)
  try:
    user = args[0]
  except IndexError:
    # No twitter id supplied (narrowed from a bare except).
    Usage()
    sys.exit(2)
  output = None
  for o, a in opts:
    if o in ("-h", "--help"):
      Usage()
      sys.exit(2)
    if o in ("-o", "--output"):
      output = a
  FetchTwitter(user, output)
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/python2.4
'''Post a message to twitter'''
__author__ = 'dewitt@google.com'
import ConfigParser
import getopt
import os
import sys
import twitter
USAGE = '''Usage: tweet [options] message
This script posts a message to Twitter.
Options:
-h --help : print this help
--consumer-key : the twitter consumer key
--consumer-secret : the twitter consumer secret
--access-key : the twitter access token key
--access-secret : the twitter access token secret
--encoding : the character set encoding used in input strings, e.g. "utf-8". [optional]
Documentation:
If either of the command line flags are not present, the environment
variables TWEETUSERNAME and TWEETPASSWORD will then be checked for your
consumer_key or consumer_secret, respectively.
If neither the command line flags nor the enviroment variables are
present, the .tweetrc file, if it exists, can be used to set the
default consumer_key and consumer_secret. The file should contain the
following three lines, replacing *consumer_key* with your consumer key, and
*consumer_secret* with your consumer secret:
A skeletal .tweetrc file:
[Tweet]
consumer_key: *consumer_key*
consumer_secret: *consumer_password*
access_key: *access_key*
access_secret: *access_password*
'''
def PrintUsageAndExit():
  """Print the usage text and exit with status 2."""
  print USAGE
  sys.exit(2)
def GetConsumerKeyEnv():
  """Consumer key from the TWEETUSERNAME environment variable, or None."""
  return os.environ.get("TWEETUSERNAME")

def GetConsumerSecretEnv():
  """Consumer secret from the TWEETPASSWORD environment variable, or None."""
  return os.environ.get("TWEETPASSWORD")

def GetAccessKeyEnv():
  """Access token key from the TWEETACCESSKEY environment variable, or None."""
  return os.environ.get("TWEETACCESSKEY")

def GetAccessSecretEnv():
  """Access token secret from the TWEETACCESSSECRET environment variable, or None."""
  return os.environ.get("TWEETACCESSSECRET")
class TweetRc(object):
  """Reads Twitter credentials from the ~/.tweetrc config file.

  The file is expected to contain a [Tweet] section with consumer_key,
  consumer_secret, access_key and access_secret options.
  """
  def __init__(self):
    # Parsed ConfigParser instance, created lazily by _GetConfig().
    self._config = None
  def GetConsumerKey(self):
    """Return the consumer_key option, or None if unavailable."""
    return self._GetOption('consumer_key')
  def GetConsumerSecret(self):
    """Return the consumer_secret option, or None if unavailable."""
    return self._GetOption('consumer_secret')
  def GetAccessKey(self):
    """Return the access_key option, or None if unavailable."""
    return self._GetOption('access_key')
  def GetAccessSecret(self):
    """Return the access_secret option, or None if unavailable."""
    return self._GetOption('access_secret')
  def _GetOption(self, option):
    """Look up *option* in the [Tweet] section; None when missing.

    Narrowed from a bare ``except:`` so that only expected config-file
    problems (missing file/section/option, parse errors) are swallowed
    instead of every exception.
    """
    try:
      return self._GetConfig().get('Tweet', option)
    except ConfigParser.Error:
      return None
  def _GetConfig(self):
    """Lazily parse and cache ~/.tweetrc."""
    if not self._config:
      self._config = ConfigParser.ConfigParser()
      self._config.read(os.path.expanduser('~/.tweetrc'))
    return self._config
def main():
try:
shortflags = 'h'
longflags = ['help', 'consumer-key=', 'consumer-secret=',
'access-key=', 'access-secret=', 'encoding=']
opts, args = getopt.gnu_getopt(sys.argv[1:], shortflags, longflags)
except getopt.GetoptError:
PrintUsageAndExit()
consumer_keyflag = None
consumer_secretflag = None
access_keyflag = None
access_secretflag = None
encoding = None
for o, a in opts:
if o in ("-h", "--help"):
PrintUsageAndExit()
if o in ("--consumer-key"):
consumer_keyflag = a
if o in ("--consumer-secret"):
consumer_secretflag = a
if o in ("--access-key"):
access_keyflag = a
if o in ("--access-secret"):
access_secretflag = a
if o in ("--encoding"):
encoding = a
message = ' '.join(args)
if not message:
PrintUsageAndExit()
rc = TweetRc()
consumer_key = consumer_keyflag or GetConsumerKeyEnv() or rc.GetConsumerKey()
consumer_secret = consumer_secretflag or GetConsumerSecretEnv() or rc.GetConsumerSecret()
access_key = access_keyflag or GetAccessKeyEnv() or rc.GetAccessKey()
access_secret = access_secretflag or GetAccessSecretEnv() or rc.GetAccessSecret()
if not consumer_key or not consumer_secret or not access_key or not access_secret:
PrintUsageAndExit()
api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret,
access_token_key=access_key, access_token_secret=access_secret,
input_encoding=encoding)
try:
status = api.PostUpdate(message)
except UnicodeDecodeError:
print "Your message could not be encoded. Perhaps it contains non-ASCII characters? "
print "Try explicitly specifying the encoding with the --encoding flag"
sys.exit(2)
print "%s just posted: %s" % (status.user.name, status.text)
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A class that defines the default URL Shortener.
TinyURL is provided as the default and as an example.
'''
import urllib
# Change History
#
# 2010-05-16
# TinyURL example and the idea for this comes from a bug filed by
# acolorado with patch provided by ghills. Class implementation
# was done by bear.
#
# Issue 19 http://code.google.com/p/python-twitter/issues/detail?id=19
#
class ShortenURL(object):
  '''Helper class to make URL Shortener calls if/when required'''
  def __init__(self,
               userid=None,
               password=None):
    '''Instantiate a new ShortenURL object

    Args:
      userid:   userid for any required authorization call [optional]
      password: password for any required authorization call [optional]
    '''
    self.userid = userid
    self.password = password
  def Shorten(self,
              longURL):
    '''Call the TinyURL API and return the shortened URL result

    Args:
      longURL: URL string to shorten

    Returns:
      The shortened URL as a string

    Note:
      longURL is required.  It is now percent-encoded before being sent,
      so URLs containing '&', '#' or spaces are no longer truncated by
      the API (the original interpolated the raw URL into the query
      string, so everything after the first '&' was dropped).
    '''
    result = None
    # safe='' also escapes '/' and ':' so the whole URL survives as a
    # single query-parameter value.
    f = urllib.urlopen("http://tinyurl.com/api-create.php?url=%s"
                       % urllib.quote(longURL, safe=''))
    try:
      result = f.read()
    finally:
      f.close()
    return result
| Python |
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A library that provides a Python interface to the Twitter API'''
__author__ = 'python-twitter@googlegroups.com'
__version__ = '0.8.5'
import calendar
import datetime
import httplib
import os
import rfc822
import sys
import tempfile
import textwrap
import time
import urllib
import urllib2
import urlparse
import gzip
import StringIO
try:
# Python >= 2.6
import json as simplejson
except ImportError:
try:
# Python < 2.6
import simplejson
except ImportError:
try:
# Google App Engine
from django.utils import simplejson
except ImportError:
raise ImportError, "Unable to load a json library"
# parse_qsl moved to urlparse module in v2.6
try:
from urlparse import parse_qsl, parse_qs
except ImportError:
from cgi import parse_qsl, parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
import oauth2 as oauth
# Maximum tweet length, in characters, enforced by the Twitter API.
CHARACTER_LIMIT = 140
# A singleton representing a lazily instantiated FileCache.
DEFAULT_CACHE = object()
# OAuth 1.0a endpoints used for the token-acquisition dance.
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL  = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL        = 'https://api.twitter.com/oauth/authenticate'
class TwitterError(Exception):
  '''Base class for all errors raised by the twitter package.'''

  @property
  def message(self):
    '''The first positional argument this error was constructed with.'''
    first_argument = self.args[0]
    return first_argument
class Status(object):
  '''A class representing the Status structure used by the twitter API.

  The Status structure exposes the following properties:

    status.created_at
    status.created_at_in_seconds # read only
    status.favorited
    status.in_reply_to_screen_name
    status.in_reply_to_user_id
    status.in_reply_to_status_id
    status.truncated
    status.source
    status.id
    status.text
    status.location
    status.relative_created_at # read only
    status.user
    status.urls
    status.user_mentions
    status.hashtags
    status.geo
    status.place
    status.coordinates
    status.contributors
  '''

  def __init__(self,
               created_at=None,
               favorited=None,
               id=None,
               text=None,
               location=None,
               user=None,
               in_reply_to_screen_name=None,
               in_reply_to_user_id=None,
               in_reply_to_status_id=None,
               truncated=None,
               source=None,
               now=None,
               urls=None,
               user_mentions=None,
               hashtags=None,
               media=None,
               geo=None,
               place=None,
               coordinates=None,
               contributors=None,
               retweeted=None,
               retweeted_status=None,
               retweet_count=None):
    '''An object to hold a Twitter status message.

    This class is normally instantiated by the twitter.Api class and
    returned in a sequence.

    Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"

    Args:
      created_at:
        The time this status message was posted. [Optional]
      favorited:
        Whether this is a favorite of the authenticated user. [Optional]
      id:
        The unique id of this status message. [Optional]
      text:
        The text of this status message. [Optional]
      location:
        The geolocation string associated with this message. [Optional]
      user:
        A twitter.User instance representing the person posting the
        message. [Optional]
      in_reply_to_screen_name:
        The in_reply_to_screen_name value from the API. [Optional]
      in_reply_to_user_id:
        The in_reply_to_user_id value from the API. [Optional]
      in_reply_to_status_id:
        The in_reply_to_status_id value from the API. [Optional]
      truncated:
        The truncated flag from the API. [Optional]
      source:
        The source string from the API. [Optional]
      now:
        The current time, if the client chooses to set it.
        Defaults to the wall clock time. [Optional]
      urls:
        A list of twitter.Url entities for the message. [Optional]
      user_mentions:
        A list of twitter.User entities mentioned in the message. [Optional]
      hashtags:
        A list of twitter.Hashtag entities for the message. [Optional]
      media:
        The raw media entities for the message. [Optional]
      geo:
        The geo value from the API. [Optional]
      place:
        The place value from the API. [Optional]
      coordinates:
        The coordinates value from the API. [Optional]
      contributors:
        The contributors value from the API. [Optional]
      retweeted:
        The retweeted flag from the API. [Optional]
      retweeted_status:
        A twitter.Status instance for the original, retweeted
        status. [Optional]
      retweet_count:
        The number of times this status has been retweeted. [Optional]
    '''
    self.created_at = created_at
    self.favorited = favorited
    self.id = id
    self.text = text
    self.location = location
    self.user = user
    self.now = now
    self.in_reply_to_screen_name = in_reply_to_screen_name
    self.in_reply_to_user_id = in_reply_to_user_id
    self.in_reply_to_status_id = in_reply_to_status_id
    self.truncated = truncated
    self.retweeted = retweeted
    self.source = source
    self.urls = urls
    self.user_mentions = user_mentions
    self.hashtags = hashtags
    self.media = media
    self.geo = geo
    self.place = place
    self.coordinates = coordinates
    self.contributors = contributors
    self.retweeted_status = retweeted_status
    self.retweet_count = retweet_count

  def GetCreatedAt(self):
    '''Get the time this status message was posted.

    Returns:
      The time this status message was posted
    '''
    return self._created_at

  def SetCreatedAt(self, created_at):
    '''Set the time this status message was posted.

    Args:
      created_at:
        The time this status message was created
    '''
    self._created_at = created_at

  created_at = property(GetCreatedAt, SetCreatedAt,
                        doc='The time this status message was posted.')

  def GetCreatedAtInSeconds(self):
    '''Get the time this status message was posted, in seconds since the epoch.

    Returns:
      The time this status message was posted, in seconds since the epoch.
    '''
    return calendar.timegm(rfc822.parsedate(self.created_at))

  created_at_in_seconds = property(GetCreatedAtInSeconds,
                                   doc="The time this status message was "
                                       "posted, in seconds since the epoch")

  def GetFavorited(self):
    '''Get the favorited setting of this status message.

    Returns:
      True if this status message is favorited; False otherwise
    '''
    return self._favorited

  def SetFavorited(self, favorited):
    '''Set the favorited state of this status message.

    Args:
      favorited:
        boolean True/False favorited state of this status message
    '''
    self._favorited = favorited

  favorited = property(GetFavorited, SetFavorited,
                       doc='The favorited state of this status message.')

  def GetId(self):
    '''Get the unique id of this status message.

    Returns:
      The unique id of this status message
    '''
    return self._id

  def SetId(self, id):
    '''Set the unique id of this status message.

    Args:
      id:
        The unique id of this status message
    '''
    self._id = id

  id = property(GetId, SetId,
                doc='The unique id of this status message.')

  def GetInReplyToScreenName(self):
    '''Get the in_reply_to_screen_name value of this status message.'''
    return self._in_reply_to_screen_name

  def SetInReplyToScreenName(self, in_reply_to_screen_name):
    '''Set the in_reply_to_screen_name value of this status message.'''
    self._in_reply_to_screen_name = in_reply_to_screen_name

  in_reply_to_screen_name = property(GetInReplyToScreenName, SetInReplyToScreenName,
                                     doc='The in_reply_to_screen_name of this status message.')

  def GetInReplyToUserId(self):
    '''Get the in_reply_to_user_id value of this status message.'''
    return self._in_reply_to_user_id

  def SetInReplyToUserId(self, in_reply_to_user_id):
    '''Set the in_reply_to_user_id value of this status message.'''
    self._in_reply_to_user_id = in_reply_to_user_id

  in_reply_to_user_id = property(GetInReplyToUserId, SetInReplyToUserId,
                                 doc='The in_reply_to_user_id of this status message.')

  def GetInReplyToStatusId(self):
    '''Get the in_reply_to_status_id value of this status message.'''
    return self._in_reply_to_status_id

  def SetInReplyToStatusId(self, in_reply_to_status_id):
    '''Set the in_reply_to_status_id value of this status message.'''
    self._in_reply_to_status_id = in_reply_to_status_id

  in_reply_to_status_id = property(GetInReplyToStatusId, SetInReplyToStatusId,
                                   doc='The in_reply_to_status_id of this status message.')

  def GetTruncated(self):
    '''Get the truncated flag of this status message.'''
    return self._truncated

  def SetTruncated(self, truncated):
    '''Set the truncated flag of this status message.'''
    self._truncated = truncated

  truncated = property(GetTruncated, SetTruncated,
                       doc='The truncated flag of this status message.')

  def GetRetweeted(self):
    '''Get the retweeted flag of this status message.'''
    return self._retweeted

  def SetRetweeted(self, retweeted):
    '''Set the retweeted flag of this status message.'''
    self._retweeted = retweeted

  retweeted = property(GetRetweeted, SetRetweeted,
                       doc='The retweeted flag of this status message.')

  def GetSource(self):
    '''Get the source of this status message.'''
    return self._source

  def SetSource(self, source):
    '''Set the source of this status message.'''
    self._source = source

  source = property(GetSource, SetSource,
                    doc='The source of this status message.')

  def GetText(self):
    '''Get the text of this status message.

    Returns:
      The text of this status message.
    '''
    return self._text

  def SetText(self, text):
    '''Set the text of this status message.

    Args:
      text:
        The text of this status message
    '''
    self._text = text

  text = property(GetText, SetText,
                  doc='The text of this status message')

  def GetLocation(self):
    '''Get the geolocation associated with this status message

    Returns:
      The geolocation string of this status message.
    '''
    return self._location

  def SetLocation(self, location):
    '''Set the geolocation associated with this status message

    Args:
      location:
        The geolocation string of this status message
    '''
    self._location = location

  location = property(GetLocation, SetLocation,
                      doc='The geolocation string of this status message')

  def GetRelativeCreatedAt(self):
    '''Get a human readable string representing the posting time

    Returns:
      A human readable string representing the posting time
    '''
    # The fudge factor widens each bucket slightly so borderline deltas
    # read naturally (e.g. 59s still says "about a minute ago").
    fudge = 1.25
    delta = long(self.now) - long(self.created_at_in_seconds)
    if delta < (1 * fudge):
      return 'about a second ago'
    elif delta < (60 * (1/fudge)):
      return 'about %d seconds ago' % (delta)
    elif delta < (60 * fudge):
      return 'about a minute ago'
    elif delta < (60 * 60 * (1/fudge)):
      return 'about %d minutes ago' % (delta / 60)
    elif delta < (60 * 60 * fudge) or delta / (60 * 60) == 1:
      return 'about an hour ago'
    elif delta < (60 * 60 * 24 * (1/fudge)):
      return 'about %d hours ago' % (delta / (60 * 60))
    elif delta < (60 * 60 * 24 * fudge) or delta / (60 * 60 * 24) == 1:
      return 'about a day ago'
    else:
      return 'about %d days ago' % (delta / (60 * 60 * 24))

  relative_created_at = property(GetRelativeCreatedAt,
                                 doc='Get a human readable string representing '
                                     'the posting time')

  def GetUser(self):
    '''Get a twitter.User representing the entity posting this status message.

    Returns:
      A twitter.User representing the entity posting this status message
    '''
    return self._user

  def SetUser(self, user):
    '''Set a twitter.User representing the entity posting this status message.

    Args:
      user:
        A twitter.User representing the entity posting this status message
    '''
    self._user = user

  user = property(GetUser, SetUser,
                  doc='A twitter.User representing the entity posting this '
                      'status message')

  def GetNow(self):
    '''Get the wallclock time for this status message.

    Used to calculate relative_created_at.  Defaults to the time
    the object was instantiated.

    Returns:
      Whatever the status instance believes the current time to be,
      in seconds since the epoch.
    '''
    # Lazily initialized so a Status created with now=None picks up the
    # wall clock the first time it is asked.
    if self._now is None:
      self._now = time.time()
    return self._now

  def SetNow(self, now):
    '''Set the wallclock time for this status message.

    Used to calculate relative_created_at.  Defaults to the time
    the object was instantiated.

    Args:
      now:
        The wallclock time for this instance.
    '''
    self._now = now

  now = property(GetNow, SetNow,
                 doc='The wallclock time for this status instance.')

  def GetGeo(self):
    '''Get the geo value of this status message.'''
    return self._geo

  def SetGeo(self, geo):
    '''Set the geo value of this status message.'''
    self._geo = geo

  geo = property(GetGeo, SetGeo,
                 doc='The geo value of this status message.')

  def GetPlace(self):
    '''Get the place value of this status message.'''
    return self._place

  def SetPlace(self, place):
    '''Set the place value of this status message.'''
    self._place = place

  place = property(GetPlace, SetPlace,
                   doc='The place value of this status message.')

  def GetCoordinates(self):
    '''Get the coordinates value of this status message.'''
    return self._coordinates

  def SetCoordinates(self, coordinates):
    '''Set the coordinates value of this status message.'''
    self._coordinates = coordinates

  coordinates = property(GetCoordinates, SetCoordinates,
                         doc='The coordinates value of this status message.')

  def GetContributors(self):
    '''Get the contributors value of this status message.'''
    return self._contributors

  def SetContributors(self, contributors):
    '''Set the contributors value of this status message.'''
    self._contributors = contributors

  contributors = property(GetContributors, SetContributors,
                          doc='The contributors value of this status message.')

  def GetRetweeted_status(self):
    '''Get the original status this status retweets, if any.'''
    return self._retweeted_status

  def SetRetweeted_status(self, retweeted_status):
    '''Set the original status this status retweets.'''
    self._retweeted_status = retweeted_status

  retweeted_status = property(GetRetweeted_status, SetRetweeted_status,
                              doc='The retweeted status of this status message.')

  def GetRetweetCount(self):
    '''Get the number of times this status has been retweeted.'''
    return self._retweet_count

  def SetRetweetCount(self, retweet_count):
    '''Set the number of times this status has been retweeted.'''
    self._retweet_count = retweet_count

  retweet_count = property(GetRetweetCount, SetRetweetCount,
                           doc='The retweet count of this status message.')

  def __ne__(self, other):
    return not self.__eq__(other)

  def __eq__(self, other):
    try:
      return other and \
             self.created_at == other.created_at and \
             self.id == other.id and \
             self.text == other.text and \
             self.location == other.location and \
             self.user == other.user and \
             self.in_reply_to_screen_name == other.in_reply_to_screen_name and \
             self.in_reply_to_user_id == other.in_reply_to_user_id and \
             self.in_reply_to_status_id == other.in_reply_to_status_id and \
             self.truncated == other.truncated and \
             self.retweeted == other.retweeted and \
             self.favorited == other.favorited and \
             self.source == other.source and \
             self.geo == other.geo and \
             self.place == other.place and \
             self.coordinates == other.coordinates and \
             self.contributors == other.contributors and \
             self.retweeted_status == other.retweeted_status and \
             self.retweet_count == other.retweet_count
    except AttributeError:
      return False

  def __str__(self):
    '''A string representation of this twitter.Status instance.

    The return value is the same as the JSON string representation.

    Returns:
      A string representation of this twitter.Status instance.
    '''
    return self.AsJsonString()

  def AsJsonString(self):
    '''A JSON string representation of this twitter.Status instance.

    Returns:
      A JSON string representation of this twitter.Status instance
    '''
    return simplejson.dumps(self.AsDict(), sort_keys=True)

  def AsDict(self):
    '''A dict representation of this twitter.Status instance.

    The return value uses the same key names as the JSON representation.
    Most attributes are only included when they are truthy; truncated,
    retweeted and favorited are included whenever they are not None, so
    an explicit False survives the round trip.

    Return:
      A dict representing this twitter.Status instance
    '''
    data = {}
    if self.created_at:
      data['created_at'] = self.created_at
    if self.id:
      data['id'] = self.id
    if self.text:
      data['text'] = self.text
    if self.location:
      data['location'] = self.location
    if self.user:
      data['user'] = self.user.AsDict()
    if self.in_reply_to_screen_name:
      data['in_reply_to_screen_name'] = self.in_reply_to_screen_name
    if self.in_reply_to_user_id:
      data['in_reply_to_user_id'] = self.in_reply_to_user_id
    if self.in_reply_to_status_id:
      data['in_reply_to_status_id'] = self.in_reply_to_status_id
    if self.truncated is not None:
      data['truncated'] = self.truncated
    if self.retweeted is not None:
      data['retweeted'] = self.retweeted
    # Fixed: 'favorited' used to be written twice, once under a truthiness
    # check and once under this is-not-None check; only the latter is kept.
    if self.favorited is not None:
      data['favorited'] = self.favorited
    if self.source:
      data['source'] = self.source
    if self.geo:
      data['geo'] = self.geo
    if self.place:
      data['place'] = self.place
    if self.coordinates:
      data['coordinates'] = self.coordinates
    if self.contributors:
      data['contributors'] = self.contributors
    if self.hashtags:
      data['hashtags'] = [h.text for h in self.hashtags]
    if self.retweeted_status:
      data['retweeted_status'] = self.retweeted_status.AsDict()
    if self.retweet_count:
      data['retweet_count'] = self.retweet_count
    if self.urls:
      data['urls'] = dict([(url.url, url.expanded_url) for url in self.urls])
    if self.user_mentions:
      data['user_mentions'] = [um.AsDict() for um in self.user_mentions]
    return data

  @staticmethod
  def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict.

    Args:
      data: A JSON dict, as converted from the JSON in the twitter API

    Returns:
      A twitter.Status instance
    '''
    if 'user' in data:
      user = User.NewFromJsonDict(data['user'])
    else:
      user = None
    if 'retweeted_status' in data:
      retweeted_status = Status.NewFromJsonDict(data['retweeted_status'])
    else:
      retweeted_status = None
    urls = None
    user_mentions = None
    hashtags = None
    media = None
    if 'entities' in data:
      if 'urls' in data['entities']:
        urls = [Url.NewFromJsonDict(u) for u in data['entities']['urls']]
      if 'user_mentions' in data['entities']:
        user_mentions = [User.NewFromJsonDict(u) for u in data['entities']['user_mentions']]
      if 'hashtags' in data['entities']:
        hashtags = [Hashtag.NewFromJsonDict(h) for h in data['entities']['hashtags']]
      if 'media' in data['entities']:
        media = data['entities']['media']
      else:
        media = []
    return Status(created_at=data.get('created_at', None),
                  favorited=data.get('favorited', None),
                  id=data.get('id', None),
                  text=data.get('text', None),
                  location=data.get('location', None),
                  in_reply_to_screen_name=data.get('in_reply_to_screen_name', None),
                  in_reply_to_user_id=data.get('in_reply_to_user_id', None),
                  in_reply_to_status_id=data.get('in_reply_to_status_id', None),
                  truncated=data.get('truncated', None),
                  retweeted=data.get('retweeted', None),
                  source=data.get('source', None),
                  user=user,
                  urls=urls,
                  user_mentions=user_mentions,
                  hashtags=hashtags,
                  media=media,
                  geo=data.get('geo', None),
                  place=data.get('place', None),
                  coordinates=data.get('coordinates', None),
                  contributors=data.get('contributors', None),
                  retweeted_status=retweeted_status,
                  retweet_count=data.get('retweet_count', None))
class User(object):
  '''A class representing the User structure used by the twitter API.

  The User structure exposes the following properties:

    user.id
    user.name
    user.screen_name
    user.location
    user.description
    user.profile_image_url
    user.profile_background_tile
    user.profile_background_image_url
    user.profile_sidebar_fill_color
    user.profile_background_color
    user.profile_link_color
    user.profile_text_color
    user.protected
    user.utc_offset
    user.time_zone
    user.url
    user.status
    user.statuses_count
    user.followers_count
    user.friends_count
    user.favourites_count
    user.geo_enabled
    user.verified
    user.lang
    user.notifications
    user.contributors_enabled
    user.created_at
    user.listed_count
  '''

  def __init__(self,
               id=None,
               name=None,
               screen_name=None,
               location=None,
               description=None,
               profile_image_url=None,
               profile_background_tile=None,
               profile_background_image_url=None,
               profile_sidebar_fill_color=None,
               profile_background_color=None,
               profile_link_color=None,
               profile_text_color=None,
               protected=None,
               utc_offset=None,
               time_zone=None,
               followers_count=None,
               friends_count=None,
               statuses_count=None,
               favourites_count=None,
               url=None,
               status=None,
               geo_enabled=None,
               verified=None,
               lang=None,
               notifications=None,
               contributors_enabled=None,
               created_at=None,
               listed_count=None):
    '''An object to hold a Twitter user.

    This class is normally instantiated by the twitter.Api class and
    returned in a sequence.  Every argument is optional and defaults to
    None; each one simply initializes the like-named property.
    '''
    self.id = id
    self.name = name
    self.screen_name = screen_name
    self.location = location
    self.description = description
    self.profile_image_url = profile_image_url
    self.profile_background_tile = profile_background_tile
    self.profile_background_image_url = profile_background_image_url
    self.profile_sidebar_fill_color = profile_sidebar_fill_color
    self.profile_background_color = profile_background_color
    self.profile_link_color = profile_link_color
    self.profile_text_color = profile_text_color
    self.protected = protected
    self.utc_offset = utc_offset
    self.time_zone = time_zone
    self.followers_count = followers_count
    self.friends_count = friends_count
    self.statuses_count = statuses_count
    self.favourites_count = favourites_count
    self.url = url
    self.status = status
    self.geo_enabled = geo_enabled
    self.verified = verified
    self.lang = lang
    self.notifications = notifications
    self.contributors_enabled = contributors_enabled
    self.created_at = created_at
    self.listed_count = listed_count

  def GetId(self):
    '''Get the unique id of this user.

    Returns:
      The unique id of this user
    '''
    return self._id

  def SetId(self, id):
    '''Set the unique id of this user.

    Args:
      id: The unique id of this user.
    '''
    self._id = id

  id = property(GetId, SetId,
                doc='The unique id of this user.')

  def GetName(self):
    '''Get the real name of this user.

    Returns:
      The real name of this user
    '''
    return self._name

  def SetName(self, name):
    '''Set the real name of this user.

    Args:
      name: The real name of this user
    '''
    self._name = name

  name = property(GetName, SetName,
                  doc='The real name of this user.')

  def GetScreenName(self):
    '''Get the short twitter name of this user.

    Returns:
      The short twitter name of this user
    '''
    return self._screen_name

  def SetScreenName(self, screen_name):
    '''Set the short twitter name of this user.

    Args:
      screen_name: the short twitter name of this user
    '''
    self._screen_name = screen_name

  screen_name = property(GetScreenName, SetScreenName,
                         doc='The short twitter name of this user.')

  def GetLocation(self):
    '''Get the geographic location of this user.

    Returns:
      The geographic location of this user
    '''
    return self._location

  def SetLocation(self, location):
    '''Set the geographic location of this user.

    Args:
      location: The geographic location of this user
    '''
    self._location = location

  location = property(GetLocation, SetLocation,
                      doc='The geographic location of this user.')

  def GetDescription(self):
    '''Get the short text description of this user.

    Returns:
      The short text description of this user
    '''
    return self._description

  def SetDescription(self, description):
    '''Set the short text description of this user.

    Args:
      description: The short text description of this user
    '''
    self._description = description

  description = property(GetDescription, SetDescription,
                         doc='The short text description of this user.')

  def GetUrl(self):
    '''Get the homepage url of this user.

    Returns:
      The homepage url of this user
    '''
    return self._url

  def SetUrl(self, url):
    '''Set the homepage url of this user.

    Args:
      url: The homepage url of this user
    '''
    self._url = url

  url = property(GetUrl, SetUrl,
                 doc='The homepage url of this user.')

  def GetProfileImageUrl(self):
    '''Get the url of the thumbnail of this user.

    Returns:
      The url of the thumbnail of this user
    '''
    return self._profile_image_url

  def SetProfileImageUrl(self, profile_image_url):
    '''Set the url of the thumbnail of this user.

    Args:
      profile_image_url: The url of the thumbnail of this user
    '''
    self._profile_image_url = profile_image_url

  profile_image_url = property(GetProfileImageUrl, SetProfileImageUrl,
                               doc='The url of the thumbnail of this user.')

  def GetProfileBackgroundTile(self):
    '''Boolean for whether to tile the profile background image.

    Returns:
      True if the background is to be tiled, False if not, None if unset.
    '''
    return self._profile_background_tile

  def SetProfileBackgroundTile(self, profile_background_tile):
    '''Set the boolean flag for whether to tile the profile background image.

    Args:
      profile_background_tile: Boolean flag for whether to tile or not.
    '''
    self._profile_background_tile = profile_background_tile

  profile_background_tile = property(GetProfileBackgroundTile, SetProfileBackgroundTile,
                                     doc='Boolean for whether to tile the background image.')

  def GetProfileBackgroundImageUrl(self):
    '''Get the url of the profile background image of this user.'''
    return self._profile_background_image_url

  def SetProfileBackgroundImageUrl(self, profile_background_image_url):
    '''Set the url of the profile background image of this user.'''
    self._profile_background_image_url = profile_background_image_url

  profile_background_image_url = property(GetProfileBackgroundImageUrl, SetProfileBackgroundImageUrl,
                                          doc='The url of the profile background of this user.')

  def GetProfileSidebarFillColor(self):
    '''Get the profile sidebar fill color of this user.'''
    return self._profile_sidebar_fill_color

  def SetProfileSidebarFillColor(self, profile_sidebar_fill_color):
    '''Set the profile sidebar fill color of this user.'''
    self._profile_sidebar_fill_color = profile_sidebar_fill_color

  profile_sidebar_fill_color = property(GetProfileSidebarFillColor, SetProfileSidebarFillColor,
                                        doc='The profile sidebar fill color of this user.')

  def GetProfileBackgroundColor(self):
    '''Get the profile background color of this user.'''
    return self._profile_background_color

  def SetProfileBackgroundColor(self, profile_background_color):
    '''Set the profile background color of this user.'''
    self._profile_background_color = profile_background_color

  profile_background_color = property(GetProfileBackgroundColor, SetProfileBackgroundColor,
                                      doc='The profile background color of this user.')

  def GetProfileLinkColor(self):
    '''Get the profile link color of this user.'''
    return self._profile_link_color

  def SetProfileLinkColor(self, profile_link_color):
    '''Set the profile link color of this user.'''
    self._profile_link_color = profile_link_color

  profile_link_color = property(GetProfileLinkColor, SetProfileLinkColor,
                                doc='The profile link color of this user.')

  def GetProfileTextColor(self):
    '''Get the profile text color of this user.'''
    return self._profile_text_color

  def SetProfileTextColor(self, profile_text_color):
    '''Set the profile text color of this user.'''
    self._profile_text_color = profile_text_color

  profile_text_color = property(GetProfileTextColor, SetProfileTextColor,
                                doc='The profile text color of this user.')

  def GetProtected(self):
    '''Get the protected flag of this user.'''
    return self._protected

  def SetProtected(self, protected):
    '''Set the protected flag of this user.'''
    self._protected = protected

  protected = property(GetProtected, SetProtected,
                       doc='The protected flag of this user.')

  def GetUtcOffset(self):
    '''Get the UTC offset, in seconds, of this user.'''
    return self._utc_offset

  def SetUtcOffset(self, utc_offset):
    '''Set the UTC offset of this user.'''
    self._utc_offset = utc_offset

  utc_offset = property(GetUtcOffset, SetUtcOffset,
                        doc='The UTC offset of this user.')

  def GetTimeZone(self):
    '''Returns the current time zone string for the user.

    Returns:
      The descriptive time zone string for the user.
    '''
    return self._time_zone

  def SetTimeZone(self, time_zone):
    '''Sets the user's time zone string.

    Args:
      time_zone:
        The descriptive time zone to assign for the user.
    '''
    self._time_zone = time_zone

  time_zone = property(GetTimeZone, SetTimeZone,
                       doc='The descriptive time zone string of this user.')

  def GetStatus(self):
    '''Get the latest twitter.Status of this user.

    Returns:
      The latest twitter.Status of this user
    '''
    return self._status

  def SetStatus(self, status):
    '''Set the latest twitter.Status of this user.

    Args:
      status:
        The latest twitter.Status of this user
    '''
    self._status = status

  status = property(GetStatus, SetStatus,
                    doc='The latest twitter.Status of this user.')

  def GetFriendsCount(self):
    '''Get the friend count for this user.

    Returns:
      The number of users this user has befriended.
    '''
    return self._friends_count

  def SetFriendsCount(self, count):
    '''Set the friend count for this user.

    Args:
      count:
        The number of users this user has befriended.
    '''
    self._friends_count = count

  friends_count = property(GetFriendsCount, SetFriendsCount,
                           doc='The number of friends for this user.')

  def GetListedCount(self):
    '''Get the listed count for this user.

    Returns:
      The number of lists this user belongs to.
    '''
    return self._listed_count

  def SetListedCount(self, count):
    '''Set the listed count for this user.

    Args:
      count:
        The number of lists this user belongs to.
    '''
    self._listed_count = count

  listed_count = property(GetListedCount, SetListedCount,
                          doc='The number of lists this user belongs to.')

  def GetFollowersCount(self):
    '''Get the follower count for this user.

    Returns:
      The number of users following this user.
    '''
    return self._followers_count

  def SetFollowersCount(self, count):
    '''Set the follower count for this user.

    Args:
      count:
        The number of users following this user.
    '''
    self._followers_count = count

  followers_count = property(GetFollowersCount, SetFollowersCount,
                             doc='The number of users following this user.')

  def GetStatusesCount(self):
    '''Get the number of status updates for this user.

    Returns:
      The number of status updates for this user.
    '''
    return self._statuses_count

  def SetStatusesCount(self, count):
    '''Set the status update count for this user.

    Args:
      count:
        The number of updates for this user.
    '''
    self._statuses_count = count

  statuses_count = property(GetStatusesCount, SetStatusesCount,
                            doc='The number of updates for this user.')

  def GetFavouritesCount(self):
    '''Get the number of favourites for this user.

    Returns:
      The number of favourites for this user.
    '''
    return self._favourites_count

  def SetFavouritesCount(self, count):
    '''Set the favourite count for this user.

    Args:
      count:
        The number of favourites for this user.
    '''
    self._favourites_count = count

  favourites_count = property(GetFavouritesCount, SetFavouritesCount,
                              doc='The number of favourites for this user.')

  def GetGeoEnabled(self):
    '''Get the setting of geo_enabled for this user.

    Returns:
      True/False if Geo tagging is enabled
    '''
    return self._geo_enabled

  def SetGeoEnabled(self, geo_enabled):
    '''Set the latest twitter.geo_enabled of this user.

    Args:
      geo_enabled:
        True/False if Geo tagging is to be enabled
    '''
    self._geo_enabled = geo_enabled

  geo_enabled = property(GetGeoEnabled, SetGeoEnabled,
                         doc='The value of twitter.geo_enabled for this user.')

  def GetVerified(self):
    '''Get the setting of verified for this user.

    Returns:
      True/False if user is a verified account
    '''
    return self._verified

  def SetVerified(self, verified):
    '''Set twitter.verified for this user.

    Args:
      verified:
        True/False if user is a verified account
    '''
    self._verified = verified

  verified = property(GetVerified, SetVerified,
                      doc='The value of twitter.verified for this user.')

  def GetLang(self):
    '''Get the setting of lang for this user.

    Returns:
      language code of the user
    '''
    return self._lang

  def SetLang(self, lang):
    '''Set twitter.lang for this user.

    Args:
      lang:
        language code for the user
    '''
    self._lang = lang

  lang = property(GetLang, SetLang,
                  doc='The value of twitter.lang for this user.')

  def GetNotifications(self):
    '''Get the setting of notifications for this user.

    Returns:
      True/False for the notifications setting of the user
    '''
    return self._notifications

  def SetNotifications(self, notifications):
    '''Set twitter.notifications for this user.

    Args:
      notifications:
        True/False notifications setting for the user
    '''
    self._notifications = notifications

  notifications = property(GetNotifications, SetNotifications,
                           doc='The value of twitter.notifications for this user.')

  def GetContributorsEnabled(self):
    '''Get the setting of contributors_enabled for this user.

    Returns:
      True/False contributors_enabled of the user
    '''
    return self._contributors_enabled

  def SetContributorsEnabled(self, contributors_enabled):
    '''Set twitter.contributors_enabled for this user.

    Args:
      contributors_enabled:
        True/False contributors_enabled setting for the user
    '''
    self._contributors_enabled = contributors_enabled

  contributors_enabled = property(GetContributorsEnabled, SetContributorsEnabled,
                                  doc='The value of twitter.contributors_enabled for this user.')

  def GetCreatedAt(self):
    '''Get the setting of created_at for this user.

    Returns:
      created_at value of the user
    '''
    return self._created_at

  def SetCreatedAt(self, created_at):
    '''Set twitter.created_at for this user.

    Args:
      created_at:
        created_at value for the user
    '''
    self._created_at = created_at

  created_at = property(GetCreatedAt, SetCreatedAt,
                        doc='The value of twitter.created_at for this user.')

  def __ne__(self, other):
    return not self.__eq__(other)

  def __eq__(self, other):
    try:
      return other and \
             self.id == other.id and \
             self.name == other.name and \
             self.screen_name == other.screen_name and \
             self.location == other.location and \
             self.description == other.description and \
             self.profile_image_url == other.profile_image_url and \
             self.profile_background_tile == other.profile_background_tile and \
             self.profile_background_image_url == other.profile_background_image_url and \
             self.profile_sidebar_fill_color == other.profile_sidebar_fill_color and \
             self.profile_background_color == other.profile_background_color and \
             self.profile_link_color == other.profile_link_color and \
             self.profile_text_color == other.profile_text_color and \
             self.protected == other.protected and \
             self.utc_offset == other.utc_offset and \
             self.time_zone == other.time_zone and \
             self.url == other.url and \
             self.statuses_count == other.statuses_count and \
             self.followers_count == other.followers_count and \
             self.favourites_count == other.favourites_count and \
             self.friends_count == other.friends_count and \
             self.status == other.status and \
             self.geo_enabled == other.geo_enabled and \
             self.verified == other.verified and \
             self.lang == other.lang and \
             self.notifications == other.notifications and \
             self.contributors_enabled == other.contributors_enabled and \
             self.created_at == other.created_at and \
             self.listed_count == other.listed_count
    except AttributeError:
      return False

  def __str__(self):
    '''A string representation of this twitter.User instance.

    The return value is the same as the JSON string representation.

    Returns:
      A string representation of this twitter.User instance.
    '''
    return self.AsJsonString()

  def AsJsonString(self):
    '''A JSON string representation of this twitter.User instance.

    Returns:
      A JSON string representation of this twitter.User instance
    '''
    return simplejson.dumps(self.AsDict(), sort_keys=True)

  def AsDict(self):
    '''A dict representation of this twitter.User instance.

    The return value uses the same key names as the JSON representation.
    Most attributes are only included when they are truthy;
    profile_background_tile and protected are included whenever they are
    not None, so an explicit False survives the round trip.

    Return:
      A dict representing this twitter.User instance
    '''
    data = {}
    if self.id:
      data['id'] = self.id
    if self.name:
      data['name'] = self.name
    if self.screen_name:
      data['screen_name'] = self.screen_name
    if self.location:
      data['location'] = self.location
    if self.description:
      data['description'] = self.description
    if self.profile_image_url:
      data['profile_image_url'] = self.profile_image_url
    if self.profile_background_tile is not None:
      data['profile_background_tile'] = self.profile_background_tile
    # Fixed: the background image url used to be stored under the wrong key
    # ('profile_sidebar_fill_color'), and the sidebar fill color itself was
    # never serialized at all.
    if self.profile_background_image_url:
      data['profile_background_image_url'] = self.profile_background_image_url
    if self.profile_sidebar_fill_color:
      data['profile_sidebar_fill_color'] = self.profile_sidebar_fill_color
    if self.profile_background_color:
      data['profile_background_color'] = self.profile_background_color
    if self.profile_link_color:
      data['profile_link_color'] = self.profile_link_color
    if self.profile_text_color:
      data['profile_text_color'] = self.profile_text_color
    if self.protected is not None:
      data['protected'] = self.protected
    # NOTE(review): a 0 utc_offset (UTC itself) is dropped by this truthiness
    # test; preserved as-is for backward compatibility with existing output.
    if self.utc_offset:
      data['utc_offset'] = self.utc_offset
    if self.time_zone:
      data['time_zone'] = self.time_zone
    if self.url:
      data['url'] = self.url
    if self.status:
      data['status'] = self.status.AsDict()
    if self.friends_count:
      data['friends_count'] = self.friends_count
    if self.followers_count:
      data['followers_count'] = self.followers_count
    if self.statuses_count:
      data['statuses_count'] = self.statuses_count
    if self.favourites_count:
      data['favourites_count'] = self.favourites_count
    if self.geo_enabled:
      data['geo_enabled'] = self.geo_enabled
    if self.verified:
      data['verified'] = self.verified
    if self.lang:
      data['lang'] = self.lang
    if self.notifications:
      data['notifications'] = self.notifications
    if self.contributors_enabled:
      data['contributors_enabled'] = self.contributors_enabled
    if self.created_at:
      data['created_at'] = self.created_at
    if self.listed_count:
      data['listed_count'] = self.listed_count
    return data

  @staticmethod
  def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict.

    Args:
      data:
        A JSON dict, as converted from the JSON in the twitter API

    Returns:
      A twitter.User instance
    '''
    if 'status' in data:
      status = Status.NewFromJsonDict(data['status'])
    else:
      status = None
    return User(id=data.get('id', None),
                name=data.get('name', None),
                screen_name=data.get('screen_name', None),
                location=data.get('location', None),
                description=data.get('description', None),
                statuses_count=data.get('statuses_count', None),
                followers_count=data.get('followers_count', None),
                favourites_count=data.get('favourites_count', None),
                friends_count=data.get('friends_count', None),
                profile_image_url=data.get('profile_image_url', None),
                profile_background_tile=data.get('profile_background_tile', None),
                profile_background_image_url=data.get('profile_background_image_url', None),
                profile_sidebar_fill_color=data.get('profile_sidebar_fill_color', None),
                profile_background_color=data.get('profile_background_color', None),
                profile_link_color=data.get('profile_link_color', None),
                profile_text_color=data.get('profile_text_color', None),
                protected=data.get('protected', None),
                utc_offset=data.get('utc_offset', None),
                time_zone=data.get('time_zone', None),
                url=data.get('url', None),
                status=status,
                geo_enabled=data.get('geo_enabled', None),
                verified=data.get('verified', None),
                lang=data.get('lang', None),
                notifications=data.get('notifications', None),
                contributors_enabled=data.get('contributors_enabled', None),
                created_at=data.get('created_at', None),
                listed_count=data.get('listed_count', None))
class List(object):
    '''A class representing the List structure used by the twitter API.

    The List structure exposes the following properties:

      list.id
      list.name
      list.slug
      list.description
      list.full_name
      list.mode
      list.uri
      list.member_count
      list.subscriber_count
      list.following
    '''

    def __init__(self,
                 id=None,
                 name=None,
                 slug=None,
                 description=None,
                 full_name=None,
                 mode=None,
                 uri=None,
                 member_count=None,
                 subscriber_count=None,
                 following=None,
                 user=None):
        '''An object to hold a Twitter List.

        Args:
          id:                The unique id of this list. [Optional]
          name:              The real name of this list. [Optional]
          slug:              The slug of this list. [Optional]
          description:       The description of this list. [Optional]
          full_name:         The full_name of this list. [Optional]
          mode:              The mode of this list. [Optional]
          uri:               The uri of this list. [Optional]
          member_count:      The number of members of this list. [Optional]
          subscriber_count:  The number of subscribers of this list. [Optional]
          following:         Whether the authenticated user is following
                             this list. [Optional]
          user:              The twitter.User that owns this list. [Optional]
        '''
        self.id = id
        self.name = name
        self.slug = slug
        self.description = description
        self.full_name = full_name
        self.mode = mode
        self.uri = uri
        self.member_count = member_count
        self.subscriber_count = subscriber_count
        self.following = following
        self.user = user

    def GetId(self):
        '''Get the unique id of this list.'''
        return self._id

    def SetId(self, id):
        '''Set the unique id of this list.'''
        self._id = id

    id = property(GetId, SetId,
                  doc='The unique id of this list.')

    def GetName(self):
        '''Get the real name of this list.'''
        return self._name

    def SetName(self, name):
        '''Set the real name of this list.'''
        self._name = name

    name = property(GetName, SetName,
                    doc='The real name of this list.')

    def GetSlug(self):
        '''Get the slug of this list.'''
        return self._slug

    def SetSlug(self, slug):
        '''Set the slug of this list.'''
        self._slug = slug

    slug = property(GetSlug, SetSlug,
                    doc='The slug of this list.')

    def GetDescription(self):
        '''Get the description of this list.'''
        return self._description

    def SetDescription(self, description):
        '''Set the description of this list.'''
        self._description = description

    description = property(GetDescription, SetDescription,
                           doc='The description of this list.')

    def GetFull_name(self):
        '''Get the full_name of this list.'''
        return self._full_name

    def SetFull_name(self, full_name):
        '''Set the full_name of this list.'''
        self._full_name = full_name

    full_name = property(GetFull_name, SetFull_name,
                         doc='The full_name of this list.')

    def GetMode(self):
        '''Get the mode of this list.'''
        return self._mode

    def SetMode(self, mode):
        '''Set the mode of this list.'''
        self._mode = mode

    mode = property(GetMode, SetMode,
                    doc='The mode of this list.')

    def GetUri(self):
        '''Get the uri of this list.'''
        return self._uri

    def SetUri(self, uri):
        '''Set the uri of this list.'''
        self._uri = uri

    uri = property(GetUri, SetUri,
                   doc='The uri of this list.')

    def GetMember_count(self):
        '''Get the member_count of this list.'''
        return self._member_count

    def SetMember_count(self, member_count):
        '''Set the member_count of this list.'''
        self._member_count = member_count

    member_count = property(GetMember_count, SetMember_count,
                            doc='The member_count of this list.')

    def GetSubscriber_count(self):
        '''Get the subscriber_count of this list.'''
        return self._subscriber_count

    def SetSubscriber_count(self, subscriber_count):
        '''Set the subscriber_count of this list.'''
        self._subscriber_count = subscriber_count

    subscriber_count = property(GetSubscriber_count, SetSubscriber_count,
                                doc='The subscriber_count of this list.')

    def GetFollowing(self):
        '''Get the following status of this list.'''
        return self._following

    def SetFollowing(self, following):
        '''Set the following status of this list.'''
        self._following = following

    following = property(GetFollowing, SetFollowing,
                         doc='The following status of this list.')

    def GetUser(self):
        '''Get the user of this list.'''
        return self._user

    def SetUser(self, user):
        '''Set the user of this list.'''
        self._user = user

    user = property(GetUser, SetUser,
                    doc='The owner of this list.')

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        try:
            return other and \
                   self.id == other.id and \
                   self.name == other.name and \
                   self.slug == other.slug and \
                   self.description == other.description and \
                   self.full_name == other.full_name and \
                   self.mode == other.mode and \
                   self.uri == other.uri and \
                   self.member_count == other.member_count and \
                   self.subscriber_count == other.subscriber_count and \
                   self.following == other.following and \
                   self.user == other.user
        except AttributeError:
            return False

    def __str__(self):
        '''A string representation of this twitter.List instance.

        The return value is the same as the JSON string representation.

        Returns:
          A string representation of this twitter.List instance.
        '''
        return self.AsJsonString()

    def AsJsonString(self):
        '''A JSON string representation of this twitter.List instance.

        Returns:
          A JSON string representation of this twitter.List instance
        '''
        return simplejson.dumps(self.AsDict(), sort_keys=True)

    def AsDict(self):
        '''A dict representation of this twitter.List instance.

        The return value uses the same key names as the JSON representation.

        Return:
          A dict representing this twitter.List instance
        '''
        data = {}
        if self.id:
            data['id'] = self.id
        if self.name:
            data['name'] = self.name
        if self.slug:
            data['slug'] = self.slug
        if self.description:
            data['description'] = self.description
        if self.full_name:
            data['full_name'] = self.full_name
        if self.mode:
            data['mode'] = self.mode
        if self.uri:
            data['uri'] = self.uri
        # Counts and the boolean use `is not None` so that legitimate
        # zero/False values are still included in the dict.
        if self.member_count is not None:
            data['member_count'] = self.member_count
        if self.subscriber_count is not None:
            data['subscriber_count'] = self.subscriber_count
        if self.following is not None:
            data['following'] = self.following
        if self.user is not None:
            # Bug fix: serialize the owning user to a plain dict, mirroring
            # how User.AsDict handles its embedded status.  Storing the raw
            # User object here made AsJsonString() fail, because User
            # instances are not JSON-serializable.
            data['user'] = self.user.AsDict()
        return data

    @staticmethod
    def NewFromJsonDict(data):
        '''Create a new instance based on a JSON dict.

        Args:
          data:
            A JSON dict, as converted from the JSON in the twitter API

        Returns:
          A twitter.List instance
        '''
        if 'user' in data:
            user = User.NewFromJsonDict(data['user'])
        else:
            user = None
        return List(id=data.get('id', None),
                    name=data.get('name', None),
                    slug=data.get('slug', None),
                    description=data.get('description', None),
                    full_name=data.get('full_name', None),
                    mode=data.get('mode', None),
                    uri=data.get('uri', None),
                    member_count=data.get('member_count', None),
                    subscriber_count=data.get('subscriber_count', None),
                    following=data.get('following', None),
                    user=user)
class DirectMessage(object):
    '''A class representing the DirectMessage structure used by the twitter API.

    The DirectMessage structure exposes the following properties:

      direct_message.id
      direct_message.created_at
      direct_message.created_at_in_seconds # read only
      direct_message.sender_id
      direct_message.sender_screen_name
      direct_message.recipient_id
      direct_message.recipient_screen_name
      direct_message.text
    '''

    # Field names shared by the constructor, AsDict() and the JSON payload.
    _FIELDS = ('id', 'created_at', 'sender_id', 'sender_screen_name',
               'recipient_id', 'recipient_screen_name', 'text')

    def __init__(self,
                 id=None,
                 created_at=None,
                 sender_id=None,
                 sender_screen_name=None,
                 recipient_id=None,
                 recipient_screen_name=None,
                 text=None):
        '''An object to hold a Twitter direct message.

        This class is normally instantiated by the twitter.Api class and
        returned in a sequence.

        Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"

        Args:
          id:                     The unique id of this direct message. [Optional]
          created_at:             The time this direct message was posted. [Optional]
          sender_id:              The id of the twitter user that sent this
                                  message. [Optional]
          sender_screen_name:     The name of the twitter user that sent this
                                  message. [Optional]
          recipient_id:           The id of the twitter that received this
                                  message. [Optional]
          recipient_screen_name:  The name of the twitter that received this
                                  message. [Optional]
          text:                   The text of this direct message. [Optional]
        '''
        self.id = id
        self.created_at = created_at
        self.sender_id = sender_id
        self.sender_screen_name = sender_screen_name
        self.recipient_id = recipient_id
        self.recipient_screen_name = recipient_screen_name
        self.text = text

    def GetId(self):
        '''Get the unique id of this direct message.'''
        return self._id

    def SetId(self, id):
        '''Set the unique id of this direct message.'''
        self._id = id

    id = property(GetId, SetId,
                  doc='The unique id of this direct message.')

    def GetCreatedAt(self):
        '''Get the time this direct message was posted.'''
        return self._created_at

    def SetCreatedAt(self, created_at):
        '''Set the time this direct message was posted.'''
        self._created_at = created_at

    created_at = property(GetCreatedAt, SetCreatedAt,
                          doc='The time this direct message was posted.')

    def GetCreatedAtInSeconds(self):
        '''Get the posting time as seconds since the epoch (UTC).'''
        # created_at carries an RFC 822 date string; parse and convert it.
        return calendar.timegm(rfc822.parsedate(self.created_at))

    created_at_in_seconds = property(GetCreatedAtInSeconds,
                                     doc="The time this direct message was "
                                         "posted, in seconds since the epoch")

    def GetSenderId(self):
        '''Get the unique sender id of this direct message.'''
        return self._sender_id

    def SetSenderId(self, sender_id):
        '''Set the unique sender id of this direct message.'''
        self._sender_id = sender_id

    sender_id = property(GetSenderId, SetSenderId,
                         doc='The unique sender id of this direct message.')

    def GetSenderScreenName(self):
        '''Get the unique sender screen name of this direct message.'''
        return self._sender_screen_name

    def SetSenderScreenName(self, sender_screen_name):
        '''Set the unique sender screen name of this direct message.'''
        self._sender_screen_name = sender_screen_name

    sender_screen_name = property(GetSenderScreenName, SetSenderScreenName,
                                  doc='The unique sender screen name of this direct message.')

    def GetRecipientId(self):
        '''Get the unique recipient id of this direct message.'''
        return self._recipient_id

    def SetRecipientId(self, recipient_id):
        '''Set the unique recipient id of this direct message.'''
        self._recipient_id = recipient_id

    recipient_id = property(GetRecipientId, SetRecipientId,
                            doc='The unique recipient id of this direct message.')

    def GetRecipientScreenName(self):
        '''Get the unique recipient screen name of this direct message.'''
        return self._recipient_screen_name

    def SetRecipientScreenName(self, recipient_screen_name):
        '''Set the unique recipient screen name of this direct message.'''
        self._recipient_screen_name = recipient_screen_name

    recipient_screen_name = property(GetRecipientScreenName, SetRecipientScreenName,
                                     doc='The unique recipient screen name of this direct message.')

    def GetText(self):
        '''Get the text of this direct message.'''
        return self._text

    def SetText(self, text):
        '''Set the text of this direct message.'''
        self._text = text

    text = property(GetText, SetText,
                    doc='The text of this direct message')

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        try:
            return other and \
                   tuple(getattr(self, f) for f in DirectMessage._FIELDS) == \
                   tuple(getattr(other, f) for f in DirectMessage._FIELDS)
        except AttributeError:
            return False

    def __str__(self):
        '''A string representation of this twitter.DirectMessage instance.

        The return value is the same as the JSON string representation.

        Returns:
          A string representation of this twitter.DirectMessage instance.
        '''
        return self.AsJsonString()

    def AsJsonString(self):
        '''A JSON string representation of this twitter.DirectMessage instance.

        Returns:
          A JSON string representation of this twitter.DirectMessage instance
        '''
        return simplejson.dumps(self.AsDict(), sort_keys=True)

    def AsDict(self):
        '''A dict representation of this twitter.DirectMessage instance.

        The return value uses the same key names as the JSON representation.

        Return:
          A dict representing this twitter.DirectMessage instance
        '''
        data = {}
        # Only truthy fields are emitted, matching the JSON payload shape.
        for field in DirectMessage._FIELDS:
            value = getattr(self, field)
            if value:
                data[field] = value
        return data

    @staticmethod
    def NewFromJsonDict(data):
        '''Create a new instance based on a JSON dict.

        Args:
          data:
            A JSON dict, as converted from the JSON in the twitter API

        Returns:
          A twitter.DirectMessage instance
        '''
        return DirectMessage(**dict((field, data.get(field))
                                    for field in DirectMessage._FIELDS))
class Hashtag(object):
    '''A class representing a twitter hashtag.'''

    def __init__(self, text=None):
        # The hashtag text exactly as delivered by the API.
        self.text = text

    @staticmethod
    def NewFromJsonDict(data):
        '''Create a new instance based on a JSON dict.

        Args:
          data:
            A JSON dict, as converted from the JSON in the twitter API

        Returns:
          A twitter.Hashtag instance
        '''
        return Hashtag(text=data.get('text'))
class Trend(object):
    '''A class representing a trending topic.'''

    def __init__(self, name=None, query=None, timestamp=None):
        self.name = name
        self.query = query
        self.timestamp = timestamp

    def __str__(self):
        return 'Name: %s\nQuery: %s\nTimestamp: %s\n' % (self.name, self.query, self.timestamp)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        try:
            return other and \
                   (self.name, self.query, self.timestamp) == \
                   (other.name, other.query, other.timestamp)
        except AttributeError:
            return False

    @staticmethod
    def NewFromJsonDict(data, timestamp=None):
        '''Create a new instance based on a JSON dict.

        Args:
          data:
            A JSON dict
          timestamp:
            Gets set as the timestamp property of the new object

        Returns:
          A twitter.Trend object
        '''
        return Trend(name=data.get('name'),
                     query=data.get('query'),
                     timestamp=timestamp)
class Url(object):
    '''A class representing an URL contained in a tweet.'''

    def __init__(self, url=None, expanded_url=None):
        # The (possibly shortened) URL as it appears in the tweet text.
        self.url = url
        # The resolved target of the URL, when the API supplies one.
        self.expanded_url = expanded_url

    @staticmethod
    def NewFromJsonDict(data):
        '''Create a new instance based on a JSON dict.

        Args:
          data:
            A JSON dict, as converted from the JSON in the twitter API

        Returns:
          A twitter.Url instance
        '''
        return Url(url=data.get('url'),
                   expanded_url=data.get('expanded_url'))
class Api(object):
'''A python interface into the Twitter API
By default, the Api caches results for 1 minute.
Example usage:
To create an instance of the twitter.Api class, with no authentication:
>>> import twitter
>>> api = twitter.Api()
To fetch the most recently posted public twitter status messages:
>>> statuses = api.GetPublicTimeline()
>>> print [s.user.name for s in statuses]
[u'DeWitt', u'Kesuke Miyagi', u'ev', u'Buzz Andersen', u'Biz Stone'] #...
To fetch a single user's public status messages, where "user" is either
a Twitter "short name" or their user id.
>>> statuses = api.GetUserTimeline(user)
>>> print [s.text for s in statuses]
To use authentication, instantiate the twitter.Api class with a
consumer key and secret; and the oAuth key and secret:
>>> api = twitter.Api(consumer_key='twitter consumer key',
consumer_secret='twitter consumer secret',
access_token_key='the_key_given',
access_token_secret='the_key_secret')
To fetch your friends (after being authenticated):
>>> users = api.GetFriends()
>>> print [u.name for u in users]
To post a twitter status message (after being authenticated):
>>> status = api.PostUpdate('I love python-twitter!')
>>> print status.text
I love python-twitter!
There are many other methods, including:
>>> api.PostUpdates(status)
>>> api.PostDirectMessage(user, text)
>>> api.GetUser(user)
>>> api.GetReplies()
>>> api.GetUserTimeline(user)
>>> api.GetStatus(id)
>>> api.DestroyStatus(id)
>>> api.GetFriendsTimeline(user)
>>> api.GetFriends(user)
>>> api.GetFollowers()
>>> api.GetFeatured()
>>> api.GetDirectMessages()
>>> api.GetSentDirectMessages()
>>> api.PostDirectMessage(user, text)
>>> api.DestroyDirectMessage(id)
>>> api.DestroyFriendship(user)
>>> api.CreateFriendship(user)
>>> api.GetUserByEmail(email)
>>> api.VerifyCredentials()
'''
# How long (in seconds) cached API responses are considered fresh.
DEFAULT_CACHE_TIMEOUT = 60 # cache for 1 minute
# Realm string used for HTTP authentication against the Twitter API.
_API_REALM = 'Twitter API'
def __init__(self,
             consumer_key=None,
             consumer_secret=None,
             access_token_key=None,
             access_token_secret=None,
             input_encoding=None,
             request_headers=None,
             cache=DEFAULT_CACHE,
             shortner=None,
             base_url=None,
             use_gzip_compression=False,
             debugHTTP=False):
    '''Instantiate a new twitter.Api object.

    Args:
      consumer_key:
        Your Twitter user's consumer_key.
      consumer_secret:
        Your Twitter user's consumer_secret.
      access_token_key:
        The oAuth access token key value you retrieved
        from running get_access_token.py.
      access_token_secret:
        The oAuth access token's secret, also retrieved
        from the get_access_token.py run.
      input_encoding:
        The encoding used to encode input strings. [Optional]
      request_headers:
        A dictionary of additional HTTP request headers. [Optional]
      cache:
        The cache instance to use. Defaults to DEFAULT_CACHE.
        Use None to disable caching. [Optional]
      shortner:
        The shortner instance to use. Defaults to None.
        See shorten_url.py for an example shortner. [Optional]
      base_url:
        The base URL to use to contact the Twitter API.
        Defaults to https://api.twitter.com. [Optional]
      use_gzip_compression:
        Set to True to tell enable gzip compression for any call
        made to Twitter. Defaults to False. [Optional]
      debugHTTP:
        Set to True to enable debug output from urllib2 when performing
        any HTTP requests. Defaults to False. [Optional]

    Raises:
      TwitterError: if a consumer_key is supplied without both oAuth
        access token values (Twitter requires full oAuth credentials).
    '''
    self.SetCache(cache)
    self._urllib = urllib2
    self._cache_timeout = Api.DEFAULT_CACHE_TIMEOUT
    self._input_encoding = input_encoding
    self._use_gzip = use_gzip_compression
    self._debugHTTP = debugHTTP
    self._oauth_consumer = None
    # NOTE(review): presumably the character length Twitter reserves for
    # t.co-shortened links -- confirm against the help/configuration endpoint.
    self._shortlink_size = 19
    self._InitializeRequestHeaders(request_headers)
    self._InitializeUserAgent()
    self._InitializeDefaultParameters()
    if base_url is None:
        self.base_url = 'https://api.twitter.com/1'
    else:
        self.base_url = base_url
    # A consumer key without a complete access token pair is unusable:
    # warn on stderr and abort early rather than failing on the first call.
    if consumer_key is not None and (access_token_key is None or
                                     access_token_secret is None):
        print >> sys.stderr, 'Twitter now requires an oAuth Access Token for API calls.'
        print >> sys.stderr, 'If your using this library from a command line utility, please'
        print >> sys.stderr, 'run the the included get_access_token.py tool to generate one.'
        raise TwitterError('Twitter requires oAuth Access Token for all API access')
    self.SetCredentials(consumer_key, consumer_secret, access_token_key, access_token_secret)
def SetCredentials(self,
                   consumer_key,
                   consumer_secret,
                   access_token_key=None,
                   access_token_secret=None):
    '''Set the consumer_key and consumer_secret for this instance.

    Args:
      consumer_key:
        The consumer_key of the twitter account.
      consumer_secret:
        The consumer_secret for the twitter account.
      access_token_key:
        The oAuth access token key value you retrieved
        from running get_access_token.py.
      access_token_secret:
        The oAuth access token's secret, also retrieved
        from the get_access_token.py run.
    '''
    self._consumer_key = consumer_key
    self._consumer_secret = consumer_secret
    self._access_token_key = access_token_key
    self._access_token_secret = access_token_secret
    self._oauth_consumer = None
    # The oAuth machinery is only constructed when the complete set of
    # four credentials has been supplied.
    credentials = (consumer_key, consumer_secret,
                   access_token_key, access_token_secret)
    if all(value is not None for value in credentials):
        self._signature_method_plaintext = oauth.SignatureMethod_PLAINTEXT()
        self._signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
        self._oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
        self._oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
def ClearCredentials(self):
    '''Clear any credentials stored on this instance.'''
    # Reset the raw credential values along with the derived oAuth consumer.
    for attribute in ('_consumer_key', '_consumer_secret',
                      '_access_token_key', '_access_token_secret',
                      '_oauth_consumer'):
        setattr(self, attribute, None)
def GetSearch(self,
              term=None,
              geocode=None,
              since_id=None,
              max_id=None,
              until=None,
              per_page=15,
              page=1,
              lang=None,
              show_user="true",
              result_type="mixed",
              include_entities=None,
              query_users=False):
    '''Return twitter search results for a given term.

    Args:
      term:
        term to search by. Optional if you include geocode.
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. There are limits to the number of
        Tweets which can be accessed through the API. If the limit of
        Tweets has occurred since the since_id, the since_id will be
        forced to the oldest ID available. [Optional]
      max_id:
        Returns only statuses with an ID less than (that is, older
        than) or equal to the specified ID. [Optional]
      until:
        Returns tweets generated before the given date. Date should be
        formatted as YYYY-MM-DD. [Optional]
      geocode:
        geolocation information in the form (latitude, longitude, radius)
        [Optional]
      per_page:
        number of results to return. Default is 15 [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]
      lang:
        language for results as ISO 639-1 code. Default is None (all languages)
        [Optional]
      show_user:
        prefixes screen name in status
      result_type:
        Type of result which should be returned. Default is "mixed". Other
        valid options are "recent" and "popular". [Optional]
      include_entities:
        If True, each tweet will include a node called "entities,".
        This node offers a variety of metadata about the tweet in a
        discreet structure, including: user_mentions, urls, and
        hashtags. [Optional]
      query_users:
        If set to False, then all users only have screen_name and
        profile_image_url available.
        If set to True, all information of users are available,
        but it uses lots of request quota, one per status.

    Returns:
      A sequence of twitter.Status instances, one for each message containing
      the term

    Raises:
      TwitterError: if since_id or max_id cannot be converted to an integer.
    '''
    # Build request parameters
    parameters = {}
    if since_id:
        try:
            parameters['since_id'] = long(since_id)
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; long() only raises these two.
        except (ValueError, TypeError):
            raise TwitterError("since_id must be an integer")
    if max_id:
        try:
            parameters['max_id'] = long(max_id)
        except (ValueError, TypeError):
            raise TwitterError("max_id must be an integer")
    if until:
        parameters['until'] = until
    if lang:
        parameters['lang'] = lang
    # A search needs at least one of term/geocode to be meaningful.
    if term is None and geocode is None:
        return []
    if term is not None:
        parameters['q'] = term
    if geocode is not None:
        parameters['geocode'] = ','.join(map(str, geocode))
    if include_entities:
        parameters['include_entities'] = 1
    parameters['show_user'] = show_user
    parameters['rpp'] = per_page
    parameters['page'] = page
    if result_type in ["mixed", "popular", "recent"]:
        parameters['result_type'] = result_type
    # Make and send requests
    url = 'http://search.twitter.com/search.json'
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    results = []
    for x in data['results']:
        temp = Status.NewFromJsonDict(x)
        if query_users:
            # Build user object with new request (one extra API call per
            # status, which is why this is opt-in).
            temp.user = self.GetUser(urllib.quote(x['from_user']))
        else:
            temp.user = User(screen_name=x['from_user'], profile_image_url=x['profile_image_url'])
        results.append(temp)
    # Return built list of statuses
    return results
def GetTrendsCurrent(self, exclude=None):
    '''Get the current top trending topics.

    Args:
      exclude:
        Appends the exclude parameter as a request parameter.
        Currently only exclude=hashtags is supported. [Optional]

    Returns:
      A list with 10 entries. Each entry contains the twitter.
    '''
    parameters = {}
    if exclude:
        parameters['exclude'] = exclude
    url = '%s/trends/current.json' % self.base_url
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    # Flatten the {timestamp: [trend, ...]} mapping into a single list,
    # tagging each Trend with the timestamp it was grouped under.
    return [Trend.NewFromJsonDict(item, timestamp=stamp)
            for stamp in data['trends']
            for item in data['trends'][stamp]]
def GetTrendsWoeid(self, woeid, exclude=None):
    '''Return the top 10 trending topics for a specific WOEID, if trending
    information is available for it.

    Args:
      woeid:
        the Yahoo! Where On Earth ID for a location.
      exclude:
        Appends the exclude parameter as a request parameter.
        Currently only exclude=hashtags is supported. [Optional]

    Returns:
      A list with 10 entries. Each entry contains a Trend.
    '''
    parameters = {}
    if exclude:
        parameters['exclude'] = exclude
    url = '%s/trends/%s.json' % (self.base_url, woeid)
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    # The endpoint returns a one-element list; every trend in it shares
    # the same 'as_of' timestamp.
    block = data[0]
    as_of = block['as_of']
    return [Trend.NewFromJsonDict(entry, timestamp=as_of)
            for entry in block['trends']]
def GetTrendsDaily(self, exclude=None, startdate=None):
    '''Get the current top trending topics for each hour in a given day.

    Args:
      startdate:
        The start date for the report.
        Should be in the format YYYY-MM-DD. [Optional]
      exclude:
        Appends the exclude parameter as a request parameter.
        Currently only exclude=hashtags is supported. [Optional]

    Returns:
      A list with 24 entries. Each entry contains the twitter.
      Trend elements that were trending at the corresponding hour of the day.
    '''
    parameters = {}
    if exclude:
        parameters['exclude'] = exclude
    if not startdate:
        startdate = time.strftime('%Y-%m-%d', time.gmtime())
    parameters['date'] = startdate
    url = '%s/trends/daily.json' % self.base_url
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    # One slot per hour of the day; hours missing from the response stay None.
    hourly = [None] * 24
    for stamp in data['trends']:
        hour = int(time.strftime('%H', time.strptime(stamp, '%Y-%m-%d %H:%M')))
        hourly[hour] = [Trend.NewFromJsonDict(entry, timestamp=stamp)
                        for entry in data['trends'][stamp]]
    return hourly
def GetTrendsWeekly(self, exclude=None, startdate=None):
    '''Get the top 30 trending topics for each day in a given week.

    Args:
      startdate:
        The start date for the report.
        Should be in the format YYYY-MM-DD. [Optional]
      exclude:
        Appends the exclude parameter as a request parameter.
        Currently only exclude=hashtags is supported. [Optional]

    Returns:
      A list with each entry contains the twitter.
      Trend elements of trending topics for the corresponding day of the week
    '''
    parameters = {}
    if exclude:
        parameters['exclude'] = exclude
    if not startdate:
        startdate = time.strftime('%Y-%m-%d', time.gmtime())
    parameters['date'] = startdate
    url = '%s/trends/weekly.json' % self.base_url
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    weekly = [None] * 7
    # Map each date string to its epoch so the days can be ordered
    # chronologically regardless of dict iteration order.
    epochs = {}
    for stamp in data['trends']:
        epochs[calendar.timegm(time.strptime(stamp, '%Y-%m-%d'))] = stamp
    for slot, epoch in enumerate(sorted(epochs)):
        stamp = epochs[epoch]
        weekly[slot] = [Trend.NewFromJsonDict(entry, timestamp=stamp)
                        for entry in data['trends'][stamp]]
    return weekly
def GetFriendsTimeline(self,
                       user=None,
                       count=None,
                       page=None,
                       since_id=None,
                       retweets=None,
                       include_entities=None):
    '''Fetch the sequence of twitter.Status messages for a user's friends.

    The twitter.Api instance must be authenticated if the user is private.

    Args:
      user:
        Specifies the ID or screen name of the user for whom to return
        the friends_timeline. If not specified then the authenticated
        user set in the twitter.Api instance will be used. [Optional]
      count:
        Specifies the number of statuses to retrieve. May not be
        greater than 100. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. There are limits to the number of
        Tweets which can be accessed through the API. If the limit of
        Tweets has occurred since the since_id, the since_id will be
        forced to the oldest ID available. [Optional]
      retweets:
        If True, the timeline will contain native retweets. [Optional]
      include_entities:
        If True, each tweet will include a node called "entities,".
        This node offers a variety of metadata about the tweet in a
        discreet structure, including: user_mentions, urls, and
        hashtags. [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each message
    '''
    # Guard: anonymous access requires an explicit user.
    if not user and not self._oauth_consumer:
        raise TwitterError("User must be specified if API is not authenticated.")
    endpoint = '%s/statuses/friends_timeline' % self.base_url
    url = '%s/%s.json' % (endpoint, user) if user else '%s.json' % endpoint
    params = {}
    if count is not None:
        try:
            if int(count) > 100:
                raise TwitterError("'count' may not be greater than 100")
        except ValueError:
            raise TwitterError("'count' must be an integer")
        params['count'] = count
    if page is not None:
        try:
            params['page'] = int(page)
        except ValueError:
            raise TwitterError("'page' must be an integer")
    if since_id:
        params['since_id'] = since_id
    if retweets:
        params['include_rts'] = True
    if include_entities:
        params['include_entities'] = 1
    raw_json = self._FetchUrl(url, parameters=params)
    payload = self._ParseAndCheckTwitter(raw_json)
    return [Status.NewFromJsonDict(entry) for entry in payload]
def GetUserTimeline(self,
                    id=None,
                    user_id=None,
                    screen_name=None,
                    since_id=None,
                    max_id=None,
                    count=None,
                    page=None,
                    include_rts=None,
                    trim_user=None,
                    include_entities=None,
                    exclude_replies=None):
    '''Fetch the sequence of public Status messages for a single user.

    The twitter.Api instance must be authenticated if the user is private.

    Args:
      id:
        Specifies the ID or screen name of the user for whom to return
        the user_timeline. [Optional]
      user_id:
        Specifies the ID of the user for whom to return the
        user_timeline. Helpful for disambiguating when a valid user ID
        is also a valid screen name. [Optional]
      screen_name:
        Specifies the screen name of the user for whom to return the
        user_timeline. Helpful for disambiguating when a valid screen
        name is also a user ID. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. [Optional]
      max_id:
        Returns only statuses with an ID less than (that is, older
        than) or equal to the specified ID. [Optional]
      count:
        Specifies the number of statuses to retrieve. May not be
        greater than 200. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]
      include_rts:
        If True, the timeline will contain native retweets (if they
        exist) in addition to the standard stream of tweets. [Optional]
      trim_user:
        If True, statuses will only contain the numerical user ID.
        Otherwise a full user object will be returned for each status.
        [Optional]
      include_entities:
        If True, each tweet will include a node called "entities"
        with metadata such as user_mentions, urls, and hashtags.
        [Optional]
      exclude_replies:
        If True, this will prevent replies from appearing in the
        returned timeline. [Optional]

    Returns:
      A sequence of Status instances, one for each message up to count.

    Raises:
      TwitterError: If no user is specified while unauthenticated, or if
        since_id, max_id, count, or page cannot be converted to integers.
    '''
    parameters = {}
    if id:
        url = '%s/statuses/user_timeline/%s.json' % (self.base_url, id)
    elif user_id:
        url = '%s/statuses/user_timeline.json?user_id=%d' % (self.base_url, user_id)
    elif screen_name:
        url = ('%s/statuses/user_timeline.json?screen_name=%s' % (self.base_url,
                                                                  screen_name))
    elif not self._oauth_consumer:
        raise TwitterError("User must be specified if API is not authenticated.")
    else:
        url = '%s/statuses/user_timeline.json' % self.base_url
    # The bare "except:" clauses below were narrowed to (ValueError,
    # TypeError): a bare except also swallows KeyboardInterrupt and
    # SystemExit; long()/int() only raise ValueError or TypeError on
    # unconvertible input.
    if since_id:
        try:
            parameters['since_id'] = long(since_id)
        except (ValueError, TypeError):
            raise TwitterError("since_id must be an integer")
    if max_id:
        try:
            parameters['max_id'] = long(max_id)
        except (ValueError, TypeError):
            raise TwitterError("max_id must be an integer")
    if count:
        try:
            parameters['count'] = int(count)
        except (ValueError, TypeError):
            raise TwitterError("count must be an integer")
    if page:
        try:
            parameters['page'] = int(page)
        except (ValueError, TypeError):
            raise TwitterError("page must be an integer")
    if include_rts:
        parameters['include_rts'] = 1
    if include_entities:
        parameters['include_entities'] = 1
    if trim_user:
        parameters['trim_user'] = 1
    if exclude_replies:
        parameters['exclude_replies'] = 1
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return [Status.NewFromJsonDict(x) for x in data]
def GetStatus(self, id, include_entities=None):
    '''Returns a single status message.

    The twitter.Api instance must be authenticated if the
    status message is private.

    Args:
      id:
        The numeric ID of the status you are trying to retrieve.
      include_entities:
        If True, the tweet will include a node called "entities" with
        metadata such as user_mentions, urls, and hashtags. [Optional]

    Returns:
      A twitter.Status instance representing that status message.

    Raises:
      TwitterError: If id cannot be converted to an integer.
    '''
    # Narrowed from a bare "except:"; long() only raises ValueError or
    # TypeError on bad input.  NOTE(review): a falsy id (None, 0, '')
    # skips validation entirely and yields a malformed URL -- preserved
    # for compatibility, but worth confirming with callers.
    try:
        if id:
            long(id)
    except (ValueError, TypeError):
        # Message fixed from "id must be an long integer" (typo); now
        # matches the wording used by DestroyStatus.
        raise TwitterError("id must be an integer")
    parameters = {}
    if include_entities:
        parameters['include_entities'] = 1
    url = '%s/statuses/show/%s.json' % (self.base_url, id)
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return Status.NewFromJsonDict(data)
def DestroyStatus(self, id):
    '''Destroys the status specified by the required ID parameter.

    The twitter.Api instance must be authenticated and the
    authenticating user must be the author of the specified status.

    Args:
      id:
        The numerical ID of the status you're trying to destroy.

    Returns:
      A twitter.Status instance representing the destroyed status message.

    Raises:
      TwitterError: If id cannot be converted to an integer.
    '''
    # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit are
    # not swallowed; long() only raises ValueError/TypeError here.
    try:
        if id:
            long(id)
    except (ValueError, TypeError):
        raise TwitterError("id must be an integer")
    url = '%s/statuses/destroy/%s.json' % (self.base_url, id)
    json = self._FetchUrl(url, post_data={'id': id})
    data = self._ParseAndCheckTwitter(json)
    return Status.NewFromJsonDict(data)
@classmethod
def _calculate_status_length(cls, status, linksize=19):
    '''Return the effective length of `status` once links are shortened.

    Every space-separated token beginning with http:// or https:// is
    replaced by a fixed placeholder before measuring, mimicking
    Twitter's t.co link wrapping.  The placeholder is sized from
    `linksize` (exactly `linksize` characters for two-digit values).
    '''
    placeholder = 'https://-%d-chars%s/' % (linksize, '-' * (linksize - 18))
    tokens = []
    for token in status.split(' '):
        if token.startswith(('http://', 'https://')):
            tokens.append(placeholder)
        else:
            tokens.append(token)
    return len(' '.join(tokens))
def PostUpdate(self, status, in_reply_to_status_id=None, latitude=None, longitude=None):
    '''Post a twitter status message from the authenticated user.

    The twitter.Api instance must be authenticated.

    Args:
      status:
        The message text to be posted.
        Must be less than or equal to 140 characters.
      in_reply_to_status_id:
        The ID of an existing status that the status to be posted is
        in reply to.  This implicitly sets the in_reply_to_user_id
        attribute of the resulting status to the user ID of the
        message being replied to.  Invalid/missing status IDs will be
        ignored. [Optional]
      latitude:
        Latitude coordinate of the tweet in degrees.  Will only work
        in conjunction with longitude argument.  Both will be ignored
        by twitter if the user has a false geo_enabled setting. [Optional]
      longitude:
        Longitude coordinate of the tweet in degrees.  Will only work
        in conjunction with latitude argument.  Both will be ignored
        by twitter if the user has a false geo_enabled setting. [Optional]

    Returns:
      A twitter.Status instance representing the message posted.

    Raises:
      TwitterError: If unauthenticated or the status text is too long.
    '''
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    url = '%s/statuses/update.json' % self.base_url
    if isinstance(status, unicode) or self._input_encoding is None:
        u_status = status
    else:
        u_status = unicode(status, self._input_encoding)
    # Length is measured after collapsing links to t.co size, so a long
    # URL does not unfairly count against the character limit.
    if self._calculate_status_length(u_status, self._shortlink_size) > CHARACTER_LIMIT:
        raise TwitterError("Text must be less than or equal to %d characters. "
                           "Consider using PostUpdates." % CHARACTER_LIMIT)
    data = {'status': status}
    if in_reply_to_status_id:
        data['in_reply_to_status_id'] = in_reply_to_status_id
    # "is not None" (was "!= None"): identity comparison against None per
    # PEP 8; behavior is unchanged.
    if latitude is not None and longitude is not None:
        data['lat'] = str(latitude)
        data['long'] = str(longitude)
    json = self._FetchUrl(url, post_data=data)
    data = self._ParseAndCheckTwitter(json)
    return Status.NewFromJsonDict(data)
def PostUpdates(self, status, continuation=None, **kwargs):
    '''Post one or more twitter status messages from the authenticated user.

    Unlike api.PostUpdate, this method will post multiple status updates
    if the message is longer than 140 characters.

    The twitter.Api instance must be authenticated.

    Args:
      status:
        The message text to be posted.  May be longer than 140 characters.
      continuation:
        The character string, if any, to be appended to all but the
        last message.  Note that Twitter strips trailing '...' strings
        from messages.  Consider using the unicode \u2026 character
        (horizontal ellipsis) instead. [Defaults to None]
      **kwargs:
        See api.PostUpdate for a list of accepted parameters.

    Returns:
      A list of twitter.Status instances representing the messages posted.
    '''
    if continuation is None:
        continuation = ''
    # Leave room in each chunk for the continuation marker.
    chunks = textwrap.wrap(status, CHARACTER_LIMIT - len(continuation))
    posted = [self.PostUpdate(chunk + continuation, **kwargs)
              for chunk in chunks[:-1]]
    # The final chunk goes out without the continuation marker.
    posted.append(self.PostUpdate(chunks[-1], **kwargs))
    return posted
def GetUserRetweets(self, count=None, since_id=None, max_id=None, include_entities=False):
    '''Fetch the sequence of retweets made by a single user.

    The twitter.Api instance must be authenticated.

    Args:
      count:
        The number of status messages to retrieve; at most 100. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. [Optional]
      max_id:
        Returns results with an ID less than (that is, older than) or
        equal to the specified ID. [Optional]
      include_entities:
        If True, each tweet will include a node called "entities" with
        metadata such as user_mentions, urls, and hashtags. [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each message up to count.

    Raises:
      TwitterError: If unauthenticated, or count/max_id are invalid.
    '''
    url = '%s/statuses/retweeted_by_me.json' % self.base_url
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    parameters = {}
    if count is not None:
        try:
            if int(count) > 100:
                raise TwitterError("'count' may not be greater than 100")
        except ValueError:
            raise TwitterError("'count' must be an integer")
    if count:
        parameters['count'] = count
    if since_id:
        parameters['since_id'] = since_id
    if include_entities:
        parameters['include_entities'] = True
    if max_id:
        try:
            parameters['max_id'] = long(max_id)
        # Narrowed from a bare "except:"; long() only raises
        # ValueError/TypeError on unconvertible input.
        except (ValueError, TypeError):
            raise TwitterError("max_id must be an integer")
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return [Status.NewFromJsonDict(x) for x in data]
def GetReplies(self, since=None, since_id=None, page=None):
    '''Get a sequence of status messages representing the 20 most
    recent replies (status updates prefixed with @twitterID) to the
    authenticating user.

    Args:
      since:
        Narrows the returned results to just those statuses created
        after the specified HTTP-formatted date. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. There are limits to the number of
        Tweets which can be accessed through the API. If the limit of
        Tweets has occurred since the since_id, the since_id will be
        forced to the oldest ID available. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each reply to the user.
    '''
    url = '%s/statuses/replies.json' % self.base_url
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    parameters = {}
    # Only forward the query parameters the caller actually supplied.
    if since:
        parameters['since'] = since
    if since_id:
        parameters['since_id'] = since_id
    if page:
        parameters['page'] = page
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return [Status.NewFromJsonDict(x) for x in data]
def GetRetweets(self, statusid):
    '''Returns up to 100 of the first retweets of the tweet identified
    by statusid.

    Args:
      statusid:
        The ID of the tweet for which retweets should be searched for.

    Returns:
      A list of twitter.Status instances, which are retweets of statusid.

    Raises:
      TwitterError: If the instance is not authenticated.
    '''
    if not self._oauth_consumer:
        # Typo fixed in the user-facing message: "instsance" -> "instance".
        raise TwitterError("The twitter.Api instance must be authenticated.")
    url = '%s/statuses/retweets/%s.json?include_entities=true&include_rts=true' % (self.base_url, statusid)
    parameters = {}
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return [Status.NewFromJsonDict(s) for s in data]
def GetRetweetsOfMe(self,
                    count=None,
                    since_id=None,
                    max_id=None,
                    trim_user=False,
                    include_entities=True,
                    include_user_entities=True):
    '''Returns up to 100 of the authenticated user's tweets that have
    been retweeted by others.

    Args:
      count:
        The number of retweets to retrieve, up to 100.  If omitted,
        20 is assumed.
      since_id:
        Returns results with an ID greater than (newer than) this ID.
      max_id:
        Returns results with an ID less than or equal to this ID.
      trim_user:
        When True, the user object for each tweet will only be an ID.
      include_entities:
        When True, the tweet entities will be included.
      include_user_entities:
        When True, the user entities will be included.

    Returns:
      A list of twitter.Status instances.
    '''
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    url = '%s/statuses/retweets_of_me.json' % self.base_url
    if count is not None:
        try:
            if int(count) > 100:
                raise TwitterError("'count' may not be greater than 100")
        except ValueError:
            raise TwitterError("'count' must be an integer")
    # Each entry is (name, value, send?); entities flags are only sent
    # when they deviate from the server-side default of True.
    candidates = (('count', count, bool(count)),
                  ('since_id', since_id, bool(since_id)),
                  ('max_id', max_id, bool(max_id)),
                  ('trim_user', trim_user, bool(trim_user)),
                  ('include_entities', include_entities,
                   not include_entities),
                  ('include_user_entities', include_user_entities,
                   not include_user_entities))
    parameters = {}
    for name, value, send in candidates:
        if send:
            parameters[name] = value
    resp = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(resp)
    return [Status.NewFromJsonDict(item) for item in payload]
def GetFriends(self, user=None, cursor=-1):
    '''Fetch the sequence of twitter.User instances, one for each friend.

    The twitter.Api instance must be authenticated.

    Args:
      user:
        The twitter name or id of the user whose friends you are fetching.
        If not specified, defaults to the authenticated user. [Optional]
      cursor:
        "page" value that Twitter will use to start building the
        sequence from.  -1 starts at the beginning. [Optional]

    Returns:
      A sequence of twitter.User instances, one for each friend
    '''
    if not user and not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")
    if user:
        url = '%s/statuses/friends/%s.json' % (self.base_url, user)
    else:
        url = '%s/statuses/friends.json' % self.base_url
    result = []
    parameters = {}
    # Follow Twitter's cursor chain, accumulating each page of users.
    while True:
        parameters['cursor'] = cursor
        json = self._FetchUrl(url, parameters=parameters)
        data = self._ParseAndCheckTwitter(json)
        result += [User.NewFromJsonDict(x) for x in data['users']]
        if 'next_cursor' in data:
            # A next_cursor of 0, or one equal to previous_cursor, marks
            # the final page.
            if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
                break
            else:
                cursor = data['next_cursor']
        else:
            break
    return result
def GetFriendIDs(self, user=None, cursor=-1):
    '''Returns a list of twitter user id's for every person
    the specified user is following.

    Args:
      user:
        The id or screen_name of the user to retrieve the id list for.
        [Optional]
      cursor:
        The Twitter API cursor to start from; -1 means the beginning.
        [Optional]

    Returns:
      A list of integers, one for each user id.
    '''
    if not user and not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")
    if user:
        url = '%s/friends/ids/%s.json' % (self.base_url, user)
    else:
        url = '%s/friends/ids.json' % self.base_url
    resp = self._FetchUrl(url, parameters={'cursor': cursor})
    return self._ParseAndCheckTwitter(resp)
def GetFollowerIDs(self, user=None, cursor=-1):
    '''Returns a list of twitter user id's for every person
    that is following the specified user.

    Args:
      user:
        The id or screen_name of the user to retrieve the id list for.
        [Optional]
      cursor:
        The Twitter API cursor to start from; -1 means the beginning.
        [Optional]

    Returns:
      A list of integers, one for each user id.
    '''
    if not user and not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")
    if user:
        url = '%s/followers/ids/%s.json' % (self.base_url, user)
    else:
        url = '%s/followers/ids.json' % self.base_url
    resp = self._FetchUrl(url, parameters={'cursor': cursor})
    return self._ParseAndCheckTwitter(resp)
def GetFollowers(self, user=None, cursor=-1):
    '''Fetch the sequence of twitter.User instances, one for each follower.

    The twitter.Api instance must be authenticated.

    Args:
      user:
        The user whose followers to fetch; defaults to the authenticated
        user.  NOTE(review): unlike GetFriends (which interpolates `user`
        directly), this calls user.GetId(), so it appears to expect a
        twitter.User object rather than a name -- confirm intended usage.
        [Optional]
      cursor:
        Specifies the Twitter API Cursor location to start at. [Optional]
        Note: there are pagination limits.

    Returns:
      A sequence of twitter.User instances, one for each follower
    '''
    if not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")
    if user:
        url = '%s/statuses/followers/%s.json' % (self.base_url, user.GetId())
    else:
        url = '%s/statuses/followers.json' % self.base_url
    result = []
    # (A dead "parameters = {}" before the loop was removed: the dict was
    # unconditionally rebuilt on every iteration.)
    while True:
        parameters = {'cursor': cursor}
        json = self._FetchUrl(url, parameters=parameters)
        data = self._ParseAndCheckTwitter(json)
        result += [User.NewFromJsonDict(x) for x in data['users']]
        if 'next_cursor' in data:
            # A next_cursor of 0, or one equal to previous_cursor, marks
            # the final page.
            if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
                break
            else:
                cursor = data['next_cursor']
        else:
            break
    return result
def GetFeatured(self):
    '''Fetch the sequence of twitter.User instances featured on twitter.com.

    The twitter.Api instance must be authenticated.

    Returns:
      A sequence of twitter.User instances.
    '''
    resp = self._FetchUrl('%s/statuses/featured.json' % self.base_url)
    payload = self._ParseAndCheckTwitter(resp)
    return [User.NewFromJsonDict(entry) for entry in payload]
def UsersLookup(self, user_id=None, screen_name=None, users=None):
    '''Fetch extended information for the specified users.

    Users may be specified either as lists of either user_ids,
    screen_names, or twitter.User objects. The list of users that
    are queried is the union of all specified parameters.

    The twitter.Api instance must be authenticated.

    Args:
      user_id:
        A list of user_ids to retrieve extended information.
        [Optional]
      screen_name:
        A list of screen_names to retrieve extended information.
        [Optional]
      users:
        A list of twitter.User objects to retrieve extended information.
        [Optional]

    Returns:
      A list of twitter.User objects for the requested users

    Raises:
      TwitterError: If unauthenticated, or no user selector was given.
    '''
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    if not user_id and not screen_name and not users:
        raise TwitterError("Specify at least one of user_id, screen_name, or users.")
    url = '%s/users/lookup.json' % self.base_url
    parameters = {}
    uids = list()
    if user_id:
        uids.extend(user_id)
    if users:
        uids.extend([u.id for u in users])
    if len(uids):
        # Twitter accepts a single comma-separated user_id parameter.
        parameters['user_id'] = ','.join(["%s" % u for u in uids])
    if screen_name:
        parameters['screen_name'] = ','.join(screen_name)
    json = self._FetchUrl(url, parameters=parameters)
    try:
        data = self._ParseAndCheckTwitter(json)
    except TwitterError as e:
        t = e.args[0]
        # Error code 34 means none of the requested users could be found;
        # report that as an empty result instead of raising.
        # NOTE(review): assumes e.args[0] is the parsed error list produced
        # by _ParseAndCheckTwitter -- confirm against its implementation.
        if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
            data = []
        else:
            raise
    return [User.NewFromJsonDict(u) for u in data]
def GetUser(self, user):
    '''Returns a single user.

    The twitter.Api instance must be authenticated.

    Args:
      user: The twitter name or id of the user to retrieve.

    Returns:
      A twitter.User instance representing that user.
    '''
    resp = self._FetchUrl('%s/users/show/%s.json' % (self.base_url, user))
    return User.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def GetDirectMessages(self, since=None, since_id=None, page=None):
    '''Returns a list of the direct messages sent to the authenticating user.

    The twitter.Api instance must be authenticated.

    Args:
      since:
        Narrows the returned results to just those statuses created
        after the specified HTTP-formatted date. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]

    Returns:
      A sequence of twitter.DirectMessage instances.
    '''
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    url = '%s/direct_messages.json' % self.base_url
    # Forward only the query parameters the caller actually supplied.
    parameters = dict((name, value) for name, value in
                      (('since', since), ('since_id', since_id), ('page', page))
                      if value)
    resp = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(resp)
    return [DirectMessage.NewFromJsonDict(item) for item in payload]
def GetSentDirectMessages(self, since=None, since_id=None, page=None):
    '''Returns a list of the direct messages sent by the authenticating user.

    The twitter.Api instance must be authenticated.

    Args:
      since:
        Narrows the returned results to just those statuses created
        after the specified HTTP-formatted date. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]

    Returns:
      A sequence of twitter.DirectMessage instances.
    '''
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    url = '%s/direct_messages/sent.json' % self.base_url
    # Forward only the query parameters the caller actually supplied.
    parameters = dict((name, value) for name, value in
                      (('since', since), ('since_id', since_id), ('page', page))
                      if value)
    resp = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(resp)
    return [DirectMessage.NewFromJsonDict(item) for item in payload]
def PostDirectMessage(self, user, text):
    '''Post a twitter direct message from the authenticated user.

    The twitter.Api instance must be authenticated.

    Args:
      user: The ID or screen name of the recipient user.
      text: The message text to be posted.  Must be less than 140 characters.

    Returns:
      A twitter.DirectMessage instance representing the message posted.
    '''
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    resp = self._FetchUrl('%s/direct_messages/new.json' % self.base_url,
                          post_data={'text': text, 'user': user})
    return DirectMessage.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def DestroyDirectMessage(self, id):
    '''Destroys the direct message specified in the required ID parameter.

    The twitter.Api instance must be authenticated, and the
    authenticating user must be the recipient of the specified direct
    message.

    Args:
      id: The id of the direct message to be destroyed.

    Returns:
      A twitter.DirectMessage instance representing the message destroyed.
    '''
    resp = self._FetchUrl('%s/direct_messages/destroy/%s.json' % (self.base_url, id),
                          post_data={'id': id})
    return DirectMessage.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def CreateFriendship(self, user):
    '''Befriends the user specified in the user parameter as the authenticating user.

    The twitter.Api instance must be authenticated.

    Args:
      user: The ID or screen name of the user to befriend.

    Returns:
      A twitter.User instance representing the befriended user.
    '''
    resp = self._FetchUrl('%s/friendships/create/%s.json' % (self.base_url, user),
                          post_data={'user': user})
    return User.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def DestroyFriendship(self, user):
    '''Discontinues friendship with the user specified in the user parameter.

    The twitter.Api instance must be authenticated.

    Args:
      user: The ID or screen name of the user with whom to discontinue friendship.

    Returns:
      A twitter.User instance representing the discontinued friend.
    '''
    resp = self._FetchUrl('%s/friendships/destroy/%s.json' % (self.base_url, user),
                          post_data={'user': user})
    return User.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def CreateFavorite(self, status):
    '''Favorites the status specified in the status parameter as the authenticating user.

    Returns the favorite status when successful.

    The twitter.Api instance must be authenticated.

    Args:
      status: The twitter.Status instance to mark as a favorite.

    Returns:
      A twitter.Status instance representing the newly-marked favorite.
    '''
    resp = self._FetchUrl('%s/favorites/create/%s.json' % (self.base_url, status.id),
                          post_data={'id': status.id})
    return Status.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def DestroyFavorite(self, status):
    '''Un-favorites the status specified in the ID parameter as the authenticating user.

    Returns the un-favorited status when successful.

    The twitter.Api instance must be authenticated.

    Args:
      status: The twitter.Status to unmark as a favorite.

    Returns:
      A twitter.Status instance representing the newly-unmarked favorite.
    '''
    resp = self._FetchUrl('%s/favorites/destroy/%s.json' % (self.base_url, status.id),
                          post_data={'id': status.id})
    return Status.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def GetFavorites(self,
                 user=None,
                 page=None):
    '''Return a list of Status objects representing favorited tweets.

    By default, returns the (up to) 20 most recent tweets for the
    authenticated user.

    Args:
      user:
        The twitter name or id of the user whose favorites you are fetching.
        If not specified, defaults to the authenticated user. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]

    Returns:
      A list of twitter.Status instances.
    '''
    parameters = {'page': page} if page else {}
    if user:
        url = '%s/favorites/%s.json' % (self.base_url, user)
    elif not self._oauth_consumer:
        raise TwitterError("User must be specified if API is not authenticated.")
    else:
        url = '%s/favorites.json' % self.base_url
    resp = self._FetchUrl(url, parameters=parameters)
    payload = self._ParseAndCheckTwitter(resp)
    return [Status.NewFromJsonDict(item) for item in payload]
def GetMentions(self,
                since_id=None,
                max_id=None,
                page=None):
    '''Returns the 20 most recent mentions (status containing @twitterID)
    for the authenticating user.

    Args:
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. [Optional]
      max_id:
        Returns only statuses with an ID less than
        (that is, older than) the specified ID. [Optional]
      page:
        Specifies the page of results to retrieve.
        Note: there are pagination limits. [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each mention of the user.

    Raises:
      TwitterError: If unauthenticated or max_id is not an integer.
    '''
    url = '%s/statuses/mentions.json' % self.base_url
    if not self._oauth_consumer:
        raise TwitterError("The twitter.Api instance must be authenticated.")
    parameters = {}
    if since_id:
        parameters['since_id'] = since_id
    if max_id:
        try:
            parameters['max_id'] = long(max_id)
        # Narrowed from a bare "except:"; long() only raises
        # ValueError/TypeError on unconvertible input.
        except (ValueError, TypeError):
            raise TwitterError("max_id must be an integer")
    if page:
        parameters['page'] = page
    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)
    return [Status.NewFromJsonDict(x) for x in data]
def CreateList(self, user, name, mode=None, description=None):
    '''Creates a new list with the given name.

    The twitter.Api instance must be authenticated.

    Args:
      user:
        Twitter name to create the list for.
      name:
        New name for the list.
      mode:
        'public' or 'private'.  Defaults to 'public'. [Optional]
      description:
        Description of the list. [Optional]

    Returns:
      A twitter.List instance representing the new list.
    '''
    post_data = {'name': name}
    if mode is not None:
        post_data['mode'] = mode
    if description is not None:
        post_data['description'] = description
    resp = self._FetchUrl('%s/%s/lists.json' % (self.base_url, user),
                          post_data=post_data)
    return List.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def DestroyList(self, user, id):
    '''Destroys the list from the given user.

    The twitter.Api instance must be authenticated.

    Args:
      user:
        The user to remove the list from.
      id:
        The slug or id of the list to remove.

    Returns:
      A twitter.List instance representing the removed list.
    '''
    resp = self._FetchUrl('%s/%s/lists/%s.json' % (self.base_url, user, id),
                          post_data={'_method': 'DELETE'})
    return List.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def CreateSubscription(self, owner, list):
    '''Creates a subscription to a list by the authenticated user.

    The twitter.Api instance must be authenticated.

    Args:
      owner:
        User name or id of the owner of the list being subscribed to.
      list:
        The slug or list id to subscribe the user to.

    Returns:
      A twitter.List instance representing the list subscribed to.
    '''
    # (Parameter name "list" shadows the builtin but is part of the
    # public keyword interface, so it is kept.)
    resp = self._FetchUrl('%s/%s/%s/subscribers.json' % (self.base_url, owner, list),
                          post_data={'list_id': list})
    return List.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def DestroySubscription(self, owner, list):
    '''Destroys the subscription to a list for the authenticated user.

    The twitter.Api instance must be authenticated.

    Args:
      owner:
        The user id or screen name of the user that owns the
        list that is to be unsubscribed from.
      list:
        The slug or list id of the list to unsubscribe from.

    Returns:
      A twitter.List instance representing the removed list.
    '''
    resp = self._FetchUrl('%s/%s/%s/subscribers.json' % (self.base_url, owner, list),
                          post_data={'_method': 'DELETE', 'list_id': list})
    return List.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def GetSubscriptions(self, user, cursor=-1):
    '''Fetch the sequence of Lists that the given user is subscribed to.

    The twitter.Api instance must be authenticated.

    Args:
      user:
        The twitter name or id of the user.
      cursor:
        "page" value that Twitter will use to start building the
        list sequence from.  -1 to start at the beginning.
        Twitter will return in the result the values for next_cursor
        and previous_cursor. [Optional]

    Returns:
      A sequence of twitter.List instances, one for each list.
    '''
    if not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")
    resp = self._FetchUrl('%s/%s/lists/subscriptions.json' % (self.base_url, user),
                          parameters={'cursor': cursor})
    payload = self._ParseAndCheckTwitter(resp)
    return [List.NewFromJsonDict(entry) for entry in payload['lists']]
def GetLists(self, user, cursor=-1):
    '''Fetch the sequence of lists for a user.

    The twitter.Api instance must be authenticated.

    Args:
      user:
        The twitter name or id of the user whose lists you are fetching.
        If the passed in user is the same as the authenticated user
        then you will also receive private list data.
      cursor:
        "page" value that Twitter will use to start building the
        list sequence from.  -1 to start at the beginning.
        Twitter will return in the result the values for next_cursor
        and previous_cursor. [Optional]

    Returns:
      A sequence of twitter.List instances, one for each list.
    '''
    if not self._oauth_consumer:
        raise TwitterError("twitter.Api instance must be authenticated")
    resp = self._FetchUrl('%s/%s/lists.json' % (self.base_url, user),
                          parameters={'cursor': cursor})
    payload = self._ParseAndCheckTwitter(resp)
    return [List.NewFromJsonDict(entry) for entry in payload['lists']]
def GetUserByEmail(self, email):
    '''Returns a single user by email address.

    Args:
      email:
        The email of the user to retrieve.

    Returns:
      A twitter.User instance representing that user.
    '''
    resp = self._FetchUrl('%s/users/show.json?email=%s' % (self.base_url, email))
    return User.NewFromJsonDict(self._ParseAndCheckTwitter(resp))
def VerifyCredentials(self):
    '''Returns a twitter.User instance if the authenticating user is valid.

    Returns:
      A twitter.User instance representing that user if the
      credentials are valid, None otherwise.

    Raises:
      TwitterError: If the instance has no user credentials configured.
    '''
    if not self._oauth_consumer:
        raise TwitterError("Api instance must first be given user credentials.")
    url = '%s/account/verify_credentials.json' % self.base_url
    try:
        # no_cache=True: a credential check must always hit the server,
        # never a cached response.
        json = self._FetchUrl(url, no_cache=True)
    except urllib2.HTTPError, http_error:
        # HTTP 401 means the credentials are simply invalid; any other
        # HTTP failure is unexpected and propagates to the caller.
        if http_error.code == httplib.UNAUTHORIZED:
            return None
        else:
            raise http_error
    data = self._ParseAndCheckTwitter(json)
    return User.NewFromJsonDict(data)
def SetCache(self, cache):
    '''Override the default cache.  Set to None to prevent caching.

    Args:
      cache:
        An instance that supports the same API as the twitter._FileCache
    '''
    # NOTE(review): equality (==) rather than identity (is) comparison
    # against the DEFAULT_CACHE sentinel -- confirm DEFAULT_CACHE has no
    # custom __eq__ before tightening this to "is".
    if cache == DEFAULT_CACHE:
        self._cache = _FileCache()
    else:
        self._cache = cache
def SetUrllib(self, urllib):
    '''Override the default urllib implementation.

    Args:
      urllib:
        An instance that supports the same API as the urllib2 module
    '''
    # Presumably used to inject a mock HTTP layer for testing -- verify
    # against callers.
    self._urllib = urllib
def SetCacheTimeout(self, cache_timeout):
'''Override the default cache timeout.
Args:
cache_timeout:
Time, in seconds, that responses should be reused.
'''
self._cache_timeout = cache_timeout
def SetUserAgent(self, user_agent):
'''Override the default user agent
Args:
user_agent:
A string that should be send to the server as the User-agent
'''
self._request_headers['User-Agent'] = user_agent
def SetXTwitterHeaders(self, client, url, version):
'''Set the X-Twitter HTTP headers that will be sent to the server.
Args:
client:
The client name as a string. Will be sent to the server as
the 'X-Twitter-Client' header.
url:
The URL of the meta.xml as a string. Will be sent to the server
as the 'X-Twitter-Client-URL' header.
version:
The client version as a string. Will be sent to the server
as the 'X-Twitter-Client-Version' header.
'''
self._request_headers['X-Twitter-Client'] = client
self._request_headers['X-Twitter-Client-URL'] = url
self._request_headers['X-Twitter-Client-Version'] = version
def SetSource(self, source):
'''Suggest the "from source" value to be displayed on the Twitter web site.
The value of the 'source' parameter must be first recognized by
the Twitter server. New source values are authorized on a case by
case basis by the Twitter development team.
Args:
source:
The source name as a string. Will be sent to the server as
the 'source' parameter.
'''
self._default_params['source'] = source
def GetRateLimitStatus(self):
'''Fetch the rate limit status for the currently authorized user.
Returns:
A dictionary containing the time the limit will reset (reset_time),
the number of remaining hits allowed before the reset (remaining_hits),
the number of hits allowed in a 60-minute period (hourly_limit), and
the time of the reset in seconds since The Epoch (reset_time_in_seconds).
'''
url = '%s/account/rate_limit_status.json' % self.base_url
json = self._FetchUrl(url, no_cache=True)
data = self._ParseAndCheckTwitter(json)
return data
def MaximumHitFrequency(self):
'''Determines the minimum number of seconds that a program must wait
before hitting the server again without exceeding the rate_limit
imposed for the currently authenticated user.
Returns:
The minimum second interval that a program must use so as to not
exceed the rate_limit imposed for the user.
'''
rate_status = self.GetRateLimitStatus()
reset_time = rate_status.get('reset_time', None)
limit = rate_status.get('remaining_hits', None)
if reset_time:
# put the reset time into a datetime object
reset = datetime.datetime(*rfc822.parsedate(reset_time)[:7])
# find the difference in time between now and the reset time + 1 hour
delta = reset + datetime.timedelta(hours=1) - datetime.datetime.utcnow()
if not limit:
return int(delta.seconds)
# determine the minimum number of seconds allowed as a regular interval
max_frequency = int(delta.seconds / limit) + 1
# return the number of seconds
return max_frequency
return 60
def _BuildUrl(self, url, path_elements=None, extra_params=None):
# Break url into constituent parts
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
# Add any additional path elements to the path
if path_elements:
# Filter out the path elements that have a value of None
p = [i for i in path_elements if i]
if not path.endswith('/'):
path += '/'
path += '/'.join(p)
# Add any additional query parameters to the query string
if extra_params and len(extra_params) > 0:
extra_query = self._EncodeParameters(extra_params)
# Add it to the existing query
if query:
query += '&' + extra_query
else:
query = extra_query
# Return the rebuilt URL
return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
def _InitializeRequestHeaders(self, request_headers):
if request_headers:
self._request_headers = request_headers
else:
self._request_headers = {}
def _InitializeUserAgent(self):
user_agent = 'Python-urllib/%s (python-twitter/%s)' % \
(self._urllib.__version__, __version__)
self.SetUserAgent(user_agent)
def _InitializeDefaultParameters(self):
self._default_params = {}
def _DecompressGzippedResponse(self, response):
raw_data = response.read()
if response.headers.get('content-encoding', None) == 'gzip':
url_data = gzip.GzipFile(fileobj=StringIO.StringIO(raw_data)).read()
else:
url_data = raw_data
return url_data
def _Encode(self, s):
if self._input_encoding:
return unicode(s, self._input_encoding).encode('utf-8')
else:
return unicode(s).encode('utf-8')
def _EncodeParameters(self, parameters):
'''Return a string in key=value&key=value form
Values of None are not included in the output string.
Args:
parameters:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if parameters is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in parameters.items() if v is not None]))
def _EncodePostData(self, post_data):
'''Return a string in key=value&key=value form
Values are assumed to be encoded in the format specified by self._encoding,
and are subsequently URL encoded.
Args:
post_data:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if post_data is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in post_data.items()]))
  def _ParseAndCheckTwitter(self, json):
    """Parse the JSON text returned by Twitter and check it for errors.

    During some Twitter network outages the service returns an HTML
    "failwhale" page instead of JSON; the resulting ValueError from the
    JSON decoder is mapped onto a more descriptive TwitterError.

    Args:
      json:
        The raw response body, expected to be a JSON document.

    Returns:
      The decoded python structure.

    Raises:
      TwitterError: when the body is not JSON, or when the decoded
      payload contains a Twitter error message.
    """
    try:
      data = simplejson.loads(json)
      self._CheckForTwitterError(data)
    except ValueError:
      # Recognize the two known outage pages by their <title> text.
      if "<title>Twitter / Over capacity</title>" in json:
        raise TwitterError("Capacity Error")
      if "<title>Twitter / Error</title>" in json:
        raise TwitterError("Technical Error")
      raise TwitterError("json decoding")
    return data
def _CheckForTwitterError(self, data):
"""Raises a TwitterError if twitter returns an error message.
Args:
data:
A python dict created from the Twitter json response
Raises:
TwitterError wrapping the twitter error message if one exists.
"""
# Twitter errors are relatively unlikely, so it is faster
# to check first, rather than try and catch the exception
if 'error' in data:
raise TwitterError(data['error'])
if 'errors' in data:
raise TwitterError(data['errors'])
  def _FetchUrl(self,
                url,
                post_data=None,
                parameters=None,
                no_cache=None,
                use_gzip_compression=None):
    '''Fetch a URL, optionally caching for a specified time.

    Args:
      url:
        The URL to retrieve
      post_data:
        A dict of (str, unicode) key/value pairs.
        If set, POST will be used.
      parameters:
        A dict whose key/value pairs should encoded and added
        to the query string. [Optional]
      no_cache:
        If true, overrides the cache on the current request
      use_gzip_compression:
        If True, tells the server to gzip-compress the response.
        It does not apply to POST requests.
        Defaults to None, which will get the value to use from
        the instance variable self._use_gzip [Optional]

    Returns:
      A string containing the body of the response.
    '''
    # Merge instance-wide default parameters with the per-call ones;
    # per-call values win on key collision.
    extra_params = {}
    if self._default_params:
      extra_params.update(self._default_params)
    if parameters:
      extra_params.update(parameters)
    # The presence of post data is what selects POST over GET.
    if post_data:
      http_method = "POST"
    else:
      http_method = "GET"
    if self._debugHTTP:
      _debug = 1
    else:
      _debug = 0
    http_handler = self._urllib.HTTPHandler(debuglevel=_debug)
    https_handler = self._urllib.HTTPSHandler(debuglevel=_debug)
    http_proxy = os.environ.get('http_proxy')
    https_proxy = os.environ.get('https_proxy')
    # NOTE(review): proxying is only enabled when BOTH http_proxy and
    # https_proxy are set in the environment; setting just one of them
    # is silently ignored.
    if http_proxy is None or https_proxy is None :
      proxy_status = False
    else :
      proxy_status = True
    opener = self._urllib.OpenerDirector()
    opener.add_handler(http_handler)
    opener.add_handler(https_handler)
    if proxy_status is True :
      proxy_handler = self._urllib.ProxyHandler({'http':str(http_proxy),'https': str(https_proxy)})
      opener.add_handler(proxy_handler)
    if use_gzip_compression is None:
      use_gzip = self._use_gzip
    else:
      use_gzip = use_gzip_compression
    # Set up compression; gzip is only requested for GET requests.
    if use_gzip and not post_data:
      opener.addheaders.append(('Accept-Encoding', 'gzip'))
    if self._oauth_consumer is not None:
      if post_data and http_method == "POST":
        parameters = post_data.copy()
      req = oauth.Request.from_consumer_and_token(self._oauth_consumer,
                                                  token=self._oauth_token,
                                                  http_method=http_method,
                                                  http_url=url, parameters=parameters)
      req.sign_request(self._signature_method_hmac_sha1, self._oauth_consumer, self._oauth_token)
      # NOTE(review): 'headers' is computed here but never attached to the
      # request that is actually opened below.
      headers = req.to_header()
      if http_method == "POST":
        encoded_post_data = req.to_postdata()
      else:
        encoded_post_data = None
        url = req.to_url()
    else:
      url = self._BuildUrl(url, extra_params=extra_params)
      encoded_post_data = self._EncodePostData(post_data)
    # Open and return the URL immediately if we're not going to cache
    if encoded_post_data or no_cache or not self._cache or not self._cache_timeout:
      response = opener.open(url, encoded_post_data)
      url_data = self._DecompressGzippedResponse(response)
      opener.close()
    else:
      # Unique keys are a combination of the url and the oAuth Consumer Key
      if self._consumer_key:
        key = self._consumer_key + ':' + url
      else:
        key = url
      # See if it has been cached before
      last_cached = self._cache.GetCachedTime(key)
      # If the cached version is outdated then fetch another and store it
      if not last_cached or time.time() >= last_cached + self._cache_timeout:
        try:
          response = opener.open(url, encoded_post_data)
          url_data = self._DecompressGzippedResponse(response)
          self._cache.Set(key, url_data)
        except urllib2.HTTPError, e:
          # NOTE(review): the HTTP error is only printed, and url_data is
          # left unassigned, so the 'return url_data' below raises a
          # confusing NameError instead of the original HTTPError —
          # consider re-raising or falling back to the cached copy.
          print e
        opener.close()
      else:
        url_data = self._cache.Get(key)
    # Always return the latest version
    return url_data
class _FileCacheError(Exception):
'''Base exception class for FileCache related errors'''
class _FileCache(object):
  '''A simple response cache that stores each entry as a file on disk.

  Entries are named by the MD5 hash of their key and fanned out under
  DEPTH levels of single-character directories so that no one directory
  grows too large.
  '''
  # Number of single-character directory levels used to fan out entries.
  DEPTH = 3
  def __init__(self,root_directory=None):
    # Root defaults to a per-user directory under the system temp dir.
    self._InitializeRootDirectory(root_directory)
  def Get(self,key):
    '''Return the cached data for key, or None when no entry exists.'''
    path = self._GetPath(key)
    if os.path.exists(path):
      return open(path).read()
    else:
      return None
  def Set(self,key,data):
    '''Store data under key, replacing any existing entry.'''
    path = self._GetPath(key)
    directory = os.path.dirname(path)
    if not os.path.exists(directory):
      os.makedirs(directory)
    if not os.path.isdir(directory):
      raise _FileCacheError('%s exists but is not a directory' % directory)
    # Write to a temp file first, then rename over the final path, so a
    # concurrent reader never observes a half-written entry.
    temp_fd, temp_path = tempfile.mkstemp()
    temp_fp = os.fdopen(temp_fd, 'w')
    temp_fp.write(data)
    temp_fp.close()
    # Refuse to write outside of our root (guards against odd keys).
    if not path.startswith(self._root_directory):
      raise _FileCacheError('%s does not appear to live under %s' %
                            (path, self._root_directory))
    if os.path.exists(path):
      os.remove(path)
    # NOTE(review): mkstemp uses the default temp dir, so this rename can
    # fail (EXDEV) if the cache root lives on a different filesystem.
    os.rename(temp_path, path)
  def Remove(self,key):
    '''Delete the entry for key, if present.'''
    path = self._GetPath(key)
    if not path.startswith(self._root_directory):
      raise _FileCacheError('%s does not appear to live under %s' %
                            (path, self._root_directory ))
    if os.path.exists(path):
      os.remove(path)
  def GetCachedTime(self,key):
    '''Return the mtime of the entry for key, or None when absent.'''
    path = self._GetPath(key)
    if os.path.exists(path):
      return os.path.getmtime(path)
    else:
      return None
  def _GetUsername(self):
    '''Attempt to find the username in a cross-platform fashion.'''
    try:
      return os.getenv('USER') or \
             os.getenv('LOGNAME') or \
             os.getenv('USERNAME') or \
             os.getlogin() or \
             'nobody'
    except (AttributeError, IOError, OSError), e:
      # os.getlogin() can raise when there is no controlling terminal.
      return 'nobody'
  def _GetTmpCachePath(self):
    '''Return the default per-user cache directory path.'''
    username = self._GetUsername()
    cache_directory = 'python.cache_' + username
    return os.path.join(tempfile.gettempdir(), cache_directory)
  def _InitializeRootDirectory(self, root_directory):
    # Normalize and create the cache root, validating it is a directory.
    if not root_directory:
      root_directory = self._GetTmpCachePath()
    root_directory = os.path.abspath(root_directory)
    if not os.path.exists(root_directory):
      os.mkdir(root_directory)
    if not os.path.isdir(root_directory):
      raise _FileCacheError('%s exists but is not a directory' %
                            root_directory)
    self._root_directory = root_directory
  def _GetPath(self,key):
    '''Map key to its absolute on-disk path via the MD5 of the key.'''
    try:
      hashed_key = md5(key).hexdigest()
    except TypeError:
      # Fall back to the legacy pre-hashlib 'md5' module interface.
      hashed_key = md5.new(key).hexdigest()
    return os.path.join(self._root_directory,
                        self._GetPrefix(hashed_key),
                        hashed_key)
  def _GetPrefix(self,hashed_key):
    # The first DEPTH characters of the hash become nested directories.
    return os.path.sep.join(hashed_key[0:_FileCache.DEPTH])
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-#
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit tests for the twitter.py library'''
__author__ = 'python-twitter@googlegroups.com'
import os
import simplejson
import time
import calendar
import unittest
import urllib
import twitter
class StatusTest(unittest.TestCase):
  '''Unit tests for the twitter.Status class.'''
  # Canonical JSON fixture.  testAsJsonString compares against this exact
  # string, so key order and spacing matter.
  SAMPLE_JSON = '''{"created_at": "Fri Jan 26 23:17:14 +0000 2007", "id": 4391023, "text": "A l\u00e9gp\u00e1rn\u00e1s haj\u00f3m tele van angoln\u00e1kkal.", "user": {"description": "Canvas. JC Penny. Three ninety-eight.", "id": 718443, "location": "Okinawa, Japan", "name": "Kesuke Miyagi", "profile_image_url": "https://twitter.com/system/user/profile_image/718443/normal/kesuke.png", "screen_name": "kesuke", "url": "https://twitter.com/kesuke"}}'''
  def _GetSampleUser(self):
    '''Build the twitter.User embedded in SAMPLE_JSON.'''
    return twitter.User(id=718443,
                        name='Kesuke Miyagi',
                        screen_name='kesuke',
                        description=u'Canvas. JC Penny. Three ninety-eight.',
                        location='Okinawa, Japan',
                        url='https://twitter.com/kesuke',
                        profile_image_url='https://twitter.com/system/user/pro'
                                          'file_image/718443/normal/kesuke.pn'
                                          'g')
  def _GetSampleStatus(self):
    '''Build the twitter.Status matching SAMPLE_JSON.'''
    return twitter.Status(created_at='Fri Jan 26 23:17:14 +0000 2007',
                          id=4391023,
                          text=u'A légpárnás hajóm tele van angolnákkal.',
                          user=self._GetSampleUser())
  def testInit(self):
    '''Test the twitter.Status constructor'''
    # Smoke test: constructing with the full set of kwargs must not raise.
    status = twitter.Status(created_at='Fri Jan 26 23:17:14 +0000 2007',
                            id=4391023,
                            text=u'A légpárnás hajóm tele van angolnákkal.',
                            user=self._GetSampleUser())
  def testGettersAndSetters(self):
    '''Test all of the twitter.Status getters and setters'''
    status = twitter.Status()
    status.SetId(4391023)
    self.assertEqual(4391023, status.GetId())
    created_at = calendar.timegm((2007, 1, 26, 23, 17, 14, -1, -1, -1))
    status.SetCreatedAt('Fri Jan 26 23:17:14 +0000 2007')
    self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', status.GetCreatedAt())
    self.assertEqual(created_at, status.GetCreatedAtInSeconds())
    # Pin "now" so the relative timestamp is deterministic.
    status.SetNow(created_at + 10)
    self.assertEqual("about 10 seconds ago", status.GetRelativeCreatedAt())
    status.SetText(u'A légpárnás hajóm tele van angolnákkal.')
    self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.',
                     status.GetText())
    status.SetUser(self._GetSampleUser())
    self.assertEqual(718443, status.GetUser().id)
  def testProperties(self):
    '''Test all of the twitter.Status properties'''
    status = twitter.Status()
    status.id = 1
    self.assertEqual(1, status.id)
    created_at = calendar.timegm((2007, 1, 26, 23, 17, 14, -1, -1, -1))
    status.created_at = 'Fri Jan 26 23:17:14 +0000 2007'
    self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', status.created_at)
    self.assertEqual(created_at, status.created_at_in_seconds)
    status.now = created_at + 10
    self.assertEqual('about 10 seconds ago', status.relative_created_at)
    status.user = self._GetSampleUser()
    self.assertEqual(718443, status.user.id)
  def _ParseDate(self, string):
    '''Convert a 'Mon DD HH:MM:SS YYYY' string to seconds since the Epoch.'''
    return calendar.timegm(time.strptime(string, '%b %d %H:%M:%S %Y'))
  def testRelativeCreatedAt(self):
    '''Test various permutations of Status relative_created_at'''
    # Fix created_at and sweep 'now' across second/minute/hour/day bands.
    status = twitter.Status(created_at='Fri Jan 01 12:00:00 +0000 2007')
    status.now = self._ParseDate('Jan 01 12:00:00 2007')
    self.assertEqual('about a second ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:00:01 2007')
    self.assertEqual('about a second ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:00:02 2007')
    self.assertEqual('about 2 seconds ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:00:05 2007')
    self.assertEqual('about 5 seconds ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:00:50 2007')
    self.assertEqual('about a minute ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:01:00 2007')
    self.assertEqual('about a minute ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:01:10 2007')
    self.assertEqual('about a minute ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:02:00 2007')
    self.assertEqual('about 2 minutes ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:31:50 2007')
    self.assertEqual('about 31 minutes ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:50:00 2007')
    self.assertEqual('about an hour ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 13:00:00 2007')
    self.assertEqual('about an hour ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 13:10:00 2007')
    self.assertEqual('about an hour ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 14:00:00 2007')
    self.assertEqual('about 2 hours ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 19:00:00 2007')
    self.assertEqual('about 7 hours ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 02 11:30:00 2007')
    self.assertEqual('about a day ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 04 12:00:00 2007')
    self.assertEqual('about 3 days ago', status.relative_created_at)
    status.now = self._ParseDate('Feb 04 12:00:00 2007')
    self.assertEqual('about 34 days ago', status.relative_created_at)
  def testAsJsonString(self):
    '''Test the twitter.Status AsJsonString method'''
    self.assertEqual(StatusTest.SAMPLE_JSON,
                     self._GetSampleStatus().AsJsonString())
  def testAsDict(self):
    '''Test the twitter.Status AsDict method'''
    status = self._GetSampleStatus()
    data = status.AsDict()
    self.assertEqual(4391023, data['id'])
    self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', data['created_at'])
    self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.', data['text'])
    self.assertEqual(718443, data['user']['id'])
  def testEq(self):
    '''Test the twitter.Status __eq__ method'''
    status = twitter.Status()
    status.created_at = 'Fri Jan 26 23:17:14 +0000 2007'
    status.id = 4391023
    status.text = u'A légpárnás hajóm tele van angolnákkal.'
    status.user = self._GetSampleUser()
    self.assertEqual(status, self._GetSampleStatus())
  def testNewFromJsonDict(self):
    '''Test the twitter.Status NewFromJsonDict method'''
    data = simplejson.loads(StatusTest.SAMPLE_JSON)
    status = twitter.Status.NewFromJsonDict(data)
    self.assertEqual(self._GetSampleStatus(), status)
class UserTest(unittest.TestCase):
  '''Unit tests for the twitter.User class.'''
  # Canonical JSON fixture; testAsJsonString compares against this exact
  # string, so key order and spacing matter.
  SAMPLE_JSON = '''{"description": "Indeterminate things", "id": 673483, "location": "San Francisco, CA", "name": "DeWitt", "profile_image_url": "https://twitter.com/system/user/profile_image/673483/normal/me.jpg", "screen_name": "dewitt", "status": {"created_at": "Fri Jan 26 17:28:19 +0000 2007", "id": 4212713, "text": "\\"Select all\\" and archive your Gmail inbox. The page loads so much faster!"}, "url": "http://unto.net/"}'''
  def _GetSampleStatus(self):
    '''Build the twitter.Status embedded in SAMPLE_JSON.'''
    return twitter.Status(created_at='Fri Jan 26 17:28:19 +0000 2007',
                          id=4212713,
                          text='"Select all" and archive your Gmail inbox. '
                               ' The page loads so much faster!')
  def _GetSampleUser(self):
    '''Build the twitter.User matching SAMPLE_JSON.'''
    return twitter.User(id=673483,
                        name='DeWitt',
                        screen_name='dewitt',
                        description=u'Indeterminate things',
                        location='San Francisco, CA',
                        url='http://unto.net/',
                        profile_image_url='https://twitter.com/system/user/prof'
                                          'ile_image/673483/normal/me.jpg',
                        status=self._GetSampleStatus())
  def testInit(self):
    '''Test the twitter.User constructor'''
    # Smoke test: constructing with a full set of kwargs must not raise.
    user = twitter.User(id=673483,
                        name='DeWitt',
                        screen_name='dewitt',
                        description=u'Indeterminate things',
                        url='https://twitter.com/dewitt',
                        profile_image_url='https://twitter.com/system/user/prof'
                                          'ile_image/673483/normal/me.jpg',
                        status=self._GetSampleStatus())
  def testGettersAndSetters(self):
    '''Test all of the twitter.User getters and setters'''
    user = twitter.User()
    user.SetId(673483)
    self.assertEqual(673483, user.GetId())
    user.SetName('DeWitt')
    self.assertEqual('DeWitt', user.GetName())
    user.SetScreenName('dewitt')
    self.assertEqual('dewitt', user.GetScreenName())
    user.SetDescription('Indeterminate things')
    self.assertEqual('Indeterminate things', user.GetDescription())
    user.SetLocation('San Francisco, CA')
    self.assertEqual('San Francisco, CA', user.GetLocation())
    user.SetProfileImageUrl('https://twitter.com/system/user/profile_im'
                            'age/673483/normal/me.jpg')
    self.assertEqual('https://twitter.com/system/user/profile_image/673'
                     '483/normal/me.jpg', user.GetProfileImageUrl())
    user.SetStatus(self._GetSampleStatus())
    self.assertEqual(4212713, user.GetStatus().id)
  def testProperties(self):
    '''Test all of the twitter.User properties'''
    user = twitter.User()
    user.id = 673483
    self.assertEqual(673483, user.id)
    user.name = 'DeWitt'
    self.assertEqual('DeWitt', user.name)
    user.screen_name = 'dewitt'
    self.assertEqual('dewitt', user.screen_name)
    user.description = 'Indeterminate things'
    self.assertEqual('Indeterminate things', user.description)
    user.location = 'San Francisco, CA'
    self.assertEqual('San Francisco, CA', user.location)
    user.profile_image_url = 'https://twitter.com/system/user/profile_i' \
                             'mage/673483/normal/me.jpg'
    self.assertEqual('https://twitter.com/system/user/profile_image/6734'
                     '83/normal/me.jpg', user.profile_image_url)
    # NOTE(review): the two lines below set/check an attribute on the test
    # case itself, not on 'user' — this was presumably meant to exercise
    # the user.status property; as written it only re-checks the fixture.
    self.status = self._GetSampleStatus()
    self.assertEqual(4212713, self.status.id)
  def testAsJsonString(self):
    '''Test the twitter.User AsJsonString method'''
    self.assertEqual(UserTest.SAMPLE_JSON,
                     self._GetSampleUser().AsJsonString())
  def testAsDict(self):
    '''Test the twitter.User AsDict method'''
    user = self._GetSampleUser()
    data = user.AsDict()
    self.assertEqual(673483, data['id'])
    self.assertEqual('DeWitt', data['name'])
    self.assertEqual('dewitt', data['screen_name'])
    self.assertEqual('Indeterminate things', data['description'])
    self.assertEqual('San Francisco, CA', data['location'])
    self.assertEqual('https://twitter.com/system/user/profile_image/6734'
                     '83/normal/me.jpg', data['profile_image_url'])
    self.assertEqual('http://unto.net/', data['url'])
    self.assertEqual(4212713, data['status']['id'])
  def testEq(self):
    '''Test the twitter.User __eq__ method'''
    user = twitter.User()
    user.id = 673483
    user.name = 'DeWitt'
    user.screen_name = 'dewitt'
    user.description = 'Indeterminate things'
    user.location = 'San Francisco, CA'
    user.profile_image_url = 'https://twitter.com/system/user/profile_image/67' \
                             '3483/normal/me.jpg'
    user.url = 'http://unto.net/'
    user.status = self._GetSampleStatus()
    self.assertEqual(user, self._GetSampleUser())
  def testNewFromJsonDict(self):
    '''Test the twitter.User NewFromJsonDict method'''
    data = simplejson.loads(UserTest.SAMPLE_JSON)
    user = twitter.User.NewFromJsonDict(data)
    self.assertEqual(self._GetSampleUser(), user)
class TrendTest(unittest.TestCase):
  '''Unit tests for the twitter.Trend class.'''
  # Note: the timestamp is not part of the JSON fixture; it is supplied
  # separately to NewFromJsonDict in testNewFromJsonDict.
  SAMPLE_JSON = '''{"name": "Kesuke Miyagi", "query": "Kesuke Miyagi"}'''
  def _GetSampleTrend(self):
    '''Build the twitter.Trend used as the expected value in these tests.'''
    return twitter.Trend(name='Kesuke Miyagi',
                         query='Kesuke Miyagi',
                         timestamp='Fri Jan 26 23:17:14 +0000 2007')
  def testInit(self):
    '''Test the twitter.Trend constructor'''
    # Smoke test: constructing with all kwargs must not raise.
    trend = twitter.Trend(name='Kesuke Miyagi',
                          query='Kesuke Miyagi',
                          timestamp='Fri Jan 26 23:17:14 +0000 2007')
  def testProperties(self):
    '''Test all of the twitter.Trend properties'''
    trend = twitter.Trend()
    trend.name = 'Kesuke Miyagi'
    self.assertEqual('Kesuke Miyagi', trend.name)
    trend.query = 'Kesuke Miyagi'
    self.assertEqual('Kesuke Miyagi', trend.query)
    trend.timestamp = 'Fri Jan 26 23:17:14 +0000 2007'
    self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', trend.timestamp)
  def testNewFromJsonDict(self):
    '''Test the twitter.Trend NewFromJsonDict method'''
    data = simplejson.loads(TrendTest.SAMPLE_JSON)
    trend = twitter.Trend.NewFromJsonDict(data, timestamp='Fri Jan 26 23:17:14 +0000 2007')
    self.assertEqual(self._GetSampleTrend(), trend)
  def testEq(self):
    '''Test the twitter.Trend __eq__ method'''
    trend = twitter.Trend()
    trend.name = 'Kesuke Miyagi'
    trend.query = 'Kesuke Miyagi'
    trend.timestamp = 'Fri Jan 26 23:17:14 +0000 2007'
    self.assertEqual(trend, self._GetSampleTrend())
class FileCacheTest(unittest.TestCase):
  '''Unit tests for twitter._FileCache.

  These tests touch the real filesystem (the default per-user temp cache
  directory), so each test removes the entries it creates.
  '''
  def testInit(self):
    """Test the twitter._FileCache constructor"""
    cache = twitter._FileCache()
    self.assert_(cache is not None, 'cache is None')
  def testSet(self):
    """Test the twitter._FileCache.Set method"""
    cache = twitter._FileCache()
    cache.Set("foo",'Hello World!')
    cache.Remove("foo")
  def testRemove(self):
    """Test the twitter._FileCache.Remove method"""
    cache = twitter._FileCache()
    cache.Set("foo",'Hello World!')
    cache.Remove("foo")
    data = cache.Get("foo")
    self.assertEqual(data, None, 'data is not None')
  def testGet(self):
    """Test the twitter._FileCache.Get method"""
    cache = twitter._FileCache()
    cache.Set("foo",'Hello World!')
    data = cache.Get("foo")
    self.assertEqual('Hello World!', data)
    cache.Remove("foo")
  def testGetCachedTime(self):
    """Test the twitter._FileCache.GetCachedTime method"""
    now = time.time()
    cache = twitter._FileCache()
    cache.Set("foo",'Hello World!')
    cached_time = cache.GetCachedTime("foo")
    # The entry was written just now, so its mtime must be ~current.
    delta = cached_time - now
    self.assert_(delta <= 1,
                 'Cached time differs from clock time by more than 1 second.')
    cache.Remove("foo")
class ApiTest(unittest.TestCase):
def setUp(self):
self._urllib = MockUrllib()
api = twitter.Api(consumer_key='CONSUMER_KEY',
consumer_secret='CONSUMER_SECRET',
access_token_key='OAUTH_TOKEN',
access_token_secret='OAUTH_SECRET',
cache=None)
api.SetUrllib(self._urllib)
self._api = api
def testTwitterError(self):
'''Test that twitter responses containing an error message are wrapped.'''
self._AddHandler('https://api.twitter.com/1/statuses/user_timeline.json',
curry(self._OpenTestData, 'public_timeline_error.json'))
# Manually try/catch so we can check the exception's value
try:
statuses = self._api.GetUserTimeline()
except twitter.TwitterError, error:
# If the error message matches, the test passes
self.assertEqual('test error', error.message)
else:
self.fail('TwitterError expected')
def testGetUserTimeline(self):
'''Test the twitter.Api GetUserTimeline method'''
self._AddHandler('https://api.twitter.com/1/statuses/user_timeline/kesuke.json?count=1',
curry(self._OpenTestData, 'user_timeline-kesuke.json'))
statuses = self._api.GetUserTimeline('kesuke', count=1)
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(89512102, statuses[0].id)
self.assertEqual(718443, statuses[0].user.id)
def testGetFriendsTimeline(self):
'''Test the twitter.Api GetFriendsTimeline method'''
self._AddHandler('https://api.twitter.com/1/statuses/friends_timeline/kesuke.json',
curry(self._OpenTestData, 'friends_timeline-kesuke.json'))
statuses = self._api.GetFriendsTimeline('kesuke')
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(20, len(statuses))
self.assertEqual(718443, statuses[0].user.id)
def testGetStatus(self):
'''Test the twitter.Api GetStatus method'''
self._AddHandler('https://api.twitter.com/1/statuses/show/89512102.json',
curry(self._OpenTestData, 'show-89512102.json'))
status = self._api.GetStatus(89512102)
self.assertEqual(89512102, status.id)
self.assertEqual(718443, status.user.id)
def testDestroyStatus(self):
'''Test the twitter.Api DestroyStatus method'''
self._AddHandler('https://api.twitter.com/1/statuses/destroy/103208352.json',
curry(self._OpenTestData, 'status-destroy.json'))
status = self._api.DestroyStatus(103208352)
self.assertEqual(103208352, status.id)
def testPostUpdate(self):
'''Test the twitter.Api PostUpdate method'''
self._AddHandler('https://api.twitter.com/1/statuses/update.json',
curry(self._OpenTestData, 'update.json'))
status = self._api.PostUpdate(u'Моё судно на воздушной подушке полно угрей'.encode('utf8'))
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(u'Моё судно на воздушной подушке полно угрей', status.text)
def testPostUpdateLatLon(self):
'''Test the twitter.Api PostUpdate method, when used in conjunction with latitude and longitude'''
self._AddHandler('https://api.twitter.com/1/statuses/update.json',
curry(self._OpenTestData, 'update_latlong.json'))
#test another update with geo parameters, again test somewhat arbitrary
status = self._api.PostUpdate(u'Моё судно на воздушной подушке полно угрей'.encode('utf8'), latitude=54.2, longitude=-2)
self.assertEqual(u'Моё судно на воздушной подушке полно угрей', status.text)
self.assertEqual(u'Point',status.GetGeo()['type'])
self.assertEqual(26.2,status.GetGeo()['coordinates'][0])
self.assertEqual(127.5,status.GetGeo()['coordinates'][1])
def testGetReplies(self):
'''Test the twitter.Api GetReplies method'''
self._AddHandler('https://api.twitter.com/1/statuses/replies.json?page=1',
curry(self._OpenTestData, 'replies.json'))
statuses = self._api.GetReplies(page=1)
self.assertEqual(36657062, statuses[0].id)
def testGetRetweetsOfMe(self):
'''Test the twitter.API GetRetweetsOfMe method'''
self._AddHandler('https://api.twitter.com/1/statuses/retweets_of_me.json',
curry(self._OpenTestData, 'retweets_of_me.json'))
retweets = self._api.GetRetweetsOfMe()
self.assertEqual(253650670274637824, retweets[0].id)
def testGetFriends(self):
'''Test the twitter.Api GetFriends method'''
self._AddHandler('https://api.twitter.com/1/statuses/friends.json?cursor=123',
curry(self._OpenTestData, 'friends.json'))
users = self._api.GetFriends(cursor=123)
buzz = [u.status for u in users if u.screen_name == 'buzz']
self.assertEqual(89543882, buzz[0].id)
def testGetFollowers(self):
'''Test the twitter.Api GetFollowers method'''
self._AddHandler('https://api.twitter.com/1/statuses/followers.json?cursor=-1',
curry(self._OpenTestData, 'followers.json'))
users = self._api.GetFollowers()
# This is rather arbitrary, but spot checking is better than nothing
alexkingorg = [u.status for u in users if u.screen_name == 'alexkingorg']
self.assertEqual(89554432, alexkingorg[0].id)
def testGetFeatured(self):
'''Test the twitter.Api GetFeatured method'''
self._AddHandler('https://api.twitter.com/1/statuses/featured.json',
curry(self._OpenTestData, 'featured.json'))
users = self._api.GetFeatured()
# This is rather arbitrary, but spot checking is better than nothing
stevenwright = [u.status for u in users if u.screen_name == 'stevenwright']
self.assertEqual(86991742, stevenwright[0].id)
def testGetDirectMessages(self):
'''Test the twitter.Api GetDirectMessages method'''
self._AddHandler('https://api.twitter.com/1/direct_messages.json?page=1',
curry(self._OpenTestData, 'direct_messages.json'))
statuses = self._api.GetDirectMessages(page=1)
self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.', statuses[0].text)
def testPostDirectMessage(self):
'''Test the twitter.Api PostDirectMessage method'''
self._AddHandler('https://api.twitter.com/1/direct_messages/new.json',
curry(self._OpenTestData, 'direct_messages-new.json'))
status = self._api.PostDirectMessage('test', u'Моё судно на воздушной подушке полно угрей'.encode('utf8'))
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(u'Моё судно на воздушной подушке полно угрей', status.text)
def testDestroyDirectMessage(self):
'''Test the twitter.Api DestroyDirectMessage method'''
self._AddHandler('https://api.twitter.com/1/direct_messages/destroy/3496342.json',
curry(self._OpenTestData, 'direct_message-destroy.json'))
status = self._api.DestroyDirectMessage(3496342)
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(673483, status.sender_id)
def testCreateFriendship(self):
'''Test the twitter.Api CreateFriendship method'''
self._AddHandler('https://api.twitter.com/1/friendships/create/dewitt.json',
curry(self._OpenTestData, 'friendship-create.json'))
user = self._api.CreateFriendship('dewitt')
# This is rather arbitrary, but spot checking is better than nothing
self.assertEqual(673483, user.id)
def testDestroyFriendship(self):
    '''Test the twitter.Api DestroyFriendship method'''
    # Serve canned JSON from testdata/friendship-destroy.json for this endpoint.
    self._AddHandler('https://api.twitter.com/1/friendships/destroy/dewitt.json',
                     curry(self._OpenTestData, 'friendship-destroy.json'))
    user = self._api.DestroyFriendship('dewitt')
    # This is rather arbitrary, but spot checking is better than nothing
    self.assertEqual(673483, user.id)
def testGetUser(self):
    '''Test the twitter.Api GetUser method'''
    # Serve canned JSON from testdata/show-dewitt.json for this endpoint.
    self._AddHandler('https://api.twitter.com/1/users/show/dewitt.json',
                     curry(self._OpenTestData, 'show-dewitt.json'))
    user = self._api.GetUser('dewitt')
    self.assertEqual('dewitt', user.screen_name)
    self.assertEqual(89586072, user.status.id)
def _AddHandler(self, url, callback):
    # Register a zero-argument callable with the mock urllib to answer
    # requests for *url*.
    self._urllib.AddHandler(url, callback)
def _GetTestDataPath(self, filename):
directory = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(directory, 'testdata')
return os.path.join(test_data_dir, filename)
def _OpenTestData(self, filename):
    # Open a canned response file from the testdata directory.
    f = open(self._GetTestDataPath(filename))
    # make sure that the returned object contains an .info() method:
    # headers are set to {}
    # NOTE(review): urllib.addinfo is the Python 2 urllib wrapper class that
    # adds an info() method to a file-like object.
    return urllib.addinfo(f, {})
class MockUrllib(object):
    '''Drop-in replacement for urllib that serves hard-coded responses.'''

    def __init__(self):
        # url -> zero-argument callable producing the canned response.
        self._handlers = {}
        self.HTTPBasicAuthHandler = MockHTTPBasicAuthHandler

    def AddHandler(self, url, callback):
        self._handlers[url] = callback

    def build_opener(self, *handlers):
        # Any handler arguments are ignored; the canned table wins.
        return MockOpener(self._handlers)

    def OpenerDirector(self):
        return self.build_opener()

    def HTTPHandler(self, *args, **kwargs):
        return None

    def HTTPSHandler(self, *args, **kwargs):
        return None

    def ProxyHandler(self, *args, **kwargs):
        return None
class MockOpener(object):
    '''A mock opener for urllib: serves canned responses for known URLs.'''

    def __init__(self, handlers):
        # Map of url -> zero-argument callable producing the response.
        self._handlers = handlers
        self._opened = False

    def open(self, url, data=None):
        if self._opened:
            raise Exception('MockOpener already opened.')
        # Strip oauth-prefixed query parameters: they are added by the oauth
        # layer and are not what these tests verify.  The ordering of the
        # remaining parameters is preserved.
        if '?' in url:
            (base, query) = url.split('?')
            kept = [part for part in query.split('&')
                    if not part.startswith('oauth')]
            url = base if not kept else "%s?%s" % (base, '&'.join(kept))
        if url not in self._handlers:
            raise Exception('Unexpected URL %s (Checked: %s)' % (url, self._handlers))
        self._opened = True
        return self._handlers[url]()

    def add_handler(self, *args, **kwargs):
        # Accepted for interface compatibility; handlers are fixed up front.
        pass

    def close(self):
        if not self._opened:
            raise Exception('MockOpener closed before it was opened.')
        self._opened = False
class MockHTTPBasicAuthHandler(object):
    '''A mock replacement for HTTPBasicAuthHandler'''

    def add_password(self, realm, uri, user, passwd):
        # Credentials are accepted and discarded; authentication is never
        # actually performed against the mock opener.
        # TODO(dewitt): Add verification that the proper args are passed
        pass
class curry:
    '''Partial function application, after
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549'''

    def __init__(self, fun, *args, **kwargs):
        self.fun = fun
        self.pending = args[:]
        self.kwargs = kwargs.copy()

    def __call__(self, *args, **kwargs):
        # Merge call-time keyword args over the stored ones; when either
        # side is empty, just use whichever is non-empty.
        if kwargs and self.kwargs:
            merged = dict(self.kwargs)
            merged.update(kwargs)
        else:
            merged = kwargs or self.kwargs
        return self.fun(*(self.pending + args), **merged)
def suite():
    '''Aggregate every test case class in this module into one TestSuite.'''
    suite = unittest.TestSuite()
    suite.addTests(unittest.makeSuite(FileCacheTest))
    suite.addTests(unittest.makeSuite(StatusTest))
    suite.addTests(unittest.makeSuite(UserTest))
    suite.addTests(unittest.makeSuite(ApiTest))
    return suite
# Run the whole test module when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
# File: cdf.py
# """ Start debugging
import sys
import os.path
# NOTE(review): prepends this file's directory to sys.path so the sibling
# cdflib extension module is found before any installed copy; the
# Start/End markers suggest this is a temporary debugging aid.
this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, this_dir)
# """ End debugging
import cdflib as clib
import os.path
class CDF(object):
    """
    This class defines a CDF object.

    A CDF object is bound to at most one CDF data file. It supports the
    following operations:
    1. Open a CDF file.
    2. Close a CDF file.
    3. Get a list of all global attributes.
    4. Retrieve a single global attribute.
    5. Retrieve all global attributes.
    6. Get a list of all variables.
    7. Get a list of variable attributes given one variable.
    8. Retrieve a single variable attribute given the attribute name and the
       variable.
    9. Retrieve all variable attributes given one variable.
    10. Retrieve data given one variable.
    11. Retrieve the entire data of the CDF file.
    """

    def __init__(self):
        """
        Initialize an unbound CDF object.

        No file is opened here; call open() to bind this object to a CDF
        file.  (The original docstring described open()'s behavior.)
        """
        # Initialize attributes.
        self._opened = False    # True while a CDF file is open via clib
        self._filepath = None   # bound file path, always WITH '.cdf' extension

    def __del__(self):
        # Release the underlying C-library handle when garbage collected.
        if self._opened:
            self.close()

    #++++++++++++++++++++++++++++++
    def open(self, filepath_in):
        """
        Open the CDF file at *filepath_in* and bind it to this object.

        *filepath_in* may be given with or without the '.cdf' extension;
        self._filepath always stores the path WITH the extension, while
        the name handed to clib.open() is always WITHOUT it.
        Raises IOError if a file is already open on this object.
        """
        # Remove any file extension.
        if self._opened is True:
            raise IOError, 'There is an opened CDF file linked to ' + \
                'this CDF object. Close it before opening another one.'
        else:
            filepath, ext = os.path.splitext(filepath_in)
            if ext == '.cdf':
                self._filepath = filepath_in
                cdfname = filepath
            else:
                self._filepath = filepath_in + '.cdf'
                cdfname = filepath_in
            clib.open(cdfname)
            self._opened = True

    #++++++++++++++++++++++++++++++
    def close(self):
        """Close the bound CDF file, if any; harmless when nothing is open."""
        if self._opened:
            # clib expects the name without the '.cdf' extension.
            cdfname = os.path.splitext(self._filepath)[0]
            clib.close(cdfname)
            self._opened = False

    #++++++++++++++++++++++++++++++
    # NOTE(review): all accessors below return None when no file is open.
    def fileinfo(self):
        """Retrieve file information.
        """
        if self._opened: return clib.fileinfo()

    def filepath(self):
        """Retrieve file path of the current CDF file.
        """
        return self._filepath

    def datastats(self):
        """Retrieve data information.
        """
        if self._opened: return clib.datastats()

    def varlist(self):
        """Retrieve a list of all variables.
        """
        if self._opened: return clib.varlist()

    def data(self, varName):
        """Retrieve data of the given variable name.
        """
        if self._opened: return clib.data(varName)

    def gAttrList(self):
        """Retrieve the full list of global attributes.
        """
        if self._opened: return clib.gAttrList()

    def vAttrList(self):
        """Retrieve the full list of variable attributes.
        """
        if self._opened: return clib.vAttrList()

    # #++++++++++++++++++++++++++++++
    # def globalMetadata(self):
    #     if self.globalMetadata is None and self._opened:
    #         self.globalMetadata = clib.globalMetadata()
| Python |
# File: cdflib_dict.py
"""
This file defines lookup dictionaries for the cdflib module.
"""
#-------------------- Copy of cdf.h -------------------------------------------
CDF_MIN_DIMS = 0 # Min number of dimensions a CDF variable may have
CDF_MAX_DIMS = 10 # Max number of dimensions a CDF variable may have
# * Lengths
CDF_VAR_NAME_LEN = 64
CDF_ATTR_NAME_LEN = 64
CDF_VAR_NAME_LEN256 = 256
CDF_ATTR_NAME_LEN256 = 256
CDF_COPYRIGHT_LEN = 256
CDF_STATUSTEXT_LEN = 120
CDF_PATHNAME_LEN = 512
EPOCH_STRING_LEN = 24
EPOCH1_STRING_LEN = 16
EPOCH2_STRING_LEN = 14
EPOCH3_STRING_LEN = 24
EPOCH4_STRING_LEN = 23
EPOCH16_STRING_LEN = 36
EPOCH16_1_STRING_LEN = 24
EPOCH16_2_STRING_LEN = 14
EPOCH16_3_STRING_LEN = 36
EPOCH16_4_STRING_LEN = 32
TT2000_0_STRING_LEN = 30
TT2000_1_STRING_LEN = 19
TT2000_2_STRING_LEN = 14
TT2000_3_STRING_LEN = 29
EPOCHx_STRING_MAX = 50
EPOCHx_FORMAT_MAX = 68
# * Data types.
CDF_INT1 = 1
CDF_INT2 = 2
CDF_INT4 = 4
CDF_INT8 = 8
CDF_UINT1 = 11
CDF_UINT2 = 12
CDF_UINT4 = 14
CDF_REAL4 = 21
CDF_REAL8 = 22
CDF_EPOCH = 31 # Standard style.
CDF_EPOCH16 = 32 # Extended style.
CDF_TIME_TT2000 = 33 # One more style with leap seconds and
# J2000 base time.
CDF_BYTE = 41 # same as CDF_INT1 (signed)
CDF_FLOAT = 44 # same as CDF_REAL4
CDF_DOUBLE = 45 # same as CDF_REAL8
CDF_CHAR = 51 # a "string" data type
CDF_UCHAR = 52 # a "string" data type
# * Encoding (for data only, everything else is network encoding).
NETWORK_ENCODING = 1
SUN_ENCODING = 2
VAX_ENCODING = 3
DECSTATION_ENCODING = 4
SGi_ENCODING = 5
IBMPC_ENCODING = 6
IBMRS_ENCODING = 7
HOST_ENCODING = 8
PPC_ENCODING = 9
HP_ENCODING = 11
NeXT_ENCODING = 12
ALPHAOSF1_ENCODING = 13
ALPHAVMSd_ENCODING = 14
ALPHAVMSg_ENCODING = 15
ALPHAVMSi_ENCODING = 16
# * Decodings.
NETWORK_DECODING = NETWORK_ENCODING
SUN_DECODING = SUN_ENCODING
VAX_DECODING = VAX_ENCODING
DECSTATION_DECODING = DECSTATION_ENCODING
SGi_DECODING = SGi_ENCODING
IBMPC_DECODING = IBMPC_ENCODING
IBMRS_DECODING = IBMRS_ENCODING
HOST_DECODING = HOST_ENCODING
PPC_DECODING = PPC_ENCODING
MAC_ENCODING = PPC_ENCODING
MAC_DECODING = PPC_ENCODING
HP_DECODING = HP_ENCODING
NeXT_DECODING = NeXT_ENCODING
ALPHAOSF1_DECODING = ALPHAOSF1_ENCODING
ALPHAVMSd_DECODING = ALPHAVMSd_ENCODING
ALPHAVMSg_DECODING = ALPHAVMSg_ENCODING
ALPHAVMSi_DECODING = ALPHAVMSi_ENCODING
# * Variance flags
VARY = -1 # TRUE record or dimension variance flag
NOVARY = 0 # FALSE record or dimension variance flag
# * Majorities
ROW_MAJOR = 1
COLUMN_MAJOR = 2
# * Formats.
SINGLE_FILE = 1
MULTI_FILE = 2
# * Checksum
NO_CHECKSUM = 0
MD5_CHECKSUM = 1
OTHER_CHECKSUM = 2
# * Attribute scopes
GLOBAL_SCOPE = 1
VARIABLE_SCOPE = 2
# * Readonly modes.
READONLYon = -1
READONLYoff = 0
# * Validate data modes.
VALIDATEFILEon = -1
VALIDATEFILEoff = 0
# * zModes.
zMODEoff = 0
zMODEon1 = 1
zMODEon2 = 2
# * Negative to positive floating point zero modes.
NEGtoPOSfp0on = -1
NEGtoPOSfp0off = 0
# * Backward file mode.
BACKWARDFILEon = 1
BACKWARDFILEoff = 0
# * Compression/sparseness constants.
CDF_MAX_PARMS = 5
NO_COMPRESSION = 0
RLE_COMPRESSION = 1
HUFF_COMPRESSION = 2
AHUFF_COMPRESSION = 3
GZIP_COMPRESSION = 5
RLE_OF_ZEROs = 0
OPTIMAL_ENCODING_TREES = 0
NO_SPARSEARRAYS = 0
NO_SPARSERECORDS = 0
PAD_SPARSERECORDS = 1
PREV_SPARSERECORDS = 2
# * Invalid/reserved constants.
# #define RESERVED_CDFID ((CDFid) NULL) /* Indicates that a CDF hasn't
# been selected yet. */
# #define RESERVED_CDFSTATUS ((CDFstatus) (-1)) /* Indicates that a CDFstatus
# hasn't been selected yet. */
#define ILLEGAL_EPOCH_VALUE (-1.0)
#define ILLEGAL_TT2000_VALUE (-9223372036854775805LL)
#define FILLED_TT2000_VALUE (-9223372036854775807LL-1)
# * Status codes (CDFstatus)
# * - informatory codes are greater than CDF_OK
VIRTUAL_RECORD_DATA = 1001
DID_NOT_COMPRESS = 1002
VAR_ALREADY_CLOSED = 1003
SINGLE_FILE_FORMAT = 1004
NO_PADVALUE_SPECIFIED = 1005
NO_VARS_IN_CDF = 1006
MULTI_FILE_FORMAT = 1007
SOME_ALREADY_ALLOCATED = 1008
PRECEEDING_RECORDS_ALLOCATED = 1009
CDF_OK = 0
ATTR_NAME_TRUNC = -1001
CDF_NAME_TRUNC = -1002
VAR_NAME_TRUNC = -1003
NEGATIVE_FP_ZERO = -1004
# /* -1005 unused. */
FORCED_PARAMETER = -1006
NA_FOR_VARIABLE = -1007
CDF_WARN = -2000
ATTR_EXISTS = -2001
BAD_CDF_ID = -2002
BAD_DATA_TYPE = -2003
BAD_DIM_SIZE = -2004
BAD_DIM_INDEX = -2005
BAD_ENCODING = -2006
BAD_MAJORITY = -2007
BAD_NUM_DIMS = -2008
BAD_REC_NUM = -2009
BAD_SCOPE = -2010
BAD_NUM_ELEMS = -2011
CDF_OPEN_ERROR = -2012
CDF_EXISTS = -2013
BAD_FORMAT = -2014
BAD_ALLOCATE_RECS = -2015
BAD_CDF_EXTENSION = -2016
NO_SUCH_ATTR = -2017
NO_SUCH_ENTRY = -2018
NO_SUCH_VAR = -2019
VAR_READ_ERROR = -2020
VAR_WRITE_ERROR = -2021
BAD_ARGUMENT = -2022
IBM_PC_OVERFLOW = -2023
TOO_MANY_VARS = -2024
VAR_EXISTS = -2025
BAD_MALLOC = -2026
NOT_A_CDF = -2027
CORRUPTED_V2_CDF = -2028
VAR_OPEN_ERROR = -2029
BAD_INITIAL_RECS = -2030
BAD_BLOCKING_FACTOR = -2031
END_OF_VAR = -2032
# /* -2033 unused. */
BAD_CDFSTATUS = -2034
CDF_INTERNAL_ERROR = -2035
BAD_NUM_VARS = -2036
BAD_REC_COUNT = -2037
BAD_REC_INTERVAL = -2038
BAD_DIM_COUNT = -2039
BAD_DIM_INTERVAL = -2040
BAD_VAR_NUM = -2041
BAD_ATTR_NUM = -2042
BAD_ENTRY_NUM = -2043
BAD_ATTR_NAME = -2044
BAD_VAR_NAME = -2045
NO_ATTR_SELECTED = -2046
NO_ENTRY_SELECTED = -2047
NO_VAR_SELECTED = -2048
BAD_CDF_NAME = -2049
# /* -2050 unused. */
CANNOT_CHANGE = -2051
NO_STATUS_SELECTED = -2052
NO_CDF_SELECTED = -2053
READ_ONLY_DISTRIBUTION = -2054
CDF_CLOSE_ERROR = -2055
VAR_CLOSE_ERROR = -2056
# /* -2057 unused. */
BAD_FNC_OR_ITEM = -2058
# /* -2059 unused. */
ILLEGAL_ON_V1_CDF = -2060
# /* -2061 unused. */
# /* -2062 unused. */
BAD_CACHE_SIZE = -2063
# /* -2064 unused. */
# /* -2065 unused. */
CDF_CREATE_ERROR = -2066
NO_SUCH_CDF = -2067
VAR_CREATE_ERROR = -2068
# /* -2069 unused. */
READ_ONLY_MODE = -2070
ILLEGAL_IN_zMODE = -2071
BAD_zMODE = -2072
BAD_READONLY_MODE = -2073
CDF_READ_ERROR = -2074
CDF_WRITE_ERROR = -2075
ILLEGAL_FOR_SCOPE = -2076
NO_MORE_ACCESS = -2077
# /* -2078 unused. */
BAD_DECODING = -2079
# /* -2080 unused. */
BAD_NEGtoPOSfp0_MODE = -2081
UNSUPPORTED_OPERATION = -2082
CDF_SAVE_ERROR = -2083
VAR_SAVE_ERROR = -2084
# /* -2085 unused. */
NO_WRITE_ACCESS = -2086
NO_DELETE_ACCESS = -2087
CDF_DELETE_ERROR = -2088
VAR_DELETE_ERROR = -2089
UNKNOWN_COMPRESSION = -2090
CANNOT_COMPRESS = -2091
DECOMPRESSION_ERROR = -2092
COMPRESSION_ERROR = -2093
# /* -2094 unused. */
# /* -2095 unused. */
EMPTY_COMPRESSED_CDF = -2096
BAD_COMPRESSION_PARM = -2097
UNKNOWN_SPARSENESS = -2098
CANNOT_SPARSERECORDS = -2099
CANNOT_SPARSEARRAYS = -2100
TOO_MANY_PARMS = -2101
NO_SUCH_RECORD = -2102
CANNOT_ALLOCATE_RECORDS = -2103
# /* -2104 unused. */
# /* -2105 unused. */
SCRATCH_DELETE_ERROR = -2106
SCRATCH_CREATE_ERROR = -2107
SCRATCH_READ_ERROR = -2108
SCRATCH_WRITE_ERROR = -2109
BAD_SPARSEARRAYS_PARM = -2110
BAD_SCRATCH_DIR = -2111
NOT_A_CDF_OR_NOT_SUPPORTED = -2113
CORRUPTED_V3_CDF = -2223
ILLEGAL_EPOCH_FIELD = -2224
BAD_CHECKSUM = -2225
CHECKSUM_ERROR = -2226
CHECKSUM_NOT_ALLOWED = -2227
IS_A_NETCDF = -2228
TT2000_TIME_ERROR = -2229
UNABLE_TO_PROCESS_CDF = -2230
# /* #define ZLIB_DECOMPRESSION_ERROR = -2231 */
# """
# * Functions (for INTERNAL interface).
# * Note: These values must be different from those of the items.
CREATE_ = 1001
OPEN_ = 1002
DELETE_ = 1003
CLOSE_ = 1004
SELECT_ = 1005
CONFIRM_ = 1006
GET_ = 1007
PUT_ = 1008
SAVE_ = 1009
BACKWARD_ = 1010
GETCDFFILEBACKWARD_ = 1011
CHECKSUM_ = 1012
GETCDFCHECKSUM_ = 1013
VALIDATE_ = 1014
GETCDFVALIDATE_ = 1015
GETLEAPSECONDSENVVAR_ = 1016
NULL_ = 1000
# * Items on which functions are performed (for INTERNAL interface).
# * Note: These values must be different from those of the functions.
CDF_ = 1
CDF_NAME_ = 2
CDF_ENCODING_ = 3
CDF_DECODING_ = 4
CDF_MAJORITY_ = 5
CDF_FORMAT_ = 6
CDF_COPYRIGHT_ = 7
CDF_NUMrVARS_ = 8
CDF_NUMzVARS_ = 9
CDF_NUMATTRS_ = 10
CDF_NUMgATTRS_ = 11
CDF_NUMvATTRS_ = 12
CDF_VERSION_ = 13
CDF_RELEASE_ = 14
CDF_INCREMENT_ = 15
CDF_STATUS_ = 16
CDF_READONLY_MODE_ = 17
CDF_zMODE_ = 18
CDF_NEGtoPOSfp0_MODE_ = 19
LIB_COPYRIGHT_ = 20
LIB_VERSION_ = 21
LIB_RELEASE_ = 22
LIB_INCREMENT_ = 23
LIB_subINCREMENT_ = 24
rVARs_NUMDIMS_ = 25
rVARs_DIMSIZES_ = 26
rVARs_MAXREC_ = 27
rVARs_RECDATA_ = 28
rVARs_RECNUMBER_ = 29
rVARs_RECCOUNT_ = 30
rVARs_RECINTERVAL_ = 31
rVARs_DIMINDICES_ = 32
rVARs_DIMCOUNTS_ = 33
rVARs_DIMINTERVALS_ = 34
rVAR_ = 35
rVAR_NAME_ = 36
rVAR_DATATYPE_ = 37
rVAR_NUMELEMS_ = 38
rVAR_RECVARY_ = 39
rVAR_DIMVARYS_ = 40
rVAR_NUMBER_ = 41
rVAR_DATA_ = 42
rVAR_HYPERDATA_ = 43
rVAR_SEQDATA_ = 44
rVAR_SEQPOS_ = 45
rVAR_MAXREC_ = 46
rVAR_MAXallocREC_ = 47
rVAR_DATASPEC_ = 48
rVAR_PADVALUE_ = 49
rVAR_INITIALRECS_ = 50
rVAR_BLOCKINGFACTOR_ = 51
rVAR_nINDEXRECORDS_ = 52
rVAR_nINDEXENTRIES_ = 53
rVAR_EXISTENCE_ = 54
zVARs_MAXREC_ = 55
zVARs_RECDATA_ = 56
zVAR_ = 57
zVAR_NAME_ = 58
zVAR_DATATYPE_ = 59
zVAR_NUMELEMS_ = 60
zVAR_NUMDIMS_ = 61
zVAR_DIMSIZES_ = 62
zVAR_RECVARY_ = 63
zVAR_DIMVARYS_ = 64
zVAR_NUMBER_ = 65
zVAR_DATA_ = 66
zVAR_HYPERDATA_ = 67
zVAR_SEQDATA_ = 68
zVAR_SEQPOS_ = 69
zVAR_MAXREC_ = 70
zVAR_MAXallocREC_ = 71
zVAR_DATASPEC_ = 72
zVAR_PADVALUE_ = 73
zVAR_INITIALRECS_ = 74
zVAR_BLOCKINGFACTOR_ = 75
zVAR_nINDEXRECORDS_ = 76
zVAR_nINDEXENTRIES_ = 77
zVAR_EXISTENCE_ = 78
zVAR_RECNUMBER_ = 79
zVAR_RECCOUNT_ = 80
zVAR_RECINTERVAL_ = 81
zVAR_DIMINDICES_ = 82
zVAR_DIMCOUNTS_ = 83
zVAR_DIMINTERVALS_ = 84
ATTR_ = 85
ATTR_SCOPE_ = 86
ATTR_NAME_ = 87
ATTR_NUMBER_ = 88
ATTR_MAXgENTRY_ = 89
ATTR_NUMgENTRIES_ = 90
ATTR_MAXrENTRY_ = 91
ATTR_NUMrENTRIES_ = 92
ATTR_MAXzENTRY_ = 93
ATTR_NUMzENTRIES_ = 94
ATTR_EXISTENCE_ = 95
gENTRY_ = 96
gENTRY_EXISTENCE_ = 97
gENTRY_DATATYPE_ = 98
gENTRY_NUMELEMS_ = 99
gENTRY_DATASPEC_ = 100
gENTRY_DATA_ = 101
rENTRY_ = 102
rENTRY_NAME_ = 103
rENTRY_EXISTENCE_ = 104
rENTRY_DATATYPE_ = 105
rENTRY_NUMELEMS_ = 106
rENTRY_DATASPEC_ = 107
rENTRY_DATA_ = 108
zENTRY_ = 109
zENTRY_NAME_ = 110
zENTRY_EXISTENCE_ = 111
zENTRY_DATATYPE_ = 112
zENTRY_NUMELEMS_ = 113
zENTRY_DATASPEC_ = 114
zENTRY_DATA_ = 115
STATUS_TEXT_ = 116
CDF_CACHESIZE_ = 117
rVARs_CACHESIZE_ = 118
zVARs_CACHESIZE_ = 119
rVAR_CACHESIZE_ = 120
zVAR_CACHESIZE_ = 121
zVARs_RECNUMBER_ = 122
rVAR_ALLOCATERECS_ = 123
zVAR_ALLOCATERECS_ = 124
DATATYPE_SIZE_ = 125
CURgENTRY_EXISTENCE_ = 126
CURrENTRY_EXISTENCE_ = 127
CURzENTRY_EXISTENCE_ = 128
CDF_INFO_ = 129
CDF_COMPRESSION_ = 130
zVAR_COMPRESSION_ = 131
zVAR_SPARSERECORDS_ = 132
zVAR_SPARSEARRAYS_ = 133
zVAR_ALLOCATEBLOCK_ = 134
zVAR_NUMRECS_ = 135
zVAR_NUMallocRECS_ = 136
rVAR_COMPRESSION_ = 137
rVAR_SPARSERECORDS_ = 138
rVAR_SPARSEARRAYS_ = 139
rVAR_ALLOCATEBLOCK_ = 140
rVAR_NUMRECS_ = 141
rVAR_NUMallocRECS_ = 142
rVAR_ALLOCATEDFROM_ = 143
rVAR_ALLOCATEDTO_ = 144
zVAR_ALLOCATEDFROM_ = 145
zVAR_ALLOCATEDTO_ = 146
zVAR_nINDEXLEVELS_ = 147
rVAR_nINDEXLEVELS_ = 148
CDF_SCRATCHDIR_ = 149
rVAR_RESERVEPERCENT_ = 150
zVAR_RESERVEPERCENT_ = 151
rVAR_RECORDS_ = 152
zVAR_RECORDS_ = 153
STAGE_CACHESIZE_ = 154
COMPRESS_CACHESIZE_ = 155
CDF_CHECKSUM_ = 156
CDFwithSTATS_ = 200 # /* For CDF internal use only! */
CDF_ACCESS_ = 201 # /* For CDF internal use only! */
#-------------------- Copy of cdf.h (end) -------------------------------------
#--------------- Define dictionaries ----------------------------
# Data type dictionary
# Maps CDF data-type constant names (as strings) to their numeric codes
# from cdf.h, so types can be looked up by name.
datatype = {
    'CDF_INT1' : CDF_INT1,
    'CDF_INT2' : CDF_INT2,
    'CDF_INT4' : CDF_INT4,
    'CDF_INT8' : CDF_INT8,
    'CDF_UINT1' : CDF_UINT1,
    'CDF_UINT2' : CDF_UINT2,
    'CDF_UINT4' : CDF_UINT4,
    'CDF_REAL4' : CDF_REAL4,
    'CDF_REAL8' : CDF_REAL8,
    'CDF_EPOCH' : CDF_EPOCH,
    'CDF_EPOCH16' : CDF_EPOCH16,
    'CDF_TIME_TT2000' : CDF_TIME_TT2000,
    'CDF_BYTE' : CDF_BYTE,
    'CDF_FLOAT' : CDF_FLOAT,
    'CDF_DOUBLE' : CDF_DOUBLE,
    'CDF_CHAR' : CDF_CHAR,
    'CDF_UCHAR' : CDF_UCHAR,
    }
# Encoding/decoding dictionary
# Maps encoding constant names (as strings) to their numeric codes.
# FIX: the original listed the key 'PPC_ENCODING' three times; duplicate
# dict keys silently collapse, so only one entry survived.  The extra
# entries were clearly meant to be the Macintosh alias MAC_ENCODING
# (== PPC_ENCODING), which the parallel 'decoding' dictionary does include.
encoding = {
    'NETWORK_ENCODING': NETWORK_ENCODING,
    'SUN_ENCODING': SUN_ENCODING,
    'VAX_ENCODING': VAX_ENCODING,
    'DECSTATION_ENCODING': DECSTATION_ENCODING,
    'SGi_ENCODING': SGi_ENCODING,
    'IBMPC_ENCODING': IBMPC_ENCODING,
    'IBMRS_ENCODING': IBMRS_ENCODING,
    'HOST_ENCODING': HOST_ENCODING,
    'PPC_ENCODING': PPC_ENCODING,
    'MAC_ENCODING': MAC_ENCODING,
    'HP_ENCODING': HP_ENCODING,
    'NeXT_ENCODING': NeXT_ENCODING,
    'ALPHAOSF1_ENCODING': ALPHAOSF1_ENCODING,
    'ALPHAVMSd_ENCODING': ALPHAVMSd_ENCODING,
    'ALPHAVMSg_ENCODING': ALPHAVMSg_ENCODING,
    'ALPHAVMSi_ENCODING': ALPHAVMSi_ENCODING,
    }
# Decoding dictionary: maps decoding constant names (as strings) to their
# numeric codes; note decodings are numerically equal to their encodings.
decoding = {
    'NETWORK_DECODING': NETWORK_DECODING,
    'SUN_DECODING': SUN_DECODING,
    'VAX_DECODING': VAX_DECODING,
    'DECSTATION_DECODING': DECSTATION_DECODING,
    'SGi_DECODING': SGi_DECODING,
    'IBMPC_DECODING': IBMPC_DECODING,
    'IBMRS_DECODING': IBMRS_DECODING,
    'HOST_DECODING': HOST_DECODING,
    'PPC_DECODING': PPC_DECODING,
    'MAC_ENCODING': MAC_ENCODING,
    'MAC_DECODING': MAC_DECODING,
    'HP_DECODING': HP_DECODING,
    'NeXT_DECODING': NeXT_DECODING,
    'ALPHAOSF1_DECODING': ALPHAOSF1_DECODING,
    'ALPHAVMSd_DECODING': ALPHAVMSd_DECODING,
    'ALPHAVMSg_DECODING': ALPHAVMSg_DECODING,
    'ALPHAVMSi_DECODING': ALPHAVMSi_DECODING,
    }
# The remaining name -> numeric-code lookup tables, one per cdf.h
# constant family.

# Compression mode dictionary
compress = {
    'NO_COMPRESSION' : NO_COMPRESSION,
    'RLE_COMPRESSION' : RLE_COMPRESSION,
    'HUFF_COMPRESSION' : HUFF_COMPRESSION,
    'AHUFF_COMPRESSION': AHUFF_COMPRESSION,
    'GZIP_COMPRESSION' : GZIP_COMPRESSION,
    }
# Checksum mode dictionary
checksum = {
    'NO_CHECKSUM' : NO_CHECKSUM,
    'MD5_CHECKSUM' : MD5_CHECKSUM,
    'OTHER_CHECKSUM' : OTHER_CHECKSUM,
    }
# Form dictionary
form = {
    'SINGLE_FILE' : SINGLE_FILE,
    'MULTI_FILE' : MULTI_FILE,
    }
# Array major mode dictionary
major = {
    'ROW_MAJOR' : ROW_MAJOR,
    'COLUMN_MAJOR' : COLUMN_MAJOR,
    }
# Record/Dimension Variances dictionary
vary = {
    'VARY' : VARY,
    'NOVARY' : NOVARY,
    }
# Sparse Records dictionary
sparse = {
    'NO_SPARSEARRAYS' : NO_SPARSEARRAYS,
    'NO_SPARSERECORDS' : NO_SPARSERECORDS,
    'PAD_SPARSERECORDS' : PAD_SPARSERECORDS,
    'PREV_SPARSERECORDS' : PREV_SPARSERECORDS,
    }
# Attribute Scopes dictionary
scope = {
    'GLOBAL_SCOPE' : GLOBAL_SCOPE,
    'VARIABLE_SCOPE' : VARIABLE_SCOPE,
    }
# Read-Only Modes dictionary
read = {
    'READONLYon' : READONLYon,
    'READONLYoff' : READONLYoff,
    }
| Python |
# File: cdflib_namedtuple.py
"""
This file defines named tuple for cdflib.
"""
import collections
# Per-variable metadata record as reported by the CDF library.
VarInfo = collections.namedtuple(
    'VarInfo',
    ['blockingFactor', 'dataType', 'varMaxRecAlloc', 'varMaxRec',
     'numAllocRecords', 'numDims', 'numElements', 'numRecords',
     'recVary', 'dimSizes', 'dimVarys'])

# Counts of variables and attributes in a CDF file.
DataStats = collections.namedtuple(
    'DataStats',
    ['n_zVars', 'n_rVars', 'n_Attrs', 'n_gAttrs', 'n_vAttrs'])

# File-level properties of a CDF file.
FileInfo = collections.namedtuple(
    'FileInfo',
    ['version', 'copyright', 'compression_mode', 'checksum_mode',
     'encoding_mode', 'file_format', 'array_majority'])
| Python |
# File: setup.py
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import glob
import os.path
"""
Steps of this script:
1. Define metadata for setup().
2. Construct paths and file lists.
3. Define modules for different platforms.
4. Call setup()
"""
#-------------------------------------------------------------------------------
# 1. Define metadata of the package.
#-------------------------------------------------------------------------------
PKG_NAME = 'pynasacdf'
VERSION = '0.1.0'
AUTHOR = 'Jianbao Tao'
# Assembled from pieces, presumably to keep the raw address out of scrapers.
JBT_EMAIL = '%s@%s.com' % ('jianbao.tao', 'gmail')
PROJECT_URL = 'https://code.google.com/p/python-nasa-cdf-tools/'
PKG_LIST = [PKG_NAME,
            ]
#-------------------------------------------------------------------------------
# 2. Construct directories.
#-------------------------------------------------------------------------------
# proj_dir: The directory of setup.py
proj_dir = os.path.dirname(os.path.abspath(__file__))
# cdf_dist: Directory of the NASA CDF C library.
cdf_dist = os.path.join(proj_dir, 'cdf34_1-dist')
# Dirs: Mac version of CDF
cdf_macosx = os.path.join(cdf_dist, 'macosx')
cdf_macosx_inc = os.path.join(cdf_macosx, 'include')
macosx_libcdf_a = os.path.join(cdf_macosx, 'lib', 'libcdf.a')
# Dirs: Linux version of CDF
cdf_linux = os.path.join(cdf_dist, 'linux')
cdf_linux_lib = os.path.join(cdf_linux, 'src', 'lib')
cdf_linux_inc = os.path.join(cdf_linux, 'src', 'include')
# Linux C source files.
linux_c_sources = glob.glob(os.path.join(cdf_linux_lib, '*.c'))
# libmain.c depends on windows, and should be removed from the source list.
linux_c_sources.remove(os.path.join(cdf_linux_lib, 'libmain.c'))
# Cython source files.
cython_sources = [
    os.path.join('cython_src', 'cdflib.pyx')
    ,]
# Figure out numpy's include directory.
from distutils.sysconfig import get_python_lib
site_pkg_dir = get_python_lib()
numpy_inc = os.path.join(site_pkg_dir, 'numpy', 'core', 'include')
#-------------------------------------------------------------------------------
# 3. Define modules for different platforms.
#-------------------------------------------------------------------------------
# Enable large-file (>2 GB) support when compiling the C sources.
compile_args = ['-D_FILE_OFFSET_BITS=64',
                '-D_LARGEFILE64_SOURCE',
                '-D_LARGEFILE_SOURCE',
                ]
cdflib_module = PKG_NAME + '.cdflib'
import platform
if platform.system() == 'Linux':
    # On Linux the CDF C sources are compiled together with the Cython code.
    sources = cython_sources + linux_c_sources
    include_dirs = [numpy_inc, cdf_linux_inc]
    extra_objects = []
    libraries = ['m']
#    modules = Extension(cdflib_module,
#                        sources = sources,
#                        extra_objects = extra_objects,
#                        include_dirs = include_dirs,
#                        extra_compile_args = compile_args,
#                        libraries = libraries,
#                        )
elif platform.system() == 'Darwin':
    # On Mac OS X, link against the prebuilt static libcdf.a instead.
    sources = cython_sources
    include_dirs = [numpy_inc, cdf_macosx_inc]
    extra_objects = [macosx_libcdf_a]
    libraries = []
else:
    print platform.system(), ' is not supported yet. Abort.'
    import sys
    sys.exit(-1)
modules = Extension(cdflib_module,
                    sources = sources,
                    extra_objects = extra_objects,
                    include_dirs = include_dirs,
                    extra_compile_args = compile_args,
                    libraries = libraries,
                    )
#-------------------------------------------------------------------------------
# 4. Call setup()
#-------------------------------------------------------------------------------
setup(name = PKG_NAME,
      version = VERSION,
      author = AUTHOR,
      author_email = JBT_EMAIL,
      packages = PKG_LIST,
      url = PROJECT_URL,
      cmdclass = {'build_ext':build_ext},
      ext_modules = [modules]
      )
| Python |
__all__ = ['parse_robolog', 'parse_latest']
import csv, os
'''
This python script requires Matplotlib and Numpy to be installed.
It is designed for Python 2.6 or 2.7
'''
def parse_robolog(filename):
    """Parse a robot CSV log into a list of per-row dicts of floats.

    Every field value is converted to float; the 'Seconds' and
    'Nanoseconds' columns, when present, are dropped afterwards.
    The module-level globals ``linenum``, ``ln`` and ``keyname`` are
    updated as parsing proceeds so that diagnostics() can report the
    failing location if a value does not convert (a ValueError from
    float() propagates to the caller).
    """
    global linenum, ln, keyname
    print("Checking " + filename)
    # Initialize the variables diagnostics() may later inspect.
    linelist = []
    linenum = 0
    keyname = ""
    ln = []
    # FIX: the original leaked the file handle (open() passed straight to
    # DictReader); read everything inside a with-block instead.
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile)
        names = reader.fieldnames
        for ln in reader:
            linelist.append(ln)
    for ln in linelist:
        linenum += 1
        for keyname in names:
            ln[keyname] = float(ln[keyname])
        # Remove extraneous values that we're not using.  Catch KeyError
        # only: the original bare excepts could also have masked unrelated
        # errors.
        try:
            del ln['Seconds']
        except KeyError:
            pass
        try:
            del ln['Nanoseconds']
        except KeyError:
            pass
    return linelist
def lod2dol(lod):
    '''
    Convert a list of dicts into a dict of lists.

    Column keys are taken from the first dict; every dict in *lod* is
    expected to have the same keys.
    '''
    columns = {key: [] for key in lod[0]}
    for row in lod:
        for key in row:
            columns[key].append(row[key])
    return columns
def graph_robolog(dol, filename, path):
try:
import matplotlib.pyplot as plt
except:
print "Error: matplotlib.pyplot failed to load.\nCheck to make sure it is installed."
times = dol['Elapsed Time']
del dol['Elapsed Time']
i=0
xlen = 1
ylen = len(dol)
for key in dol.keys():
plt.xlabel('Elapsed Time')
plt.ylabel(key)
plt.plot(times, dol[key])
plt.savefig(filename + "__" + key + ".png")
plt.clf()
for key in dol.keys():
i += 1
plt.subplot(ylen,xlen,i)
plt.xlabel('Elapsed Time')
plt.ylabel(key)
plt.plot(times, dol[key])
plt.savefig(filename + ".png")
plt.clf()
def parse_latest():
    """Find the newest timestamped log folder, parse each CSV in it, and
    write the graphs into an 'output' subfolder.  Interactive: prompts for
    the base folder name and whether to redraw existing graphs."""
    global max_foldername, filename
    base_foldername = raw_input("What is the base log folder name?\n==>")
    # The initial timestamp of all 0s
    # All timestamps are guaranteed to be greater than this!
    max_foldername = "00000000.00.00.00"
    for filename in os.listdir(base_foldername):
        # Find the latest files
        # (folder names sort lexicographically by timestamp, so a plain
        # string comparison picks the newest one)
        if not filename=="output":
            if filename > max_foldername:
                max_foldername = filename
    address = os.path.join(base_foldername, max_foldername)
    pngaddress = os.path.join(base_foldername, max_foldername, 'output')
    # NOTE(review): x duplicates pngaddress; it is only passed to
    # graph_robolog(), which ignores it.
    x = os.path.join(base_foldername, max_foldername, 'output')
    if os.path.isdir(os.path.join(base_foldername, max_foldername, 'output'))==False:
        os.mkdir(os.path.join(base_foldername, max_foldername, 'output'))
    else:
        print("Directory `" + pngaddress + "` already exists.")
        go = raw_input("Redraw graphs? (Y/N)\n==>")
        if go == "N" or go == "n":
            return
    for filename in os.listdir(address):
        # Check each file in the latest folder
        # Attempt to graph each file
        r = os.path.join(address, filename)
        z = os.path.splitext(filename)
        if z[1] == ".csv":
            try:
                lod = parse_robolog(r)
            except:
                # Bad value in the CSV: report where it happened and stop.
                diagnostics(pngaddress)
                raw_input();
                return
            data = lod2dol(lod)
            f = os.path.join(pngaddress, filename)
            graph_robolog(data, f, x)
            # Release the parsed data before the next file.
            lod = ()
            data = {}
    print "\n\nGraphing complete. Outputted graphs to " + pngaddress
def diagnostics(pngaddress):
    """Report a CSV parse failure, using the module globals that
    parse_robolog() updated (linenum, keyname, filename, max_foldername),
    both to stdout and to <pngaddress>/error.log."""
    # NOTE(review): the global statement lists 'name', but the body reads
    # 'keyname' and 'max_foldername'; reading globals works without the
    # declaration, so this is harmless but confusing.
    global linenum, ln, name, filename, log
    # Error in the CSV
    log = open(os.path.join(pngaddress, 'error.log'), 'w')
    log.write("Error: invalid data on line " + str(linenum))
    log.write("\nIn file: " + str(max_foldername + '\\' + filename))
    log.write("\nWith key: " + keyname)
    log.close()
    # log.write("\tLine contents:")
    # log.write(ln)
    print("\tError: invalid data on line " + str(linenum))
    print("\tIn file: " + str(max_foldername + '\\' + filename))
    print("\tWith key: " + keyname)
    # print("\tLine contents:")
    # print(ln)
    print("\n\n\tError was outputted to file \"error.log\" \n\tat " + pngaddress + "\\error.log")
# Graph the latest log folder when executed directly.
if __name__ == "__main__":
    parse_latest()
| Python |
#!/usr/bin/env python
import commands
import getopt
import sys
# Connection settings for the Gerrit SSH command-line interface.
SSH_USER = 'bot'
SSH_HOST = 'localhost'
SSH_PORT = 29418
# Base command; the score/message flags and commit SHA are appended later.
SSH_COMMAND = 'ssh %s@%s -p %d gerrit approve ' % (SSH_USER, SSH_HOST, SSH_PORT)
# Review score and message used when the commit message fails validation.
FAILURE_SCORE = '--code-review=-2'
FAILURE_MESSAGE = 'This commit message does not match the standard.' \
    + ' Please correct the commit message and upload a replacement patch.'
# Neutral score and empty message applied when the commit message passes.
PASS_SCORE = '--code-review=0'
PASS_MESSAGE = ''
def main():
    """Gerrit hook entry point: validate the commit message of the given
    patchset and post a review score via ssh.

    Exits non-zero on bad arguments or git failures; fail()/passes()
    terminate the process after posting the review.
    """
    change = None
    project = None
    branch = None
    commit = None
    patchset = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], '', \
            ['change=', 'project=', 'branch=', 'commit=', 'patchset='])
    except getopt.GetoptError, err:
        print 'Error: %s' % (err)
        usage()
        sys.exit(-1)
    for arg, value in opts:
        if arg == '--change':
            change = value
        elif arg == '--project':
            project = value
        elif arg == '--branch':
            branch = value
        elif arg == '--commit':
            commit = value
        elif arg == '--patchset':
            patchset = value
        else:
            print 'Error: option %s not recognized' % (arg)
            usage()
            sys.exit(-1)
    # All five options are mandatory.
    if change == None or project == None or branch == None \
        or commit == None or patchset == None:
        usage()
        sys.exit(-1)
    command = 'git cat-file commit %s' % (commit)
    status, output = commands.getstatusoutput(command)
    if status != 0:
        print 'Error running \'%s\'. status: %s, output:\n\n%s' % \
            (command, status, output)
        sys.exit(-1)
    # The commit message starts after the first blank line of the raw
    # commit object (headers come first).
    commitMessage = output[(output.find('\n\n')+2):]
    commitLines = commitMessage.split('\n')
    # Rule 1: the summary must be a single line followed by a blank line.
    if len(commitLines) > 1 and len(commitLines[1]) != 0:
        fail(commit, 'Invalid commit summary. The summary must be ' \
            + 'one line followed by a blank line.')
    # Rule 2: no line may exceed 80 characters.
    i = 0
    for line in commitLines:
        i = i + 1
        if len(line) > 80:
            fail(commit, 'Line %d is over 80 characters.' % i)
    passes(commit)
def usage():
    # Print the command-line synopsis for this hook.
    print 'Usage:\n'
    print sys.argv[0] + ' --change <change id> --project <project name> ' \
        + '--branch <branch> --commit <sha1> --patchset <patchset id>'
def fail( commit, message ):
    # Post the failure score and explanation on *commit*, then exit(1).
    command = SSH_COMMAND + FAILURE_SCORE + ' -m \\\"' \
        + _shell_escape( FAILURE_MESSAGE + '\n\n' + message) \
        + '\\\" ' + commit
    commands.getstatusoutput(command)
    sys.exit(1)
def passes( commit ):
    # Post the neutral pass score on *commit*; does not exit.
    command = SSH_COMMAND + PASS_SCORE + ' -m \\\"' \
        + _shell_escape(PASS_MESSAGE) + ' \\\" ' + commit
    commands.getstatusoutput(command)
def _shell_escape(x):
s = ''
for c in x:
if c in '\n':
s = s + '\\\"$\'\\n\'\\\"'
else:
s = s + c
return s
# Run the hook when executed directly by Gerrit.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python2.6
# Copyright (c) 2010, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# # Neither the name of Code Aurora Forum, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This script is designed to detect when a patchset uploaded to Gerrit is
# 'identical' (determined via git-patch-id) and reapply reviews onto the new
# patchset from the previous patchset.
# Get usage and help info by running: ./trivial_rebase.py --help
# Documentation is available here: https://www.codeaurora.org/xwiki/bin/QAEP/Gerrit
import json
from optparse import OptionParser
import subprocess
from sys import exit
class CheckCallError(OSError):
    """CheckCall() returned non-0."""

    def __init__(self, command, cwd, retcode, stdout, stderr=None):
        # Initialize the OSError base with the full argument tuple, and
        # also expose each piece as a named attribute for callers.
        super(CheckCallError, self).__init__(command, cwd, retcode, stdout, stderr)
        self.command = command
        self.cwd = cwd
        self.retcode = retcode
        self.stdout = stdout
        self.stderr = stderr
def CheckCall(command, cwd=None):
  """Run command and return (stdout, stderr), like subprocess.check_call().

  Works on python 2.4.  Raises CheckCallError if the command cannot be
  started or exits with a non-zero status.
  """
  try:
    proc = subprocess.Popen(command, cwd=cwd, stdout=subprocess.PIPE)
    out, err = proc.communicate()
  except OSError as e:
    # exec itself failed (e.g. command not found); no output was produced.
    raise CheckCallError(command, cwd, e.errno, None)
  if proc.returncode:
    raise CheckCallError(command, cwd, proc.returncode, out, err)
  return out, err
def GsqlQuery(sql_query, server, port):
  """Run a gerrit gsql query over ssh and return its JSON output, split
  into one string per JSON document."""
  cmd = ['ssh', '-p', port, server, 'gerrit', 'gsql', '--format',
         'JSON', '-c', sql_query]
  try:
    (out, _) = CheckCall(cmd)
  except CheckCallError as e:
    print("return code is %s" % e.retcode)
    print("stdout and stderr is\n%s%s" % (e.stdout, e.stderr))
    raise
  # gsql emits one JSON object per row/status record; insert a marker
  # after each one so the stream can be split into documents.
  marked = out.replace('}}\n', '}}\nsplit here\n')
  return marked.split('split here\n')
def FindPrevRev(changeId, patchset, server, port):
  """Return the revision (commit SHA-1) of the previous patch set."""
  query = ("\"SELECT revision FROM patch_sets,changes WHERE "
           "patch_sets.change_id = changes.change_id AND "
           "patch_sets.patch_set_id = %s AND "
           "changes.change_key = \'%s\'\"" % ((patchset - 1), changeId))
  rows = GsqlQuery(query, server, port)
  # The first document holds the result row; later ones are status records.
  first_row = json.loads(rows[0], strict=False)
  return first_row["columns"]["revision"]
def GetApprovals(changeId, patchset, server, port):
  """Get all the approvals on a specific patch set.

  Returns a list of approval dicts (value, account_id, category_id)."""
  query = ("\"SELECT value,account_id,category_id FROM patch_set_approvals "
           "WHERE change_id = (SELECT change_id FROM changes WHERE "
           "patch_set_id = %s AND change_key = \'%s\') AND value <> 0\""
           % ((patchset - 1), changeId))
  approvals = []
  for json_str in GsqlQuery(query, server, port):
    record = json.loads(json_str, strict=False)  # avoid shadowing builtin dict
    if record["type"] == "row":
      approvals.append(record["columns"])
  return approvals
def GetEmailFromAcctId(account_id, server, port):
  """Returns the preferred email address associated with the account_id"""
  query = ("\"SELECT preferred_email FROM accounts WHERE account_id = %s\""
           % account_id)
  rows = GsqlQuery(query, server, port)
  first_row = json.loads(rows[0], strict=False)
  return first_row["columns"]["preferred_email"]
def GetPatchId(revision):
  """Return 'git patch-id' output ("<patch-id> <commit>") for a revision."""
  show_proc = subprocess.Popen(['git', 'show', revision],
                               stdout=subprocess.PIPE)
  patch_id_proc = subprocess.Popen(['git', 'patch-id'],
                                   stdout=subprocess.PIPE,
                                   stdin=subprocess.PIPE)
  # Feed the full commit text to 'git patch-id' and return its stdout.
  return patch_id_proc.communicate(show_proc.communicate()[0])[0]
def SuExec(server, port, private_key, as_user, cmd):
  """Run cmd on the Gerrit server, impersonating as_user via suexec."""
  CheckCall(['ssh', '-l', "Gerrit Code Review", '-p', port, server, '-i',
             private_key, 'suexec', '--as', as_user, '--', cmd])
def DiffCommitMessages(commit1, commit2):
  """Return True if the author/subject/body of the two commits differ."""
  pretty = '--pretty=format:"%an %ae%n%s%n%b"'
  log1 = CheckCall(['git', 'log', pretty, commit1 + '^!'])
  log2 = CheckCall(['git', 'log', pretty, commit2 + '^!'])
  return log1 != log2
def Main():
  """Entry point for the trivial-rebase detection hook.

  Gerrit invokes this with the change, project, commit, and patchset
  number on the command line.  If the new patchset's git patch-id matches
  the previous one, prior code-review approvals are replayed onto it.
  """
  server = 'localhost'
  usage = "usage: %prog <required options> [--server-port=PORT]"
  parser = OptionParser(usage=usage)
  parser.add_option("--change", dest="changeId", help="Change identifier")
  parser.add_option("--project", help="Project path in Gerrit")
  parser.add_option("--commit", help="Git commit-ish for this patchset")
  parser.add_option("--patchset", type="int", help="The patchset number")
  parser.add_option("--private-key-path", dest="private_key_path",
                    help="Full path to Gerrit SSH daemon's private host key")
  parser.add_option("--server-port", dest="port", default='29418',
                    help="Port to connect to Gerrit's SSH daemon "
                         "[default: %default]")

  (options, args) = parser.parse_args()
  if not options.changeId:
    parser.print_help()
    exit(0)

  if options.patchset == 1:
    # Nothing to detect on first patchset
    exit(0)
  prev_revision = None
  prev_revision = FindPrevRev(options.changeId, options.patchset, server,
                              options.port)
  if not prev_revision:
    # Couldn't find a previous revision
    exit(0)
  prev_patch_id = GetPatchId(prev_revision)
  cur_patch_id = GetPatchId(options.commit)
  # git patch-id prints "<patch-id> <commit>"; compare only the first token.
  if cur_patch_id.split()[0] != prev_patch_id.split()[0]:
    # patch-ids don't match
    exit(0)
  # Patch ids match. This is a trivial rebase.
  # In addition to patch-id we should check if the commit message changed. Most
  # approvers would want to re-review changes when the commit message changes.
  changed = DiffCommitMessages(prev_revision, options.commit)
  if changed:
    # Insert a comment into the change letting the approvers know only the
    # commit message changed
    comment_msg = ("\'--message=New patchset patch-id matches previous patchset"
                   ", but commit message has changed.'")
    comment_cmd = ['ssh', '-p', options.port, server, 'gerrit', 'approve',
                   '--project', options.project, comment_msg, options.commit]
    CheckCall(comment_cmd)
    exit(0)

  # Need to get all approvals on prior patch set, then suexec them onto
  # this patchset.
  approvals = GetApprovals(options.changeId, options.patchset, server,
                           options.port)
  gerrit_approve_msg = ("\'Automatically re-added by Gerrit trivial rebase "
                        "detection script.\'")
  for approval in approvals:
    # Note: Sites with different 'copy_min_score' values in the
    # approval_categories DB table might want different behavior here.
    # Additional categories should also be added if desired.
    if approval["category_id"] == "CRVW":
      approve_category = '--code-review'
    elif approval["category_id"] == "VRIF":
      # Don't re-add verifies
      #approve_category = '--verified'
      continue
    elif approval["category_id"] == "SUBM":
      # We don't care about previous submit attempts
      continue
    else:
      print "Unsupported category: %s" % approval
      exit(0)

    score = approval["value"]
    gerrit_approve_cmd = ['gerrit', 'approve', '--project', options.project,
                          '--message', gerrit_approve_msg, approve_category,
                          score, options.commit]
    # Replay the approval as its original author via suexec.
    email_addr = GetEmailFromAcctId(approval["account_id"], server,
                                    options.port)
    SuExec(server, options.port, options.private_key_path, email_addr,
           ' '.join(gerrit_approve_cmd))
  exit(0)
# Run only when executed directly (Gerrit invokes this as a hook script).
if __name__ == "__main__":
  Main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Unittest for the util/regexp/re2/unicode.py module."""
import os
import StringIO
from google3.pyglib import flags
from google3.testing.pybase import googletest
from google3.util.regexp.re2 import unicode
# Location of the checked-in Unicode Character Database the tests read.
_UNICODE_DIR = os.path.join(flags.FLAGS.test_srcdir, "google3", "third_party",
                            "unicode", "ucd-5.1.0")
class ConvertTest(googletest.TestCase):
  """Test the conversion functions."""

  def testUInt(self):
    for text, want in (("0000", 0x0000), ("263A", 0x263A),
                       ("10FFFF", 0x10FFFF)):
      self.assertEquals(want, unicode._UInt(text))
    # Too short, too long, and above the maximum code point.
    for bad in ("263", "263AAAA", "110000"):
      self.assertRaises(unicode.InputError, unicode._UInt, bad)

  def testURange(self):
    self.assertEquals([1, 2, 3], unicode._URange("0001..0003"))
    self.assertEquals([1], unicode._URange("0001"))
    # Malformed, reversed, and degenerate ranges are all rejected.
    for bad in ("0001..0003..0005", "0003..0001", "0001..0001"):
      self.assertRaises(unicode.InputError, unicode._URange, bad)

  def testUStr(self):
    self.assertEquals("0x263A", unicode._UStr(0x263a))
    self.assertEquals("0x10FFFF", unicode._UStr(0x10FFFF))
    for bad in (0x110000, -1):
      self.assertRaises(unicode.InputError, unicode._UStr, bad)
_UNICODE_TABLE = """# Commented line, should be ignored.
# The next line is blank and should be ignored.
0041;Capital A;Line 1
0061..007A;Lowercase;Line 2
1F00;<Greek, First>;Ignored
1FFE;<Greek, Last>;Line 3
10FFFF;Runemax;Line 4
0000;Zero;Line 5
"""
_BAD_TABLE1 = """
111111;Not a code point;
"""
_BAD_TABLE2 = """
0000;<Zero, First>;Missing <Zero, Last>
"""
_BAD_TABLE3 = """
0010..0001;Bad range;
"""
class AbortError(Exception):
  """Function should not have been called."""


def Abort():
  # Callback for ReadUnicodeTable inputs that must fail before any
  # data line is delivered to the callback.
  raise AbortError("Abort")


def StringTable(s, n, f):
  # Run ReadUnicodeTable over an in-memory table string s expecting
  # n fields per line, invoking callback f for each entry.
  unicode.ReadUnicodeTable(StringIO.StringIO(s), n, f)
class ReadUnicodeTableTest(googletest.TestCase):
  """Test the ReadUnicodeTable function."""

  def testSimpleTable(self):
    # Expected (codes, fields[0], fields[1]) for each callback, in order.
    expected = [
        ([0x0041], "0041", "Capital A"),
        (range(0x0061, 0x007A + 1), "0061..007A", "Lowercase"),
        (range(0x1F00, 0x1FFE + 1), "1F00..1FFE", "Greek"),
        ([0x10FFFF], "10FFFF", "Runemax"),
        ([0x0000], "0000", "Zero"),
    ]
    ncall = [0]  # can't assign to ordinary int in DoLine

    def DoLine(codes, fields):
      self.assertEquals(3, len(fields))
      ncall[0] += 1
      self.assertEquals("Line %d" % (ncall[0],), fields[2])
      want_codes, want_range, want_name = expected[ncall[0] - 1]
      self.assertEquals(want_codes, codes)
      self.assertEquals(want_range, fields[0])
      self.assertEquals(want_name, fields[1])

    StringTable(_UNICODE_TABLE, 3, DoLine)
    self.assertEquals(5, ncall[0])

  def testErrorTables(self):
    # Wrong field counts and malformed tables must raise before the
    # callback is ever invoked (Abort would fail the test otherwise).
    for table, nfields in ((_UNICODE_TABLE, 4), (_UNICODE_TABLE, 2),
                           (_BAD_TABLE1, 3), (_BAD_TABLE2, 3),
                           (_BAD_TABLE3, 3)):
      self.assertRaises(unicode.InputError, StringTable, table, nfields, Abort)
class ParseContinueTest(googletest.TestCase):
  """Test the ParseContinue function."""

  def testParseContinue(self):
    cases = [
        ("<Private Use, First>", ("Private Use", "First")),
        ("<Private Use, Last>", ("Private Use", "Last")),
        # Anything not tagged First/Last is returned unchanged.
        ("<Private Use, Blah>", ("<Private Use, Blah>", None)),
    ]
    for text, want in cases:
      self.assertEquals(want, unicode._ParseContinue(text))
class CaseGroupsTest(googletest.TestCase):
  """Test the CaseGroups function (and the CaseFoldingReader)."""

  def FindGroup(self, c):
    # Return the first fold-equivalence group containing c, else None.
    if type(c) == str:
      c = ord(c)
    for group in self.groups:
      if c in group:
        return group
    return None

  def testCaseGroups(self):
    self.groups = unicode.CaseGroups(unicode_dir=_UNICODE_DIR)
    self.assertEquals([ord("A"), ord("a")], self.FindGroup("a"))
    self.assertEquals(None, self.FindGroup("0"))
class ScriptsTest(googletest.TestCase):
  """Test the Scripts function (and the ScriptsReader)."""

  def FindScript(self, c):
    # Return the name of the script whose code list contains c, else None.
    if type(c) == str:
      c = ord(c)
    for script, codes in self.scripts.items():
      if c in codes:
        return script
    return None

  def testScripts(self):
    self.scripts = unicode.Scripts(unicode_dir=_UNICODE_DIR)
    self.assertEquals("Latin", self.FindScript("a"))
    self.assertEquals("Common", self.FindScript("0"))
    self.assertEquals(None, self.FindScript(0xFFFE))
class CategoriesTest(googletest.TestCase):
  """Test the Categories function (and the UnicodeDataReader)."""

  def FindCategory(self, c):
    # Return the most specific category containing c: a two-letter
    # category (e.g. Nd) wins over its one-letter grouping (e.g. N).
    if type(c) == str:
      c = ord(c)
    short = None
    for category, codes in self.categories.items():
      if c in codes:
        if len(category) > 1:
          return category
        if short is None:
          short = category
    return short

  def testCategories(self):
    self.categories = unicode.Categories(unicode_dir=_UNICODE_DIR)
    self.assertEquals("Ll", self.FindCategory("a"))
    self.assertEquals("Nd", self.FindCategory("0"))
    self.assertEquals("Lo", self.FindCategory(0xAD00))  # in First, Last range
    self.assertEquals(None, self.FindCategory(0xFFFE))
    for cp in (0x8B5A, 0x6C38, 0x92D2):
      self.assertEquals("Lo", self.FindCategory(cp))
    self.assertTrue(ord("a") in self.categories["L"])
    self.assertTrue(ord("0") in self.categories["N"])
    for cp in (0x8B5A, 0x6C38, 0x92D2):
      self.assertTrue(cp in self.categories["L"])
def main():
  # Delegate to the googletest runner, which discovers the TestCases above.
  googletest.main()


if __name__ == "__main__":
  main()
| Python |
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Parser for Unicode data files (as distributed by unicode.org)."""
import os
import re
import urllib2
# Directory or URL where Unicode tables reside.
# (ReadUnicodeTable fetches http:// locations via urllib2.)
_UNICODE_DIR = "http://www.unicode.org/Public/6.0.0/ucd"

# Largest valid Unicode code value.
_RUNE_MAX = 0x10FFFF
class Error(Exception):
  """Unicode error base class."""


class InputError(Error):
  """Unicode input error class. Raised on invalid input."""
def _UInt(s):
  """Converts string to Unicode code point ('263A' => 0x263a).

  Args:
    s: string to convert

  Returns:
    Unicode code point

  Raises:
    InputError: the string is not a valid Unicode value.
  """
  try:
    value = int(s, 16)
  except ValueError:
    value = -1
  # Unicode data files write code points as 4 to 6 hex digits.
  if not 4 <= len(s) <= 6 or not 0 <= value <= _RUNE_MAX:
    raise InputError("invalid Unicode value %s" % (s,))
  return value
def _URange(s):
  """Converts string to Unicode range.

  '0001..0003' => [1, 2, 3].
  '0001' => [1].

  Args:
    s: string to convert

  Returns:
    Unicode range

  Raises:
    InputError: the string is not a valid Unicode range.
  """
  parts = s.split("..")
  if len(parts) == 1:
    return [_UInt(parts[0])]
  if len(parts) == 2:
    lo = _UInt(parts[0])
    hi = _UInt(parts[1])
    # Degenerate (lo == hi) and reversed ranges are both rejected.
    if lo < hi:
      return range(lo, hi + 1)
  raise InputError("invalid Unicode range %s" % (s,))
def _UStr(v):
  """Converts Unicode code point to hex string.

  0x263a => '0x263A'.

  Args:
    v: code point to convert

  Returns:
    Unicode string

  Raises:
    InputError: the argument is not a valid Unicode value.
  """
  if not 0 <= v <= _RUNE_MAX:
    raise InputError("invalid Unicode value %s" % (v,))
  return "0x%04X" % (v,)
def _ParseContinue(s):
"""Parses a Unicode continuation field.
These are of the form '<Name, First>' or '<Name, Last>'.
Instead of giving an explicit range in a single table entry,
some Unicode tables use two entries, one for the first
code value in the range and one for the last.
The first entry's description is '<Name, First>' instead of 'Name'
and the second is '<Name, Last>'.
'<Name, First>' => ('Name', 'First')
'<Name, Last>' => ('Name', 'Last')
'Anything else' => ('Anything else', None)
Args:
s: continuation field string
Returns:
pair: name and ('First', 'Last', or None)
"""
match = re.match("<(.*), (First|Last)>", s)
if match is not None:
return match.groups()
return (s, None)
def ReadUnicodeTable(filename, nfields, doline):
  """Generic Unicode table text file reader.

  The reader takes care of stripping out comments and also
  parsing the two different ways that the Unicode tables specify
  code ranges (using the .. notation and splitting the range across
  multiple lines).

  Each non-comment line in the table is expected to have the given
  number of fields. The first field is known to be the Unicode value
  and the second field its description.

  The reader calls doline(codes, fields) for each entry in the table.
  If doline raises an exception, the reader prints that exception,
  prefixed with the file name and line number, and re-raises it
  immediately, abandoning the rest of the file.

  Arguments:
    filename: the Unicode data file to read, or a file-like object.
    nfields: the number of expected fields per line in that file.
  doline: the function to call for each table entry.

  Raises:
    InputError: nfields is invalid (must be >= 2).
  """
  if nfields < 2:
    raise InputError("invalid number of fields %d" % (nfields,))

  # Accept a path (local file or http:// URL) or an open file-like object.
  # NOTE(review): when this function opens the file itself, it is never
  # explicitly closed -- relies on interpreter cleanup.
  if type(filename) == str:
    if filename.startswith("http://"):
      fil = urllib2.urlopen(filename)
    else:
      fil = open(filename, "r")
  else:
    fil = filename

  first = None        # first code in multiline range
  expect_last = None  # tag expected for "Last" line in multiline range
  lineno = 0          # current line number
  for line in fil:
    lineno += 1
    try:
      # Chop # comments and white space; ignore empty lines.
      sharp = line.find("#")
      if sharp >= 0:
        line = line[:sharp]
      line = line.strip()
      if not line:
        continue

      # Split fields on ";", chop more white space.
      # Must have the expected number of fields.
      fields = [s.strip() for s in line.split(";")]
      if len(fields) != nfields:
        raise InputError("wrong number of fields %d %d - %s" %
                         (len(fields), nfields, line))

      # The Unicode text files have two different ways
      # to list a Unicode range. Either the first field is
      # itself a range (0000..FFFF), or the range is split
      # across two lines, with the second field noting
      # the continuation.
      codes = _URange(fields[0])
      (name, cont) = _ParseContinue(fields[1])

      if expect_last is not None:
        # If the last line gave the First code in a range,
        # this one had better give the Last one.
        if (len(codes) != 1 or codes[0] <= first or
            cont != "Last" or name != expect_last):
          raise InputError("expected Last line for %s" %
                           (expect_last,))
        # Synthesize a single entry covering the whole two-line range.
        codes = range(first, codes[0] + 1)
        first = None
        expect_last = None
        fields[0] = "%04X..%04X" % (codes[0], codes[-1])
        fields[1] = name
      elif cont == "First":
        # Otherwise, if this is the First code in a range,
        # remember it and go to the next line.
        if len(codes) != 1:
          raise InputError("bad First line: range given")
        expect_last = name
        first = codes[0]
        continue

      doline(codes, fields)
    except Exception, e:
      print "%s:%d: %s" % (filename, lineno, e)
      raise

  # A dangling <Name, First> with no <Name, Last> is an input error.
  if expect_last is not None:
    raise InputError("expected Last line for %s; got EOF" %
                     (expect_last,))
def CaseGroups(unicode_dir=_UNICODE_DIR):
  """Returns list of Unicode code groups equivalent under case folding.

  Each group is a sorted list of code points,
  and the list of groups is sorted by first code point
  in the group.

  Args:
    unicode_dir: Unicode data directory

  Returns:
    list of Unicode code groups
  """
  # Dict mapping lowercase code point to fold-equivalent group.
  togroup = {}

  def DoLine(codes, fields):
    """Process single CaseFolding.txt line, updating togroup."""
    (_, foldtype, lower, _) = fields
    # Only common (C) and simple (S) foldings map single code points;
    # full (F) and Turkish (T) foldings are intentionally skipped.
    if foldtype not in ("C", "S"):
      return
    lower = _UInt(lower)
    togroup.setdefault(lower, [lower]).extend(codes)

  ReadUnicodeTable(unicode_dir+"/CaseFolding.txt", 4, DoLine)

  groups = togroup.values()
  for g in groups:
    g.sort()
  groups.sort()
  # BUG FIX: this used to be "return togroup, groups", leaking the
  # intermediate dict as part of the result.  The docstring and callers
  # (e.g. CaseGroupsTest, which iterates the result looking for groups
  # containing a code point) expect only the list of groups.
  return groups
def Scripts(unicode_dir=_UNICODE_DIR):
  """Returns dict mapping script names to code lists.

  Args:
    unicode_dir: Unicode data directory

  Returns:
    dict mapping script names to code lists
  """
  scripts = {}

  def DoLine(codes, fields):
    """Process single Scripts.txt line, updating scripts."""
    # fields is (code range, script name); accumulate codes per script.
    script_name = fields[1]
    scripts.setdefault(script_name, []).extend(codes)

  ReadUnicodeTable(unicode_dir+"/Scripts.txt", 2, DoLine)
  return scripts
def Categories(unicode_dir=_UNICODE_DIR):
  """Returns dict mapping category names to code lists.

  Args:
    unicode_dir: Unicode data directory

  Returns:
    dict mapping category names to code lists
  """
  categories = {}

  def DoLine(codes, fields):
    """Process single UnicodeData.txt line, updating categories."""
    cat = fields[2]
    categories.setdefault(cat, []).extend(codes)
    if len(cat) > 1:
      # Also record the codes under the one-letter grouping (Lu -> L, etc).
      categories.setdefault(cat[0], []).extend(codes)

  ReadUnicodeTable(unicode_dir+"/UnicodeData.txt", 15, DoLine)
  return categories
| Python |
# coding=utf-8
# (The line above is necessary so that I can use 世界 in the
# *comment* below without Python getting all bent out of shape.)
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.

To configure, set the following options in
your repository's .hg/hgrc file.

	[extensions]
	codereview = /path/to/codereview.py

	[codereview]
	server = codereview.appspot.com

The server should be running Rietveld; see http://code.google.com/p/rietveld/.

In addition to the new commands, this extension introduces
the file pattern syntax @nnnnnn, where nnnnnn is a change list
number, to mean the files included in that change list, which
must be associated with the current client.

For example, if change 123456 contains the files x.go and y.go,
"hg diff @123456" is equivalent to "hg diff x.go y.go".
'''
import sys

# Refuse to run as a standalone program; this file is only useful when
# loaded by Mercurial through the [extensions] hgrc section.
if __name__ == "__main__":
	print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
	sys.exit(2)

# We require Python 2.6 for the json package.
# NOTE(review): string comparison of version numbers; fine for 2.x but
# would misclassify a hypothetical '2.10'.
if sys.version < '2.6':
	print >>sys.stderr, "The codereview extension requires Python 2.6 or newer."
	print >>sys.stderr, "You are running Python " + sys.version
	sys.exit(2)
import json
import os
import re
import stat
import subprocess
import threading
import time
from mercurial import commands as hg_commands
from mercurial import util as hg_util
# Module-level state; most of these are filled in by setup code outside
# this part of the file.
defaultcc = None	# NOTE(review): presumably the default CC list -- set elsewhere
codereview_disabled = None	# NOTE(review): presumably set to a reason string when disabled
real_rollback = None	# NOTE(review): presumably stashes hg's original rollback -- confirm
releaseBranch = None	# NOTE(review): current release branch name -- set elsewhere
server = "codereview.appspot.com"	# default Rietveld server (see module docstring)
server_url_base = None	# URL prefix prepended to CL numbers (used in CL.Upload)
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
# The different parts of the file are separated by banners like this one.
#######################################################################
# Helpers
def RelativePath(path, cwd):
	"""Return path relative to cwd when it lies inside cwd, else unchanged."""
	prefix_len = len(cwd)
	if path.startswith(cwd) and path[prefix_len] == '/':
		return path[prefix_len+1:]
	return path
def Sub(l1, l2):
	"""List difference: elements of l1 not in l2, preserving l1's order."""
	return [item for item in l1 if item not in l2]
def Add(l1, l2):
	"""Sorted union: l1 plus those elements of l2 not already in l1."""
	merged = l1 + Sub(l2, l1)
	merged.sort()
	return merged
def Intersect(l1, l2):
	"""List intersection: elements of l1 also in l2, preserving l1's order."""
	return [item for item in l1 if item in l2]
#######################################################################
# RE: UNICODE STRING HANDLING
#
# Python distinguishes between the str (string of bytes)
# and unicode (string of code points) types. Most operations
# work on either one just fine, but some (like regexp matching)
# require unicode, and others (like write) require str.
#
# As befits the language, Python hides the distinction between
# unicode and str by converting between them silently, but
# *only* if all the bytes/code points involved are 7-bit ASCII.
# This means that if you're not careful, your program works
# fine on "hello, world" and fails on "hello, 世界". And of course,
# the obvious way to be careful - use static types - is unavailable.
# So the only way is trial and error to find where to put explicit
# conversions.
#
# Because more functions do implicit conversion to str (string of bytes)
# than do implicit conversion to unicode (string of code points),
# the convention in this module is to represent all text as str,
# converting to unicode only when calling a unicode-only function
# and then converting back to str as soon as possible.
def typecheck(s, t):
	# Assert-style runtime type check; aborts the hg command on mismatch.
	# Used throughout to enforce the str-everywhere convention above.
	if type(s) != t:
		raise hg_util.Abort("type check failed: %s has type %s != %s" % (repr(s), type(s), t))
# If we have to pass unicode instead of str, ustr does that conversion clearly.
def ustr(s):
	# Only accepts str; decodes as UTF-8 per the module convention above.
	typecheck(s, str)
	return s.decode("utf-8")
# Even with those, Mercurial still sometimes turns unicode into str
# and then tries to use it as ascii. Change Mercurial's default.
def set_mercurial_encoding_to_utf8():
	from mercurial import encoding
	encoding.encoding = 'utf-8'

# Applied at import time, before any hg command runs.
set_mercurial_encoding_to_utf8()
# Even with those we still run into problems.
# I tried to do things by the book but could not convince
# Mercurial to let me check in a change with UTF-8 in the
# CL description or author field, no matter how many conversions
# between str and unicode I inserted and despite changing the
# default encoding. I'm tired of this game, so set the default
# encoding for all of Python to 'utf-8', not 'ascii'.
def default_to_utf8():
	import sys
	# reload(sys) replaces sys.stdout/__stdout__; save and restore them.
	stdout, __stdout__ = sys.stdout, sys.__stdout__
	reload(sys)  # site.py deleted setdefaultencoding; get it back
	sys.stdout, sys.__stdout__ = stdout, __stdout__
	sys.setdefaultencoding('utf-8')

default_to_utf8()
#######################################################################
# Status printer for long-running commands
global_status = None	# most recent status message; None means nothing to show

def set_status(s):
	# Record what the current long-running operation is doing;
	# StatusThread picks this up and prints it periodically.
	# print >>sys.stderr, "\t", time.asctime(), s
	global global_status
	global_status = s
class StatusThread(threading.Thread):
	# Daemon thread that periodically echoes global_status to stderr so
	# the user can tell what a long-running command is doing.
	def __init__(self):
		threading.Thread.__init__(self)
	def run(self):
		# pause a reasonable amount of time before
		# starting to display status messages, so that
		# most hg commands won't ever see them.
		time.sleep(30)

		# now show status every 15 seconds
		while True:
			time.sleep(15 - time.time() % 15)	# align to 15-second boundaries
			s = global_status
			if s is None:
				continue
			if s == "":
				s = "(unknown status)"
			print >>sys.stderr, time.asctime(), s
def start_status_thread():
	"""Start the background status printer; it dies with the process."""
	t = StatusThread()
	t.setDaemon(True)  # allowed to exit if t is still running
	t.start()
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
# Placeholder diff uploaded by CL.Upload when the CL has no file diffs
# to send (see the uploaded_diff_file fallback there).
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
class CL(object):
	"""In-memory representation of a single change list (CL).

	Mirrors the on-disk .hg/codereview/cl.nnnnnn file plus the metadata
	kept on the code review server (description, reviewers, CC list).
	"""
	def __init__(self, name):
		typecheck(name, str)
		self.name = name	# CL number as a string, or "new"
		self.desc = ''	# description text
		self.files = []	# files included in this CL
		self.reviewer = []
		self.cc = []
		self.url = ''
		self.local = False
		self.web = False
		self.copied_from = None	# None means current user
		self.mailed = False
		self.private = False
		self.lgtm = []

	def DiskText(self):
		"""Render the CL in the on-disk format written by Flush."""
		cl = self
		s = ""
		if cl.copied_from:
			s += "Author: " + cl.copied_from + "\n\n"
		if cl.private:
			s += "Private: " + str(self.private) + "\n"
		s += "Mailed: " + str(self.mailed) + "\n"
		s += "Description:\n"
		s += Indent(cl.desc, "\t")
		s += "Files:\n"
		for f in cl.files:
			s += "\t" + f + "\n"
		typecheck(s, str)
		return s

	def EditorText(self):
		"""Render the CL in the format presented to the user's editor."""
		cl = self
		s = _change_prolog
		s += "\n"
		if cl.copied_from:
			s += "Author: " + cl.copied_from + "\n"
		if cl.url != '':
			s += 'URL: ' + cl.url + ' # cannot edit\n\n'
		if cl.private:
			s += "Private: True\n"
		s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
		s += "CC: " + JoinComma(cl.cc) + "\n"
		s += "\n"
		s += "Description:\n"
		if cl.desc == '':
			s += "\t<enter description here>\n"
		else:
			s += Indent(cl.desc, "\t")
		s += "\n"
		# Only locally-created CLs let the user edit the file list.
		if cl.local or cl.name == "new":
			s += "Files:\n"
			for f in cl.files:
				s += "\t" + f + "\n"
			s += "\n"
		typecheck(s, str)
		return s

	def PendingText(self, quick=False):
		"""Render the CL for pending-list output; quick omits reviewer detail."""
		cl = self
		s = cl.name + ":" + "\n"
		s += Indent(cl.desc, "\t")
		s += "\n"
		if cl.copied_from:
			s += "\tAuthor: " + cl.copied_from + "\n"
		if not quick:
			s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
			for (who, line) in cl.lgtm:
				s += "\t\t" + who + ": " + line + "\n"
			s += "\tCC: " + JoinComma(cl.cc) + "\n"
		s += "\tFiles:\n"
		for f in cl.files:
			s += "\t\t" + f + "\n"
		typecheck(s, str)
		return s

	def Flush(self, ui, repo):
		"""Write the CL to disk, creating it on the server first if new."""
		if self.name == "new":
			self.Upload(ui, repo, gofmt_just_warn=True, creating=True)
		dir = CodeReviewDir(ui, repo)
		path = dir + '/cl.' + self.name
		# Write to a temporary name and rename for an atomic replace.
		f = open(path+'!', "w")
		f.write(self.DiskText())
		f.close()
		if sys.platform == "win32" and os.path.isfile(path):
			os.remove(path)
		os.rename(path+'!', path)
		# Push edits back to the server only for CLs we own.
		if self.web and not self.copied_from:
			EditDesc(self.name, desc=self.desc,
				reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc),
				private=self.private)

	def Delete(self, ui, repo):
		"""Remove the CL's on-disk file."""
		dir = CodeReviewDir(ui, repo)
		os.unlink(dir + "/cl." + self.name)

	def Subject(self):
		"""Return the mail subject: first description line, truncated."""
		s = line1(self.desc)
		if len(s) > 60:
			s = s[0:55] + "..."
		if self.name != "new":
			s = "code review %s: %s" % (self.name, s)
		typecheck(s, str)
		return s

	def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False, creating=False, quiet=False):
		"""Upload CL metadata (and diffs, unless creating) to the server."""
		if not self.files and not creating:
			ui.warn("no files in change list\n")
		if ui.configbool("codereview", "force_gofmt", True) and gofmt:
			CheckFormat(ui, repo, self.files, just_warn=gofmt_just_warn)
		set_status("uploading CL metadata + diffs")
		os.chdir(repo.root)
		form_fields = [
			("content_upload", "1"),
			("reviewers", JoinComma(self.reviewer)),
			("cc", JoinComma(self.cc)),
			("description", self.desc),
			("base_hashes", ""),
		]
		if self.name != "new":
			form_fields.append(("issue", self.name))
		vcs = None
		# We do not include files when creating the issue,
		# because we want the patch sets to record the repository
		# and base revision they are diffs against. We use the patch
		# set message for that purpose, but there is no message with
		# the first patch set. Instead the message gets used as the
		# new CL's overall subject. So omit the diffs when creating
		# and then we'll run an immediate upload.
		# This has the effect that every CL begins with an empty "Patch set 1".
		if self.files and not creating:
			vcs = MercurialVCS(upload_options, ui, repo)
			data = vcs.GenerateDiff(self.files)
			files = vcs.GetBaseFiles(data)
			# Oversized diffs are sent as separate per-file patches below.
			if len(data) > MAX_UPLOAD_SIZE:
				uploaded_diff_file = []
				form_fields.append(("separate_patches", "1"))
			else:
				uploaded_diff_file = [("data", "data.diff", data)]
		else:
			uploaded_diff_file = [("data", "data.diff", emptydiff)]
		if vcs and self.name != "new":
			form_fields.append(("subject", "diff -r " + vcs.base_rev + " " + ui.expandpath("default")))
		else:
			# First upload sets the subject for the CL itself.
			form_fields.append(("subject", self.Subject()))
		ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
		response_body = MySend("/upload", body, content_type=ctype)
		# Server reply: status line, then patchset number, then patch list.
		patchset = None
		msg = response_body
		lines = msg.splitlines()
		if len(lines) >= 2:
			msg = lines[0]
			patchset = lines[1].strip()
			patches = [x.split(" ", 1) for x in lines[2:]]
		if response_body.startswith("Issue updated.") and quiet:
			pass
		else:
			ui.status(msg + "\n")
		set_status("uploaded CL metadata + diffs")
		if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
			raise hg_util.Abort("failed to update issue: " + response_body)
		# The status line ends with ".../<issue number>".
		issue = msg[msg.rfind("/")+1:]
		self.name = issue
		if not self.url:
			self.url = server_url_base + self.name
		if not uploaded_diff_file:
			set_status("uploading patches")
			patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
		if vcs:
			set_status("uploading base files")
			vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
		if send_mail:
			set_status("sending mail")
			MySend("/" + issue + "/mail", payload="")
		self.web = True
		set_status("flushing changes to disk")
		self.Flush(ui, repo)
		return

	def Mail(self, ui, repo):
		"""Post the 'please review' message and mark the CL as mailed."""
		pmsg = "Hello " + JoinComma(self.reviewer)
		if self.cc:
			pmsg += " (cc: %s)" % (', '.join(self.cc),)
		pmsg += ",\n"
		pmsg += "\n"
		repourl = ui.expandpath("default")
		if not self.mailed:
			pmsg += "I'd like you to review this change to\n" + repourl + "\n"
		else:
			pmsg += "Please take another look.\n"
		typecheck(pmsg, str)
		PostMessage(ui, self.name, pmsg, subject=self.Subject())
		self.mailed = True
		self.Flush(ui, repo)
def GoodCLName(name):
	# A well-formed CL name is a non-empty string of digits.
	# Returns the re match object (truthy) or None.
	typecheck(name, str)
	return re.match("^[0-9]+$", name)
def ParseCL(text, name):
	"""Parse the on-disk CL file format into a CL object.

	Returns (cl, 0, '') on success or (None, lineno, error) on failure.
	"""
	typecheck(text, str)
	typecheck(name, str)
	sname = None  # name of the section currently being accumulated
	lineno = 0
	sections = {
		'Author': '',
		'Description': '',
		'Files': '',
		'URL': '',
		'Reviewer': '',
		'CC': '',
		'Mailed': '',
		'Private': '',
	}
	for line in text.split('\n'):
		lineno += 1
		line = line.rstrip()
		# '#' lines are comments (see _change_prolog).
		if line != '' and line[0] == '#':
			continue
		# Blank or indented lines continue the current section.
		if line == '' or line[0] == ' ' or line[0] == '\t':
			if sname == None and line != '':
				return None, lineno, 'text outside section'
			if sname != None:
				sections[sname] += line + '\n'
			continue
		# Otherwise the line must start a known "Name: [value]" section.
		p = line.find(':')
		if p >= 0:
			s, val = line[:p].strip(), line[p+1:].strip()
			if s in sections:
				sname = s
				if val != '':
					sections[sname] += val + '\n'
				continue
		return None, lineno, 'malformed section header'
	for k in sections:
		sections[k] = StripCommon(sections[k]).rstrip()
	cl = CL(name)
	if sections['Author']:
		cl.copied_from = sections['Author']
	cl.desc = sections['Description']
	for line in sections['Files'].split('\n'):
		# Strip trailing '#' comments from file lines.
		i = line.find('#')
		if i >= 0:
			line = line[0:i].rstrip()
		line = line.strip()
		if line == '':
			continue
		cl.files.append(line)
	cl.reviewer = SplitCommaSpace(sections['Reviewer'])
	cl.cc = SplitCommaSpace(sections['CC'])
	cl.url = sections['URL']
	if sections['Mailed'] != 'False':
		# Odd default, but avoids spurious mailings when
		# reading old CLs that do not have a Mailed: line.
		# CLs created with this update will always have
		# Mailed: False on disk.
		cl.mailed = True
	if sections['Private'] in ('True', 'true', 'Yes', 'yes'):
		cl.private = True
	if cl.desc == '<enter description here>':
		cl.desc = ''
	return cl, 0, ''
def SplitCommaSpace(s):
	"""Split a comma-separated string into a list, tolerating spaces after commas."""
	typecheck(s, str)
	stripped = s.strip()
	if not stripped:
		return []
	return re.split(", *", stripped)
def CutDomain(s):
	"""Return the local part of an email address (everything before '@')."""
	typecheck(s, str)
	# partition returns the whole string unchanged when '@' is absent.
	return s.partition('@')[0]
def JoinComma(l):
	"""Join a list of strings with ", ", verifying each element is a str."""
	for item in l:
		typecheck(item, str)
	return ", ".join(l)
def ExceptionDetail():
	"""Describe the in-flight exception as "TypeName: detail".

	Strips the "<type '...'>" / "<class '...'>" wrappers that str() puts
	around exception types.
	"""
	exc_type, exc_value = sys.exc_info()[:2]
	s = str(exc_type)
	for prefix, start in (("<type '", 7), ("<class '", 8)):
		if s.startswith(prefix) and s.endswith("'>"):
			s = s[start:-2]
			break
	detail = str(exc_value)
	if detail:
		s += ": " + detail
	return s
def IsLocalCL(ui, repo, name):
	"""Report whether name is a valid CL number with a local cl.<name> file.

	CodeReviewDir returns a path that already ends in '/', so the file name
	is appended directly; the old "/cl." form produced a double slash,
	inconsistent with the path built in LoadCL.
	"""
	return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "cl." + name, 0)
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
	"""Load CL 'name' from the local cl.<name> file if present and, when
	web=True, merge in issue metadata from the code review server.

	Returns (cl, '') on success or (None, error-string) on failure.
	"""
	typecheck(name, str)
	set_status("loading CL " + name)
	if not GoodCLName(name):
		return None, "invalid CL name"
	dir = CodeReviewDir(ui, repo)
	path = dir + "cl." + name
	if os.access(path, 0):
		ff = open(path)
		text = ff.read()
		ff.close()
		cl, lineno, err = ParseCL(text, name)
		if err != "":
			return None, "malformed CL data: "+err
		cl.local = True
	else:
		cl = CL(name)
	if web:
		set_status("getting issue metadata from web")
		d = JSONGet(ui, "/api/" + name + "?messages=true")
		set_status(None)
		if d is None:
			return None, "cannot load CL %s from server" % (name,)
		# Sanity-check the server response before trusting it.
		if 'owner_email' not in d or 'issue' not in d or str(d['issue']) != name:
			return None, "malformed response loading CL data from code review server"
		cl.dict = d
		cl.reviewer = d.get('reviewers', [])
		cl.cc = d.get('cc', [])
		if cl.local and cl.copied_from and cl.desc:
			# local copy of CL written by someone else
			# and we saved a description. use that one,
			# so that committers can edit the description
			# before doing hg submit.
			pass
		else:
			cl.desc = d.get('description', "")
		cl.url = server_url_base + name
		cl.web = True
		cl.private = d.get('private', False) != False
		cl.lgtm = []
		for m in d.get('messages', []):
			if m.get('approval', False) == True:
				# Record (sender without domain, first line of message).
				who = re.sub('@.*', '', m.get('sender', ''))
				text = re.sub("\n(.|\n)*", '', m.get('text', ''))
				cl.lgtm.append((who, text))
	set_status("loaded CL " + name)
	return cl, ''
class LoadCLThread(threading.Thread):
	"""Background thread that loads one CL file (dir/f) via LoadCL."""
	def __init__(self, ui, repo, dir, f, web):
		threading.Thread.__init__(self)
		self.ui = ui
		self.repo = repo
		self.dir = dir
		self.f = f
		self.web = web
		self.cl = None  # filled in by run() on success
	def run(self):
		# File names look like "cl.<number>"; strip the "cl." prefix.
		loaded, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
		if err != '':
			self.ui.warn("loading " + self.dir + self.f + ": " + err + "\n")
			return
		self.cl = loaded
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
	"""Return a dict of CL name -> CL for every cl.* file in the codereview dir.

	CLs are loaded in parallel threads, except that the first web fetch runs
	alone so any authentication prompt happens exactly once, up front.
	"""
	dir = CodeReviewDir(ui, repo)
	m = {}
	files = [f for f in os.listdir(dir) if f.startswith('cl.')]
	if not files:
		return m
	active = []
	first = True
	for f in files:
		t = LoadCLThread(ui, repo, dir, f, web)
		t.start()
		if web and first:
			# first request: wait in case it needs to authenticate
			# otherwise we get lots of user/password prompts
			# running in parallel.
			t.join()
			if t.cl:
				m[t.cl.name] = t.cl
			first = False
		else:
			active.append(t)
	# Collect the remaining threads' results.
	for t in active:
		t.join()
		if t.cl:
			m[t.cl.name] = t.cl
	return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
	"""Return the repository root as a plain filesystem path, or None."""
	path = repo.url()
	if not path.startswith('file:'):
		ui.warn("repository %s is not in local file system\n" % (path,))
		return None
	path = path[len('file:'):]
	if path.endswith('/'):
		path = path[:-1]
	typecheck(path, str)
	return path
# Find (or make) code review directory. On error, ui.warn and return None
def CodeReviewDir(ui, repo):
	"""Return the .hg/codereview/ directory path (with trailing slash),
	creating it if necessary; None on error."""
	dir = RepoDir(ui, repo)
	if dir == None:
		return None
	dir += '/.hg/codereview/'
	if not os.path.isdir(dir):
		try:
			# 0700 = owner-only access (Python 2 octal literal).
			os.mkdir(dir, 0700)
		except:
			ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
			return None
	typecheck(dir, str)
	return dir
# Turn leading tabs into spaces, so that the common white space
# prefix doesn't get confused when people's editors write out
# some lines with spaces, some with tabs. Only a heuristic
# (some editors don't use 8 spaces either) but a useful one.
def TabsToSpaces(line):
	"""Expand each leading tab of line into eight spaces."""
	stripped = line.lstrip('\t')
	ntabs = len(line) - len(stripped)
	return ' ' * (8 * ntabs) + stripped
# Strip maximal common leading white space prefix from text
def StripCommon(text):
	"""Remove the longest common leading-whitespace prefix from text's lines.

	Blank lines are ignored when computing the prefix; leading tabs are
	normalized via TabsToSpaces first. Leading blank lines are dropped and
	trailing blank lines collapse to a single newline.
	"""
	typecheck(text, str)
	ws = None  # common whitespace prefix seen so far (None = no lines yet)
	for line in text.split('\n'):
		line = line.rstrip()
		if line == '':
			continue
		line = TabsToSpaces(line)
		white = line[:len(line)-len(line.lstrip())]
		if ws == None:
			ws = white
		else:
			# Shrink ws to the longest prefix it shares with this line.
			common = ''
			for i in range(min(len(white), len(ws))+1):
				if white[0:i] == ws[0:i]:
					common = white[0:i]
			ws = common
		if ws == '':
			break
	if ws == None:
		return text
	t = ''
	for line in text.split('\n'):
		line = line.rstrip()
		line = TabsToSpaces(line)
		if line.startswith(ws):
			line = line[len(ws):]
		# Skip blank lines before any content has been emitted.
		if line == '' and t == '':
			continue
		t += line + '\n'
	# Collapse trailing blank lines to one newline.
	while len(t) >= 2 and t[-2:] == '\n\n':
		t = t[:-1]
	typecheck(t, str)
	return t
# Indent text with indent.
def Indent(text, indent):
	"""Prefix every line of text with indent; each output line ends in a newline."""
	typecheck(text, str)
	typecheck(indent, str)
	pieces = []
	for line in text.split('\n'):
		pieces.append(indent + line + '\n')
	t = ''.join(pieces)
	typecheck(t, str)
	return t
# Return the first line of l
def line1(text):
	"""Return text up to, but not including, the first newline."""
	typecheck(text, str)
	return text.partition('\n')[0]
# Boilerplate written at the top of new CL files; '#' lines inside a CL
# file are treated as comments by ParseCL.
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""

# Regexp that a conventional CL description's first line must match.
desc_re = '^(.+: |(tag )?(release|weekly)\.|fix build|undo CL)'

# Message shown when a description does not match desc_re.
desc_msg = '''Your CL description appears not to use the standard form.
The first line of your change description is conventionally a
one-line summary of the change, prefixed by the primary affected package,
and is used as the subject for code review mail; the rest of the description
elaborates.
Examples:
encoding/rot13: new package
math: add IsInf, IsNaN
net: fix cname in LookupHost
unicode: update to Unicode 5.0.2
'''
def promptyesno(ui, msg):
	"""Ask a yes/no question; True when the user picks "yes" (the default)."""
	choice = ui.promptchoice(msg, ["&yes", "&no"], 0)
	return choice == 0
def promptremove(ui, repo, f):
	"""Offer to run 'hg remove' on f; warn when the removal fails."""
	if not promptyesno(ui, "hg remove %s (y/n)?" % (f,)):
		return
	if hg_commands.remove(ui, repo, 'path:'+f) != 0:
		ui.warn("error removing %s" % (f,))
def promptadd(ui, repo, f):
	"""Offer to run 'hg add' on f; warn when the add fails."""
	if not promptyesno(ui, "hg add %s (y/n)?" % (f,)):
		return
	if hg_commands.add(ui, repo, 'path:'+f) != 0:
		ui.warn("error adding %s" % (f,))
def EditCL(ui, repo, cl):
	"""Open cl in the user's editor, validate the result, and update cl.

	Loops until the edited text parses and passes the sanity checks (or the
	user declines to re-edit). Returns '' on success, an error string otherwise.
	"""
	set_status(None)	# do not show status
	s = cl.EditorText()
	while True:
		s = ui.edit(s, ui.username())

		# We can't trust Mercurial + Python not to die before making the change,
		# so, by popular demand, just scribble the most recent CL edit into
		# $(hg root)/last-change so that if Mercurial does die, people
		# can look there for their work.
		try:
			f = open(repo.root+"/last-change", "w")
			f.write(s)
			f.close()
		except:
			pass

		clx, line, err = ParseCL(s, cl.name)
		if err != '':
			if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
				return "change list not modified"
			continue

		# Check description.
		if clx.desc == '':
			if promptyesno(ui, "change list should have a description\nre-edit (y/n)?"):
				continue
		elif re.search('<enter reason for undo>', clx.desc):
			if promptyesno(ui, "change list description omits reason for undo\nre-edit (y/n)?"):
				continue
		elif not re.match(desc_re, clx.desc.split('\n')[0]):
			if promptyesno(ui, desc_msg + "re-edit (y/n)?"):
				continue

		# Check file list for files that need to be hg added or hg removed
		# or simply aren't understood.
		pats = ['path:'+f for f in clx.files]
		changed = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
		deleted = hg_matchPattern(ui, repo, *pats, deleted=True)
		unknown = hg_matchPattern(ui, repo, *pats, unknown=True)
		ignored = hg_matchPattern(ui, repo, *pats, ignored=True)
		clean = hg_matchPattern(ui, repo, *pats, clean=True)
		files = []
		for f in clx.files:
			if f in changed:
				files.append(f)
				continue
			if f in deleted:
				promptremove(ui, repo, f)
				files.append(f)
				continue
			if f in unknown:
				promptadd(ui, repo, f)
				files.append(f)
				continue
			if f in ignored:
				ui.warn("error: %s is excluded by .hgignore; omitting\n" % (f,))
				continue
			if f in clean:
				ui.warn("warning: %s is listed in the CL but unchanged\n" % (f,))
				files.append(f)
				continue
			# Not known to hg at all: keep plain files (with a warning),
			# drop directories and non-existent paths.
			p = repo.root + '/' + f
			if os.path.isfile(p):
				ui.warn("warning: %s is a file but not known to hg\n" % (f,))
				files.append(f)
				continue
			if os.path.isdir(p):
				ui.warn("error: %s is a directory, not a file; omitting\n" % (f,))
				continue
			ui.warn("error: %s does not exist; omitting\n" % (f,))
		clx.files = files

		# Edit passed all checks: copy the parsed fields back into cl.
		cl.desc = clx.desc
		cl.reviewer = clx.reviewer
		cl.cc = clx.cc
		cl.files = clx.files
		cl.private = clx.private
		break
	return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts, defaultcc=None):
	"""Resolve command-line args to a CL: an existing CL number, or a fresh
	CL built from the changed files matching pats.

	Returns (cl, '') on success or (None, error-string) on failure.
	"""
	if len(pats) > 0 and GoodCLName(pats[0]):
		# Existing CL: exactly one argument, no -m allowed.
		if len(pats) != 1:
			return None, "cannot specify change number and file names"
		if opts.get('message'):
			return None, "cannot use -m with existing CL"
		cl, err = LoadCL(ui, repo, pats[0], web=True)
		if err != "":
			return None, err
	else:
		cl = CL("new")
		cl.local = True
		cl.files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
		if not cl.files:
			return None, "no files changed"
	if opts.get('reviewer'):
		cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
	if opts.get('cc'):
		cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
	if defaultcc:
		cl.cc = Add(cl.cc, defaultcc)
	if cl.name == "new":
		# New CLs take their description from -m or an editor session.
		if opts.get('message'):
			cl.desc = opts.get('message')
		else:
			err = EditCL(ui, repo, cl)
			if err != '':
				return None, err
	return cl, ""
#######################################################################
# Change list file management
# Return list of changed files in repository that match pats.
# The patterns came from the command line, so we warn
# if they have no effect or cannot be understood.
def ChangedFiles(ui, repo, pats, taken=None):
	"""Return sorted changed (modified/added/removed) files matching pats,
	minus any files already claimed in the taken map."""
	taken = taken or {}
	# Run each pattern separately so that we can warn about
	# patterns that didn't do anything useful.
	for p in pats:
		for f in hg_matchPattern(ui, repo, p, unknown=True):
			promptadd(ui, repo, f)
		for f in hg_matchPattern(ui, repo, p, removed=True):
			promptremove(ui, repo, f)
		files = hg_matchPattern(ui, repo, p, modified=True, added=True, removed=True)
		for f in files:
			if f in taken:
				ui.warn("warning: %s already in CL %s\n" % (f, taken[f].name))
		if not files:
			ui.warn("warning: %s did not match any modified files\n" % (p,))
	# Again, all at once (eliminates duplicates)
	l = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
	l.sort()
	if taken:
		l = Sub(l, taken.keys())
	return l
# Return list of changed files in repository that match pats and still exist.
def ChangedExistingFiles(ui, repo, pats, opts):
	"""Return the sorted modified+added (still existing) files matching pats."""
	matched = hg_matchPattern(ui, repo, *pats, modified=True, added=True)
	matched.sort()
	return matched
# Return list of files claimed by existing CLs
def Taken(ui, repo):
	"""Return a dict mapping each file name to the local CL that lists it."""
	claimed = {}
	for cl in LoadAllCL(ui, repo, web=False).values():
		for name in cl.files:
			claimed[name] = cl
	return claimed
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats):
	# Taken() supplies the claimed-file map so ChangedFiles can subtract it.
	return ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
#######################################################################
# File format checking.

def CheckFormat(ui, repo, files, just_warn=False):
	"""Run all source-format checks (gofmt, tab indentation) over files.

	With just_warn=True, problems warn instead of aborting.
	"""
	set_status("running gofmt")
	CheckGofmt(ui, repo, files, just_warn)
	CheckTabfmt(ui, repo, files, just_warn)
# Check that gofmt run on the list of files does not change them
def CheckGofmt(ui, repo, files, just_warn):
	"""Run 'gofmt -l' on the Go files; warn or abort if any need reformatting."""
	files = gofmt_required(files)
	if not files:
		return
	# Convert repo-relative names to paths relative to the current directory
	# and drop any that are not accessible.
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	files = [f for f in files if os.access(f, 0)]
	if not files:
		return
	try:
		cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=sys.platform != "win32")
		cmd.stdin.close()
	except:
		raise hg_util.Abort("gofmt: " + ExceptionDetail())
	data = cmd.stdout.read()
	errors = cmd.stderr.read()
	cmd.wait()
	set_status("done with gofmt")
	if len(errors) > 0:
		ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
		return
	if len(data) > 0:
		# 'gofmt -l' prints the names of files whose formatting would change.
		msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
		if just_warn:
			ui.warn("warning: " + msg + "\n")
		else:
			raise hg_util.Abort(msg)
	return
# Check that *.[chys] files indent using tabs.
def CheckTabfmt(ui, repo, files, just_warn):
	"""Warn or abort if any C/asm sources under src/ use space indentation.

	With just_warn=True, problems warn instead of aborting.
	"""
	files = [f for f in files if f.startswith('src/') and re.search(r"\.[chys]$", f) and not re.search(r"\.tab\.[ch]$", f)]
	if not files:
		return
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	files = [f for f in files if os.access(f, 0)]
	badfiles = []
	for f in files:
		try:
			fp = open(f, 'r')
			try:
				for line in fp:
					# Four leading spaces is enough to complain about,
					# except that some Plan 9 code uses four spaces as the label indent,
					# so allow that.
					if line.startswith('    ') and not re.match('    [A-Za-z0-9_]+:', line):
						badfiles.append(f)
						break
			finally:
				# The old code iterated an anonymous open() and never
				# closed the handle; close it explicitly.
				fp.close()
		except:
			# ignore cannot open file, etc.
			pass
	if len(badfiles) > 0:
		msg = "these files use spaces for indentation (use tabs instead):\n\t" + "\n\t".join(badfiles)
		if just_warn:
			ui.warn("warning: " + msg + "\n")
		else:
			raise hg_util.Abort(msg)
	return
#######################################################################
# CONTRIBUTORS file parsing

# Cache of the parsed CONTRIBUTORS file; None until a successful read.
contributorsCache = None
# Optional URL to fetch CONTRIBUTORS from instead of the repository copy.
contributorsURL = None

def ReadContributors(ui, repo):
	"""Parse CONTRIBUTORS into a dict: lower-cased email -> (name, email).

	The first email on a line is canonical; alternate addresses map to it.
	On failure to open the file, warns and returns an empty dict — the old
	code returned None, which made callers' 'user in contributors' tests
	raise a TypeError.
	"""
	global contributorsCache
	if contributorsCache is not None:
		return contributorsCache

	try:
		if contributorsURL is not None:
			opening = contributorsURL
			f = urllib2.urlopen(contributorsURL)
		else:
			opening = repo.root + '/CONTRIBUTORS'
			f = open(opening, 'r')
	except:
		ui.write("warning: cannot open %s: %s\n" % (opening, ExceptionDetail()))
		# Do not cache the failure: a later call may succeed.
		return {}

	contributors = {}
	for line in f:
		# CONTRIBUTORS is a list of lines like:
		#	Person <email>
		#	Person <email> <alt-email>
		# The first email address is the one used in commit logs.
		if line.startswith('#'):
			continue
		m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
		if m:
			name = m.group(1)
			email = m.group(2)[1:-1]
			contributors[email.lower()] = (name, email)
			for extra in m.group(3).split():
				contributors[extra[1:-1].lower()] = (name, email)
	f.close()

	contributorsCache = contributors
	return contributors
def CheckContributor(ui, repo, user=None):
	"""Return the canonical 'Name <email>' line for user; abort if absent."""
	set_status("checking CONTRIBUTORS file")
	who, userline = FindContributor(ui, repo, user, warn=False)
	if userline:
		return userline
	raise hg_util.Abort("cannot find %s in CONTRIBUTORS" % (who,))
def FindContributor(ui, repo, user=None, warn=True):
	"""Look up user (defaults to the [ui] username) in CONTRIBUTORS.

	Returns (email, "Name <email>") on success, or (user, None) if unknown.
	"""
	if not user:
		user = ui.config("ui", "username")
		if not user:
			raise hg_util.Abort("[ui] username is not configured in .hgrc")
	user = user.lower()
	# Accept "Name <email>" forms; keep just the email.
	m = re.match(r".*<(.*)>", user)
	if m:
		user = m.group(1)
	contributors = ReadContributors(ui, repo)
	if user not in contributors:
		if warn:
			ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
		return user, None
	user, email = contributors[user]
	return email, "%s <%s>" % (user, email)
#######################################################################
# Mercurial helper functions.
# Read http://mercurial.selenic.com/wiki/MercurialApi before writing any of these.
# We use the ui.pushbuffer/ui.popbuffer + hg_commands.xxx tricks for all interaction
# with Mercurial. It has proved the most stable as they make changes.

hgversion = hg_util.version()

# We require Mercurial 1.9 and suggest Mercurial 2.0.
# The details of the scmutil package changed then,
# so allowing earlier versions would require extra band-aids below.
# Ubuntu 11.10 ships with Mercurial 1.9.1 as the default version.
hg_required = "1.9"
hg_suggested = "2.0"

old_message = """
The code review extension requires Mercurial """+hg_required+""" or newer.
You are using Mercurial """+hgversion+""".
To install a new Mercurial, use
sudo easy_install mercurial=="""+hg_suggested+"""
or visit http://mercurial.selenic.com/downloads/.
"""

linux_message = """
You may need to clear your current Mercurial installation by running:
sudo apt-get remove mercurial mercurial-common
sudo rm -rf /etc/mercurial
"""

# NOTE(review): this is a lexicographic string comparison of versions;
# adequate for the 1.x/2.x range in play, but e.g. "10.0" < "1.9".
if hgversion < hg_required:
	msg = old_message
	if os.access("/etc/mercurial", 0):
		msg += linux_message
	raise hg_util.Abort(msg)

# Aliased Mercurial internals used throughout the rest of the extension.
from mercurial.hg import clean as hg_clean
from mercurial import cmdutil as hg_cmdutil
from mercurial import error as hg_error
from mercurial import match as hg_match
from mercurial import node as hg_node
class uiwrap(object):
	"""Capture Mercurial ui output.

	Construction buffers ui writes and forces quiet/verbose off; output()
	restores the flags and returns the buffered text.
	"""
	def __init__(self, ui):
		self.ui = ui
		ui.pushbuffer()
		self.oldQuiet, ui.quiet = ui.quiet, True
		self.oldVerbose, ui.verbose = ui.verbose, False
	def output(self):
		self.ui.quiet = self.oldQuiet
		self.ui.verbose = self.oldVerbose
		return self.ui.popbuffer()
def to_slash(path):
	"""Normalize Windows backslashes in path to forward slashes; no-op elsewhere."""
	if sys.platform != "win32":
		return path
	return path.replace('\\', '/')
def hg_matchPattern(ui, repo, *pats, **opts):
	"""Run 'hg status' with the given patterns/options and return the matched
	file names relative to the repository root."""
	w = uiwrap(ui)
	hg_commands.status(ui, repo, *pats, **opts)
	text = w.output()
	ret = []
	prefix = to_slash(os.path.realpath(repo.root))+'/'
	for line in text.split('\n'):
		# status lines look like "M path"; f[1] is the path.
		f = line.split()
		if len(f) > 1:
			if len(pats) > 0:
				# Given patterns, Mercurial shows relative to cwd
				p = to_slash(os.path.realpath(f[1]))
				if not p.startswith(prefix):
					print >>sys.stderr, "File %s not in repo root %s.\n" % (p, prefix)
				else:
					ret.append(p[len(prefix):])
			else:
				# Without patterns, Mercurial shows relative to root (what we want)
				ret.append(to_slash(f[1]))
	return ret
def hg_heads(ui, repo):
	"""Return the output of 'hg heads' as a string."""
	wrapper = uiwrap(ui)
	hg_commands.heads(ui, repo)
	return wrapper.output()
# Lines of Mercurial output that carry no useful information for the user.
noise = [
	"",
	"resolving manifests",
	"searching for changes",
	"couldn't find merge tool hgmerge",
	"adding changesets",
	"adding manifests",
	"adding file changes",
	"all local heads known remotely",
]

def isNoise(line):
	"""Report whether line is one of the known noise lines."""
	return str(line) in noise
def hg_incoming(ui, repo):
	"""Return 'hg incoming' output; abort on errors (a result of 1 is tolerated)."""
	wrapper = uiwrap(ui)
	result = hg_commands.incoming(ui, repo, force=False, bundle="")
	if result and result != 1:
		raise hg_util.Abort(result)
	return wrapper.output()
def hg_log(ui, repo, **opts):
	"""Return 'hg log' output; missing filter options default to ""."""
	for k in ['date', 'keyword', 'rev', 'user']:
		# dict.has_key is Python-2-only and long deprecated; 'in' is the
		# equivalent membership test on both Python 2 and 3.
		if k not in opts:
			opts[k] = ""
	w = uiwrap(ui)
	ret = hg_commands.log(ui, repo, **opts)
	if ret:
		raise hg_util.Abort(ret)
	return w.output()
def hg_outgoing(ui, repo, **opts):
	"""Return 'hg outgoing' output; abort on errors (a result of 1 is tolerated)."""
	wrapper = uiwrap(ui)
	result = hg_commands.outgoing(ui, repo, **opts)
	if result and result != 1:
		raise hg_util.Abort(result)
	return wrapper.output()
def hg_pull(ui, repo, **opts):
	"""Run 'hg pull', rewriting its verbose output into a short change log."""
	w = uiwrap(ui)
	ui.quiet = False
	ui.verbose = True # for file list
	err = hg_commands.pull(ui, repo, **opts)
	for line in w.output().split('\n'):
		if isNoise(line):
			continue
		# Order matters: once a line is rewritten to 'mv ...' it no longer
		# matches the later 'getting ' prefixes.
		if line.startswith('moving '):
			line = 'mv ' + line[len('moving '):]
		if line.startswith('getting ') and line.find(' to ') >= 0:
			line = 'mv ' + line[len('getting '):]
		if line.startswith('getting '):
			line = '+ ' + line[len('getting '):]
		if line.startswith('removing '):
			line = '- ' + line[len('removing '):]
		ui.write(line + '\n')
	return err
def hg_push(ui, repo, **opts):
	"""Run 'hg push', echoing its output with known noise lines removed."""
	wrapper = uiwrap(ui)
	ui.quiet = False
	ui.verbose = True
	err = hg_commands.push(ui, repo, **opts)
	for line in wrapper.output().split('\n'):
		if isNoise(line):
			continue
		ui.write(line + '\n')
	return err
def hg_commit(ui, repo, *pats, **opts):
	# Thin pass-through to Mercurial's commit command.
	return hg_commands.commit(ui, repo, *pats, **opts)
#######################################################################
# Mercurial precommit hook to disable commit except through this interface.

# Set to True elsewhere in the extension when a commit should be permitted.
commit_okay = False

def precommithook(ui, repo, **opts):
	"""Precommit hook: block direct 'hg commit' unless commit_okay is set.

	Hook convention: returning False allows the commit, True blocks it.
	"""
	if not commit_okay:
		ui.write("\ncodereview extension enabled; use mail, upload, or submit instead of commit\n\n")
		return True
	return False
#######################################################################
# @clnumber file pattern support
# We replace scmutil.match with the MatchAt wrapper to add the @clnumber pattern.

# Globals stashed by InstallMatch for use inside MatchAt.
match_repo = None
match_ui = None
match_orig = None

def InstallMatch(ui, repo):
	"""Monkey-patch mercurial.scmutil.match with MatchAt, saving the original."""
	global match_repo
	global match_ui
	global match_orig
	match_ui = ui
	match_repo = repo
	from mercurial import scmutil
	match_orig = scmutil.match
	scmutil.match = MatchAt
def MatchAt(ctx, pats=None, opts=None, globbed=False, default='relpath'):
	"""Replacement for scmutil.match that expands '@clnumber' patterns.

	'@NNNNNN' expands to the files of CL NNNNNN; '@default' to the files not
	claimed by any CL. Other patterns pass through unchanged.
	"""
	taken = []  # the @... patterns that were consumed
	files = []  # files those patterns expand to
	pats = pats or []
	opts = opts or {}
	for p in pats:
		if p.startswith('@'):
			taken.append(p)
			clname = p[1:]
			if clname == "default":
				files = DefaultFiles(match_ui, match_repo, [])
			else:
				if not GoodCLName(clname):
					raise hg_util.Abort("invalid CL name " + clname)
				cl, err = LoadCL(match_repo.ui, match_repo, clname, web=False)
				if err != '':
					raise hg_util.Abort("loading CL " + clname + ": " + err)
				if not cl.files:
					raise hg_util.Abort("no files in CL " + clname)
				files = Add(files, cl.files)
	# Replace the consumed @patterns with explicit path: patterns.
	pats = Sub(pats, taken) + ['path:'+f for f in files]
	# work-around for http://selenic.com/hg/rev/785bbc8634f8
	if not hasattr(ctx, 'match'):
		ctx = ctx[None]
	return match_orig(ctx, pats=pats, opts=opts, globbed=globbed, default=default)
#######################################################################
# Commands added by code review extension.

# Mercurial >= 2.1 requires command functions to return integer exit codes;
# older versions accepted arbitrary error strings (returning a string to new
# Mercurial dies with a TypeError). hgcommand adapts old-style functions:
# integers pass through, a falsy result becomes 0, and an error string is
# raised as an abort.
def hgcommand(f):
	def wrapped(ui, repo, *pats, **opts):
		result = f(ui, repo, *pats, **opts)
		# 'type(...) is int' (not isinstance) so bool results are not
		# silently treated as exit codes, matching the original behavior.
		if type(result) is int:
			return result
		if result:
			raise hg_util.Abort(result)
		return 0
	wrapped.__doc__ = f.__doc__
	return wrapped
#######################################################################
# hg change

@hgcommand
def change(ui, repo, *pats, **opts):
	"""create, edit or delete a change list

	Create, edit or delete a change list.
	A change list is a group of files to be reviewed and submitted together,
	plus a textual description of the change.
	Change lists are referred to by simple alphanumeric names.

	Changes must be reviewed before they can be submitted.

	In the absence of options, the change command opens the
	change list for editing in the default editor.

	Deleting a change with the -d or -D flag does not affect
	the contents of the files listed in that change. To revert
	the files listed in a change, use

		hg revert @123456

	before running hg change -d 123456.
	"""
	if codereview_disabled:
		return codereview_disabled

	dirty = {}  # CLs that need to be flushed to disk at the end
	if len(pats) > 0 and GoodCLName(pats[0]):
		# Editing an existing CL.
		name = pats[0]
		if len(pats) != 1:
			return "cannot specify CL name and file patterns"
		pats = pats[1:]
		cl, err = LoadCL(ui, repo, name, web=True)
		if err != '':
			return err
		if not cl.local and (opts["stdin"] or not opts["stdout"]):
			return "cannot change non-local CL " + name
	else:
		# Creating a new CL.
		name = "new"
		cl = CL("new")
		if repo[None].branch() != "default":
			return "cannot create CL outside default branch; switch with 'hg update default'"
		dirty[cl] = True
		files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))

	if opts["delete"] or opts["deletelocal"]:
		# -d deletes everywhere; -D removes only the local copy.
		if opts["delete"] and opts["deletelocal"]:
			return "cannot use -d and -D together"
		flag = "-d"
		if opts["deletelocal"]:
			flag = "-D"
		if name == "new":
			return "cannot use "+flag+" with file patterns"
		if opts["stdin"] or opts["stdout"]:
			return "cannot use "+flag+" with -i or -o"
		if not cl.local:
			return "cannot change non-local CL " + name
		if opts["delete"]:
			if cl.copied_from:
				return "original author must delete CL; hg change -D will remove locally"
			PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed)
			EditDesc(cl.name, closed=True, private=cl.private)
		cl.Delete(ui, repo)
		return

	if opts["stdin"]:
		# Read a replacement CL description from standard input.
		s = sys.stdin.read()
		clx, line, err = ParseCL(s, name)
		if err != '':
			return "error parsing change list: line %d: %s" % (line, err)
		if clx.desc is not None:
			cl.desc = clx.desc;
			dirty[cl] = True
		if clx.reviewer is not None:
			cl.reviewer = clx.reviewer
			dirty[cl] = True
		if clx.cc is not None:
			cl.cc = clx.cc
			dirty[cl] = True
		if clx.files is not None:
			cl.files = clx.files
			dirty[cl] = True
		if clx.private != cl.private:
			cl.private = clx.private
			dirty[cl] = True

	if not opts["stdin"] and not opts["stdout"]:
		# Interactive edit.
		if name == "new":
			cl.files = files
		err = EditCL(ui, repo, cl)
		if err != "":
			return err
		dirty[cl] = True

	for d, _ in dirty.items():
		name = d.name
		d.Flush(ui, repo)
		if name == "new":
			d.Upload(ui, repo, quiet=True)

	if opts["stdout"]:
		ui.write(cl.EditorText())
	elif opts["pending"]:
		ui.write(cl.PendingText())
	elif name == "new":
		if ui.quiet:
			ui.write(cl.name)
		else:
			ui.write("CL created: " + cl.url + "\n")
	return
#######################################################################
# hg code-login (broken?)

@hgcommand
def code_login(ui, repo, **opts):
	"""log in to code review server

	Logs in to the code review server, saving a cookie in
	a file in your home directory.
	"""
	if codereview_disabled:
		return codereview_disabled

	# Issuing a request with no path presumably forces the authentication
	# round-trip that saves the cookie — see MySend.
	MySend(None)
#######################################################################
# hg clpatch / undo / release-apply / download
# All concerned with applying or unapplying patches to the repository.

@hgcommand
def clpatch(ui, repo, clname, **opts):
	"""import a patch from the code review server

	Imports a patch from the code review server into the local client.
	If the local client has already modified any of the files that the
	patch modifies, this command will refuse to apply the patch.

	Submitting an imported patch will keep the original author's
	name as the Author: line but add your own name to a Committer: line.
	"""
	if repo[None].branch() != "default":
		return "cannot run hg clpatch outside default branch"
	return clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
@hgcommand
def undo(ui, repo, clname, **opts):
	"""undo the effect of a CL

	Creates a new CL that undoes an earlier CL.
	After creating the CL, opens the CL text for editing so that
	you can add the reason for the undo to the description.
	"""
	if repo[None].branch() != "default":
		return "cannot run hg undo outside default branch"
	return clpatch_or_undo(ui, repo, clname, opts, mode="undo")
@hgcommand
def release_apply(ui, repo, clname, **opts):
	"""apply a CL to the release branch

	Creates a new CL copying a previously committed change
	from the main branch to the release branch.
	The current client must either be clean or already be in
	the release branch.

	The release branch must be created by starting with a
	clean client, disabling the code review plugin, and running:

		hg update weekly.YYYY-MM-DD
		hg branch release-branch.rNN
		hg commit -m 'create release-branch.rNN'
		hg push --new-branch

	Then re-enable the code review plugin.

	People can test the release branch by running

		hg update release-branch.rNN

	in a clean client. To return to the normal tree,

		hg update default

	Move changes since the weekly into the release branch
	using hg release-apply followed by the usual code review
	process and hg submit.

	When it comes time to tag the release, record the
	final long-form tag of the release-branch.rNN
	in the *default* branch's .hgtags file. That is, run

		hg update default

	and then edit .hgtags as you would for a weekly.
	"""
	c = repo[None]
	if not releaseBranch:
		return "no active release branches"
	if c.branch() != releaseBranch:
		# Switch to the release branch, refusing to clobber local edits.
		if c.modified() or c.added() or c.removed():
			raise hg_util.Abort("uncommitted local changes - cannot switch branches")
		err = hg_clean(repo, releaseBranch)
		if err:
			return err
	try:
		err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
		if err:
			raise hg_util.Abort(err)
	except Exception, e:
		# Restore the default branch before propagating any failure.
		hg_clean(repo, "default")
		raise e
	return None
def rev2clname(rev):
	"""Extract the CL number from a revision description.

	A description may contain several codereview URLs (earlier ones can be
	part of the user-written text); the last one names the actual CL.
	Returns "" when none is present.
	"""
	matches = re.findall('(?m)^http://codereview.appspot.com/([0-9]+)$', rev.description())
	if not matches:
		return ""
	return matches[-1]
# Description templates for CLs generated by undo and release-apply.
# undoHeader's %s slots are (CL number, short revision hash); backportHeader's
# are (release branch, first description line, CL number, short hash).
undoHeader = """undo CL %s / %s
<enter reason for undo>
««« original CL description
"""

undoFooter = """
»»»
"""

backportHeader = """[%s] %s
««« CL %s / %s
"""

backportFooter = """
»»»
"""
# Implementation of clpatch/undo.
def clpatch_or_undo(ui, repo, clname, opts, mode):
	"""Shared implementation of clpatch, undo, and release-apply.

	mode is one of "clpatch", "undo", "backport". Returns an error string
	on failure, a falsy value on success.
	"""
	if codereview_disabled:
		return codereview_disabled

	if mode == "undo" or mode == "backport":
		# Find revision in Mercurial repository.
		# Assume CL number is 7+ decimal digits.
		# Otherwise is either change log sequence number (fewer decimal digits),
		# hexadecimal hash, or tag name.
		# Mercurial will fall over long before the change log
		# sequence numbers get to be 7 digits long.
		if re.match('^[0-9]{7,}$', clname):
			found = False
			for r in hg_log(ui, repo, keyword="codereview.appspot.com/"+clname, limit=100, template="{node}\n").split():
				rev = repo[r]
				# Last line with a code review URL is the actual review URL.
				# Earlier ones might be part of the CL description.
				n = rev2clname(rev)
				if n == clname:
					found = True
					break
			if not found:
				return "cannot find CL %s in local repository" % clname
		else:
			rev = repo[clname]
			if not rev:
				return "unknown revision %s" % clname
			clname = rev2clname(rev)
			if clname == "":
				return "cannot find CL name in revision description"

		# Create fresh CL and start with patch that would reverse the change.
		vers = hg_node.short(rev.node())
		cl = CL("new")
		desc = str(rev.description())
		if mode == "undo":
			cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
		else:
			# NOTE(review): uses undoFooter here rather than backportFooter;
			# the two strings are textually identical, so behavior matches.
			cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + undoFooter
		v1 = vers
		v0 = hg_node.short(rev.parents()[0].node())
		if mode == "undo":
			# Reverse diff: new -> old.
			arg = v1 + ":" + v0
		else:
			# Forward diff for the backport.
			vers = v0
			arg = v0 + ":" + v1
		patch = RunShell(["hg", "diff", "--git", "-r", arg])

	else: # clpatch
		cl, vers, patch, err = DownloadCL(ui, repo, clname)
		if err != "":
			return err
		if patch == emptydiff:
			return "codereview issue %s has no diff" % clname

	# find current hg version (hg identify)
	ctx = repo[None]
	parents = ctx.parents()
	id = '+'.join([hg_node.short(p.node()) for p in parents])

	# if version does not match the patch version,
	# try to update the patch line numbers.
	if vers != "" and id != vers:
		# "vers in repo" gives the wrong answer
		# on some versions of Mercurial. Instead, do the actual
		# lookup and catch the exception.
		try:
			repo[vers].description()
		except:
			return "local repository is out of date; sync to get %s" % (vers)
		patch1, err = portPatch(repo, patch, vers, id)
		if err != "":
			if not opts["ignore_hgpatch_failure"]:
				return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
		else:
			patch = patch1
	argv = ["hgpatch"]
	if opts["no_incoming"] or mode == "backport":
		argv += ["--checksync=false"]
	try:
		cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
	except:
		return "hgpatch: " + ExceptionDetail() + "\nInstall hgpatch with:\n$ go get code.google.com/p/go.codereview/cmd/hgpatch\n"

	# hgpatch applies the patch and prints the files it touched.
	out, err = cmd.communicate(patch)
	if cmd.returncode != 0 and not opts["ignore_hgpatch_failure"]:
		return "hgpatch failed"
	cl.local = True
	cl.files = out.strip().split()
	if not cl.files and not opts["ignore_hgpatch_failure"]:
		return "codereview issue %s has no changed files" % clname
	files = ChangedFiles(ui, repo, [])
	extra = Sub(cl.files, files)
	if extra:
		ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
	cl.Flush(ui, repo)
	if mode == "undo":
		# Let the user record the reason for the undo in the description.
		err = EditCL(ui, repo, cl)
		if err != "":
			return "CL created, but error editing: " + err
		cl.Flush(ui, repo)
	else:
		ui.write(cl.PendingText() + "\n")
# portPatch rewrites patch from being a patch against
# oldver to being a patch against newver.
def portPatch(repo, patch, oldver, newver):
	"""Shift @@ hunk line numbers in patch from oldver to newver.

	Returns (newpatch, "") on success, or a failure pair whose second element
	is the error (NOTE(review): the failure value is None in one branch and
	"" in the other; callers should rely only on the error string).
	"""
	lines = patch.splitlines(True) # True = keep \n
	delta = None  # line-number deltas for the file of the current hunk
	for i in range(len(lines)):
		line = lines[i]
		if line.startswith('--- a/'):
			# New file section: recompute its deltas.
			file = line[6:-1]
			delta = fileDeltas(repo, file, oldver, newver)
		if not delta or not line.startswith('@@ '):
			continue
		# @@ -x,y +z,w @@ means the patch chunk replaces
		# the original file's line numbers x up to x+y with the
		# line numbers z up to z+w in the new file.
		# Find the delta from x in the original to the same
		# line in the current version and add that delta to both
		# x and z.
		m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
		if not m:
			return None, "error parsing patch line numbers"
		n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
		d, err = lineDelta(delta, n1, len1)
		if err != "":
			return "", err
		n1 += d
		n2 += d
		lines[i] = "@@ -%d,%d +%d,%d @@\n" % (n1, len1, n2, len2)

	newpatch = ''.join(lines)
	return newpatch, ""
# fileDelta returns the line number deltas for the given file's
# changes from oldver to newver.
# The deltas are a list of (n, len, newdelta) triples that say
# lines [n, n+len) were modified, and after that range the
# line numbers are +newdelta from what they were before.
def fileDeltas(repo, file, oldver, newver):
	diff = RunShell(["hg", "diff", "--git", "-r", oldver + ":" + newver, "path:" + file], silent_ok=True)
	hunk = re.compile(r'@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@')
	deltas = []
	for line in diff.splitlines():
		m = hunk.match(line)
		if m:
			n1, len1, n2, len2 = [int(g) for g in m.groups()]
			# Net size change of this hunk shifts all following lines.
			deltas.append((n1, len1, n2+len2-(n1+len1)))
	return deltas
# lineDelta finds the appropriate line number delta to apply to the lines [n, n+len).
# It returns an error if those lines were rewritten by the patch.
def lineDelta(deltas, n, len):
	# deltas is fileDeltas output: (old, oldlen, newdelta) triples sorted by
	# old, meaning lines [old, old+oldlen) changed, and lines after that
	# range are shifted by newdelta.
	# (The parameter name "len" shadows the builtin; kept for compatibility
	# with existing callers.)
	d = 0
	for (old, oldlen, newdelta) in deltas:
		if old >= n+len:
			# Delta entirely past the range of interest; earlier d applies.
			break
		# BUG FIX: the overlap test must use the delta's own length
		# (oldlen), not the query length, to decide whether
		# [old, old+oldlen) intersects [n, n+len).
		if old+oldlen > n:
			return 0, "patch and recent changes conflict"
		d = newdelta
	return d, ""
@hgcommand
def download(ui, repo, clname, **opts):
	"""download a change from the code review server

	Download prints a description of the given change list
	followed by its diff, downloaded from the code review server.
	"""
	if codereview_disabled:
		return codereview_disabled

	cl, vers, patch, err = DownloadCL(ui, repo, clname)
	if err != "":
		return err
	# Description first, then the diff.
	for text in (cl.EditorText(), patch):
		ui.write(text + "\n")
	return
#######################################################################
# hg file
@hgcommand
def file(ui, repo, clname, pat, *pats, **opts):
	"""assign files to or remove files from a change list

	Assign files to or (with -d) remove files from a change list.
	The -d option only removes files from the change list.
	It does not edit them or remove them from the repository.
	"""
	if codereview_disabled:
		return codereview_disabled

	pats = tuple([pat] + list(pats))
	if not GoodCLName(clname):
		return "invalid CL name " + clname

	# CLs whose file lists we modified and that must be flushed at the end.
	dirty = {}
	cl, err = LoadCL(ui, repo, clname, web=False)
	if err != '':
		return err
	if not cl.local:
		return "cannot change non-local CL " + clname

	files = ChangedFiles(ui, repo, pats)

	if opts["delete"]:
		# -d: remove matching files from this CL only.
		oldfiles = Intersect(files, cl.files)
		if oldfiles:
			if not ui.quiet:
				# Print the commands that would re-add the files.
				ui.status("# Removing files from CL. To undo:\n")
				ui.status("# cd %s\n" % (repo.root))
				for f in oldfiles:
					ui.status("# hg file %s %s\n" % (cl.name, f))
			cl.files = Sub(cl.files, oldfiles)
			cl.Flush(ui, repo)
		else:
			ui.status("no such files in CL")
		return

	if not files:
		return "no such modified files"

	# Only add files not already in this CL.
	files = Sub(files, cl.files)
	taken = Taken(ui, repo)
	warned = False
	for f in files:
		if f in taken:
			# File currently belongs to another CL; steal it.
			if not warned and not ui.quiet:
				ui.status("# Taking files from other CLs. To undo:\n")
				ui.status("# cd %s\n" % (repo.root))
				warned = True
			ocl = taken[f]
			if not ui.quiet:
				ui.status("# hg file %s %s\n" % (ocl.name, f))
			if ocl not in dirty:
				# Remove all the files we're claiming in one shot.
				ocl.files = Sub(ocl.files, files)
				dirty[ocl] = True
	cl.files = Add(cl.files, files)
	dirty[cl] = True
	for d, _ in dirty.items():
		d.Flush(ui, repo)
	return
#######################################################################
# hg gofmt
@hgcommand
def gofmt(ui, repo, *pats, **opts):
	"""apply gofmt to modified files

	Applies gofmt to the modified files in the repository that match
	the given patterns.
	"""
	if codereview_disabled:
		return codereview_disabled

	files = ChangedExistingFiles(ui, repo, pats, opts)
	files = gofmt_required(files)
	if not files:
		return "no modified go files"
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	try:
		cmd = ["gofmt", "-l"]
		if not opts["list"]:
			# Without -l we rewrite the files in place.
			cmd += ["-w"]
		if os.spawnvp(os.P_WAIT, "gofmt", cmd + files) != 0:
			raise hg_util.Abort("gofmt did not exit cleanly")
	except hg_error.Abort, e:
		# Let our own Abort messages propagate unwrapped.
		raise
	except:
		raise hg_util.Abort("gofmt: " + ExceptionDetail())
	return
def gofmt_required(files):
	"""Filter files down to the Go files gofmt should process.

	Everything under test/ is exempt, except test/bench/.
	"""
	required = []
	for f in files:
		if not f.endswith('.go'):
			continue
		if f.startswith('test/') and not f.startswith('test/bench/'):
			continue
		required.append(f)
	return required
#######################################################################
# hg mail
@hgcommand
def mail(ui, repo, *pats, **opts):
	"""mail a change for review

	Uploads a patch to the code review server and then sends mail
	to the reviewer and CC list asking for a review.
	"""
	if codereview_disabled:
		return codereview_disabled

	cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
	if err != "":
		return err
	# Upload first so the server has the current patch before mail goes out.
	cl.Upload(ui, repo, gofmt_just_warn=True)
	if not cl.reviewer:
		# If no reviewer is listed, assign the review to defaultcc.
		# This makes sure that it appears in the
		# codereview.appspot.com/user/defaultcc
		# page, so that it doesn't get dropped on the floor.
		if not defaultcc:
			return "no reviewers listed in CL"
		# Don't leave the new reviewers duplicated on the CC line.
		cl.cc = Sub(cl.cc, defaultcc)
		cl.reviewer = defaultcc
		cl.Flush(ui, repo)

	if cl.files == []:
		return "no changed files, not sending mail"

	cl.Mail(ui, repo)
#######################################################################
# hg p / hg pq / hg ps / hg pending
@hgcommand
def ps(ui, repo, *pats, **opts):
	"""alias for hg p --short
	"""
	# Delegate to pending with the short flag forced on.
	opts.update(short=True)
	return pending(ui, repo, *pats, **opts)
@hgcommand
def pq(ui, repo, *pats, **opts):
	"""alias for hg p --quick
	"""
	# Delegate to pending with the quick flag forced on.
	opts.update(quick=True)
	return pending(ui, repo, *pats, **opts)
@hgcommand
def pending(ui, repo, *pats, **opts):
	"""show pending changes

	Lists pending changes followed by a list of unassigned but modified files.
	"""
	if codereview_disabled:
		return codereview_disabled

	quick = opts.get('quick', False)
	short = opts.get('short', False)
	# Consulting the web server is slow; skip it for the quick/short forms.
	m = LoadAllCL(ui, repo, web=not quick and not short)
	for name in sorted(m.keys()):
		cl = m[name]
		if short:
			ui.write(name + "\t" + line1(cl.desc) + "\n")
		else:
			ui.write(cl.PendingText(quick=quick) + "\n")

	if short:
		return
	files = DefaultFiles(ui, repo, [])
	if len(files) > 0:
		ui.write("Changed files not in any CL:\n" + "".join("\t" + f + "\n" for f in files))
#######################################################################
# hg submit
def need_sync():
	# Raise rather than return an error string: callers are deep inside
	# command flow and hg_util.Abort unwinds cleanly through Mercurial.
	raise hg_util.Abort("local repository out of date; must sync before submit")
@hgcommand
def submit(ui, repo, *pats, **opts):
	"""submit change to remote repository

	Submits change to remote repository.
	Bails out if the local repository is not in sync with the remote one.
	"""
	if codereview_disabled:
		return codereview_disabled

	# We already called this on startup but sometimes Mercurial forgets.
	set_mercurial_encoding_to_utf8()

	if not opts["no_incoming"] and hg_incoming(ui, repo):
		need_sync()

	cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
	if err != "":
		return err

	user = None
	if cl.copied_from:
		user = cl.copied_from
	userline = CheckContributor(ui, repo, user)
	typecheck(userline, str)

	# Build the R=/TBR=/CC= trailer appended to the commit message.
	about = ""
	if cl.reviewer:
		about += "R=" + JoinComma([CutDomain(s) for s in cl.reviewer]) + "\n"
	if opts.get('tbr'):
		tbr = SplitCommaSpace(opts.get('tbr'))
		cl.reviewer = Add(cl.reviewer, tbr)
		about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
	if cl.cc:
		about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"

	if not cl.reviewer:
		return "no reviewers listed in CL"

	if not cl.local:
		return "cannot submit non-local CL"

	# upload, to sync current patch and also get change number if CL is new.
	if not cl.copied_from:
		cl.Upload(ui, repo, gofmt_just_warn=True)

	# check gofmt for real; allowed upload to warn in order to save CL.
	cl.Flush(ui, repo)
	CheckFormat(ui, repo, cl.files)

	about += "%s%s\n" % (server_url_base, cl.name)

	if cl.copied_from:
		about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
	typecheck(about, str)

	if not cl.mailed and not cl.copied_from: # in case this is TBR
		cl.Mail(ui, repo)

	# submit changes locally
	message = cl.desc.rstrip() + "\n\n" + about
	typecheck(message, str)

	set_status("pushing " + cl.name + " to remote server")

	if hg_outgoing(ui, repo):
		raise hg_util.Abort("local repository corrupt or out-of-phase with remote: found outgoing changes")

	old_heads = len(hg_heads(ui, repo).split())

	# The precommit hook rejects commits unless commit_okay is set,
	# so that only this extension can create them.
	global commit_okay
	commit_okay = True
	ret = hg_commit(ui, repo, *['path:'+f for f in cl.files], message=message, user=userline)
	commit_okay = False
	if ret:
		return "nothing changed"
	node = repo["-1"].node()

	# push to remote; if it fails for any reason, roll back
	try:
		new_heads = len(hg_heads(ui, repo).split())
		if old_heads != new_heads and not (old_heads == 0 and new_heads == 1):
			# Created new head, so we weren't up to date.
			need_sync()

		# Push changes to remote. If it works, we're committed. If not, roll back.
		try:
			hg_push(ui, repo)
		except hg_error.Abort, e:
			if e.message.find("push creates new heads") >= 0:
				# Remote repository had changes we missed.
				need_sync()
			raise
	except:
		# Undo the local commit so the working state matches the remote.
		real_rollback()
		raise

	# We're committed. Upload final patch, close review, add commit message.
	changeURL = hg_node.short(node)
	url = ui.expandpath("default")
	m = re.match("(^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/?)" + "|" +
		"(^https?://([^@/]+@)?code\.google\.com/p/([^/.]+)(\.[^./]+)?/?)", url)
	if m:
		if m.group(1): # prj.googlecode.com/hg/ case
			changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(3), changeURL)
		elif m.group(4) and m.group(7): # code.google.com/p/prj.subrepo/ case
			changeURL = "http://code.google.com/p/%s/source/detail?r=%s&repo=%s" % (m.group(6), changeURL, m.group(7)[1:])
		elif m.group(4): # code.google.com/p/prj/ case
			changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(6), changeURL)
		else:
			print >>sys.stderr, "URL: ", url
	else:
		print >>sys.stderr, "URL: ", url
	pmsg = "*** Submitted as " + changeURL + " ***\n\n" + message

	# When posting, move reviewers to CC line,
	# so that the issue stops showing up in their "My Issues" page.
	PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))

	if not cl.copied_from:
		EditDesc(cl.name, closed=True, private=cl.private)
	cl.Delete(ui, repo)

	# If we just submitted the last change on a release branch from a
	# clean client, switch back to the default branch.
	c = repo[None]
	if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
		ui.write("switching from %s to default branch.\n" % releaseBranch)
		err = hg_clean(repo, "default")
		if err:
			return err
	return None
#######################################################################
# hg sync
@hgcommand
def sync(ui, repo, **opts):
	"""synchronize with remote repository

	Incorporates recent changes from the remote repository
	into the local repository.
	"""
	if codereview_disabled:
		return codereview_disabled

	if not opts["local"]:
		# Pull and update the working directory in one step.
		pull_err = hg_pull(ui, repo, update=True)
		if pull_err:
			return pull_err
	sync_changes(ui, repo)
def sync_changes(ui, repo):
	"""Close CLs that were submitted remotely and prune stale file lists."""
	# Look through recent change log descriptions to find
	# potential references to http://.*/our-CL-number.
	# Double-check them by looking at the Rietveld log.
	for rev in hg_log(ui, repo, limit=100, template="{node}\n").split():
		desc = repo[rev].description().strip()
		for clname in re.findall('(?m)^http://(?:[^\n]+)/([0-9]+)$', desc):
			if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
				ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
				cl, err = LoadCL(ui, repo, clname, web=False)
				if err != "":
					# Best effort: warn and keep scanning.
					ui.warn("loading CL %s: %s\n" % (clname, err))
					continue
				if not cl.copied_from:
					EditDesc(cl.name, closed=True, private=cl.private)
				cl.Delete(ui, repo)

	# Remove files that are not modified from the CLs in which they appear.
	all = LoadAllCL(ui, repo, web=False)
	changed = ChangedFiles(ui, repo, [])
	for cl in all.values():
		extra = Sub(cl.files, changed)
		if extra:
			ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
			for f in extra:
				ui.warn("\t%s\n" % (f,))
			cl.files = Sub(cl.files, extra)
			cl.Flush(ui, repo)
		if not cl.files:
			# Empty CLs are not deleted automatically; tell the user how.
			if not cl.copied_from:
				ui.warn("CL %s has no files; delete (abandon) with hg change -d %s\n" % (cl.name, cl.name))
			else:
				ui.warn("CL %s has no files; delete locally with hg change -D %s\n" % (cl.name, cl.name))
	return
#######################################################################
# hg upload
@hgcommand
def upload(ui, repo, name, **opts):
	"""upload diffs to the code review server

	Uploads the current modifications for a given change to the server.
	"""
	if codereview_disabled:
		return codereview_disabled

	# Quiet Mercurial so only our own output reaches the user.
	repo.ui.quiet = True
	cl, err = LoadCL(ui, repo, name, web=True)
	if err != "":
		return err
	if not cl.local:
		return "cannot upload non-local change"
	cl.Upload(ui, repo)
	# Print the issue URL for easy copy/paste.
	print "%s%s\n" % (server_url_base, cl.name)
	return
#######################################################################
# Table of commands, supplied to Mercurial for installation.
# Options shared by the commands that send a CL out for review.
review_opts = [
	('r', 'reviewer', '', 'add reviewer'),
	('', 'cc', '', 'add cc'),
	('', 'tbr', '', 'add future reviewer'),
	('m', 'message', '', 'change description (for new change)'),
]

# Table of commands, supplied to Mercurial for installation.
# Each entry maps the command name to (function, option table, synopsis).
cmdtable = {
	# The ^ means to show this command in the help text that
	# is printed when running hg with no arguments.
	"^change": (
		change,
		[
			('d', 'delete', None, 'delete existing change list'),
			('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
			('i', 'stdin', None, 'read change list from standard input'),
			('o', 'stdout', None, 'print change list to standard output'),
			('p', 'pending', None, 'print pending summary to standard output'),
		],
		"[-d | -D] [-i] [-o] change# or FILE ..."
	),
	"^clpatch": (
		clpatch,
		[
			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	# Would prefer to call this codereview-login, but then
	# hg help codereview prints the help for this command
	# instead of the help for the extension.
	"code-login": (
		code_login,
		[],
		"",
	),
	"^download": (
		download,
		[],
		"change#"
	),
	"^file": (
		file,
		[
			('d', 'delete', None, 'delete files from change list (but not repository)'),
		],
		"[-d] change# FILE ..."
	),
	"^gofmt": (
		gofmt,
		[
			('l', 'list', None, 'list files that would change, but do not edit them'),
		],
		"FILE ..."
	),
	"^pending|p": (
		pending,
		[
			('s', 'short', False, 'show short result form'),
			('', 'quick', False, 'do not consult codereview server'),
		],
		"[FILE ...]"
	),
	"^ps": (
		ps,
		[],
		"[FILE ...]"
	),
	"^pq": (
		pq,
		[],
		"[FILE ...]"
	),
	"^mail": (
		mail,
		review_opts + [
		] + hg_commands.walkopts,
		"[-r reviewer] [--cc cc] [change# | file ...]"
	),
	"^release-apply": (
		release_apply,
		[
			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	# TODO: release-start, release-tag, weekly-tag
	"^submit": (
		submit,
		review_opts + [
			('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
		] + hg_commands.walkopts + hg_commands.commitopts + hg_commands.commitopts2,
		"[-r reviewer] [--cc cc] [change# | file ...]"
	),
	"^sync": (
		sync,
		[
			('', 'local', None, 'do not pull changes from remote repository')
		],
		"[--local]",
	),
	"^undo": (
		undo,
		[
			('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	"^upload": (
		upload,
		[],
		"change#"
	),
}
#######################################################################
# Mercurial extension initialization
def norollback(*pats, **opts):
	"""(disabled when using this extension)"""
	# Installed over repo.rollback by reposetup: rollback would delete a
	# commit behind the extension's back; hg undo is the supported path.
	raise hg_util.Abort("codereview extension enabled; use undo instead of rollback")
# True once reposetup has run; guards against repeated initialization.
codereview_init = False

def reposetup(ui, repo):
	"""Per-repository extension setup hook called by Mercurial."""
	global codereview_disabled
	global defaultcc

	# reposetup gets called both for the local repository
	# and also for any repository we are pulling or pushing to.
	# Only initialize the first time.
	global codereview_init
	if codereview_init:
		return
	codereview_init = True

	# Read repository-specific options from lib/codereview/codereview.cfg or codereview.cfg.
	root = ''
	try:
		root = repo.root
	except:
		# Yes, repo might not have root; see issue 959.
		codereview_disabled = 'codereview disabled: repository has no root'
		return

	repo_config_path = ''
	p1 = root + '/lib/codereview/codereview.cfg'
	p2 = root + '/codereview.cfg'
	if os.access(p1, os.F_OK):
		repo_config_path = p1
	else:
		repo_config_path = p2
	try:
		# NOTE(review): f is never closed; harmless for a hook that runs
		# once, but worth confirming intent.
		f = open(repo_config_path)
		for line in f:
			if line.startswith('defaultcc:'):
				defaultcc = SplitCommaSpace(line[len('defaultcc:'):])
			if line.startswith('contributors:'):
				global contributorsURL
				contributorsURL = line[len('contributors:'):].strip()
	except:
		codereview_disabled = 'codereview disabled: cannot open ' + repo_config_path
		return

	remote = ui.config("paths", "default", "")
	if remote.find("://") < 0:
		raise hg_util.Abort("codereview: default path '%s' is not a URL" % (remote,))

	InstallMatch(ui, repo)
	RietveldSetup(ui, repo)

	# Disable the Mercurial commands that might change the repository.
	# Only commands in this extension are supposed to do that.
	ui.setconfig("hooks", "precommit.codereview", precommithook)

	# Rollback removes an existing commit; keep the real implementation
	# around for submit's error path, but hide it from users.
	global real_rollback
	real_rollback = repo.rollback
	repo.rollback = norollback
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
from HTMLParser import HTMLParser
# HTML form parser
class FormParser(HTMLParser):
	"""Collects the <input> and <textarea> fields of an HTML page into
	self.map (field name -> field value)."""
	def __init__(self):
		self.map = {}
		self.curtag = None	# name of the <textarea> currently open, if any
		self.curdata = None	# text accumulated for that textarea
		HTMLParser.__init__(self)
	def handle_starttag(self, tag, attrs):
		# <input name=... value=...> is recorded immediately.
		if tag == "input":
			key = None
			value = ''
			for a in attrs:
				if a[0] == 'name':
					key = a[1]
				if a[0] == 'value':
					value = a[1]
			if key is not None:
				self.map[key] = value
		# <textarea name=...> starts collecting character data until the
		# matching end tag.
		if tag == "textarea":
			key = None
			for a in attrs:
				if a[0] == 'name':
					key = a[1]
			if key is not None:
				self.curtag = key
				self.curdata = ''
	def handle_endtag(self, tag):
		# </textarea> finishes the collection started in handle_starttag.
		if tag == "textarea" and self.curtag is not None:
			self.map[self.curtag] = self.curdata
			self.curtag = None
			self.curdata = None
	def handle_charref(self, name):
		# Numeric character references (&#NNN;) decode to their character.
		self.handle_data(unichr(int(name)))
	def handle_entityref(self, name):
		import htmlentitydefs
		# Known named entities decode; unknown ones pass through verbatim.
		if name in htmlentitydefs.entitydefs:
			self.handle_data(htmlentitydefs.entitydefs[name])
		else:
			self.handle_data("&" + name + ";")
	def handle_data(self, data):
		# Only textarea content is accumulated; data outside is ignored.
		if self.curdata is not None:
			self.curdata += data
def JSONGet(ui, path):
	"""Fetch path from the code review server and decode it as JSON.

	Returns the decoded (and fix_json-normalized) object, or None after
	printing a warning if the fetch or decode fails.
	"""
	try:
		raw = MySend(path, force_auth=False)
		typecheck(raw, str)
		return fix_json(json.loads(raw))
	except:
		ui.warn("JSONGet %s: %s\n" % (path, ExceptionDetail()))
		return None
# Clean up json parser output to match our expectations:
#   * all strings are UTF-8-encoded str, not unicode.
#   * missing fields are missing, not None,
#     so that d.get("foo", defaultvalue) works.
def fix_json(x):
	t = type(x)
	if t in (str, int, float, bool, type(None)):
		pass
	elif t is unicode:
		x = x.encode("utf-8")
	elif t is list:
		# Normalize elements in place.
		for i, elem in enumerate(x):
			x[i] = fix_json(elem)
	elif t is dict:
		# Drop keys whose value is None; normalize the rest in place.
		dead = [k for k in x if x[k] is None]
		for k in x:
			if x[k] is not None:
				x[k] = fix_json(x[k])
		for k in dead:
			del x[k]
	else:
		raise hg_util.Abort("unknown type " + str(type(x)) + " in fix_json")
	if type(x) is str:
		x = x.replace('\r\n', '\n')
	return x
def IsRietveldSubmitted(ui, clname, hex):
	"""Report whether Rietveld's message log for clname records a
	submission whose revision hash is a prefix of hex."""
	data = JSONGet(ui, "/api/" + clname + "?messages=true")
	if data is None:
		return False
	pat = re.compile('\*\*\* Submitted as [^*]*?([0-9a-f]+) \*\*\*')
	for msg in data.get("messages", []):
		m = pat.match(msg.get("text", ""))
		# Require a reasonably long hash prefix to avoid false matches.
		if m is not None and len(m.group(1)) >= 8 and hex.startswith(m.group(1)):
			return True
	return False
def IsRietveldMailed(cl):
	"""Report whether the CL's Rietveld message log contains the
	standard "please review" mail."""
	marker = "I'd like you to review this change"
	return any(marker in msg.get("text", "") for msg in cl.dict.get("messages", []))
def DownloadCL(ui, repo, clname):
	"""Fetch CL metadata and its newest patch set from Rietveld.

	Returns (cl, vers, patch, err): the CL object, the hg revision the
	patch was diffed against ("" if unknown), the patch text, and an
	error string ("" on success).
	"""
	set_status("downloading CL " + clname)
	cl, err = LoadCL(ui, repo, clname, web=True)
	if err != "":
		return None, None, None, "error loading CL %s: %s" % (clname, err)

	# Find most recent diff
	diffs = cl.dict.get("patchsets", [])
	if not diffs:
		return None, None, None, "CL has no patch sets"
	patchid = diffs[-1]

	patchset = JSONGet(ui, "/api/" + clname + "/" + str(patchid))
	if patchset is None:
		return None, None, None, "error loading CL patchset %s/%d" % (clname, patchid)
	if patchset.get("patchset", 0) != patchid:
		return None, None, None, "malformed patchset information"

	vers = ""
	msg = patchset.get("message", "").split()
	# Patchset messages of the form "diff -r <rev> ..." record the base
	# revision the diff was generated against.
	if len(msg) >= 3 and msg[0] == "diff" and msg[1] == "-r":
		vers = msg[2]
	diff = "/download/issue" + clname + "_" + str(patchid) + ".diff"
	diffdata = MySend(diff, force_auth=False)

	# Print warning if email is not in CONTRIBUTORS file.
	email = cl.dict.get("owner_email", "")
	if not email:
		return None, None, None, "cannot find owner for %s" % (clname)
	him = FindContributor(ui, repo, email)
	me = FindContributor(ui, repo, None)
	if him == me:
		# Downloading our own CL: track its mailed state.
		cl.mailed = IsRietveldMailed(cl)
	else:
		# Someone else's CL: remember the original author.
		cl.copied_from = email

	return cl, vers, diffdata, ""
def MySend(request_path, payload=None,
		content_type="application/octet-stream",
		timeout=None, force_auth=True,
		**kwargs):
	"""Run MySend1 maybe twice, because Rietveld is unreliable."""
	try:
		return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
	except Exception, e:
		# only retry on HTTP 500 error
		if type(e) != urllib2.HTTPError or e.code != 500:
			raise
		print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
		time.sleep(2)
		return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend1(request_path, payload=None,
		content_type="application/octet-stream",
		timeout=None, force_auth=True,
		**kwargs):
	"""Sends an RPC and returns the response.

	Args:
		request_path: The path to send the request to, eg /api/appversion/create.
		payload: The body of the request, or None to send an empty request.
		content_type: The Content-Type header to use.
		timeout: timeout in seconds; default None i.e. no timeout.
			(Note: for large requests on OS X, the timeout doesn't work right.)
		kwargs: Any keyword arguments are converted into query string parameters.

	Returns:
		The response body, as a string.
	"""
	# TODO: Don't require authentication. Let the server say
	# whether it is necessary.
	global rpc
	if rpc == None:
		rpc = GetRpcServer(upload_options)
	self = rpc
	if not self.authenticated and force_auth:
		self._Authenticate()
	if request_path is None:
		return

	# Apply the timeout only for the duration of this request.
	old_timeout = socket.getdefaulttimeout()
	socket.setdefaulttimeout(timeout)
	try:
		tries = 0
		while True:
			tries += 1
			args = dict(kwargs)
			url = "http://%s%s" % (self.host, request_path)
			if args:
				url += "?" + urllib.urlencode(args)
			req = self._CreateRequest(url=url, data=payload)
			req.add_header("Content-Type", content_type)
			try:
				f = self.opener.open(req)
				response = f.read()
				f.close()
				# Translate \r\n into \n, because Rietveld doesn't.
				response = response.replace('\r\n', '\n')
				# who knows what urllib will give us
				if type(response) == unicode:
					response = response.encode("utf-8")
				typecheck(response, str)
				return response
			except urllib2.HTTPError, e:
				if tries > 3:
					raise
				elif e.code == 401:
					# Unauthorized: authenticate and retry.
					self._Authenticate()
				elif e.code == 302:
					# Only follow the auth dance for Google login redirects.
					loc = e.info()["location"]
					if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
						return ''
					self._Authenticate()
				else:
					raise
	finally:
		socket.setdefaulttimeout(old_timeout)
def GetForm(url):
	"""Fetch url and return its HTML form fields as a str->str dict."""
	parser = FormParser()
	parser.feed(ustr(MySend(url)))	# f.feed wants unicode
	parser.close()
	# convert back to utf-8 to restore sanity
	fields = {}
	for k, v in parser.map.items():
		fields[k.encode("utf-8")] = v.replace("\r\n", "\n").encode("utf-8")
	return fields
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=False, private=False):
	"""Update fields of a Rietveld issue via its HTML edit form.

	Only the arguments that are not None (or are True, for the flags)
	are changed. Exits the process on server error.
	"""
	set_status("uploading change to description")
	# Start from the server's current form so unchanged fields survive.
	form_fields = GetForm("/" + issue + "/edit")
	if subject is not None:
		form_fields['subject'] = subject
	if desc is not None:
		form_fields['description'] = desc
	if reviewers is not None:
		form_fields['reviewers'] = reviewers
	if cc is not None:
		form_fields['cc'] = cc
	if closed:
		form_fields['closed'] = "checked"
	if private:
		form_fields['private'] = "checked"
	ctype, body = EncodeMultipartFormData(form_fields.items(), [])
	response = MySend("/" + issue + "/edit", body, content_type=ctype)
	# An empty response means success.
	if response != "":
		print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
		sys.exit(2)
def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
	"""Post a message (optionally updating reviewers/cc) on a Rietveld issue.

	Exits the process on server error.
	"""
	set_status("uploading message")
	# Start from the server's current publish form.
	form_fields = GetForm("/" + issue + "/publish")
	if reviewers is not None:
		form_fields['reviewers'] = reviewers
	if cc is not None:
		form_fields['cc'] = cc
	if send_mail:
		form_fields['send_mail'] = "checked"
	else:
		del form_fields['send_mail']
	if subject is not None:
		form_fields['subject'] = subject
	form_fields['message'] = message

	form_fields['message_only'] = '1'	# Don't include draft comments
	if reviewers is not None or cc is not None:
		form_fields['message_only'] = ''	# Must set '' in order to override cc/reviewer
	# NOTE(review): "applications/..." looks like a typo for
	# "application/x-www-form-urlencoded", but the server has been
	# accepting it; confirm server behavior before changing.
	ctype = "applications/x-www-form-urlencoded"
	body = urllib.urlencode(form_fields)
	response = MySend("/" + issue + "/publish", body, content_type=ctype)
	# An empty response means success.
	if response != "":
		print response
		sys.exit(2)
class opt(object):
	# Empty attribute bag used to mimic upload.py's parsed-options object;
	# RietveldSetup fills in the fields below.
	pass
def RietveldSetup(ui, repo):
	"""Initialize module-level Rietveld client state from hgrc settings."""
	global force_google_account
	global rpc
	global server
	global server_url_base
	global upload_options
	global verbosity

	if not ui.verbose:
		verbosity = 0

	# Config options.
	x = ui.config("codereview", "server")
	if x is not None:
		server = x

	# TODO(rsc): Take from ui.username?
	email = None
	x = ui.config("codereview", "email")
	if x is not None:
		email = x

	server_url_base = "http://" + server + "/"

	testing = ui.config("codereview", "testing")
	force_google_account = ui.configbool("codereview", "force_google_account", False)

	# Build the options object upload.py's helpers expect.
	upload_options = opt()
	upload_options.email = email
	upload_options.host = None
	upload_options.verbose = 0
	upload_options.description = None
	upload_options.description_file = None
	upload_options.reviewers = None
	upload_options.cc = None
	upload_options.message = None
	upload_options.issue = None
	upload_options.download_base = False
	upload_options.revision = None
	upload_options.send_mail = False
	upload_options.vcs = None
	upload_options.server = server
	upload_options.save_cookies = True

	if testing:
		upload_options.save_cookies = False
		upload_options.email = "test@example.com"

	# The RPC server is created lazily by MySend1.
	rpc = None

	global releaseBranch
	tags = repo.branchtags().keys()
	if 'release-branch.go10' in tags:
		# NOTE(rsc): This tags.sort is going to get the wrong
		# answer when comparing release-branch.go9 with
		# release-branch.go10. It will be a while before we care.
		raise hg_util.Abort('tags.sort needs to be fixed for release-branch.go10')
	tags.sort()
	for t in tags:
		if t.startswith('release-branch.go'):
			releaseBranch = t
#######################################################################
# http://codereview.appspot.com/static/upload.py, heavily edited.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1

# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024

# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = [
	'application/javascript',
	'application/x-javascript',
	'application/x-freemind'
]
def GetEmail(prompt):
	"""Prompts the user for their email address and returns it.

	The last used email address is saved to a file and offered up as a suggestion
	to the user. If the user presses enter without typing in anything the last
	used email address is used. If the user enters a new address, it is saved
	for next time we prompt.
	"""
	last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
	last_email = ""
	if os.path.exists(last_email_file_name):
		try:
			last_email_file = open(last_email_file_name, "r")
			last_email = last_email_file.readline().strip("\n")
			last_email_file.close()
			prompt += " [%s]" % last_email
		except IOError, e:
			# Best effort: fall back to prompting with no suggestion.
			pass
	email = raw_input(prompt + ": ").strip()
	if email:
		try:
			last_email_file = open(last_email_file_name, "w")
			last_email_file.write(email)
			last_email_file.close()
		except IOError, e:
			# Best effort: losing the cached address is not fatal.
			pass
	else:
		email = last_email
	return email
def StatusUpdate(msg):
	"""Print a status message to stdout.

	If 'verbosity' is greater than 0, print the message.

	Args:
		msg: The string to print.
	"""
	if verbosity > 0:
		print msg
def ErrorExit(msg):
	"""Print an error message to stderr and exit."""
	print >>sys.stderr, msg
	# Exit status 1 signals failure to the calling shell.
	sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
	"""Raised to indicate there was an error authenticating with ClientLogin."""
	def __init__(self, url, code, msg, headers, args):
		urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
		self.args = args
		# ClientLogin responses carry the failure reason under "Error".
		self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers=None, save_cookies=False):
	"""Creates a new HttpRpcServer.

	Args:
		host: The host to send requests to.
		auth_function: A function that takes no arguments and returns an
			(email, password) tuple when called. Will be called if authentication
			is required.
		host_override: The host header to send to the server (defaults to host).
		extra_headers: A dict of extra headers to append to every request;
			None (the default) means no extra headers.
		save_cookies: If True, save the authentication cookies to local disk.
			If False, use an in-memory cookiejar instead. Subclasses must
			implement this functionality. Defaults to False.
	"""
	self.host = host
	self.host_override = host_override
	self.auth_function = auth_function
	self.authenticated = False
	# Bug fix: the default used to be a mutable dict literal ({}), which
	# Python evaluates once, so every instance constructed without
	# extra_headers shared (and could corrupt) the same dict. Use None
	# as the sentinel and allocate a fresh dict per instance.
	if extra_headers is None:
		extra_headers = {}
	self.extra_headers = extra_headers
	self.save_cookies = save_cookies
	self.opener = self._GetOpener()
	if self.host_override:
		logging.info("Server: %s; Host: %s", self.host, self.host_override)
	else:
		logging.info("Server: %s", self.host)
def _GetOpener(self):
	"""Returns an OpenerDirector for making HTTP requests.

	Returns:
		A urllib2.OpenerDirector object.
	"""
	# Abstract: subclasses decide cookie handling (see save_cookies).
	raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com") and not force_google_account:
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" % (self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, "The user's access to the service has been disabled."
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
    """Provides a simplified RPC-style interface for HTTP requests."""

    def _Authenticate(self):
        """Save the cookie jar after authentication."""
        super(HttpRpcServer, self)._Authenticate()
        if self.save_cookies:
            StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
            self.cookie_jar.save()

    def _GetOpener(self):
        """Returns an OpenerDirector that supports cookies and ignores redirects.

        Returns:
            A urllib2.OpenerDirector object.
        """
        # Build the opener by hand instead of urllib2.build_opener so that no
        # redirect handler is installed: per the docstring above, redirects
        # must surface to the caller (see AbstractRpcServer._GetAuthCookie).
        opener = urllib2.OpenerDirector()
        opener.add_handler(urllib2.ProxyHandler())
        opener.add_handler(urllib2.UnknownHandler())
        opener.add_handler(urllib2.HTTPHandler())
        opener.add_handler(urllib2.HTTPDefaultErrorHandler())
        opener.add_handler(urllib2.HTTPSHandler())
        opener.add_handler(urllib2.HTTPErrorProcessor())
        if self.save_cookies:
            # NOTE(review): 'server' is not defined in this method -- it is
            # presumably a module-level global naming the codereview server;
            # confirm it is always set before _GetOpener runs.
            self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
            self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
            if os.path.exists(self.cookie_file):
                try:
                    self.cookie_jar.load()
                    self.authenticated = True
                    StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file)
                except (cookielib.LoadError, IOError):
                    # Failed to load cookies - just ignore them.
                    pass
            else:
                # Create an empty cookie file with mode 600
                fd = os.open(self.cookie_file, os.O_CREAT, 0600)
                os.close(fd)
            # Always chmod the cookie file (it holds auth cookies)
            os.chmod(self.cookie_file, 0600)
        else:
            # Don't save cookies across runs of update.py.
            self.cookie_jar = cookielib.CookieJar()
        opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
        return opener
def GetRpcServer(options):
    """Returns an instance of an AbstractRpcServer.

    Args:
        options: Parsed command-line options; uses options.email,
            options.host, options.server and options.save_cookies.

    Returns:
        A new AbstractRpcServer, on which RPC calls can be made.
    """
    rpc_server_class = HttpRpcServer

    def GetUserCredentials():
        """Prompts the user for a username and password."""
        # Disable status prints so they don't obscure the password prompt.
        global global_status
        st = global_status
        global_status = None

        email = options.email
        if email is None:
            email = GetEmail("Email (login for uploading to %s)" % options.server)
        password = getpass.getpass("Password for %s: " % email)

        # Put status back.
        global_status = st
        return (email, password)

    # If this is the dev_appserver, use fake authentication.
    host = (options.host or options.server).lower()
    if host == "localhost" or host.startswith("localhost:"):
        email = options.email
        if email is None:
            email = "test@example.com"
            logging.info("Using debug user %s.  Override with --email" % email)
        server = rpc_server_class(
            options.server,
            # The dev_appserver accepts any password for this cookie.
            lambda: (email, "password"),
            host_override=options.host,
            extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
            save_cookies=options.save_cookies)
        # Don't try to talk to ClientLogin.
        server.authenticated = True
        return server

    return rpc_server_class(options.server, GetUserCredentials,
                            host_override=options.host, save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
    """Encode form fields for multipart/form-data.

    Args:
        fields: A sequence of (name, value) elements for regular form fields.
        files: A sequence of (name, filename, value) elements for data to be
            uploaded as files.
    Returns:
        (content_type, body) ready for httplib.HTTP instance.
    Source:
        http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
    """
    BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
    CRLF = '\r\n'
    lines = []
    # Plain form fields first.
    for (key, value) in fields:
        typecheck(key, str)
        typecheck(value, str)
        lines.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"' % key,
            '',
            value,
        ])
    # Then each uploaded file, carrying its guessed content type.
    for (key, filename, value) in files:
        typecheck(key, str)
        typecheck(filename, str)
        typecheck(value, str)
        lines.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename),
            'Content-Type: %s' % GetContentType(filename),
            '',
            value,
        ])
    lines.extend(['--' + BOUNDARY + '--', ''])
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, CRLF.join(lines)
def GetContentType(filename):
    """Helper to guess the content-type from the filename."""
    guessed, _encoding = mimetypes.guess_type(filename)
    if guessed:
        return guessed
    # Unknown extension: fall back to the generic binary type.
    return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (On other platforms the command list is exec'd directly by subprocess.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True, env=os.environ):
    """Executes a command and returns the output from stdout and the return code.

    Args:
        command: Command to execute.
        print_output: If True, the output is printed to stdout.
            If False, both stdout and stderr are ignored.
        universal_newlines: Use universal_newlines flag (default: True).
        env: Environment to run the subprocess in (defaults to the caller's
            environment).

    Returns:
        Tuple (output, return code)
    """
    logging.info("Running %s", command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         shell=use_shell, universal_newlines=universal_newlines, env=env)
    if print_output:
        # Echo each line as it arrives while also accumulating it.
        output_array = []
        while True:
            line = p.stdout.readline()
            if not line:
                break
            print line.strip("\n")
            output_array.append(line)
        output = "".join(output_array)
    else:
        output = p.stdout.read()
    p.wait()
    errout = p.stderr.read()
    if print_output and errout:
        print >>sys.stderr, errout
    # Close the pipes explicitly so handles are not leaked.
    p.stdout.close()
    p.stderr.close()
    return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
    """Run a command and return its stdout; exit the program on failure.

    Args:
        command: Command to execute.
        silent_ok: If False, empty output is treated as a fatal error.
        universal_newlines: Passed through to RunShellWithReturnCode.
        print_output: If True, echo the command's output to stdout.
        env: Environment to run the subprocess in.

    Returns:
        The command's stdout output as a string.
    """
    output, exit_code = RunShellWithReturnCode(command, print_output,
                                               universal_newlines, env)
    if exit_code:
        ErrorExit("Got error status from %s:\n%s" % (command, output))
    if not silent_ok and not output:
        ErrorExit("No output from %s" % command)
    return output
class VersionControlSystem(object):
    """Abstract base class providing an interface to the VCS."""

    def __init__(self, options):
        """Constructor.

        Args:
            options: Command line options.
        """
        self.options = options

    def GenerateDiff(self, args):
        """Return the current diff as a string.

        Args:
            args: Extra arguments to pass to the diff command.
        """
        raise NotImplementedError(
            "abstract method -- subclass %s must override" % self.__class__)

    def GetUnknownFiles(self):
        """Return a list of files unknown to the VCS."""
        raise NotImplementedError(
            "abstract method -- subclass %s must override" % self.__class__)

    def CheckForUnknownFiles(self):
        """Show an "are you sure?" prompt if there are unknown files."""
        unknown_files = self.GetUnknownFiles()
        if unknown_files:
            print "The following files are not added to version control:"
            for line in unknown_files:
                print line
            prompt = "Are you sure to continue?(y/N) "
            answer = raw_input(prompt).strip()
            # Anything other than a literal lowercase "y" aborts.
            if answer != "y":
                ErrorExit("User aborted")

    def GetBaseFile(self, filename):
        """Get the content of the upstream version of a file.

        Returns:
            A tuple (base_content, new_content, is_binary, status)
                base_content: The contents of the base file.
                new_content: For text files, this is empty.  For binary files,
                    this is the contents of the new file, since the diff
                    output won't contain information to reconstruct the
                    current file.
                is_binary: True iff the file is binary.
                status: The status of the file.
        """
        raise NotImplementedError(
            "abstract method -- subclass %s must override" % self.__class__)

    def GetBaseFiles(self, diff):
        """Helper that calls GetBase file for each file in the patch.

        Returns:
            A dictionary that maps from filename to GetBaseFile's tuple.
            Filenames are retrieved based on lines that start with "Index:" or
            "Property changes on:".
        """
        files = {}
        for line in diff.splitlines(True):
            if line.startswith('Index:') or line.startswith('Property changes on:'):
                unused, filename = line.split(':', 1)
                # On Windows if a file has property changes its filename uses '\'
                # instead of '/'.
                filename = to_slash(filename.strip())
                files[filename] = self.GetBaseFile(filename)
        return files

    def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                        files):
        """Uploads the base files (and if necessary, the current ones as well)."""

        def UploadFile(filename, file_id, content, is_binary, status, is_base):
            """Uploads a file to the server."""
            set_status("uploading " + filename)
            file_too_large = False
            if is_base:
                type = "base"
            else:
                type = "current"
            if len(content) > MAX_UPLOAD_SIZE:
                # Oversized files are flagged to the server instead of sent.
                print ("Not uploading the %s file for %s because it's too large." %
                       (type, filename))
                file_too_large = True
                content = ""
            checksum = md5(content).hexdigest()
            if options.verbose > 0 and not file_too_large:
                print "Uploading %s file for %s" % (type, filename)
            url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
            form_fields = [
                ("filename", filename),
                ("status", status),
                ("checksum", checksum),
                ("is_binary", str(is_binary)),
                ("is_current", str(not is_base)),
            ]
            if file_too_large:
                form_fields.append(("file_too_large", "1"))
            if options.email:
                form_fields.append(("user", options.email))
            ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)])
            response_body = rpc_server.Send(url, body, content_type=ctype)
            if not response_body.startswith("OK"):
                StatusUpdate(" --> %s" % response_body)
                sys.exit(1)

        # Don't want to spawn too many threads, nor do we want to
        # hit Rietveld too hard, or it will start serving 500 errors.
        # When 8 works, it's no better than 4, and sometimes 8 is
        # too many for Rietveld to handle.
        MAX_PARALLEL_UPLOADS = 4

        sema = threading.BoundedSemaphore(MAX_PARALLEL_UPLOADS)
        upload_threads = []
        finished_upload_threads = []

        class UploadFileThread(threading.Thread):
            # Worker thread: runs one UploadFile call, records itself as
            # finished, then releases its semaphore slot.
            def __init__(self, args):
                threading.Thread.__init__(self)
                self.args = args
            def run(self):
                UploadFile(*self.args)
                finished_upload_threads.append(self)
                sema.release()

        def StartUploadFile(*args):
            # Block until an upload slot frees up, reaping any threads that
            # have already finished before starting a new one.
            sema.acquire()
            while len(finished_upload_threads) > 0:
                t = finished_upload_threads.pop()
                upload_threads.remove(t)
                t.join()
            t = UploadFileThread(args)
            upload_threads.append(t)
            t.start()

        def WaitForUploads():
            for t in upload_threads:
                t.join()

        # Invert patch_list into filename -> patch-key form.
        patches = dict()
        [patches.setdefault(v, k) for k, v in patch_list]
        for filename in patches.keys():
            base_content, new_content, is_binary, status = files[filename]
            file_id_str = patches.get(filename)
            # A "nobase" marker means the server doesn't need the base file.
            if file_id_str.find("nobase") != -1:
                base_content = None
                file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
            file_id = int(file_id_str)
            if base_content != None:
                StartUploadFile(filename, file_id, base_content, is_binary, status, True)
            if new_content != None:
                StartUploadFile(filename, file_id, new_content, is_binary, status, False)
        WaitForUploads()

    def IsImage(self, filename):
        """Returns true if the filename has an image extension."""
        mimetype = mimetypes.guess_type(filename)[0]
        if not mimetype:
            return False
        return mimetype.startswith("image/")

    def IsBinary(self, filename):
        """Returns true if the guessed mimetype isn't in the text group."""
        mimetype = mimetypes.guess_type(filename)[0]
        if not mimetype:
            return False  # e.g. README, "real" binaries usually have an extension
        # special case for text files which don't start with text/
        if mimetype in TEXT_MIMETYPES:
            return False
        return not mimetype.startswith("text/")
class FakeMercurialUI(object):
    # Minimal stand-in for mercurial's ui object: captures command output in
    # a string instead of writing to the terminal, and forwards configuration
    # queries to the module-level global_ui.
    def __init__(self):
        self.quiet = True
        # Accumulated text written via write(); read back by callers
        # (see MercurialVCS.get_hg_status).
        self.output = ''

    def write(self, *args, **opts):
        self.output += ' '.join(args)

    def copy(self):
        # Mercurial copies the ui for sub-operations; returning self keeps
        # all output in one place.
        return self

    def status(self, *args, **opts):
        pass

    def formatter(self, topic, opts):
        from mercurial.formatter import plainformatter
        return plainformatter(self, topic, opts)

    def readconfig(self, *args, **opts):
        pass

    def expandpath(self, *args, **opts):
        return global_ui.expandpath(*args, **opts)

    def configitems(self, *args, **opts):
        return global_ui.configitems(*args, **opts)

    def config(self, *args, **opts):
        return global_ui.config(*args, **opts)
# set to True to shell out to hg always; slower
# (when False, MercurialVCS drives the in-process Mercurial API instead)
use_hg_shell = False
class MercurialVCS(VersionControlSystem):
    """Implementation of the VersionControlSystem interface for Mercurial."""

    def __init__(self, options, ui, repo):
        super(MercurialVCS, self).__init__(options)
        self.ui = ui
        self.repo = repo
        # Cached 'hg status' output lines; populated lazily by get_hg_status.
        self.status = None
        # Absolute path to repository (we can be in a subdir)
        self.repo_dir = os.path.normpath(repo.root)
        # Compute the subdir
        cwd = os.path.normpath(os.getcwd())
        assert cwd.startswith(self.repo_dir)
        self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
        if self.options.revision:
            self.base_rev = self.options.revision
        else:
            # Prefer the mq patch queue parent when one exists
            # (err is the shell return code).
            mqparent, err = RunShellWithReturnCode(['hg', 'log', '--rev', 'qparent', '--template={node}'])
            if not err and mqparent != "":
                self.base_rev = mqparent
            else:
                out = RunShell(["hg", "parents", "-q"], silent_ok=True).strip()
                if not out:
                    # No revisions; use 0 to mean a repository with nothing.
                    out = "0:0"
                self.base_rev = out.split(':')[1].strip()

    def _GetRelPath(self, filename):
        """Get relative path of a file according to the current directory,
        given its logical path in the repo."""
        assert filename.startswith(self.subdir), (filename, self.subdir)
        return filename[len(self.subdir):].lstrip(r"\/")

    def GenerateDiff(self, extra_args):
        """Return an svn-style diff of the working copy against base_rev."""
        # If no file specified, restrict to the current subdir
        extra_args = extra_args or ["."]
        cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
        data = RunShell(cmd, silent_ok=True)
        svndiff = []
        filecount = 0
        for line in data.splitlines():
            m = re.match("diff --git a/(\S+) b/(\S+)", line)
            if m:
                # Modify line to make it look like as it comes from svn diff.
                # With this modification no changes on the server side are required
                # to make upload.py work with Mercurial repos.
                # NOTE: for proper handling of moved/copied files, we have to use
                # the second filename.
                filename = m.group(2)
                svndiff.append("Index: %s" % filename)
                svndiff.append("=" * 67)
                filecount += 1
                logging.info(line)
            else:
                svndiff.append(line)
        if not filecount:
            ErrorExit("No valid patches found in output from hg diff")
        return "\n".join(svndiff) + "\n"

    def GetUnknownFiles(self):
        """Return a list of files unknown to the VCS."""
        # NOTE(review): 'args' is assigned but never used here.
        args = []
        status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                          silent_ok=True)
        unknown_files = []
        for line in status.splitlines():
            # Each status line is "<st> <filename>"; '?' marks untracked.
            st, fn = line.split(" ", 1)
            if st == "?":
                unknown_files.append(fn)
        return unknown_files

    def get_hg_status(self, rev, path):
        """Return the cached 'hg status -C' line(s) for a single path."""
        # We'd like to use 'hg status -C path', but that is buggy
        # (see http://mercurial.selenic.com/bts/issue3023).
        # Instead, run 'hg status -C' without a path
        # and skim the output for the path we want.
        if self.status is None:
            if use_hg_shell:
                out = RunShell(["hg", "status", "-C", "--rev", rev])
            else:
                # Drive the in-process Mercurial API, capturing its output
                # through the fake ui object.
                fui = FakeMercurialUI()
                ret = hg_commands.status(fui, self.repo, *[], **{'rev': [rev], 'copies': True})
                if ret:
                    raise hg_util.Abort(ret)
                out = fui.output
            self.status = out.splitlines()
        for i in range(len(self.status)):
            # line is
            #   A path
            #   M path
            # etc
            line = to_slash(self.status[i])
            if line[2:] == path:
                # A copy source follows on the next (indented) line; return
                # both lines in that case.
                if i+1 < len(self.status) and self.status[i+1][:2] == '  ':
                    return self.status[i:i+2]
                return self.status[i:i+1]
        raise hg_util.Abort("no status for " + path)

    def GetBaseFile(self, filename):
        set_status("inspecting " + filename)
        # "hg status" and "hg cat" both take a path relative to the current subdir
        # rather than to the repo root, but "hg diff" has given us the full path
        # to the repo root.
        base_content = ""
        new_content = None
        is_binary = False
        oldrelpath = relpath = self._GetRelPath(filename)
        out = self.get_hg_status(self.base_rev, relpath)
        status, what = out[0].split(' ', 1)
        if len(out) > 1 and status == "A" and what == relpath:
            # Added with a copy source (second status line): treat as modified.
            oldrelpath = out[1].strip()
            status = "M"
        if ":" in self.base_rev:
            base_rev = self.base_rev.split(":", 1)[0]
        else:
            base_rev = self.base_rev
        if status != "A":
            if use_hg_shell:
                base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath], silent_ok=True)
            else:
                base_content = str(self.repo[base_rev][oldrelpath].data())
            is_binary = "\0" in base_content  # Mercurial's heuristic
        if status != "R":
            new_content = open(relpath, "rb").read()
            is_binary = is_binary or "\0" in new_content
        if is_binary and base_content and use_hg_shell:
            # Fetch again without converting newlines
            base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                                    silent_ok=True, universal_newlines=False)
        # Only binary images carry their new content; everything else is
        # reconstructed from the diff server-side.
        if not is_binary or not self.IsImage(relpath):
            new_content = None
        return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
    """Splits a patch into separate pieces for each file.

    Args:
        data: A string containing the output of svn diff.
    Returns:
        A list of 2-tuple (filename, text) where text is the svn diff output
        pertaining to filename.
    """
    patches = []
    current_name = None
    current_lines = []
    for line in data.splitlines(True):
        started_name = None
        if line.startswith('Index:'):
            started_name = line.split(':', 1)[1].strip()
        elif line.startswith('Property changes on:'):
            # Modified files use '/' between directories while property
            # changes use '\' on Windows; normalize so the same file does
            # not show up twice.
            candidate = to_slash(line.split(':', 1)[1].strip())
            if candidate != current_name:
                # Property changes with no modifications: start a new diff.
                started_name = candidate
        if started_name:
            # Flush the previous file's diff before starting the next one.
            if current_name and current_lines:
                patches.append((current_name, ''.join(current_lines)))
            current_name = started_name
            current_lines = [line]
        else:
            current_lines.append(line)
    if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
    return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
    """Uploads a separate patch for each file in the diff output.

    Args:
        issue: The issue number (convertible to int).
        rpc_server: An AbstractRpcServer to send the uploads through.
        patchset: The patchset number (convertible to int).
        data: The full diff text, split per file via SplitPatch.
        options: Command-line options (uses options.download_base).

    Returns a list of [patch_key, filename] for each file.
    """
    patches = SplitPatch(data)
    rv = []
    for patch in patches:
        set_status("uploading patch for " + patch[0])
        # Patches over the server limit are skipped, not truncated.
        if len(patch[1]) > MAX_UPLOAD_SIZE:
            print ("Not uploading the patch for " + patch[0] +
                   " because the file is too large.")
            continue
        form_fields = [("filename", patch[0])]
        if not options.download_base:
            form_fields.append(("content_upload", "1"))
        files = [("data", "data.diff", patch[1])]
        ctype, body = EncodeMultipartFormData(form_fields, files)
        url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
        print "Uploading patch for " + patch[0]
        response_body = rpc_server.Send(url, body, content_type=ctype)
        # The server replies "OK" on the first line and the patch key on
        # the second; anything else is fatal.
        lines = response_body.splitlines()
        if not lines or lines[0] != "OK":
            StatusUpdate(" --> %s" % response_body)
            sys.exit(1)
        rv.append([lines[1], patch[0]])
    return rv
| Python |
from build import plot_results, print_results

# Valid formats are png, pdf, ps, eps and svg.
# With format=None the plot would be displayed on screen instead of saved.
format = 'png'

# Reporting backend: swap in print_results for a textual dump.
#output = print_results
output = plot_results

# Bandwidth-oriented benchmarks.
for function in ['fill', 'reduce', 'inner_product', 'gather', 'merge']:
    output(function + '.xml', 'InputType', 'InputSize', 'Bandwidth',
           format=format)

# Throughput-oriented benchmarks.
for function in ['inclusive_scan', 'inclusive_segmented_scan', 'unique']:
    output(function + '.xml', 'InputType', 'InputSize', 'Throughput',
           format=format)

for method in ['indirect_sort']:
    output(method + '.xml', 'Sort', 'VectorLength', 'Time',
           plot='semilogx', title='Indirect Sorting', format=format)

# Sorting benchmarks: plain and by-key variants for each algorithm.
for method in ['sort', 'merge_sort', 'radix_sort']:
    output(method + '.xml', 'KeyType', 'InputSize', 'Sorting',
           title='thrust::' + method, format=format)
    output(method + '_by_key.xml', 'KeyType', 'InputSize', 'Sorting',
           title='thrust::' + method + '_by_key', format=format)

output('stl_sort.xml', 'KeyType', 'InputSize', 'Sorting',
       title='std::sort', format=format)

for method in ['radix_sort']:
    output(method + '_bits.xml', 'KeyType', 'KeyBits', 'Sorting',
           title='thrust::' + method, plot='plot', dpi=72, format=format)

# High-resolution plots emitted in both png and pdf.
for format in ['png', 'pdf']:
    output('reduce_float.xml', 'InputType', 'InputSize', 'Bandwidth',
           dpi=120, plot='semilogx', title='thrust::reduce<float>()',
           format=format)
    output('sort_large.xml', 'KeyType', 'InputSize', 'Sorting',
           dpi=120, plot='semilogx', title='thrust::sort<T>()',
           format=format)
| Python |
import os
import glob
from build.perftest import compile_test

# try to import an environment first
try:
    Import('env')
except:
    exec open("../build/build-env.py")
    env = Environment()


def cu_build_function(source, target, env):
    # SCons action: generate a .cu source from a .test description using the
    # project's perftest code generator.
    compile_test(str(source[0]), str(target[0]))

# define a rule to build a .cu from a .test
cu_builder = Builder(action = cu_build_function,
                     suffix = '.cu',
                     src_suffix = '.test')
env.Append(BUILDERS = {'CUFile' : cu_builder})

# define a rule to build a report from an executable
# (runs the benchmark binary from the launch directory, capturing stdout)
report_builder = Builder(action = os.path.join('"' + env.GetLaunchDir(), '$SOURCE" > $TARGET'),
                         suffix = '.xml',
                         src_suffix = env['PROGSUFFIX'])
env.Append(BUILDERS = {'Report' : report_builder})

env.Append(CPPPATH = ['.', '../testing/'])

cu_list = []
program_list = []
report_list = []

# The generator inputs: regenerating sources when these change.
build_files = [os.path.join('build', f) for f in ['perftest.py', 'test_function_template.cxx']]

# describe dependency graph:
# report -> program -> .cu -> .test
for test in glob.glob("*.test"):
    cu = env.CUFile(test)
    env.Depends(cu, build_files)
    cu_list.append(cu)
    prog = env.Program(cu)
    program_list.append(prog)
    report = env.Report(prog)
    report_list.append(report)
    # add .linkinfo files to the clean list
    env.Clean(prog, str(test).replace("test", "linkinfo"))

# make aliases for groups of targets
reports = env.Alias("reports", report_list)
programs = env.Alias("programs", program_list)

# when no build target is specified, by default we build the programs
env.Default(programs)

# output a help message
env.Help("""
Type: 'scons' to build all performance test programs.
Type: 'scons reports' to run all performance tests and output reports.
Type: 'scons <test name>' to build a single performance test program of interest.
Type: 'scons <test name>.xml' to run a single performance test of interest and output a report.
""")
| Python |
import os
import inspect

# try to import an environment first
try:
    Import('env')
except:
    exec open("../build/build-env.py")
    env = Environment()

# on windows we have to do /bigobj
if env['PLATFORM'] == "win32" or env['PLATFORM'] == "win64":
    env.Append(CPPFLAGS = "/bigobj")

# find all .cus & .cpps in the current and backend/ directories
sources = []
directories = ['.', 'backend']
extensions = ['*.cu', '*.cpp']

# always add generic code
directories.append(os.path.join('backend','generic'))
directories.append(os.path.join('backend','generic','scalar'))

# find all backend-specific files
if env['backend'] == 'cuda' or env['backend'] == 'ocelot':
    directories.append(os.path.join('backend','cuda'))
    directories.append(os.path.join('backend','cuda','block'))
    directories.append(os.path.join('backend','cuda','whitebox'))
elif env['backend'] == 'omp':
    directories.append(os.path.join('backend','omp'))
elif env['backend'] == 'tbb':
    directories.append(os.path.join('backend','tbb'))

for dir in directories:
    for ext in extensions:
        regexp = os.path.join(dir, ext)
        sources.extend(env.Glob(regexp))

# filter test files using a regular expression
# (the 'tests' build variable selects tests by matching file contents)
if 'tests' in env:
    import re
    pattern = re.compile(env['tests'])
    # testframework.cu must always be built regardless of the filter
    necessary_sources = set(['testframework.cu'])
    filtered_sources = []
    for f in sources:
        if str(f) in necessary_sources or pattern.search(f.get_contents()):
            filtered_sources.append(f)
    sources = filtered_sources

# add the directory containing this file to the include path
this_file = inspect.currentframe().f_code.co_filename
this_dir = os.path.dirname(this_file)
env.Append(CPPPATH = [this_dir])

tester = env.Program('tester', sources)
| Python |
import os
import glob
from warnings import warn
thrust_abspath = os.path.abspath("../../thrust/")
# try to import an environment first
try:
Import('env')
except:
exec open("../../build/build-env.py")
env = Environment()
# this function builds a trivial source file from a Thrust header
def trivial_source_from_header(source, target, env):
target_filename = str(target[0])
fid = open(target_filename, 'w')
# make sure we don't trip over <windows.h> when compiling with cl.exe
if env.subst('$CC') == 'cl':
fid.write('#include <windows.h>\n')
for src in source:
src_abspath = str(src)
src_relpath = os.path.relpath(src_abspath, thrust_abspath)
include = os.path.join('thrust', src_relpath)
fid.write('#include <' + include + '>\n')
fid.close()
# CUFile builds a trivial .cu file from a Thrust header
cu_from_header_builder = Builder(action = trivial_source_from_header,
suffix = '.cu',
src_suffix = '.h')
env.Append(BUILDERS = {'CUFile' : cu_from_header_builder})
# CPPFile builds a trivial .cpp file from a Thrust header
cpp_from_header_builder = Builder(action = trivial_source_from_header,
suffix = '.cpp',
src_suffix = '.h')
env.Append(BUILDERS = {'CPPFile' : cpp_from_header_builder})
# find all user-includable .h files in the thrust tree and generate trivial source files #including them
extensions = ['.h']
folders = ['', # main folder
'iterator/',
'system/',
'system/cpp',
'system/cuda',
'system/cuda/experimental',
'system/omp',
'system/tbb']
sources = []
header_fullpaths = []
for folder in folders:
for ext in extensions:
pattern = os.path.join(os.path.join(thrust_abspath, folder), "*" + ext)
for fullpath in glob.glob(pattern):
header_fullpaths.append(fullpath)
headerfilename = os.path.basename(fullpath)
# replace slashes with '_slash_'
sourcefilename = fullpath.replace('/', '_slash_').replace('\\', '_slash_').replace('.h', '.ext')
cu = env.CUFile(sourcefilename.replace('.ext', '.cu'), fullpath)
cpp = env.CPPFile(sourcefilename.replace('.ext', '_cpp.cpp'), fullpath)
sources.append(cu)
sources.append(cpp)
# insure that all files #include <thrust/detail/config.h>
fid = open(fullpath)
if '#include <thrust/detail/config.h>' not in fid.read():
warn('Header <thrust/' + folder + headerfilename + '> does not include <thrust/detail/config.h>')
# generate source files which #include all headers
all_headers_cu = env.CUFile('all_headers.cu', header_fullpaths)
all_headers_cpp = env.CUFile('all_headers_cpp.cpp', header_fullpaths)
sources.append(all_headers_cu)
sources.append(all_headers_cpp)
# and the file with main()
sources.append('main.cu')
tester = env.Program('tester', sources)
| Python |
import os
import glob

# try to import an environment first
try:
    Import('env')
except:
    exec open("../../build/build-env.py")
    env = Environment()

# on mac we have to tell the linker to link against the C++ library
if env['PLATFORM'] == "darwin":
    env.Append(LINKFLAGS = "-lstdc++")

# find all .cus & .cpps in the current directory
# (this example uses a fixed two-file source list)
sources = ['device.cu','host.cpp']

# compile example
env.Program('example', sources)
| Python |
import os
import inspect
import glob

# try to import an environment first
try:
    Import('env')
except:
    exec open("../build/build-env.py")
    env = Environment()

# find all .cus & .cpps in the current directory
sources = []
directories = ['.']
extensions = ['*.cu', '*.cpp']

# add the backend-specific example subdirectory for the selected backend
if env['backend'] == 'cuda' or env['backend'] == 'ocelot':
    directories.append('cuda')
elif env['backend'] == 'tbb':
    directories.append('tbb')
elif env['backend'] == 'omp':
    directories.append('omp')
#TODO handle mixed/multiple systems

for dir in directories:
    for ext in extensions:
        regexp = os.path.join(dir, ext)
        sources.extend(glob.glob(regexp))

# compile examples
commands = []
for src in sources:
    program = env.Program(src)
    # add the program to the 'run_examples' alias
    program_alias = env.Alias('run_examples', [program], program[0].abspath)
    # always build the 'run_examples' target whether or not it needs it
    env.AlwaysBuild(program_alias)
| Python |
#!/usr/bin/env python
# encoding: UTF-8
# Minimal OneRing smoke-test application: serves a tiny WSGI app through the
# onering native bridge (see ``app`` below).
import sys
import logging
# json is stdlib from Python 2.6 on; fall back to the simplejson package on
# older interpreters.
try:
    import json
except ImportError:
    import simplejson as json
import onering  # project-local bridge; provides register_wsgi_app()/loop()
# Verbose logging for this test script.
logging.basicConfig(level=logging.DEBUG)
def app(environ, start_response):
    """Tiny WSGI demo application for exercising the OneRing bridge.

    Routes:
      /init -- JSON window configuration consumed by the native shell
      /     -- demo HTML page
      other -- empty body
    """
    start_response('200 OK', [])
    path = environ['PATH_INFO']
    if path == '/init':
        # Window geometry and start URL for the native shell.
        return [json.dumps({'width':400, 'height':300, 'title':"Douban Radio",
                            'url': '/',
                            })]
    if path == '/':
        return ["""<html>
<head>
<title>OneRing测试</title>
<script type="text/javascript" src="onering://onering/onering.js"></script>
</head>
<body>
<img src="http://www.douban.com/pics/logosmall.gif" />
<br/>
<a href="#"
onclick="javascript:ONERING.getCurrentWindow().createWindow('/', 100, 100);return false">创建新窗口</a>
</body>
</html>
"""]
    return [""]
# Register the WSGI app under the name 'test' and enter the native event
# loop (blocks until the application exits).
onering.register_wsgi_app('test', app)
onering.loop("test")
| Python |
import os, sys
from setuptools import setup, Extension

# Platform-specific build configuration for the _onering C extension.
if sys.platform == 'darwin':
    # On Mac OS X, build against the installed OneRing framework.
    build_cfg = dict(
        include_dirs=['/Library/Frameworks/OneRing.framework/Headers/'],
        library_dirs=[],
        libraries=[],
        extra_link_args=['-framework', 'OneRing'],
    )
else:
    # Elsewhere, use the in-tree headers and link against OneRing1 from the
    # release/debug build output directories.
    build_cfg = dict(
        include_dirs=[os.path.join('..', '..', 'include')],
        library_dirs=[os.path.join('..', '..', 'release'),
                      os.path.join('..', '..', 'debug')],
        libraries=['OneRing1'],
        extra_link_args=[],
    )

onering_module = Extension('_onering',
                           sources=['_onering.c'],
                           **build_cfg)

# Python < 2.6 has no stdlib json module; require simplejson there.
install_requires = ["simplejson"] if sys.version_info < (2, 6) else []

setup(name="OneRing",
      version="0.1",
      ext_modules=[onering_module],
      py_modules=['onering'],
      zip_safe=False,
      install_requires=install_requires,
      )
| Python |
# Python-side wrapper around the _onering C extension: registers WSGI apps
# with the native shell and relays pub/sub messages.
import logging
from cStringIO import StringIO
# json is stdlib from Python 2.6; fall back to simplejson on older versions.
try:
    import json
except ImportError:
    import simplejson as json
import _onering  # C extension exposing the native OneRing API

# Public API of this module.
__all__ = ['register_wsgi_app', 'loop', 'publish']

logger = logging.getLogger('onering')
def register_wsgi_app(appname, app):
    """Expose *app* (a WSGI callable) to the native side under *appname*.

    The native layer delivers requests as (method, url, body) triples; the
    registered callback translates them into a WSGI environ, runs the app,
    and returns the raw HTTP response (status line + headers + body) as a
    single string.
    """
    def callback(method, url, body=None):
        chunks = []
        # Split the URL into path and query string ('' when there is no '?').
        path, _sep, query = url.partition('?')
        environ = {
            'REQUEST_METHOD': method,
            'SCRIPT_NAME': '',
            'PATH_INFO': path,
            'QUERY_STRING': query,
            'HTTP_HOST': appname,
        }
        if body is not None:
            environ['wsgi.input'] = StringIO(body)
            environ['CONTENT_LENGTH'] = len(body)

        def start_response(status, response_headers, exc_info=None):
            # Emit the status line and headers ahead of the body chunks.
            chunks.append("HTTP/1.1 %s\r\n" % status)
            chunks.extend("%s: %s\r\n" % h for h in response_headers)
            chunks.append("\r\n")

        chunks.extend(app(environ, start_response))
        raw = ''.join(chunks)
        logger.debug("%s %s %s -> %d bytes", method, url, body,
                     len(raw))
        return raw
    return _onering.register_app(appname, callback)
def loop(appname):
    """Run the native event loop for *appname*; blocks until it exits."""
    return _onering.loop(appname)
def publish(channel, data=None):
    """JSON-encode *data* and publish it on *channel* via the native bridge."""
    return _onering.publish(channel, json.dumps(data))
| Python |
#!/usr/bin/env python
# encoding: UTF-8
# OneRing feature-demo application built on web.py + Mako templates.
__version__ = '0.1'

import sys, os
# json is stdlib from Python 2.6; fall back to simplejson on older versions.
try:
    import json
except ImportError:
    import simplejson as json
import logging
import web  # web.py framework
from web.contrib.template import render_mako
import onering  # project-local bridge to the native OneRing shell
# web.py routing table: (path-regex, handler-class-name) pairs.
urls = (
    '/init', 'init',
    '/', 'index',
    '/create_window', 'create_window',
    '/create_window/new', 'create_window_new',
    '/systrayicon', 'systrayicon',
    '/systraymenu', 'systraymenu',
    '/font-face', 'font_face',
    '/shadow', 'shadow',
    '/localstorage', 'localstorage',
    '/hotkey', 'hotkey',
    '/browser', 'browser',
    '/browser/url', 'browser_url',
    '/browser/launcher', 'browser_launcher',
    '/audio', 'audio',
    '/about', 'about',
    '/pubsub', 'pubsub',
    '/pubsub/scan', 'pubsub_scan',
    '/static/(.*)', 'static',
)

# Mako template renderer rooted at ./templates (UTF-8 in and out).
render = render_mako(
    directories=['templates'],
    input_encoding='utf8',
    output_encoding='utf8',
)
def jsonize(func):
    """Decorator: JSON-encode the handler's return value and set the
    Content-Type header to application/json."""
    import functools

    # FIX: use functools.wraps so the wrapped handler keeps its __name__
    # and docstring (useful for debugging/introspection).
    @functools.wraps(func)
    def _(*a, **kw):
        ret = func(*a, **kw)
        web.header('Content-Type', 'application/json')
        return json.dumps(ret)
    return _
class init:
    """Handler for /init: window configuration consumed by the native shell."""
    @jsonize
    def GET(self):
        # Jump straight to a specific demo page when --demo was given on
        # the command line.
        if startup_demo:
            url = '/' + startup_demo
        else:
            url = '/'
        return dict(width=640, height=480, title="OneRing演示", url=url,
                    appname="OneRing Demo", icon="/static/onering.ico",
                    )
# Template-only page handlers: each GET renders the Mako template of the
# same name from ./templates.
class index:
    def GET(self):
        return render.index()
class create_window:
    def GET(self):
        return render.create_window()
class create_window_new:
    def GET(self):
        return render.create_window_new()
class systrayicon:
    def GET(self):
        return render.systrayicon()
class systraymenu:
    def GET(self):
        return render.systraymenu()
class font_face:
    def GET(self):
        return render.font_face()
class shadow:
    def GET(self):
        return render.shadow()
class localstorage:
    def GET(self):
        return render.localstorage()
class hotkey:
    def GET(self):
        return render.hotkey()
class browser:
    def GET(self):
        return render.browser()
class browser_url:
    """GET /browser/url: the URL the embedded browser demo should load."""
    @jsonize
    def GET(self):
        return {"url": "http://code.google.com/p/onering-desktop/"}
class browser_launcher:
    """POST /browser/launcher: open the submitted URL in the system browser."""
    def POST(self):
        import webbrowser
        target = web.input().url
        webbrowser.open(target)
# More template-only handlers (see the group above).
class audio:
    def GET(self):
        return render.audio()
class about:
    def GET(self):
        return render.about()
class pubsub:
    def GET(self):
        return render.pubsub()
class pubsub_scan:
    # POST /pubsub/scan: start a daemon thread that walks the current
    # directory tree, publishing each file name on the "filescan" pub/sub
    # channel roughly once per second, then publishes a None sentinel and
    # stops after about 10 iterations.
    def POST(self):
        def scan():
            import time
            i = 0
            for dirpath, dirnames, filenames in os.walk('.'):
                for filename in filenames:
                    print filename
                    onering.publish("filescan",
                                    os.path.join(dirpath, filename))
                    time.sleep(1)
                    i += 1
                    if i > 10:
                        # None tells subscribers the scan is over.
                        onering.publish("filescan", None);
                        return
        import threading
        t = threading.Thread(target=scan)
        # Daemon thread: do not keep the process alive for the scan.
        t.setDaemon(True)
        t.start()
        return True
class static:
    def GET(self, filename):
        """Serve a file from ./static with a best-guess Content-Type.

        SECURITY FIX: *filename* comes straight from the URL regex; reject
        absolute paths and '..' components so requests cannot escape the
        static directory.
        """
        normalized = os.path.normpath(filename)
        if os.path.isabs(normalized) or normalized.startswith('..'):
            raise web.notfound()
        # FIX: close the file instead of leaking the handle.
        f = open('static/%s' % filename, 'rb')
        try:
            content = f.read()
        finally:
            f.close()
        content_types = {
            '.js': 'text/javascript',
            '.css': 'text/css',
            '.ico': 'image/x-icon',
        }
        ext = os.path.splitext(filename)[1]
        content_type = content_types.get(ext, 'application/octet-stream')
        web.header('Content-Type', content_type)
        web.header('Content-Length', len(content))
        return content
# Command-line flags: -v enables debug logging; --demo <name> opens the
# named demo page at startup instead of the index.
if '-v' in sys.argv:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig()
if '--demo' in sys.argv:
    startup_demo = sys.argv[sys.argv.index('--demo')+1]
else:
    startup_demo = None

# web.py application built from the routing table above.
app = web.application(urls, globals(), autoreload=True)

if __name__ == '__main__':
    # Hand the WSGI app to the native OneRing shell and block in its loop.
    onering.register_wsgi_app("demo", app.wsgifunc())
    onering.loop("demo")
| Python |
from distutils.core import setup
from glob import glob
import sys, os
from demo import __version__ as version
def gen_data_files(topdir):
    """Yield (directory, [file paths]) pairs under *topdir* in the format
    distutils' ``data_files`` expects, skipping hidden files and pruning
    hidden directories from the walk."""
    for dirpath, dirnames, filenames in os.walk(topdir):
        visible = [name for name in filenames if not name.startswith('.')]
        yield dirpath, [os.path.join(dirpath, name) for name in visible]
        # In-place prune so os.walk never descends into dot-directories.
        dirnames[:] = [d for d in dirnames if not d.startswith('.')]
# Bundle templates and static assets; qt.conf sits next to the executable.
data_files = list(gen_data_files('templates')) + list(gen_data_files('static'))
data_files.append(('', ['qt.conf']))
if sys.platform != 'darwin':
    # Mac OS X should use ./deploy_plugins to change plugins install name
    data_files += list(gen_data_files('plugins'))
setup_kwargs = dict(
    data_files = data_files,
)
if sys.platform == 'win32':
    # py2exe freeze configuration (importing py2exe registers its commands).
    import py2exe
    py2exe_options = dict(
        compressed = True,
        includes = ['mako.cache'],
        dist_dir = 'demo-' + version,
    )
    py2exe_kwargs = dict(
        windows = [
            {'script': 'demo.py',
             'icon_resources': [(1, 'static/onering.ico')],
             },
        ],
        zipfile = os.path.normcase('lib/site-packages.zip'),
        options = {'py2exe': py2exe_options},
    )
    setup_kwargs.update(py2exe_kwargs)
if sys.platform == 'darwin':
    # py2app bundle configuration (importing py2app registers its commands).
    import py2app
    py2app_options = dict(
        includes = ['mako.cache'],
        iconfile = 'onering.icns',
    )
    py2app_kwargs = dict(
        app = ['demo.py'],
        setup_requires = ['py2app'],
        options=dict(py2app=py2app_options),
    )
    setup_kwargs.update(py2app_kwargs)
setup(**setup_kwargs)
| Python |
#!/usr/bin/env python
# encoding: UTF-8
# Deploy Qt frameworks and plugins into a Mac OS X app bundle, rewriting
# install names with install_name_tool so the bundle is self-contained
# (a minimal hand-rolled macdeployqt).
__author__ = "Qiangning Hong <hongqn@gmail.com>"
__version__ = "$Revision$"
__date__ = "$Date$"

import sys, os
import logging
import commands  # Python 2 only; superseded by subprocess in Python 3
import re
import shutil
from subprocess import check_call

logger = logging.getLogger()
def get_qt_dir():
    """Locate the Qt installation the OneRing framework was linked against.

    Runs ``otool -L`` on the framework binary and extracts the path prefix
    in front of the first Qt framework reference. Returns '' when the
    references carry no prefix (a binary/framework-relative package).
    """
    onering = '/Library/Frameworks/OneRing.framework/OneRing'
    cmd = "otool -L %s" % onering
    for line in commands.getoutput(cmd).splitlines():
        match = re.match(r'\s+(.*)Qt\w+\.framework/Versions/', line)
        if match:
            qt_dir = match.group(1)
            break
    else:
        # No Qt reference at all: the binary is not the one we expect.
        raise Exception("wrong OneRing")
    # Strip a trailing 'lib/' so we return the Qt root, keeping the slash.
    if qt_dir.endswith('/lib/'):
        qt_dir = qt_dir[:-4]
    logger.info("Qt dir: %s", qt_dir)
    return qt_dir  # binary package
def fix_plugin_install_name(dylib, qtlibdir, appbundle):
    """Rewrite *dylib*'s Qt install names to @executable_path-relative ones.

    For every Qt (or phonon) framework the dylib links against, deploy the
    framework into the bundle if it is not there yet, then point the dylib
    at the bundled copy with install_name_tool.
    """
    # FIX: replaced a stray Python 2 debug statement (`print qtlibdir`)
    # with a proper log call.
    logger.debug("qtlibdir: %s", qtlibdir)
    # The first two otool lines are the file name and the dylib's own id.
    for line in commands.getoutput("otool -L %s" % dylib).splitlines()[2:]:
        line = line.strip()
        m = re.match(re.escape(qtlibdir)+r"((Qt\w+|phonon)\.framework/Versions/4/\w+)", line)
        if m:
            framework = m.group(2)
            if not os.path.exists(os.path.join(
                    appbundle, 'Contents', 'Frameworks',
                    framework+'.framework')):
                deploy_framework(framework, qtlibdir, appbundle)
            old_id = m.group(0)
            new_id = "@executable_path/../Frameworks/" + m.group(1)
            logger.info("change id: %s -> %s", old_id, new_id)
            check_call(['install_name_tool', '-change', old_id, new_id, dylib])
def fix_framework_install_name(binpath, framework, qtlibdir, appbundle):
    # Point the framework binary's own id at the bundled location, then fix
    # its references to other Qt frameworks.
    new_id = '@executable_path/../Frameworks/%s.framework/Versions/4/%s' \
             % (framework, framework)
    logger.info("change id: %s", new_id)
    check_call(['install_name_tool', '-id', new_id, binpath])
    fix_plugin_install_name(binpath, qtlibdir, appbundle)
def deploy_framework(framework, qtlibdir, appbundle):
    """Copy one Qt framework into <appbundle>/Contents/Frameworks, recreate
    the standard framework symlinks, rewrite install names and strip it."""
    # Empty qtlibdir ('' = binary package) falls back to the system location.
    src = os.path.join(qtlibdir or '/Library/Frameworks',
                       framework+'.framework')
    dst = os.path.join(appbundle, 'Contents', 'Frameworks',
                       framework+'.framework')
    os.makedirs(os.path.join(dst, 'Versions', '4'))
    logger.info("copy %s", framework+'.framework')
    # Only the versioned binary is copied; headers/resources are not needed
    # at run time.
    shutil.copy(os.path.join(src, 'Versions', '4', framework),
                os.path.join(dst, 'Versions', '4', framework))
    os.symlink(os.path.join('Versions', '4', framework),
               os.path.join(dst, framework))
    os.symlink('4', os.path.join(dst, 'Versions', 'Current'))
    fix_framework_install_name(os.path.join(dst, 'Versions', '4', framework),
                               framework, qtlibdir, appbundle)
    strip(os.path.join(dst, 'Versions', '4', framework))
def strip(path):
    # Strip local symbols (-x) from the binary to shrink the bundle.
    logger.info("strip %s", path)
    check_call(['strip', '-x', path])
def deploy_plugin(plugin, srcdir, dstdir, qtlibdir, appbundle):
    """Copy one Qt plugin ('<subdir>/<file>') into the bundle, strip it and
    rewrite its Qt install names."""
    # NOTE(review): assumes exactly one '/' in *plugin*; a deeper path makes
    # this unpacking raise ValueError -- confirm against callers.
    pdir, pfile = plugin.split('/')
    if not os.path.exists(os.path.join(dstdir, pdir)):
        os.makedirs(os.path.join(dstdir, pdir))
    logger.info("copy %s", plugin)
    shutil.copy(os.path.join(srcdir, plugin), os.path.join(dstdir, plugin))
    strip(os.path.join(dstdir, plugin))
    fix_plugin_install_name(os.path.join(dstdir, plugin), qtlibdir, appbundle)
def main(appbundle, plugins):
    """Deploy all *plugins* (and any Qt frameworks they link against) into
    *appbundle*."""
    qtdir = get_qt_dir()
    # Source and binary Qt installs keep plugins in different places; ''
    # (binary package) falls back to the developer-tools location.
    psrcdir = os.path.join(qtdir, 'plugins') if qtdir \
              else '/Developer/Applications/Qt/plugins'
    pdstdir = os.path.join(appbundle, 'Contents', 'plugins')
    # '' stays '' (framework-relative package); otherwise '<qtdir>/lib/'.
    qtlibdir = qtdir and os.path.join(qtdir, 'lib/')
    for plugin in plugins:
        deploy_plugin(plugin, psrcdir, pdstdir, qtlibdir, appbundle)
if __name__ == '__main__':
    from optparse import OptionParser
    parser = OptionParser(usage="%prog [options] appbundle plugin1...")
    parser.add_option('-v', '--verbose', action='store_true')
    parser.add_option('-q', '--quiet', action='store_true')
    options, args = parser.parse_args()
    # Verbosity: -q -> WARNING, -v -> DEBUG, default INFO (classic and/or
    # selection idiom).
    logging.basicConfig(
        level = options.quiet and logging.WARNING
                or options.verbose and logging.DEBUG
                or logging.INFO,
        format='%(asctime)s %(levelname)s %(message)s')
    appbundle, plugins = args[0], args[1:]
    main(appbundle, plugins)
| Python |
'''
Created on Dec 27, 2010

@author: t-bone
'''
from django.shortcuts import render_to_response
from django.forms import forms, fields, widgets
from google.appengine.api import taskqueue
#from google.appengine.ext import db
from django.http import HttpResponseRedirect, HttpResponse
from google.appengine.api import memcache
from datastore import models
from google.appengine.ext.db import djangoforms
from django.forms.extras.widgets import SelectDateWidget
from miscellaneous import data_loader
from google.appengine.api import users
import time, datetime

# Choice tuples for the pivot ("cube") axes and the trade product selector.
LEFT_KIND = [
    ('Book', 'Book'),
    ('Commodity', 'Commodity')]
TOP_KIND = [('Delivery','Delivery')]
PRODUCT = [('Forward','Forward'),
           ('Option','Option')]
def entry(request, kind, obj_id=None):
    """Generic create/edit view for a model *kind*.

    *kind* names both a model class (``models.<kind>``) and a form class
    (``<kind>Form`` in this module). With *obj_id* the existing entity is
    edited, otherwise a new one is created.
    """
    # SECURITY FIX: resolve the model/form classes with getattr()/globals()
    # lookups instead of eval() on a request-derived string.
    model_cls = getattr(models, kind)
    form_cls = globals()[kind + 'Form']
    if request.method == 'POST':
        error = None
        if obj_id:
            db_obj = model_cls.get_by_id(int(obj_id))
            form = form_cls(data=request.POST, instance=db_obj)
        else:
            form = form_cls(data=request.POST)
        form.set_choices()
        if form.is_valid():
            db_obj = form.save(commit=False)
            db_obj.put()
            obj_id = str(db_obj.key().id())
        else:
            error = form.errors
        if error:
            # Re-render the form with validation errors.
            return render_to_response('entry.html', {'form': form,
                                                     'obj_id': obj_id,
                                                     'kind': kind})
        else:
            return HttpResponseRedirect('/entry/' + kind)
    else:
        if obj_id:
            db_obj = model_cls.get_by_id(int(obj_id))
            form = form_cls(instance=db_obj)
        else:
            form = form_cls()
        form.set_choices()
        return render_to_response('entry.html', {'form': form,
                                                 'obj_id': obj_id,
                                                 'kind': kind})
def entry_list(request, kind='Holiday', obj_id=None):
    """Create/edit view for models that carry a ``date`` list attribute
    (e.g. Holiday), showing the stored list next to the form."""
    # SECURITY FIX: getattr()/globals() lookups instead of eval() on a
    # request-derived string.
    model_cls = getattr(models, kind)
    form_cls = globals()[kind + 'Form']
    if request.method == 'POST':
        entry_button = request.POST.get('entry')
        error = None
        if obj_id:
            db_obj = model_cls.get_by_id(int(obj_id))
            form = form_cls(data=request.POST, instance=db_obj)
        else:
            form = form_cls(data=request.POST)
        form.set_choices()
        if form.is_valid():
            db_obj = form.save(commit=False)
            db_obj.put()
            obj_id = str(db_obj.key().id())
        else:
            error = form.errors
        if error:
            return render_to_response('entry_list.html', {'form': form,
                                                          'obj_id': obj_id,
                                                          'kind': kind})
        elif entry_button == 'Add':
            # Stay on the page and show the updated date list.
            return render_to_response('entry_list.html', {'form': form,
                                                          'obj_id': obj_id,
                                                          'kind': kind,
                                                          'list': db_obj.date})
        else:
            return HttpResponseRedirect('/entry/' + kind)
    else:
        if obj_id:
            db_obj = model_cls.get_by_id(int(obj_id))
            form = form_cls(instance=db_obj)
            dates = db_obj.date
        else:
            # BUG FIX: previously db_obj was undefined on this path and the
            # final render crashed with a NameError; show an empty list.
            form = form_cls()
            dates = []
        form.set_choices()
        return render_to_response('entry_list.html', {'form': form,
                                                      'obj_id': obj_id,
                                                      'kind': kind,
                                                      'list': dates})
def trade_entry(request, derivative = 'Forward', basket = False, trade_id = None, underlying_id = None):
    """Create/edit view for a Trade together with its Underlying(s) and
    Delivery term.

    On POST the Trade form, the (Single)Underlying form and the Delivery
    form are validated and saved in sequence; on GET the forms are
    pre-populated from the stored entities when *trade_id* is given.
    *basket* switches between a multi-underlying and a single-underlying
    trade.
    """
    if request.method == 'POST':
        entry_button = request.POST.get('entry')
        # --- Trade ---
        if trade_id:
            trade = models.Trade.get_by_id(int(trade_id))
            form = TradeForm(data=request.POST, instance=trade)
        else:
            form = TradeForm(data=request.POST)
        form.set_choices()
        if form.is_valid():
            trade = form.save(commit=False)
            trade.put()
        else:
            # NOTE(review): collected but never rendered to the user.
            form_error = form.errors
        # --- Underlying ---
        underlying = models.Underlying.all().filter('trade =', trade).get()
        if basket:
            # 'Save' edits the existing component; 'Add' creates a new one.
            if underlying and entry_button == 'Save':
                underlying_form = UnderlyingForm(trade = trade, data=request.POST, instance=underlying)
            else:
                underlying_form = UnderlyingForm(trade = trade, data=request.POST)
        else:
            if underlying:
                underlying_form = SingleUnderlyingForm(trade = trade, data=request.POST, instance=underlying)
            else:
                underlying_form = SingleUnderlyingForm(trade = trade, data=request.POST)
        underlying_form.set_choices()
        if underlying_form.is_valid():
            underlying = underlying_form.save(commit=False)
            underlying.put()
        else:
            underlying_form_error = underlying_form.errors
            print underlying_form_error
        # --- Delivery term of the trade ---
        if underlying.delivery:
            delivery_form = DeliveryForm(underlying = underlying, data=request.POST, instance=underlying.delivery)
        else:
            delivery_form = DeliveryForm(underlying = underlying, data=request.POST)
        if delivery_form.is_valid():
            delivery = delivery_form.save(commit=False)
            delivery.put()
            underlying.delivery = delivery
        else:
            print delivery_form.errors
        # Saves the underlying key on the trade
        if underlying_form.is_valid():
            underlying.put()
            trade.underlying.append(underlying.key())
        #subobj_id = db_subobj.key().id()
        if form.is_valid():
            trade.put()
            if not(trade_id):
                trade_id = str(trade.key().id())
        if entry_button != "Save":
            # NOTE(review): mutating request.method right before a redirect
            # appears to have no effect -- confirm intent.
            request.method = 'GET'
            return HttpResponseRedirect('/entry/Trade/' + derivative + '/Basket:' + trade_id)
        else:
            return HttpResponseRedirect('/entry/Trade/')
    else:
        delivery_form = None
        underlying_form = None
        underlying_list = []
        if trade_id:
            # Pre-populate all three forms from the stored trade.
            trade = models.Trade.get_by_id(int(trade_id))
            form = TradeForm(instance = trade)
            form.set_choices()
            #subobj_id = trade.underlying.key().id()
            #underlying_keys = trade.underlying
            underlying_query = models.Underlying.all().filter('trade =', trade)
            underlying_instance = underlying_query.get()
            if underlying_instance:
                if basket:
                    underlying_form = UnderlyingForm(initial={"commodity" : underlying_instance.commodity.name,
                                                              "delivery_point" : underlying_instance.delivery_point.name,
                                                              "weight" : underlying_instance.weight,
                                                              "uom" : underlying_instance.uom.name})
                    # Row per component: "<weight>: <commodity> @ <point>".
                    for underlying in underlying_query:
                        underlying_list.append([str(underlying.weight) + ': ' + underlying.commodity.name + ' @ ' + underlying.delivery_point.name, underlying.key().id()])
                else:
                    underlying_form = SingleUnderlyingForm(initial={"commodity" : underlying_instance.commodity.name,
                                                                    "delivery_point" : underlying_instance.delivery_point.name,
                                                                    "uom" : underlying_instance.uom.name})
                delivery = underlying_instance.delivery
            else:
                underlying_form = UnderlyingForm()
                #underlying_list.append(['No underlying is set'])
                delivery = None
            underlying_form.set_choices()
            if delivery:
                #delivery_id = delivery.key().id()
                delivery_form = DeliveryForm(initial={"date" : underlying_instance.delivery.date,
                                                      "quantity" : underlying_instance.delivery.quantity})
            else:
                delivery_form = DeliveryForm()
        else:
            # New trade: empty forms.
            form = ForwardForm(data=request.GET)
            form.set_choices()
            if basket:
                underlying_form = UnderlyingForm()
            else:
                underlying_form = SingleUnderlyingForm()
            underlying_form.set_choices()
            delivery_form =DeliveryForm()
            delivery_form.set_choices()
        return render_to_response('trade.html', {'form' : form,
                                                 'obj_id' : trade_id,
                                                 'kind' : derivative,
                                                 'basket' : basket,
                                                 'list' : underlying_list,
                                                 'subform' : underlying_form,
                                                 'subsubform' : delivery_form})
def option_entry(request, obj_id = None, subkind = None, subobj_id = None):
    # Option create/edit view (Trade -> Underlying -> Delivery chain driven
    # by each form's reference()).
    #
    # NOTE(review): this function references a name `kind` that is never
    # defined in it (nor at module level in this file), so every eval()
    # below raises NameError. It looks like an out-of-date copy of the
    # generic entry view -- confirm before relying on it. Consider
    # getattr()/globals() lookups instead of eval() in any case.
    if request.method == 'POST':
        if obj_id:
            db_obj = eval('models.' + kind + '.get_by_id(int(obj_id))')
            form = eval(kind + 'Form(data=request.POST,instance=db_obj)')
        else:
            form = eval(kind + 'Form(data=request.POST)')
        form.set_choices()
        if form.is_valid():
            db_obj = form.save(commit=False)
            db_obj.put()
        else:
            pass
        subkind = form.reference()
        if subkind:
            if db_obj.underlying:
                subform = eval(subkind + 'Form(trade = db_obj,data=request.POST,instance=db_obj.underlying)')
            else:
                subform = eval(subkind + 'Form(trade = db_obj,data=request.POST)')
            subform.set_choices()
            if subform.is_valid():
                underlying = subform.save(commit=False)
                underlying.put()
            else:
                # NOTE(review): elsewhere in this file `errors` is accessed
                # without calling; calling it here looks like a bug.
                print subform.errors()
            subsubkind = subform.reference()
            if subsubkind:
                if underlying.delivery:
                    subsubform = eval(subsubkind + 'Form(underlying = underlying,data=request.POST,instance=underlying.delivery)')
                else:
                    subsubform = eval(subsubkind + 'Form(underlying = underlying,data=request.POST)')
                if subsubform.is_valid():
                    subsubdb_obj = subsubform.save(commit=False)
                    subsubdb_obj.put()
                    underlying.delivery = subsubdb_obj
                else:
                    print subsubform.errors()
            if subform.is_valid():
                underlying.put()
                db_obj.underlying = underlying
        #subobj_id = db_subobj.key().id()
        if form.is_valid():
            db_obj.put()
            if not(obj_id):
                obj_id = str(db_obj.key().id())
        entry_button = request.POST.get('entry')
        if entry_button != "Save":
            request.method = 'GET'
            return HttpResponseRedirect('/entry/' + kind + ':' + obj_id + '/' + entry_button)
        else:
            return HttpResponseRedirect('/entry/' + kind)
    else:
        subsubform = None
        subform = None
        if obj_id:
            db_obj = eval('models.' + kind + '.get_by_id(int(obj_id))')
            form = eval(kind + 'Form(instance = db_obj)')
            form.set_choices()
            subkind = form.reference()
            if subkind:
                subobj_id = eval('db_obj.' + subkind.lower() + '.key().id()')
                subform = eval(subkind + 'Form(initial={"commodity" : db_obj.underlying.commodity.name})')
                subform.set_choices()
                subsubkind = subform.reference()
                if subsubkind:
                    subsubdb_obj = eval('db_obj.' + subkind.lower() + '.'+ subsubkind.lower())
                    if subsubdb_obj:
                        subsubobj_id = subsubdb_obj.key().id()
                        #subsubdb_obj = eval('models.' + subsubkind + '.get_by_id(int(subsubobj_id))')
                        #db_obj.underlying = underlying
                        #db_obj.put()
                        subsubform = eval(subsubkind + 'Form(initial={"date" : db_obj.underlying.delivery.date,"quantity" : db_obj.underlying.delivery.quantity})')
                    else:
                        subsubform = eval(subsubkind + 'Form()')
        else:
            form = TradeForm(data=request.GET)
            form.set_choices()
            subkind = form.reference()
            if subkind:
                subform = eval(subkind + 'Form()')
                subform.set_choices()
                subsubkind = subform.reference()
                if subsubkind:
                    subsubform = eval(subsubkind + 'Form()')
                    subsubform.set_choices()
        return render_to_response('trade.html', {'form' : form,
                                                 'obj_id' : obj_id,
                                                 'kind' : 'Trade',
                                                 'subform' : subform,
                                                 'subsubform' : subsubform})
def delete(request, kind, obj_id=None):
    """Delete the entity of model *kind* with id *obj_id*, then return to
    that kind's entry page. A missing obj_id is a no-op."""
    if obj_id:
        # SECURITY FIX: getattr() lookup instead of eval() on a
        # request-derived string.
        db_obj = getattr(models, kind).get_by_id(int(obj_id))
        db_obj.delete()
    return HttpResponseRedirect('/entry/' + kind)
def underlying_delete(request, derivative = 'Forward', basket = False, trade_id = None, underlying_id = None):
    """Detach one Underlying from its Trade and delete it, then return to
    the basket edit page for the trade."""
    if underlying_id:
        underlying = models.Underlying.get_by_id(int(underlying_id))
        # Remove the back-reference from the trade before deleting.
        underlying.trade.underlying.remove(underlying.key())
        underlying.trade.put()
        underlying.delete()
    else:
        pass
    return HttpResponseRedirect('/entry/Trade/' + derivative + '/Basket:' + trade_id)
def list_delete(request, kind = 'Holiday', obj_id = None, attribute = None, year = None, month = None, day = None):
    """Remove the date (year, month, day) from the list attribute
    *attribute* of the entity models.<kind>[obj_id], then return to its
    edit page."""
    if year:
        date = datetime.datetime(int(year), int(month), int(day))
    else:
        # BUG FIX: without a year there is no date to remove; previously the
        # code fell through and crashed with a NameError on `date`.
        date = None
    if obj_id and date is not None:
        # SECURITY FIX: getattr() lookups instead of eval() on
        # request-derived strings.
        db_obj = getattr(models, kind).get_by_id(int(obj_id))
        getattr(db_obj, attribute).remove(date)
        db_obj.put()
    return HttpResponseRedirect('/entry/' + kind + ':' + obj_id)
def list(request, kind):
    """List all entities of models.<kind>.

    NOTE: the view name shadows the builtin list(); kept because the URL
    configuration references it by this name.
    """
    # SECURITY FIX: getattr() lookup instead of eval() on a request-derived
    # string.
    db_obj = getattr(models, kind).all()
    return render_to_response('list.html', {'list': db_obj, 'kind': kind})
def imports(request):
    """Upload view for market-data files (prices, volatilities, interest
    rates); dispatches on the label of the submit button pressed."""
    if request.method == 'POST':
        form = ImportForm(request.POST, request.FILES)
        loaders = {
            'Import Prices': data_loader.price,
            'Import Volatilities': data_loader.volatility,
            'Import Interest Rate': data_loader.interest_rate,
        }
        loader = loaders.get(request.POST.get('import'))
        if loader is not None:
            result = loader(request.FILES['file'])
        else:
            result = False
        done = 'Completed' if result else 'Failed'
    else:
        done = ''
        form = ImportForm()
    return render_to_response('import.html', {'form': form, 'done': done})
def import_prices(request):
    # Direct price-file upload endpoint (no form/result page, unlike the
    # ``imports`` view).
    result = data_loader.price(request.FILES['file'])
    return HttpResponseRedirect('/')
def script(request):
    """One-off maintenance endpoint: on POST, delete every Underlying and
    Delivery entity. The commented-out blocks are earlier cleanups kept
    for reference."""
    #trade_query = models.Trade.all()
    #derivative = models.Derivatives.all().filter("name =","Forward").get()
    #for trade in trade_query:
    #trade.derivative = derivative
    #trade.put()
    if request.method == 'POST':
        #===========================================================================
        # query = models.EndOfDay.all()
        #
        # for item in query:
        # item.delete()
        #
        # query = models.Market.all()
        #
        # for item in query:
        # item.delete()
        #
        # query = models.Price.all()
        #
        # for item in query:
        # item.delete()
        #
        # query = models.Volatility.all()
        #
        # for item in query:
        # item.delete()
        #
        # query = models.Delivery.all()
        #
        # for item in query:
        # if not(item.underlying):
        # item.delete()
        #===========================================================================
        query = models.Underlying.all()
        for item in query:
            item.delete()
        query = models.Delivery.all()
        for item in query:
            item.delete()
        done = 'Completed'
    else:
        done = ""
    return render_to_response('script.html', {'done' : done})
def cube(request):
    """Aggregate trade quantities into a 2-D pivot table.

    Default (GET) pivot: commodity rows x distinct delivery-date columns.
    On POST the axes come from the submitted left_kind/top_kind model
    names.
    """
    trade = models.Trade.all()
    if trade.get():
        if request.method == 'POST':
            left_kind = request.POST.get('left_kind', '')
            left_obj = eval('models.' + left_kind + ".all().order('name')")
            left_ref = left_obj.get().reference()
            top_kind = request.POST.get('top_kind', '')
            top_obj = eval('models.' + top_kind + ".all().order('date')")
            top_ref = top_obj.get().reference()
            # Distinct, ordered column labels (relies on the date ordering).
            top = [top_obj.get().date]
            for deli in top_obj:
                if deli.date != top[-1]:
                    top.append(deli.date)
            left = [left.name for left in left_obj]
            center = [[0 for element in top] for item in left]
            #for item in left:
            #    center[counter_left].append(left[counter_left])
            # NOTE(review): `trades` is not defined anywhere in this
            # function (only `trade`), so this loop raises NameError --
            # confirm the intended query before using the POST path.
            for trade in trades:
                ix_top = eval('top.index(' + top_ref + '.' + top_kind.lower() + '.date)')
                ix_left = eval('left.index(' + left_ref + '.' + left_kind.lower() + '.name)')
                center[ix_left][ix_top] = center[ix_left][ix_top] + trade.underlying.delivery.quantity
            form = CubeForm(request.POST)
        else:
            delivery = models.Delivery.all().order('date')
            commodity = models.Commodity.all().order('name')
            top = [delivery.get().date]
            for deli in delivery:
                if deli.date != top[-1]:
                    top.append(deli.date)
            left = [com.name for com in commodity]
            center = [[0 for element in top] for item in left]
            #for item in left:
            #    center[counter_left].append(left[counter_left])
            # Sum delivery quantities into the (commodity, date) cells.
            for tr in trade:
                ix_top = top.index(tr.underlying.delivery.date)
                ix_left = left.index(tr.underlying.commodity.name)
                center[ix_left][ix_top] = center[ix_left][ix_top] + tr.underlying.delivery.quantity
            form = CubeForm()
        # Prepend each row's label to its numeric cells.
        center_temp = [[item] for item in left]
        for i in range(len(center)):
            center_temp[i].extend(center[i])
        center = center_temp
    else:
        center = None
        left = None
        form = CubeForm()
        top = 'No trade are saved. Please create a trade before using this tool.'
    return render_to_response('cube.html', {'center' : center, 'left' : left, 'top' : top, 'form' : form})
def welcome(request):
    """Landing page; shows a sign-out link when a user is logged in."""
    # BUG FIX: users.get_current_user was referenced without calling it, so
    # `user` was always truthy (the function object itself) and the
    # signed-in branch always ran. Call it to get the real user (or None).
    user = users.get_current_user()
    if user:
        logout_url = users.create_logout_url("/")
        return render_to_response("welcome.html", {'user': user, 'sign_out': logout_url})
    else:
        return render_to_response("welcome.html")
def valuation(request, obj_id = None):
    """Trade search & valuation view.

    GET: show empty search/market forms (plus the selected trade's forms
    when *obj_id* is given). POST: filter trades by commodity, book and
    derivative and, when 'Evaluate' was pressed, price the selected trade
    against the chosen end-of-day market, reporting MTM and delta.
    """
    results = None
    form = None
    subform = None
    subsubform = None
    if obj_id:
        # Pre-populate the trade / underlying / delivery display forms.
        db_obj = models.Trade.get_by_id(int(obj_id))
        form = TradeForm(instance = db_obj)
        form.set_choices()
        subkind = form.reference()
        if subkind:
            subobj_id = eval('db_obj.' + subkind.lower() + '.key().id()')
            subform = eval(subkind + 'Form(initial={"commodity" : db_obj.underlying.commodity.name,"delivery_point" : db_obj.underlying.delivery_point.name})')
            subform.set_choices()
            subsubkind = subform.reference()
            if subsubkind:
                subsubdb_obj = eval('db_obj.' + subkind.lower() + '.'+ subsubkind.lower())
                if subsubdb_obj:
                    subsubobj_id = subsubdb_obj.key().id()
                    subsubform = eval(subsubkind + 'Form(initial={"date" : db_obj.underlying.delivery.date,"quantity" : db_obj.underlying.delivery.quantity})')
                else:
                    subsubform = eval(subsubkind + 'Form()')
    if request.method == 'POST':
        trades = models.Trade.all()
        # Each filter applies only when something other than 'None' was
        # selected in the search form.
        if request.POST.get('commodity') != 'None':
            commodity_instance = models.Commodity.all(keys_only=True).filter("name =", request.POST.get('commodity')).get()
            underlying_query = models.Underlying.all(keys_only=True).filter("commodity =", commodity_instance)
            trades.filter("underlying in", [x for x in underlying_query])
        if request.POST.get('book') != 'None':
            book_instance = models.Book.all(keys_only=True).filter("name =", request.POST.get('book')).get()
            trades.filter("book =", book_instance)
        if request.POST.get('derivative') != 'None':
            derivative_instance = models.Derivatives.all(keys_only=True).filter("name =", request.POST.get('derivative')).get()
            trades.filter("derivative =", derivative_instance)
        search_form = SearchForm(request.POST)
        search_form.set_choices()
        market_form = MarketForm(request.POST)
        market_form.set_choices()
        if request.POST.get('valuation', '') == 'Evaluate':
            # Resolve the posted end-of-day date string to its entity key.
            eod = time.strptime(request.POST.get('eod', ''), "%Y-%m-%d")
            eod = datetime.date(eod[0], eod[1], eod[2])
            eod = models.EndOfDay.all(keys_only=True).filter("date =", eod).get()
            # NOTE(review): db_obj is only bound when obj_id was given;
            # pressing Evaluate without a selected trade raises NameError --
            # confirm intended flow.
            delivery_instance = models.Delivery.all(keys_only=True).filter("date =", db_obj.underlying.delivery.date).filter("quantity =", 1.0).get()
            market = models.Market.all()
            market.filter("delivery_point =", db_obj.underlying.delivery_point).filter('eod =', eod).filter("delivery =", delivery_instance)
            # Trade.eval() computes mtm/delta from the market snapshot.
            db_obj.eval(market.get())
            results = {'MTM' : db_obj.mtm, 'Delta' : db_obj.delta}
        return render_to_response("search.html", {'search_form' : search_form,
                                                  'form' : form,
                                                  'subform' : subform,
                                                  'subsubform' : subsubform,
                                                  'market_form' : market_form,
                                                  'list' : trades,
                                                  'obj_id' : obj_id,
                                                  'results': results})
    else:
        search_form = SearchForm()
        search_form.set_choices()
        market_form = MarketForm()
        market_form.set_choices()
        return render_to_response("search.html", {'search_form' : search_form,
                                                  'form' : form,
                                                  'subform' : subform,
                                                  'subsubform' : subsubform,
                                                  'market_form' : market_form,
                                                  'obj_id' : obj_id})
def market(request):
    """Market-data browser: shows either prices or volatilities for one
    end-of-day date and delivery point."""
    if request.method != 'POST':
        market_form = MarketForm()
        market_form.set_choices()
        return render_to_response("market.html", {'form':market_form})

    # Resolve the posted end-of-day date and delivery point to entities.
    eod = time.strptime(request.POST.get('eod', ''), "%Y-%m-%d")
    eod = datetime.date(eod[0], eod[1], eod[2])
    eod = models.EndOfDay.all().filter("date =", eod).get()
    delivery_point = models.DeliveryPoint.all().filter("name =", request.POST.get('delivery_point', '')).get()

    market = models.Market.all()
    market.filter("eod =", eod)
    market.filter("delivery_point =", delivery_point)

    market_form = MarketForm(request.POST)
    market_form.set_choices()

    # First table row is the header; the rest come from the market query.
    data_type = request.POST.get('type', '')
    if data_type == 'Price':
        table = [['Delivery','Mid','Bid','Offer']]
        table.extend([item.delivery.date, item.price.mid, item.price.bid, item.price.offer]
                     for item in market)
    elif data_type == 'Volatility':
        table = [['Delivery','Mid','Moneyness']]
        table.extend([item.delivery.date, item.volatility.mid, item.volatility.moneyness]
                     for item in market)
    else:
        table = []
    return render_to_response("market.html", {'form':market_form, 'table':table})
class CommodityForm(djangoforms.ModelForm):
    # Create/edit form for a Commodity; the unit of measure is chosen from
    # the stored UnitOfMeasure entities.
    uom = fields.ChoiceField(label='Unit of Measure')
    name = fields.CharField()

    def __init__(self, *args, **kwargs):
        # When editing, the instance's values are fed to the base class as
        # `data` (the instance itself is kept only on self).
        data = kwargs.get('data', None)
        self.instance = kwargs.get('instance', None)
        if data:
            super(CommodityForm, self).__init__(data=data)
        elif self.instance:
            initial = {'uom':self.instance.uom.name,
                       'name':self.instance.name}
            super(CommodityForm, self).__init__(data=initial)
        else:
            super(CommodityForm, self).__init__(*args, **kwargs)

    def set_choices(self):
        # Populate the uom dropdown from the datastore; must be called after
        # construction and before rendering/validation.
        uom = models.UnitOfMeasure.all().order('name')
        self['uom'].field.choices = [(x.name, x.name) for x in uom]

    def save(self, commit=True):
        """Create or update the Commodity entity from cleaned_data."""
        uom = models.UnitOfMeasure.all()
        uom.filter("name =", self.cleaned_data['uom'])
        if self.instance:
            instance = self.instance
            instance.uom = uom.get()
            instance.name = self.cleaned_data['name']
        else:
            instance = models.Commodity(uom = uom.get(), name = self.cleaned_data['name'])
        if commit:
            instance.put()
        return instance

    def reference(self):
        # No child form follows a Commodity.
        return None
class DeliveryForm(forms.Form):
    # One delivery term (date + quantity) attached to an Underlying.
    date = fields.DateField(label = 'Delivery Date', widget=SelectDateWidget())
    quantity = fields.FloatField(label = 'Quantity')

    def __init__(self, *args, **kwargs):
        # `underlying` and `instance` are consumed here; when editing, the
        # instance's values are fed to the base class as `data`.
        self.underlying = kwargs.get('underlying', None)
        data = kwargs.get('data', None)
        self.instance = kwargs.get('instance', None)
        if data:
            super(DeliveryForm, self).__init__(data=data)
        elif self.instance:
            initial = {'date':self.instance.date,
                       'quantity':self.instance.quantity}
            super(DeliveryForm, self).__init__(data=initial)
        else:
            super(DeliveryForm, self).__init__(*args, **kwargs)

    def set_choices(self):
        # No dropdowns on this form; present only for interface symmetry
        # with the other forms in this module.
        pass

    def save(self, commit=True):
        """Create or update the Delivery entity from cleaned_data."""
        if self.instance:
            instance = self.instance
            instance.date = self.cleaned_data['date']
            instance.quantity = self.cleaned_data['quantity']
            instance.underlying = self.underlying
        else:
            instance = models.Delivery(underlying = self.underlying, date = self.cleaned_data['date'], quantity = self.cleaned_data['quantity'])
        if commit:
            instance.put()
        return instance
class UnderlyingForm(forms.Form):
    # One basket component of a trade: a commodity at a delivery point with
    # a weight and unit of measure.
    commodity = fields.ChoiceField(label='Commodity')
    delivery_point = fields.ChoiceField(label='Delivery Point')
    weight = fields.FloatField(label='Weight', initial=1.0, required=False)
    # NOTE(review): Django form fields have no `default` attribute; this
    # assignment looks like a no-op -- confirm before removing.
    weight.default = 1.0
    uom = fields.ChoiceField(label='Unit of Measure')

    def __init__(self, *args, **kwargs):
        # `trade` and `instance` are consumed here; when editing, the
        # instance's values are fed to the base class as `data`.
        data = kwargs.get('data', None)
        self.instance = kwargs.get('instance', None)
        self.trade = kwargs.get('trade', None)
        if data:
            super(UnderlyingForm, self).__init__(data=data)
        elif self.instance:
            initial = {'commodity':self.instance.commodity.name,
                       'delivery_point':self.instance.delivery_point.name,
                       'weight':self.instance.weight,
                       'uom':self.instance.uom.name}
            super(UnderlyingForm, self).__init__(data=initial)
        else:
            super(UnderlyingForm, self).__init__(*args, **kwargs)

    def set_choices(self):
        # Populate all dropdowns from the datastore; call after construction
        # and before rendering/validation.
        commodity = models.Commodity.all().order('name')
        self['commodity'].field.choices = [(com.name, com.name) for com in commodity]
        delivery_point = models.DeliveryPoint.all().order('name')
        self['delivery_point'].field.choices = [(dp.name, dp.name) for dp in delivery_point]
        uom = models.UnitOfMeasure.all().order('name')
        self['uom'].field.choices = [(x.name, x.name) for x in uom]

    def reference(self):
        # The child form that follows an Underlying is its Delivery.
        return 'Delivery'

    def save(self, *args, **kwargs):
        """Create or update the Underlying entity from cleaned_data."""
        commodity = models.Commodity.all()
        commodity.filter("name =", self.cleaned_data['commodity'])
        delivery_point = models.DeliveryPoint.all()
        delivery_point.filter("name =", self.cleaned_data['delivery_point'])
        uom = models.UnitOfMeasure.all()
        uom.filter("name =", self.cleaned_data['uom'])
        if self.instance:
            instance = self.instance
            instance.commodity = commodity.get()
            instance.delivery_point = delivery_point.get()
            instance.trade = self.trade
            instance.weight = self.cleaned_data['weight']
            instance.uom = uom.get()
        else:
            instance = models.Underlying(trade = self.trade,
                                         commodity = commodity.get(),
                                         delivery_point = delivery_point.get(),
                                         weight = self.cleaned_data['weight'],
                                         uom = uom.get())
        if kwargs.get('commit', True):
            instance.put()
        return instance
class SingleUnderlyingForm(UnderlyingForm):
    """UnderlyingForm variant for trades with exactly one underlying:
    the weight field is hidden (it keeps its initial value of 1.0)."""
    def __init__(self,*args,**kwargs):
        super(SingleUnderlyingForm, self).__init__(*args,**kwargs)
        self.fields['weight'].widget = fields.HiddenInput()
        #weight = self.fields['weight']
        #weight.widget = 'HiddenInput'
        #del self.fields['weight']
    #===========================================================================
    # def save(self, *args, **kwargs):
    #    instance = super(SingleUnderlyingForm, self).save(self, *args, **kwargs)
    #    instance.weight = 1.0
    #    if kwargs.get('commit',True):
    #        instance.put()
    #    return instance
    #===========================================================================
class TradeForm(forms.Form):
    """Create/edit form for a Trade entity.

    NOTE(review): the date ``initial`` values below are evaluated once at
    import time, not per request -- confirm whether the callable form
    (``datetime.date.today``) is intended.
    """
    trade_date = fields.DateField(label = 'Trade Date',initial = datetime.date.today(),required = False)
    book = fields.ChoiceField(label='Book',required = False)
    derivative = fields.ChoiceField(label='Class of Derivatives',required = False)
    # Re-render the page when a different derivative class is picked.
    derivative.widget.attrs["onchange"]="changeForm(this)"
    strike = fields.FloatField(label='Strike',required = False)
    buy_sell = fields.ChoiceField(label='Buy or Sell',choices=[('Buy','Buy'),('Sell','Sell')],required = False)
    call_put = fields.ChoiceField(label='Call or Put',choices=[('Call','Call'),('Put','Put')],required = False)
    trade_price = fields.FloatField(label='Premium',required = False)
    expiry_date = fields.DateField(label = 'Expiry Date',initial = datetime.date.today(),required = False)
    def __init__(self,*args, **kwargs):
        # Three construction modes: bound (data), pre-filled from an
        # existing Trade (instance), or empty.
        data = kwargs.get('data',None)
        self.instance = kwargs.get('instance',None)
        if data:
            super(TradeForm, self).__init__(data = data)
        elif self.instance:
            # Map entity attribute names onto the form's field names.
            initial = {'trade_date':self.instance.date,
                       'book':self.instance.book.name,
                       'derivative':self.instance.derivative.name,
                       'strike':self.instance.strike,
                       'buy_sell':self.instance.buy_sell,
                       'call_put':self.instance.call_put,
                       'trade_price':self.instance.trade_price,
                       'expiry_date':self.instance.expiry}
            super(TradeForm, self).__init__(data = initial)
        else:
            super(TradeForm, self).__init__(*args)
    def reference(self):
        # A trade entry is followed by its underlying(s).
        return 'Underlying'
    def set_choices(self):
        # Fill the Book and Derivatives dropdowns from the datastore.
        book = models.Book.all().order('name')
        derivatives = models.Derivatives.all().order('name')
        self['book'].field.choices = [(b.name,b.name) for b in book]
        self['derivative'].field.choices = [(x.name,x.name) for x in derivatives]
    def save(self, commit=True):
        """Create or update a Trade from cleaned_data; returns the entity."""
        # Resolve the selected book and derivative names back to entities.
        book = models.Book.all()
        book.filter("name =", self.cleaned_data['book'])
        derivatives = models.Derivatives.all()
        derivatives.filter("name =", self.cleaned_data['derivative'])
        if self.instance:
            instance = self.instance
            instance.date = self.cleaned_data['trade_date']
            instance.book = book.get()
            instance.derivative = derivatives.get()
            # Fixed: was ``instance.srike`` (typo) -- it set a nonexistent
            # attribute and silently left the stored strike unchanged.
            instance.strike = self.cleaned_data['strike']
            instance.buy_sell = self.cleaned_data['buy_sell']
            instance.call_put = self.cleaned_data['call_put']
            instance.trade_price = self.cleaned_data['trade_price']
            instance.expiry = self.cleaned_data['expiry_date']
        else:
            instance = models.Trade(date = self.cleaned_data['trade_date'],
                                    book = book.get(),
                                    derivative = derivatives.get(),
                                    strike = self.cleaned_data['strike'],
                                    buy_sell = self.cleaned_data['buy_sell'],
                                    call_put = self.cleaned_data['call_put'],
                                    trade_price = self.cleaned_data['trade_price'],
                                    expiry = self.cleaned_data['expiry_date'],
                                    underlying = [])
        if commit:
            instance.put()
        return instance
class ForwardForm(TradeForm):
    """TradeForm variant for plain forwards: no call/put flag, and the
    strike field is relabelled as the forward price."""
    def __init__(self,*args,**kwargs):
        # Fixed: previously called super(TradeForm, self).__init__, which
        # skipped TradeForm.__init__ entirely -- self.instance was never
        # set and the data/instance handling was bypassed.
        super(ForwardForm, self).__init__(*args,**kwargs)
        del self.fields['call_put']
        self.fields['strike'].label = 'Forward Price'
class UnitOfMeasureForm(djangoforms.ModelForm):
    """Auto-generated ModelForm for UnitOfMeasure."""
    class Meta:
        model = models.UnitOfMeasure
    def set_choices(self):
        # No dynamic dropdowns; kept for interface parity with the
        # hand-written forms.
        pass
    def reference(self):
        # No follow-up entry form is chained after this one.
        return None
class BookForm(djangoforms.ModelForm):
    """ModelForm for Book; the self-referencing hierarchy links are
    excluded from the form."""
    class Meta:
        model = models.Book
        exclude = ['parent_book','child_book']
    def set_choices(self):
        # No dynamic dropdowns to populate.
        pass
    def reference(self):
        # No follow-up entry form is chained after this one.
        return None
class DerivativesForm(djangoforms.ModelForm):
    """Auto-generated ModelForm for Derivatives."""
    class Meta:
        model = models.Derivatives
    def set_choices(self):
        # No dynamic dropdowns to populate.
        pass
    def reference(self):
        # No follow-up entry form is chained after this one.
        return None
class CalendarForm(forms.Form):
    """Create/edit form for a Calendar (holiday sets + weekend flag)."""
    name = fields.CharField()
    holiday = fields.MultipleChoiceField(widget=widgets.CheckboxSelectMultiple(),required=False)
    weekend = fields.BooleanField(initial=True,required=False)
    def __init__(self, *args, **kwargs):
        # Three construction modes: bound (data), pre-filled from an
        # existing entity (instance), or empty.
        data = kwargs.get('data',None)
        self.instance = kwargs.get('instance',None)
        if data:
            super(CalendarForm, self).__init__(data=data)
        elif self.instance:
            initial = {'name':self.instance.name,
                       'weekend':self.instance.weekend,
                       'holiday':self.instance.holiday}
            super(CalendarForm, self).__init__(data=initial)
        else:
            super(CalendarForm, self).__init__(*args, **kwargs)
    def set_choices(self):
        # One checkbox per Holiday entity; the value is its datastore key.
        holiday = models.Holiday.all().order('name')
        self['holiday'].field.choices = [(x.key(),x.name) for x in holiday]
    def reference(self):
        # No follow-up entry form is chained after this one.
        pass
    def save(self, commit=True):
        """Create or update a Calendar from cleaned_data; returns the entity."""
        # Re-fetch each selected holiday, validating the submitted keys.
        holiday = [models.Holiday.get(key).key() for key in self.cleaned_data['holiday']]
        if self.instance:
            instance = self.instance
            instance.name = self.cleaned_data['name']
            instance.weekend = (self.cleaned_data['weekend'])
            instance.holiday = holiday
        else:
            instance = models.Calendar(name = self.cleaned_data['name'],
                                       weekend = self.cleaned_data['weekend'],
                                       holiday = holiday)
        if commit:
            instance.put()
        return instance
class ProfileForm(forms.Form):
    """Create/edit form for a Profile (name + granularity).

    NOTE(review): ``shape_factor`` is declared but never populated from or
    written to the model -- confirm whether it is still needed.
    """
    name = fields.CharField()
    granularity = fields.ChoiceField()
    field = (fields.FloatField(label='one'),
             fields.CharField(label='two'))
    shape_factor = fields.MultiValueField(fields=field)
    def __init__(self, *args, **kwargs):
        # Three construction modes: bound (data), pre-filled from an
        # existing entity (instance), or empty.
        data = kwargs.get('data',None)
        self.instance = kwargs.get('instance',None)
        if data:
            super(ProfileForm, self).__init__(data=data)
        elif self.instance:
            # Fixed: the initial dict previously used CalendarForm's keys
            # ('weekend'/'holiday'), which do not exist on Profile and
            # raised AttributeError when editing an existing entity.
            initial = {'name':self.instance.name,
                       'granularity':self.instance.granularity}
            super(ProfileForm, self).__init__(data=initial)
        else:
            super(ProfileForm, self).__init__(*args, **kwargs)
    def set_choices(self):
        # Granularity is a fixed vocabulary, not datastore-driven.
        choices=[('yearly','yearly'),('monthly','monthly'),('hourly','hourly')]
        self['granularity'].field.choices = choices
    def reference(self):
        # No follow-up entry form is chained after this one.
        pass
    def save(self, commit=True):
        """Create or update a Profile from cleaned_data; returns the entity."""
        if self.instance:
            instance = self.instance
            instance.name = self.cleaned_data['name']
            instance.granularity = (self.cleaned_data['granularity'])
        else:
            instance = models.Profile(name = self.cleaned_data['name'],
                                      granularity = self.cleaned_data['granularity'])
        if commit:
            instance.put()
        return instance
class HolidayForm(forms.Form):
    """Create/edit form for a Holiday; each save appends one more date to
    the holiday's date list."""
    name = fields.CharField()
    date = fields.DateTimeField(initial = datetime.date.today())
    def __init__(self, *args, **kwargs):
        # Three construction modes: bound (data), pre-filled from an
        # existing entity (instance), or empty.
        data = kwargs.get('data',None)
        self.instance = kwargs.get('instance',None)
        if data:
            super(HolidayForm, self).__init__(data=data)
        elif self.instance:
            # Show the most recently added date when editing.
            initial = {'name':self.instance.name,
                       'date':self.instance.date[-1]}
            super(HolidayForm, self).__init__(data=initial)
        else:
            super(HolidayForm, self).__init__(*args, **kwargs)
    def set_choices(self):
        # No dropdowns to populate.
        pass
    def reference(self):
        # No follow-up entry form is chained after this one.
        pass
    def save(self, commit=True):
        """Append the submitted date (new entities start a fresh list)."""
        if self.instance:
            instance = self.instance
            instance.name = self.cleaned_data['name']
            instance.date.append(self.cleaned_data['date'])
        else:
            instance = models.Holiday(name = self.cleaned_data['name'],
                                      date = [self.cleaned_data['date']])
        if commit:
            instance.put()
        return instance
class DeliveryPointForm(forms.Form):
    """Create/edit form for a DeliveryPoint tied to a Commodity."""
    name = fields.CharField()
    nickname = fields.CharField()
    commodity = fields.ChoiceField(label='Commodity')
    def __init__(self, *args, **kwargs):
        # Three construction modes: bound (data), pre-filled from an
        # existing entity (instance), or empty.
        data = kwargs.get('data',None)
        self.instance = kwargs.get('instance',None)
        if data:
            super(DeliveryPointForm, self).__init__(data=data)
        elif self.instance:
            initial = {'commodity':self.instance.commodity.name,
                       'name':self.instance.name,
                       'nickname':self.instance.nickname}
            super(DeliveryPointForm, self).__init__(data=initial)
        else:
            super(DeliveryPointForm, self).__init__(*args, **kwargs)
    def set_choices(self):
        # Populate the commodity dropdown from the datastore, alphabetically.
        commodity = models.Commodity.all().order('name')
        self['commodity'].field.choices = [(com.name,com.name) for com in commodity]
    def reference(self):
        # No follow-up entry form is chained after this one.
        pass
    def save(self, commit=True):
        """Create or update a DeliveryPoint from cleaned_data; returns the entity."""
        # Resolve the selected commodity name back to its entity.
        commodity = models.Commodity.all()
        commodity.filter("name =", self.cleaned_data['commodity'])
        if self.instance:
            instance = self.instance
            instance.commodity = commodity.get()
            instance.name = self.cleaned_data['name']
            instance.nickname = self.cleaned_data['nickname']
        else:
            instance = models.DeliveryPoint(commodity = commodity.get(),
                                            name = self.cleaned_data['name'],
                                            nickname = self.cleaned_data['nickname'])
        if commit:
            instance.put()
        return instance
class CubeForm(forms.Form):
    # Axis pickers for the reporting cube. LEFT_KIND/TOP_KIND are
    # module-level choice lists defined elsewhere in this module.
    left_kind = fields.ChoiceField(choices=LEFT_KIND,label = 'Left')
    top_kind = fields.ChoiceField(choices=TOP_KIND,label = 'Top')
class SearchForm(forms.Form):
    """Filter form for trade searches; each dropdown carries a leading
    '---' wildcard entry."""
    commodity = fields.ChoiceField(label = 'Commodity')
    book = fields.ChoiceField(label = 'Book')
    derivative = fields.ChoiceField(label = 'Class of Derivatives')
    def set_choices(self):
        # Each dropdown: the wildcard first, then all entity names in
        # alphabetical order (value and label are both the name).
        commodity_choices = [(None,'---')]
        for com in models.Commodity.all().order('name'):
            commodity_choices.append((com.name,com.name))
        self['commodity'].field.choices = commodity_choices
        book_choices = [(None,'---')]
        for bk in models.Book.all().order('name'):
            book_choices.append((bk.name,bk.name))
        self['book'].field.choices = book_choices
        derivative_choices = [(None,'---')]
        for der in models.Derivatives.all().order('name'):
            derivative_choices.append((der.name,der.name))
        self['derivative'].field.choices = derivative_choices
class MarketForm(forms.Form):
    """Selection form for viewing a market-data snapshot."""
    eod = fields.ChoiceField(label='End-of-Day')
    delivery_point = fields.ChoiceField(label='Delivery Point')
    type = fields.ChoiceField(label='Type')
    def set_choices(self):
        # Delivery points alphabetically; end-of-day dates newest first;
        # the data type is a fixed vocabulary.
        points = models.DeliveryPoint.all().order('name')
        eod_dates = models.EndOfDay.all().order('-date')
        self['delivery_point'].field.choices = [(pt.name,pt.name) for pt in points]
        self['eod'].field.choices = [(day.date,day.date) for day in eod_dates]
        self['type'].field.choices = [('Price','Price'),('Volatility','Volatility'),('Interest Rate','Interest Rate')]
class ImportForm(forms.Form):
    # Single-field upload form used by the price-import page.
    file = forms.FileField()
'''
Created on Nov 30, 2010

@author: t-bone
'''
# Throwaway Python 2 scratch script: demonstrates that a non-empty
# list is truthy.
#from pricing.option import kirk95
#print kirk95('c',[5,4],1,.5,.02,[.35,.35],.85)
x = [1]
if x:
    print True
else:
    print False
'''
Created on Dec 18, 2010
@author: t-bone
'''
from google.appengine.ext import db
from pricing import option
import datetime
class UnitOfMeasure(db.Model):
    # A measurement unit, categorised as energy- or volume-based.
    name = db.StringProperty()
    type = db.StringProperty(choices=set(['Energy','Volume']))
    # Multiplier converting one unit into joules.
    joules_con_factor = db.FloatProperty(verbose_name='Conversion factor to joule')
class Commodity(db.Model):
    # A tradeable commodity with its default unit of measure.
    name = db.StringProperty()
    uom = db.ReferenceProperty(UnitOfMeasure,verbose_name='Unit of Measure',collection_name='commodity_uom')
    def reference(self):
        # Dotted form-chain path -- presumably consumed by the entry
        # views; confirm against web.views.
        return 'trade.underlying'
class Holiday(db.Model):
    # A named holiday set; ``date`` accumulates one datetime per occurrence.
    name = db.StringProperty()
    date = db.ListProperty(datetime.datetime)
class Calendar(db.Model):
    # Business-day calendar: referenced Holiday keys plus a weekend flag.
    name = db.StringProperty()
    holiday = db.ListProperty(db.Key)
    weekend = db.BooleanProperty()
class Profile(db.Model):
    # Delivery shape profile at a given granularity.
    name = db.StringProperty()
    granularity = db.StringProperty()
    shape_factor = db.ListProperty(float)
class Delivery(db.Model):
    # A delivery period with its calendar and shape profile.
    name = db.StringProperty()
    first_date = db.DateProperty()
    last_date = db.DateProperty()
    calendar = db.ReferenceProperty(collection_name='delivery_cal')
    profile = db.ReferenceProperty(collection_name='delivery_profile')
    def reference(self):
        # Dotted form-chain path -- presumably consumed by the entry
        # views; confirm against web.views.
        return 'trade.underlying'
class Underlying(db.Model):
    # One leg of a trade: a commodity at a delivery point, with a weight.
    commodity = db.ReferenceProperty(Commodity,collection_name='underlyings_com')
    delivery_point = db.ReferenceProperty(collection_name='underlyings_dp')
    delivery = db.ReferenceProperty(Delivery,collection_name='underlyings')
    quantity = db.FloatProperty()
    trade = db.ReferenceProperty(collection_name='underlyings_trade')
    weight = db.FloatProperty()
    uom = db.ReferenceProperty(UnitOfMeasure,collection_name='underlyings_uom')
class Book(db.Model):
    # Trading book; the self-references model the book hierarchy.
    name = db.StringProperty()
    parent_book = db.SelfReferenceProperty(collection_name='child_books')
    child_book = db.SelfReferenceProperty(collection_name='parent_books')
    def reference(self):
        # Form-chain name -- presumably consumed by the entry views;
        # confirm against web.views.
        return 'trade'
class Trade(db.Model):
    # A booked derivatives trade (forward or option).
    book = db.ReferenceProperty()
    date = db.DateProperty()
    derivative = db.ReferenceProperty(collection_name='trade_derivative')
    strike = db.FloatProperty()
    buy_sell = db.StringProperty(verbose_name='Buy or Sell',choices=set(["Buy", "Sell"]))
    call_put = db.StringProperty(verbose_name='Call or Put',choices=set(["Call", "Put"]))
    trade_price = db.FloatProperty()
    expiry = db.DateProperty()
    # Keys of the Underlying entities attached to this trade.
    underlying = db.ListProperty(db.Key)
    #uom = db.ReferenceProperty(UnitOfMeasure,collection_name='trades')
    def eval(self,market):
        """Value the trade against a Market snapshot, storing results on self.

        NOTE(review): ``self.underlying`` is a list of db.Key, yet the code
        below dereferences ``self.underlying.delivery.volume`` as if it were
        a single entity -- confirm how this is meant to resolve.
        NOTE(review): the forward branch writes ``self.MTM`` while the
        option branch writes ``self.mtm`` -- confirm which attribute the
        downstream reporting reads.
        """
        # Year fraction on a /360 day-count basis.
        time_to_expiry = (self.expiry - market.eod.date)
        time_to_expiry = float(time_to_expiry.days)/360
        # Sign convention: long = +1, short = -1.
        if self.buy_sell == 'Buy':
            buy_sell = 1
        else:
            buy_sell = -1
        if self.derivative.name == 'Forward':
            self.MTM = (market.price.mid - self.strike) * self.underlying.delivery.volume * buy_sell
        elif self.derivative.name == 'Option':
            # Black-76 price and delta with a zero rate; the commented
            # arguments show the intended market-rate input.
            mtm = option.black76(self.call_put,
                                 market.price.mid,
                                 self.strike,
                                 time_to_expiry,
                                 #market.irate.constant_maturity,
                                 0.0,
                                 market.volatility.mid)
            delta = option.black76_delta(self.call_put,
                                         market.price.mid,
                                         self.strike,
                                         time_to_expiry,
                                         #market.irate.constant_maturity,
                                         0.0,
                                         market.volatility.mid)
            self.mtm = mtm * self.underlying.delivery.volume * buy_sell
            self.delta = delta * self.underlying.delivery.volume * buy_sell
        #=======================================================================
        else:
            # Unknown derivative class: stash a diagnostic string in MTM.
            self.MTM = self.derivative.name + ' is an unknown valuation model.'
class DeliveryPoint(db.Model):
    """A physical/market delivery location for a commodity."""
    # Fixed: the ``db.Model`` class was previously passed as the first
    # positional argument (verbose_name) to both StringPropertys --
    # clearly accidental; the default verbose name is used instead.
    name = db.StringProperty()
    nickname = db.StringProperty()
    commodity = db.ReferenceProperty(Commodity)
class Market(db.Model):
    # A market-data snapshot: price and volatility for one delivery point
    # and delivery period on a given end-of-day.
    eod = db.ReferenceProperty(collection_name='market_eod')
    price = db.ReferenceProperty(collection_name='market_prices')
    volatility = db.ReferenceProperty(collection_name='market_volatilities')
    delivery_point = db.ReferenceProperty(DeliveryPoint,collection_name='markets')
    delivery = db.ReferenceProperty(Delivery,collection_name='market_delivery')
class Price(db.Model):
    # Mid/bid/offer price quote.
    mid = db.FloatProperty()
    bid = db.FloatProperty()
    offer = db.FloatProperty()
class Volatility(db.Model):
    # Volatility quote with a realized (historical) level and moneyness.
    historical = db.FloatProperty()
    mid = db.FloatProperty()
    bid = db.FloatProperty()
    offer = db.FloatProperty()
    moneyness = db.FloatProperty()
class Analytic(db.Model):
    # Stored valuation result for a trade on a given end-of-day.
    date = db.ReferenceProperty(collection_name='analytics_eod')
    trade = db.ReferenceProperty(Trade,collection_name='analytics')
    MTM = db.FloatProperty()
class EndOfDay(db.Model):
    # A valuation-date marker.
    date = db.DateProperty()
class Derivatives(db.Model):
    # A derivative class (e.g. Forward, Option) with its pricing algorithm.
    name = db.StringProperty()
    # NOTE: keeps the historical 'algorythm' spelling -- renaming the
    # property would orphan already-stored data.
    algorythm = db.StringProperty()
class Exchanges(db.Model):
    # A listing exchange.
    name = db.StringProperty()
class Products(db.Model):
    """An exchange-listed product definition (contract code, lot size, etc.)."""
    # Fixed: ``name`` was assigned the db.StringProperty *class* instead
    # of an instance (missing parentheses), so it was never a real
    # datastore property and could not be stored or queried.
    name = db.StringProperty()
    exchange = db.ReferenceProperty(collection_name='products_ex')
    code = db.StringProperty()
    size = db.FloatProperty()
    uom = db.ReferenceProperty(UnitOfMeasure,collection_name='products_uom')
    currency = db.ReferenceProperty(collection_name='products_currency')
    payment_date = db.ReferenceProperty(collection_name='products_payment')
    settlement = db.StringProperty()
#!/usr/bin/env python
# Standard Django management entry point (pre-1.4 execute_manager style).
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
'''
Created on Nov 20, 2010

@author: t-bone
'''
# WSGI/CGI bootstrap that runs the bundled Django project on Google App
# Engine. Module-level statement order matters: the stale Django modules
# must be purged and DJANGO_SETTINGS_MODULE set before Django is imported.
import logging, os, sys

# Google App Engine imports.
from google.appengine.ext.webapp import util

# Remove the standard version of Django.
for k in [k for k in sys.modules if k.startswith('django')]:
    del sys.modules[k]

# Force sys.path to have our own directory first, in case we want to import
# from it.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))

# Force Django to reload its settings.
#from django.conf import settings
#settings._target = None

# Must set this env var before importing any part of Django
# 'project' is the name of the project created with django-admin.py
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

import django.core.handlers.wsgi
import django.core.signals
import django.db
import django.dispatch.dispatcher

def log_exception(*args, **kwds):
    # Signal receiver: record any unhandled request exception with traceback.
    logging.exception('Exception in request:')

# Log errors.
# NOTE(review): calling Signal.connect/disconnect on the class with the
# signal object as first argument is the old (pre-1.0) dispatcher calling
# convention -- confirm it matches the bundled Django version.
django.dispatch.dispatcher.Signal.connect(
    django.core.signals.got_request_exception, log_exception)

# Unregister the rollback event handler.
django.dispatch.dispatcher.Signal.disconnect(
    django.core.signals.got_request_exception,
    django.db._rollback_on_exception)

def main():
    # Create a Django application for WSGI.
    application = django.core.handlers.wsgi.WSGIHandler()
    # Run the WSGI CGI handler with that application.
    util.run_wsgi_app(application)

if __name__ == '__main__':
    main()
| Python |
'''
Created on Nov 29, 2010
@author: t-bone
'''
import math
def pdf(x, mu=0, sigma=1):
    """Normal probability density at x for N(mu, sigma**2)."""
    z = (x - mu) / abs(sigma)
    coeff = 1 / (math.sqrt(2 * math.pi) * abs(sigma))
    return coeff * math.exp(-z * z / 2)
def erf(x):
    """Error function via Winitzki's closed-form approximation
    (absolute error on the order of a few 1e-3)."""
    a = 0.140012
    if x == 0:
        return 0
    x2 = x * x
    inner = 1 - math.exp(-x2 * (4 / math.pi + a * x2) / (1 + a * x2))
    sign = x / abs(x)
    return sign * math.sqrt(inner)
def erfc(x):
    """Complementary error function: 1 - erf(x)."""
    return 1.0 - erf(x)
def cdf(x, mu=0.0, sigma=1.0):
    """Normal CDF at x for N(mu, sigma**2), capped at 1.0.

    The cap guards against the erf approximation overshooting slightly
    in the far tail.
    """
    z = x - mu
    p = 0.5 * (erfc(-z / (sigma * math.sqrt(2.0))))
    return min(p, 1.0)
| Python |
'''
Created on Mar 7, 2011
@author: t-bone
'''
import math
def percentile(N, percent, key=lambda x:x):
    """
    Find the percentile of a sorted list of values by linear interpolation.

    @parameter N - is a list of values. Note N MUST BE already sorted.
    @parameter percent - a float value from 0.0 to 1.0.
    @parameter key - optional key function to compute value from each element of N.
    @return - the percentile of the values, or None for an empty list.
    """
    if not N:
        return None
    rank = (len(N) - 1) * percent
    lo = math.floor(rank)
    hi = math.ceil(rank)
    if lo == hi:
        # Exact index: no interpolation needed.
        return key(N[int(rank)])
    # Linearly interpolate between the two bracketing elements.
    below = key(N[int(lo)]) * (hi - rank)
    above = key(N[int(hi)]) * (rank - lo)
    return below + above
from django.conf.urls.defaults import *
from web.views import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# URL routing (old patterns() style). Order matters: specific Trade/
# Holiday routes must precede the generic entry patterns, and the empty
# pattern is a catch-all.
urlpatterns = patterns('',
    # Generic delete endpoint: model name + numeric id.
    (r'^entry/(\w*)/delete/(\d*)', delete),
    #(r'^entry/(\w*):(\d*)/(\w*):(\d*)', entry),
    #(r'^entry/(\w*):(\d*)/(\w*)', subentry),
    # Trade-scoped underlying deletion.
    (r'^entry/Trade/(\w*)/(\w*):(\d*)/delete:(\d*)', underlying_delete),
    # Dated list-item deletion (year-month-day suffix).
    (r'^entry/(\w*):(\d*)/(\w*)/delete:(\d*)-(\d*)-(\d*)', list_delete),
    (r'^entry/Trade/(\w*)/(\w*):(\d*)', trade_entry),
    (r'^entry/Trade:(?P<trade_id>\d*)', trade_entry),
    (r'^entry/Trade/(?P<derivative>\w*):(?P<trade_id>\d*)', trade_entry),
    # NOTE(review): exact duplicate of the Trade:(?P<trade_id>...) pattern
    # two entries above -- it can never match; confirm and remove.
    (r'^entry/Trade:(?P<trade_id>\d*)', trade_entry),
    (r'^entry/Holiday:(?P<obj_id>\d*)', entry_list),
    #(r'^entry/Option:(\d*)', option_entry),
    # Generic entity edit and list pages.
    (r'^entry/(\w*):(\d*)', entry),
    (r'^entry/(\w*)', list),
    (r'^import/prices/',import_prices),
    (r'^import',imports),
    (r'^script',script),
    (r'^cube',cube),
    (r'^valuation:(\d*)',valuation),
    (r'^valuation',valuation),
    (r'^market',market),
    # Catch-all: welcome page.
    (r'', welcome),
    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
    # (r'^(?i)trade', trade_entry),
)
| Python |
'''
Created on Nov 29, 2010
@author: t-bone
'''
import math
from stats.normal import cdf
# Black Merton Scholes Function
def bms(call_put,S,X,T,r,v,q):
    """Black-Merton-Scholes price of a European option.

    call_put -- 'c' prices a call; anything else prices a put
    S spot, X strike, T years to expiry, r risk-free rate,
    v volatility, q continuous yield (q == r reduces to Black-76).
    """
    # Fixed: the drift must be (r - q); the yield was previously dropped,
    # which in particular gave black76 (q == r) a spurious r*T drift.
    d1 = (math.log(S/X)+(r-q+v*v/2.)*T)/(v*math.sqrt(T))
    d2 = d1-v*math.sqrt(T)
    if call_put=='c':
        return S*math.exp(-q*T)*cdf(d1)-X*math.exp(-r*T)*cdf(d2)
    else:
        # Fixed: the put leg previously omitted exp(-q*T) on the spot
        # term, breaking put-call parity whenever q != 0.
        return X*math.exp(-r*T)*cdf(-d2)-S*math.exp(-q*T)*cdf(-d1)
def black76(call_put,S,X,T,r,v):
    """Black-76 futures-option price: BMS with the yield set equal to r."""
    return bms(call_put,S,X,T,r,v,r)
def bms_delta(call_put,S,X,T,r,v,q):
    """Black-Merton-Scholes delta.

    Fixed: the drift now uses (r - q), the exp(-q*T) yield discount is
    applied to N(d1), and the put delta carries its negative sign
    (previously the put returned a positive cdf(-d1)).
    """
    d1 = (math.log(S/X)+(r-q+v*v/2.0)*T)/(v*math.sqrt(T))
    if call_put=='c':
        return math.exp(-q*T)*cdf(d1)
    else:
        return -math.exp(-q*T)*cdf(-d1)
def black76_delta(call_put,S,X,T,r,v):
    """Black-76 delta.

    Fixed: the exp(-r*T) discount was previously applied *inside* d1
    before taking cdf; it must multiply the cdf value. The put delta is
    negative (previously returned as a positive cdf(-d1)).
    """
    d1 = (math.log(S/X)+(v*v/2.0)*T)/(v*math.sqrt(T))
    if call_put=='c':
        return math.exp(-r*T)*cdf(d1)
    else:
        return -math.exp(-r*T)*cdf(-d1)
def kirk95(call_put,F,K,T,r,vol,corr):
    """Kirk (1995) approximation for a European spread option on two
    futures with payoff on F[0] - F[1] - K.

    vol -- pair of volatilities; corr -- their correlation.
    """
    # Weight of the second leg in the effective volatility of
    # F[0] / (F[1] + K).
    w = F[1]/(F[1]+K)
    # Fixed: the last variance term read ``vol[1]*2`` (times two) instead
    # of ``vol[1]**2`` (squared), corrupting the effective volatility.
    v = math.sqrt( vol[0]**2
                   -2*w*corr*vol[0]*vol[1]
                   +w**2*vol[1]**2
                 )
    d1 = (math.log(F[0]/(F[1]+K))+(v*v/2.0)*T)/(v*math.sqrt(T))
    d2 = d1-v*math.sqrt(T)
    if call_put=='c':
        return math.exp(-r*T)*(F[0]*cdf(d1)-(F[1]+K)*cdf(d2))
    else:
        return math.exp(-r*T)*((F[1]+K)*cdf(d2)-F[0]*cdf(d1))
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.