code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all custom channels in an ad client.
To get ad clients, run get_all_ad_clients.py.
Tags: customchannels.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
def main(argv):
  """Lists all custom channels for a hard-coded ad client, page by page.

  Args:
    argv: list of str, raw command-line arguments.
  """
  sample_utils.process_flags(argv)
  # Authenticate and construct service.
  service = sample_utils.initialize_service()
  ad_client_id = 'INSERT_AD_CLIENT_ID_HERE'
  try:
    # Retrieve custom channel list in pages and display data as we receive it.
    request = service.customchannels().list(adClientId=ad_client_id,
                                            maxResults=MAX_PAGE_SIZE)
    # list_next() returns None once there are no further pages.
    while request is not None:
      result = request.execute()
      # 'items' is absent from the response when no channels exist, so
      # default to an empty list instead of raising KeyError.
      custom_channels = result.get('items', [])
      for custom_channel in custom_channels:
        print ('Custom channel with code "%s" and name "%s" was found. '
               % (custom_channel['code'], custom_channel['name']))
      request = service.customchannels().list_next(request, result)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')

if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example retrieves a report for the specified ad client.
Please only use pagination if your application requires it due to memory or
storage constraints.
If you need to retrieve more than 5000 rows, please check generate_report.py, as
due to current limitations you will not be able to use paging for large reports.
To get ad clients, run get_all_ad_clients.py.
Tags: reports.generate
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
# This is the maximum number of obtainable rows for paged reports.
ROW_LIMIT = 5000
def main(argv):
  """Generates a paged AdSense report for a hard-coded ad client.

  Fetches the report in pages of at most MAX_PAGE_SIZE rows, printing the
  column headers once and then every row, and stops after ROW_LIMIT rows
  (a current API restriction for paged reports) or when all matched rows
  have been retrieved.

  Args:
    argv: list of str, raw command-line arguments.
  """
  sample_utils.process_flags(argv)
  # Authenticate and construct service.
  service = sample_utils.initialize_service()
  ad_client_id = 'INSERT_AD_CLIENT_ID_HERE'
  try:
    # Retrieve report in pages and display data as we receive it.
    start_index = 0
    rows_to_obtain = MAX_PAGE_SIZE
    while True:
      result = service.reports().generate(
          startDate='2011-01-01', endDate='2011-08-31',
          filter=['AD_CLIENT_ID==' + ad_client_id],
          metric=['PAGE_VIEWS', 'AD_REQUESTS', 'AD_REQUESTS_COVERAGE',
                  'CLICKS', 'AD_REQUESTS_CTR', 'COST_PER_CLICK',
                  'AD_REQUESTS_RPM', 'EARNINGS'],
          dimension=['DATE'],
          sort=['+DATE'],
          startIndex=start_index,
          maxResults=rows_to_obtain).execute()
      # If this is the first page, display the headers.
      if start_index == 0:
        for header in result['headers']:
          print '%25s' % header['name'],
        print
      # Display results for this page.
      for row in result['rows']:
        for column in row:
          print '%25s' % column,
        print
      start_index += len(result['rows'])
      # Check to see if we're going to go above the limit and get as many
      # results as we can.
      if start_index + MAX_PAGE_SIZE > ROW_LIMIT:
        # Shrink the final page so we never request past ROW_LIMIT.
        rows_to_obtain = ROW_LIMIT - start_index
        # Nothing left under the cap; stop paging.
        if rows_to_obtain <= 0:
          break
      # All matched rows have been retrieved; stop paging.
      if (start_index >= int(result['totalMatchedRows'])):
        break
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')

if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all URL channels in an ad client.
To get ad clients, run get_all_ad_clients.py.
Tags: urlchannels.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
def main(argv):
  """Lists all URL channels for a hard-coded ad client, page by page.

  Args:
    argv: list of str, raw command-line arguments.
  """
  sample_utils.process_flags(argv)
  # Authenticate and construct service.
  service = sample_utils.initialize_service()
  ad_client_id = 'INSERT_AD_CLIENT_ID_HERE'
  try:
    # Retrieve URL channel list in pages and display data as we receive it.
    request = service.urlchannels().list(adClientId=ad_client_id,
                                         maxResults=MAX_PAGE_SIZE)
    # list_next() returns None once there are no further pages.
    while request is not None:
      result = request.execute()
      # 'items' is absent from the response when no URL channels exist.
      # (Also removed a stray copy-paste assignment to custom_channels.)
      url_channels = result.get('items', [])
      for url_channel in url_channels:
        print ('URL channel with URL pattern "%s" was found.'
               % url_channel['urlPattern'])
      # BUG FIX: the next page must come from urlchannels(), not the
      # customchannels() collection the original copy-pasted from.
      request = service.urlchannels().list_next(request, result)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')

if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all ad clients for an account.
Tags: adclients.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
def main(argv):
  """Prints every ad client available to the authorized account.

  Args:
    argv: list of str, raw command-line arguments.
  """
  sample_utils.process_flags(argv)
  # Authenticate and construct service.
  service = sample_utils.initialize_service()
  try:
    # Page through the ad client collection, printing entries as they arrive;
    # list_next() yields None once the last page has been consumed.
    request = service.adclients().list(maxResults=MAX_PAGE_SIZE)
    while request is not None:
      result = request.execute()
      for ad_client in result['items']:
        print ('Ad client for product "%s" with ID "%s" was found. '
               % (ad_client['productCode'], ad_client['id']))
        reporting_label = 'Yes' if ad_client['supportsReporting'] else 'No'
        print ('\tSupports reporting: %s' % reporting_label)
      request = service.adclients().list_next(request, result)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')

if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auxiliary file for AdSense Management API code samples.
Handles various tasks to do with logging, authentication and initialization.
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import logging
import os
import sys
from apiclient.discovery import build
import gflags
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
FLAGS = gflags.FLAGS

# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'

# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)

# Set up a Flow object to be used if we need to authenticate.
# NOTE: this call runs at import time, so importing this module with a
# missing/invalid client_secrets.json fails immediately (showing the
# message above rather than an obscure error later).
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
    scope='https://www.googleapis.com/auth/adsense.readonly',
    message=MISSING_CLIENT_SECRETS_MESSAGE)

# The gflags module makes defining command-line options easy for applications.
# Run this program with the '--help' argument to see all the flags that it
# understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
                   ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   'Set the level of logging detail.')
def process_flags(argv):
"""Uses the command-line flags to set the logging level."""
# Let the gflags module process the command-line arguments.
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag.
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
def prepare_credentials():
  """Handles auth. Reuses credentials if available or runs the auth flow.

  Returns:
    An oauth2client credentials object; freshly obtained via the OAuth flow
    when no valid stored credentials exist.
  """
  # If the credentials don't exist or are invalid run through the native client
  # flow. The Storage object will ensure that if successful the good
  # Credentials will get written back to a file.
  storage = Storage('adsense.dat')
  credentials = storage.get()
  if credentials is None or credentials.invalid:
    credentials = run(FLOW, storage)
  return credentials
def retrieve_service(http):
  """Retrieves an AdSense Management API service via the discovery service.

  Args:
    http: httplib2.Http object, already authorized if auth is required.

  Returns:
    The constructed AdSense Management API (v1) service object.
  """
  # Build and hand back the service object straight from discovery.
  return build("adsense", "v1", http=http)
def initialize_service():
  """Builds instance of service from discovery data and does auth.

  Returns:
    An authorized AdSense Management API service object.
  """
  # Authorize a fresh HTTP client with stored (or newly obtained)
  # credentials, then build the service on top of it.
  authorized_http = prepare_credentials().authorize(httplib2.Http())
  return retrieve_service(authorized_http)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all ad units in an ad client.
To get ad clients, run get_all_ad_clients.py.
Tags: adunits.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
def main(argv):
  """Lists all ad units for a hard-coded ad client, page by page.

  Args:
    argv: list of str, raw command-line arguments.
  """
  sample_utils.process_flags(argv)
  # Authenticate and construct service.
  service = sample_utils.initialize_service()
  ad_client_id = 'INSERT_AD_CLIENT_ID_HERE'
  try:
    # Retrieve ad unit list in pages and display data as we receive it.
    request = service.adunits().list(adClientId=ad_client_id,
                                     maxResults=MAX_PAGE_SIZE)
    # list_next() returns None once there are no further pages.
    while request is not None:
      result = request.execute()
      # 'items' is absent from the response when no ad units exist, so
      # default to an empty list instead of raising KeyError.
      ad_units = result.get('items', [])
      for ad_unit in ad_units:
        print ('Ad unit with code "%s", name "%s" and status "%s" was found. ' %
               (ad_unit['code'], ad_unit['name'], ad_unit['status']))
      request = service.adunits().list_next(request, result)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')

if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line example for Moderator.
Command-line application that exercises the Google Moderator API.
Usage:
$ python moderator.py
You can also get help on all the command-line flags the program understands
by running:
$ python moderator.py --help
To get detailed log output run:
$ python moderator.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import httplib2
import logging
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
FLAGS = gflags.FLAGS

# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the API Access tab on
# the Google APIs Console <http://code.google.com/apis/console>. When
# creating credentials for this application be sure to choose an Application
# type of "Installed application".
# NOTE(review): the client_secret below is checked into source; fine for a
# published sample, but real applications should keep secrets out of code.
FLOW = OAuth2WebServerFlow(
    client_id='433807057907.apps.googleusercontent.com',
    client_secret='jigtZpMApkRxncxikFpR+SFg',
    scope='https://www.googleapis.com/auth/moderator',
    user_agent='moderator-cmdline-sample/1.0')

# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
                   ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   'Set the level of logging detail.')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('moderator.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("moderator", "v1", http=http)
try:
# Create a new Moderator series.
series_body = {
"description": "Share and rank tips for eating healthy and cheap!",
"name": "Eating Healthy & Cheap",
"videoSubmissionAllowed": False
}
series = service.series().insert(body=series_body).execute()
print "Created a new series"
# Create a new Moderator topic in that series.
topic_body = {
"description": "Share your ideas on eating healthy!",
"name": "Ideas",
"presenter": "liz"
}
topic = service.topics().insert(seriesId=series['id']['seriesId'],
body=topic_body).execute()
print "Created a new topic"
# Create a new Submission in that topic.
submission_body = {
"attachmentUrl": "http://www.youtube.com/watch?v=1a1wyc5Xxpg",
"attribution": {
"displayName": "Bashan",
"location": "Bainbridge Island, WA"
},
"text": "Charlie Ayers @ Google"
}
submission = service.submissions().insert(seriesId=topic['id']['seriesId'],
topicId=topic['id']['topicId'], body=submission_body).execute()
print "Inserted a new submisson on the topic"
# Vote on that newly added Submission.
vote_body = {
"vote": "PLUS"
}
service.votes().insert(seriesId=topic['id']['seriesId'],
submissionId=submission['id']['submissionId'],
body=vote_body)
print "Voted on the submission"
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
# OAuth 2.0 flow pointed at Dailymotion's (non-Google) OAuth endpoints;
# the explicit auth_uri/token_uri override the Google defaults.
# NOTE(review): client_secret is checked into source; acceptable for a
# sample, but real applications should keep secrets out of version control.
FLOW = OAuth2WebServerFlow(
    client_id='2ad565600216d25d9cde',
    client_secret='03b56df2949a520be6049ff98b89813f17b467dc',
    scope='read',
    user_agent='oauth2client-sample/1.0',
    auth_uri='https://api.dailymotion.com/oauth/authorize',
    token_uri='https://api.dailymotion.com/oauth/token'
    )
class Credentials(db.Model):
  # Datastore model holding one user's OAuth 2.0 credentials; entities are
  # keyed by the App Engine user id via StorageByKeyName.
  credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
  """Serves '/': shows the user's Dailymotion data, starting auth if needed."""

  @login_required
  def get(self):
    user = users.get_current_user()
    stored = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    if stored is None or stored.invalid == True:
      # No usable credentials yet: kick off the OAuth dance, stashing the
      # flow in memcache so the callback handler can pick it up.
      callback = self.request.relative_url('/auth_return')
      authorize_url = FLOW.step1_get_authorize_url(callback)
      memcache.set(user.user_id(), pickle.dumps(FLOW))
      self.redirect(authorize_url)
    else:
      # Credentials look good: call the Dailymotion API with an authorized
      # client and render the response in the welcome template.
      http = stored.authorize(httplib2.Http())
      resp, content = http.request('https://api.dailymotion.com/me')
      path = os.path.join(os.path.dirname(__file__), 'welcome.html')
      variables = {
          'content': content,
          'logout': users.create_logout_url('/'),
      }
      self.response.out.write(template.render(path, variables))
class OAuthHandler(webapp.RequestHandler):
  """Serves '/auth_return': completes the OAuth 2.0 authorization flow."""

  @login_required
  def get(self):
    user = users.get_current_user()
    # BUG FIX: memcache entries can be evicted at any time; the original
    # passed memcache.get()'s result straight to pickle.loads, which raises
    # when the entry is missing. Check before unpickling.
    pickled_flow = memcache.get(user.user_id())
    flow = pickle.loads(pickled_flow) if pickled_flow else None
    if flow:
      # Exchange the authorization code for credentials and persist them
      # under this user's id, then send the user back to the main page.
      credentials = flow.step2_exchange(self.request.params)
      StorageByKeyName(
          Credentials, user.user_id(), 'credentials').put(credentials)
      self.redirect("/")
    else:
      # No flow in progress (evicted or direct hit): nothing to do.
      pass
def main():
  """Configures the WSGI routes and starts the application."""
  routes = [
      ('/', MainHandler),
      ('/auth_return', OAuthHandler),
  ]
  util.run_wsgi_app(webapp.WSGIApplication(routes, debug=True))

if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reference command-line example for Google Analytics Management API v3.
This application demonstrates how to use the python client library to access
all the pieces of data returned by the Google Analytics Management API v3.
The application manages autorization by saving an OAuth2.0 token in a local
file and reusing the token for subsequent requests. It then traverses the
Google Analytics Management hiearchy. It first retrieves and prints all the
authorized user's accounts, next it prints all the web properties for the
first account, then all the profiles for the first web property and finally
all the goals for the first profile. The sample then prints all the
user's advanced segments.
To read an indepth discussion on how this file works, check out the Management
API Python Getting Started guide here:
http://code.google.com/apis/analytics/docs/mgmt/v3/mgmtPython.html
Usage:
Before you begin, you should register your application as an installed
application to get your own Project / OAUth2 Client ID / Secret:
https://code.google.com/apis/console
Learn more about registering your Analytics Application here:
http://code.google.com/apis/analytics/docs/mgmt/v3/mgmtPython.html#authorize
$ python analytics.py
Also you can also get help on all the command-line flags the program
understands by running:
$ python analytics.py --help
"""
__author__ = 'api.nickm@ (Nick Mihailovski)'
import sys
from apiclient.discovery import build
from apiclient.errors import HttpError
import gflags
import httplib2
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run
FLAGS = gflags.FLAGS

# Remember to get your own client_id / client_secret in the
# Google API developer console: https://code.google.com/apis/console
FLOW = OAuth2WebServerFlow(
    client_id='INSERT_YOUR_CLIENT_ID_HERE',
    client_secret='INSERT_YOUR_CLIENT_SECRET_HERE',
    scope='https://www.googleapis.com/auth/analytics.readonly',
    user_agent='analytics-api-v3-awesomeness')

# File used by oauth2client.file.Storage to cache the OAuth token
# between runs.
TOKEN_FILE_NAME = 'analytics.dat'
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Manage re-using tokens.
storage = Storage(TOKEN_FILE_NAME)
credentials = storage.get()
if not credentials or credentials.invalid:
# Get a new token.
credentials = run(FLOW, storage)
# Build an authorized service object.
http = httplib2.Http()
http = credentials.authorize(http)
service = build('analytics', 'v3', http=http)
# Traverse the Management hiearchy and print results.
try:
traverse_hiearchy(service)
except HttpError, error:
print ('Arg, there was an API error : %s %s : %s' %
(error.resp.status, error.resp.reason, error._get_reason()))
except AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run'
'the application to re-authorize')
def traverse_hiearchy(service):
  """Traverses the management API hiearchy and prints results.

  Retrieves and prints the authorized user's accounts, then the web
  properties of the first account, the profiles of the first web property,
  and the goals of the first profile, followed by the user's advanced
  segments.

  Args:
    service: The service object built by the Google API Python client library.

  Raises:
    HttpError: If an error occured when accessing the API.
    AccessTokenRefreshError: If the current token was invalid.
  """
  view = View()
  accounts = service.management().accounts().list().execute()
  view.print_accounts(accounts)
  account_items = accounts.get('items')
  if account_items:
    account_id = account_items[0].get('id')
    webproperties = service.management().webproperties().list(
        accountId=account_id).execute()
    view.print_webproperties(webproperties)
    webproperty_items = webproperties.get('items')
    if webproperty_items:
      webproperty_id = webproperty_items[0].get('id')
      profiles = service.management().profiles().list(
          accountId=account_id,
          webPropertyId=webproperty_id).execute()
      view.print_profiles(profiles)
      profile_items = profiles.get('items')
      if profile_items:
        goals = service.management().goals().list(
            accountId=account_id,
            webPropertyId=webproperty_id,
            profileId=profile_items[0].get('id')).execute()
        view.print_goals(goals)
  view.print_segments(service.management().segments().list().execute())
class View(object):
  """Utility class to print various Management API collections.

  Each print_* method accepts the parsed JSON response for one Management
  API collection and dumps its fields to stdout in a human-readable form.
  """

  def print_accounts(self, accounts_list):
    """Prints all the account info in the Accounts Collection.

    Args:
      accounts_list: The parsed Accounts collection response.
    """
    print '------ Account Collection -------'
    self.print_pagination_info(accounts_list)
    print
    for account in accounts_list.get('items'):
      print 'Account ID = %s' % account.get('id')
      print 'Kind = %s' % account.get('kind')
      print 'Self Link = %s' % account.get('selfLink')
      print 'Account Name = %s' % account.get('name')
      print 'Created = %s' % account.get('created')
      print 'Updated = %s' % account.get('updated')
      child_link = account.get('childLink')
      print 'Child link href = %s' % child_link.get('href')
      print 'Child link type = %s' % child_link.get('type')
      print

  def print_webproperties(self, webproperties_list):
    """Prints all the web property info in the WebProperties Collection.

    Args:
      webproperties_list: The parsed Webproperties collection response.
    """
    print '------ Web Properties Collection -------'
    self.print_pagination_info(webproperties_list)
    print
    for webproperty in webproperties_list.get('items'):
      print 'Kind = %s' % webproperty.get('kind')
      print 'Account ID = %s' % webproperty.get('accountId')
      print 'Web Property ID = %s' % webproperty.get('id')
      print ('Internal Web Property ID = %s' %
             webproperty.get('internalWebPropertyId'))
      print 'Website URL = %s' % webproperty.get('websiteUrl')
      print 'Created = %s' % webproperty.get('created')
      print 'Updated = %s' % webproperty.get('updated')
      print 'Self Link = %s' % webproperty.get('selfLink')
      parent_link = webproperty.get('parentLink')
      print 'Parent link href = %s' % parent_link.get('href')
      print 'Parent link type = %s' % parent_link.get('type')
      child_link = webproperty.get('childLink')
      print 'Child link href = %s' % child_link.get('href')
      print 'Child link type = %s' % child_link.get('type')
      print

  def print_profiles(self, profiles_list):
    """Prints all the profile info in the Profiles Collection.

    Args:
      profiles_list: The parsed Profiles collection response.
    """
    print '------ Profiles Collection -------'
    self.print_pagination_info(profiles_list)
    print
    for profile in profiles_list.get('items'):
      print 'Kind = %s' % profile.get('kind')
      print 'Account ID = %s' % profile.get('accountId')
      print 'Web Property ID = %s' % profile.get('webPropertyId')
      print ('Internal Web Property ID = %s' %
             profile.get('internalWebPropertyId'))
      print 'Profile ID = %s' % profile.get('id')
      print 'Profile Name = %s' % profile.get('name')
      print 'Currency = %s' % profile.get('currency')
      print 'Timezone = %s' % profile.get('timezone')
      print 'Default Page = %s' % profile.get('defaultPage')
      print ('Exclude Query Parameters = %s' %
             profile.get('excludeQueryParameters'))
      print ('Site Search Category Parameters = %s' %
             profile.get('siteSearchCategoryParameters'))
      print ('Site Search Query Parameters = %s' %
             profile.get('siteSearchQueryParameters'))
      print 'Created = %s' % profile.get('created')
      print 'Updated = %s' % profile.get('updated')
      print 'Self Link = %s' % profile.get('selfLink')
      parent_link = profile.get('parentLink')
      print 'Parent link href = %s' % parent_link.get('href')
      print 'Parent link type = %s' % parent_link.get('type')
      child_link = profile.get('childLink')
      print 'Child link href = %s' % child_link.get('href')
      print 'Child link type = %s' % child_link.get('type')
      print

  def print_goals(self, goals_list):
    """Prints all the goal info in the Goals Collection.

    Args:
      goals_list: The parsed Goals collection response.
    """
    print '------ Goals Collection -------'
    self.print_pagination_info(goals_list)
    print
    for goal in goals_list.get('items'):
      print 'Goal ID = %s' % goal.get('id')
      print 'Kind = %s' % goal.get('kind')
      print 'Self Link = %s' % goal.get('selfLink')
      print 'Account ID = %s' % goal.get('accountId')
      print 'Web Property ID = %s' % goal.get('webPropertyId')
      print ('Internal Web Property ID = %s' %
             goal.get('internalWebPropertyId'))
      print 'Profile ID = %s' % goal.get('profileId')
      print 'Goal Name = %s' % goal.get('name')
      print 'Goal Value = %s' % goal.get('value')
      print 'Goal Active = %s' % goal.get('active')
      print 'Goal Type = %s' % goal.get('type')
      print 'Created = %s' % goal.get('created')
      print 'Updated = %s' % goal.get('updated')
      parent_link = goal.get('parentLink')
      print 'Parent link href = %s' % parent_link.get('href')
      print 'Parent link type = %s' % parent_link.get('type')
      # Print the goal details depending on the type of goal.
      if goal.get('urlDestinationDetails'):
        self.print_url_destination_goal_details(
            goal.get('urlDestinationDetails'))
      elif goal.get('visitTimeOnSiteDetails'):
        self.print_visit_time_on_site_goal_details(
            goal.get('visitTimeOnSiteDetails'))
      elif goal.get('visitNumPagesDetails'):
        self.print_visit_num_pages_goal_details(
            goal.get('visitNumPagesDetails'))
      elif goal.get('eventDetails'):
        self.print_event_goal_details(goal.get('eventDetails'))
      print

  def print_url_destination_goal_details(self, goal_details):
    """Prints all the URL Destination goal type info.

    Args:
      goal_details: The urlDestinationDetails portion of a goal.
    """
    print '------ Url Destination Goal -------'
    print 'Goal URL = %s' % goal_details.get('url')
    print 'Case Sensitive = %s' % goal_details.get('caseSensitive')
    print 'Match Type = %s' % goal_details.get('matchType')
    print 'First Step Required = %s' % goal_details.get('firstStepRequired')
    print '------ Url Destination Goal Steps -------'
    if goal_details.get('steps'):
      for goal_step in goal_details.get('steps'):
        print 'Step Number = %s' % goal_step.get('number')
        print 'Step Name = %s' % goal_step.get('name')
        print 'Step URL = %s' % goal_step.get('url')
    else:
      print 'No Steps Configured'

  def print_visit_time_on_site_goal_details(self, goal_details):
    """Prints all the Visit Time On Site goal type info.

    Args:
      goal_details: The visitTimeOnSiteDetails portion of a goal.
    """
    print '------ Visit Time On Site Goal -------'
    print 'Comparison Type = %s' % goal_details.get('comparisonType')
    print 'comparison Value = %s' % goal_details.get('comparisonValue')

  def print_visit_num_pages_goal_details(self, goal_details):
    """Prints all the Visit Num Pages goal type info.

    Args:
      goal_details: The visitNumPagesDetails portion of a goal.
    """
    print '------ Visit Num Pages Goal -------'
    print 'Comparison Type = %s' % goal_details.get('comparisonType')
    print 'comparison Value = %s' % goal_details.get('comparisonValue')

  def print_event_goal_details(self, goal_details):
    """Prints all the Event goal type info.

    Args:
      goal_details: The eventDetails portion of a goal.
    """
    print '------ Event Goal -------'
    print 'Use Event Value = %s' % goal_details.get('useEventValue')
    for event_condition in goal_details.get('eventConditions'):
      event_type = event_condition.get('type')
      print 'Type = %s' % event_type
      if event_type in ('CATEGORY', 'ACTION', 'LABEL'):
        print 'Match Type = %s' % event_condition.get('matchType')
        print 'Expression = %s' % event_condition.get('expression')
      else:  # VALUE type.
        print 'Comparison Type = %s' % event_condition.get('comparisonType')
        print 'Comparison Value = %s' % event_condition.get('comparisonValue')

  def print_segments(self, segments_list):
    """Prints all the segment info in the Segments Collection.

    Args:
      segments_list: The parsed Segments collection response.
    """
    print '------ Segments Collection -------'
    self.print_pagination_info(segments_list)
    print
    for segment in segments_list.get('items'):
      print 'Segment ID = %s' % segment.get('id')
      print 'Kind = %s' % segment.get('kind')
      print 'Self Link = %s' % segment.get('selfLink')
      print 'Name = %s' % segment.get('name')
      print 'Definition = %s' % segment.get('definition')
      print 'Created = %s' % segment.get('created')
      print 'Updated = %s' % segment.get('updated')
      print

  def print_pagination_info(self, mgmt_list):
    """Prints common pagination details.

    Args:
      mgmt_list: Any parsed Management API collection response.
    """
    print 'Items per page = %s' % mgmt_list.get('itemsPerPage')
    print 'Total Results = %s' % mgmt_list.get('totalResults')
    print 'Start Index = %s' % mgmt_list.get('startIndex')
    # These only have values if other result pages exist.
    if mgmt_list.get('previousLink'):
      print 'Previous Link = %s' % mgmt_list.get('previousLink')
    if mgmt_list.get('nextLink'):
      print 'Next Link = %s' % mgmt_list.get('nextLink')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line example for Translate.
Command-line application that translates some text.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build
def main():
# Build a service object for interacting with the API. Visit
# the Google APIs Console <http://code.google.com/apis/console>
# to get an API key for your own application.
service = build('translate', 'v2',
developerKey='AIzaSyDRRpR3GS1F1_jKNNM9HCNd2wJQyPG3oN0')
print service.translations().list(
source='en',
target='fr',
q=['flower', 'car']
).execute()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Simple command-line example for Latitude.
Command-line application that sets the users
current location.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build
import httplib2
import pickle
from apiclient.discovery import build
from apiclient.oauth import FlowThreeLegged
from apiclient.ext.authtools import run
from apiclient.ext.file import Storage
# Uncomment to get detailed logging
# httplib2.debuglevel = 4
def main():
storage = Storage('latitude.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
auth_discovery = build("latitude", "v1").auth_discovery()
flow = FlowThreeLegged(auth_discovery,
# You MUST have a consumer key and secret tied to a
# registered domain to use the latitude API.
#
# https://www.google.com/accounts/ManageDomains
consumer_key='REGISTERED DOMAIN NAME',
consumer_secret='KEY GIVEN DURING REGISTRATION',
user_agent='google-api-client-python-latitude/1.0',
domain='REGISTERED DOMAIN NAME',
scope='https://www.googleapis.com/auth/latitude',
xoauth_displayname='Google API Latitude Example',
location='current',
granularity='city'
)
credentials = run(flow, storage)
http = httplib2.Http()
http = credentials.authorize(http)
service = build("latitude", "v1", http=http)
body = {
"data": {
"kind": "latitude#location",
"latitude": 37.420352,
"longitude": -122.083389,
"accuracy": 130,
"altitude": 35
}
}
print service.currentLocation().insert(body=body).execute()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Starting template for Google App Engine applications.
Use this project as a starting point if you are just beginning to build a
Google App Engine project which will access and manage data held under a role
account for the App Engine app. More information about using Google App Engine
apps to call Google APIs can be found in Scenario 1 of the following document:
<https://sites.google.com/site/oauthgoog/Home/google-oauth2-assertion-flow>
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from oauth2client.appengine import AppAssertionCredentials
# Obtain credentials for the app's own role account (no end user involved)
# and build a single module-level Url Shortener service object shared by all
# request handlers.
credentials = AppAssertionCredentials(
    scope='https://www.googleapis.com/auth/urlshortener')
# memcache is handed to httplib2 as its cache so HTTP/discovery responses can
# be cached across requests.
http = credentials.authorize(httplib2.Http(memcache))
service = build("urlshortener", "v1", http=http)
class MainHandler(webapp.RequestHandler):
  """Lists and creates shortened URLs held by the app's role account."""

  def get(self):
    """Renders welcome.html with every (short, long) URL pair on record."""
    path = os.path.join(os.path.dirname(__file__), 'welcome.html')
    shortened = service.url().list().execute()
    short_and_long = [(item["id"], item["longUrl"]) for item in
                      shortened["items"]]
    variables = {
        'short_and_long': short_and_long,
    }
    self.response.out.write(template.render(path, variables))

  def post(self):
    """Shortens the posted 'longUrl' and redirects back to the listing."""
    long_url = self.request.get("longUrl")
    # Removed the unused 'shortened' local: the insert response isn't needed
    # here because the redirect triggers a fresh listing in get().
    service.url().insert(body={"longUrl": long_url}).execute()
    self.redirect("/")
def main():
  """WSGI entry point: routes '/' to MainHandler and starts the app."""
  routes = [('/', MainHandler)]
  application = webapp.WSGIApplication(routes, debug=True)
  run_wsgi_app(application)
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for Audit API.
Command-line application that retrieves events through the Audit API.
This works only for Google Apps for Business, Education, and ISP accounts.
It can not be used for the basic Google Apps product.
Usage:
$ python audit.py
You can also get help on all the command-line flags the program understands
by running:
$ python audit.py --help
To get detailed log output run:
$ python audit.py --logging_level=DEBUG
"""
__author__ = 'rahulpaul@google.com (Rahul Paul)'
import gflags
import httplib2
import logging
import re
import simplejson
import sys
from apiclient.discovery import build
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# When creating credentials for this application be sure to choose an
# Application type of 'Installed application'.
FLOW = OAuth2WebServerFlow(
client_id='880851855448.apps.googleusercontent.com',
client_secret='d8nBjlNBpOMH_LITqz31IMdI',
scope='https://www.googleapis.com/auth/apps/reporting/audit.readonly',
user_agent='audit-cmdline-sample/1.0')
# The flags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def print_activities(activity_list):
events = activity_list['items']
print '\nRetrieved %d activities.' % len(events)
for i in range(len(events)):
print '\nEvent %d : %s' % (i, simplejson.JSONEncoder().encode(events[i]))
print '\nNext URL : %s' % (activity_list['next'])
print '======================================================================'
def main(argv):
# Let the flags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\nUsage: %s ARGS\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('audit.dat')
credentials = storage.get()
if not credentials or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build('audit', 'v1', http=http)
try:
activities = service.activities()
# Retrieve the first two activities
print 'Retrieving the first 2 activities...'
activity_list = activities.list(
applicationId='207535951991', customerId='C01rv1wm7', maxResults='2',
actorEmail='admin@enterprise-audit-clientlib.com').execute()
print_activities(activity_list)
# Now retrieve the next 2 events
match = re.search('(?<=continuationToken=).+$', activity_list['next'])
if match is not None:
next_token = match.group(0)
print '\nRetrieving the next 2 activities...'
activity_list = activities.list(
applicationId='207535951991', customerId='C01rv1wm7',
maxResults='2', actorEmail='admin@enterprise-audit-clientlib.com',
continuationToken=next_token).execute()
print_activities(activity_list)
except AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run'
'the application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for the Google+ API.
Command-line application that retrieves the users latest content and
then adds a new entry.
Usage:
$ python plus.py
You can also get help on all the command-line flags the program understands
by running:
$ python plus.py --help
To get detailed log output run:
$ python plus.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import httplib2
import logging
import os
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/plus.me',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('plus.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("plus", "v1", http=http)
try:
person = service.people().get(userId='me').execute(http)
print "Got your ID: %s" % person['displayName']
print
print "%-040s -> %s" % ("[Activitity ID]", "[Content]")
# Don't execute the request until we reach the paging loop below
request = service.activities().list(
userId=person['id'], collection='public')
# Loop over every activity and print the ID and a short snippet of content.
while ( request != None ):
activities_doc = request.execute()
for item in activities_doc.get('items', []):
print '%-040s -> %s' % (item['id'], item['object']['content'][:30])
request = service.activities().list_next(request, activities_doc)
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
# version: v1.2
# scope: https://www.googleapis.com/auth/prediction
# title: Simple command-line sample for the Google Prediction API
# description: Command-line application that trains on some data. This sample does the same thing as the Hello Prediction! example.
# Name of Google Storage bucket/object that contains the training data
OBJECT_NAME = "apiclient-prediction-sample/prediction_models/languages"
# Start training on a data set
train = service.training()
start = train.insert(data=OBJECT_NAME, body={}).execute()
print 'Started training'
pprint.pprint(start)
import time
# Wait for the training to complete
while True:
status = train.get(data=OBJECT_NAME).execute()
pprint.pprint(status)
if 'RUNNING' != status['trainingStatus']:
break
print 'Waiting for training to complete.'
time.sleep(10)
print 'Training is complete'
# Now make a prediction using that training
body = {'input': {'csvInstance': ["mucho bueno"]}}
prediction = service.predict(body=body, data=OBJECT_NAME).execute()
print 'The prediction is:'
pprint.pprint(prediction)
| Python |
# version: v1
# title: Command-line sample for the Google URL Shortener API.
# scope: https://www.googleapis.com/auth/urlshortener
# description: Simple command-line example for Google URL Shortener API that shortens a URI then expands it.
url = service.url()
# Create a shortened URL by inserting the URL into the url collection.
body = {"longUrl": "http://code.google.com/apis/urlshortener/" }
insert_response = url.insert(body=body).execute()
pprint.pprint(insert_response)
# Convert the shortened URL back into a long URL
short_url = insert_response['id']
resp = url.get(shortUrl=short_url).execute()
pprint.pprint(resp)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""$title
$description
Usage:
$$ python $name.py
You can also get help on all the command-line flags the program understands
by running:
$$ python $name.py --help
To get detailed log output run:
$$ python $name.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import httplib2
import logging
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the API Access tab on
# the Google APIs Console <http://code.google.com/apis/console>. When
# creating credentials for this application be sure to choose an Application
# type of "Installed application".
FLOW = OAuth2WebServerFlow(
client_id='433807057907.apps.googleusercontent.com',
client_secret='jigtZpMApkRxncxikFpR+SFg',
scope='$scope',
user_agent='$name-cmdline-sample/1.0')
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('$name.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("$name", "$version", http=http)
try:
$content
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
# version: v1
# scope: https://www.googleapis.com/auth/moderator
# title: Simple command-line example for Moderator.
# description: Command-line application that exercises the Google Moderator API.
# Create a new Moderator series.
series_body = {
"description": "Share and rank tips for eating healthy and cheap!",
"name": "Eating Healthy & Cheap",
"videoSubmissionAllowed": False
}
series = service.series().insert(body=series_body).execute()
print "Created a new series"
# Create a new Moderator topic in that series.
topic_body = {
"description": "Share your ideas on eating healthy!",
"name": "Ideas",
"presenter": "liz"
}
topic = service.topics().insert(seriesId=series['id']['seriesId'],
body=topic_body).execute()
print "Created a new topic"
# Create a new Submission in that topic.
submission_body = {
"attachmentUrl": "http://www.youtube.com/watch?v=1a1wyc5Xxpg",
"attribution": {
"displayName": "Bashan",
"location": "Bainbridge Island, WA"
},
"text": "Charlie Ayers @ Google"
}
submission = service.submissions().insert(seriesId=topic['id']['seriesId'],
topicId=topic['id']['topicId'], body=submission_body).execute()
print "Inserted a new submisson on the topic"
# Vote on that newly added Submission.
vote_body = {
"vote": "PLUS"
}
service.votes().insert(seriesId=topic['id']['seriesId'],
submissionId=submission['id']['submissionId'],
body=vote_body)
print "Voted on the submission"
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
# Portions copyright PSF License
# http://code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/
"""A pm-action hook for setting timezone.
Uses the Google Latitude API and the geonames.org
API to find your cellphones latitude and longitude
and from the determine the timezone you are in,
and then sets the computer's timezone to that.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build
import httplib2
import os
import pickle
import pprint
import subprocess
import sys
import time
import uritemplate
from apiclient.anyjson import simplejson
from apiclient.discovery import build
from apiclient.oauth import FlowThreeLegged
from apiclient.ext.authtools import run
from apiclient.ext.file import Storage
# Uncomment to get detailed logging
# httplib2.debuglevel = 4
# URI Template to convert latitude and longitude into a timezone
GEONAMES = 'http://api.geonames.org/timezoneJSON?lat={lat}&lng={long}&username=jcgregorio'
PID_FILE = '/var/lock/tznever.pid'
CACHE = '/var/local/tznever/.cache'
# Default daemon parameters.
# File mode creation mask of the daemon.
UMASK = 0
# Default working directory for the daemon.
WORKDIR = "/"
# Default maximum for the number of available file descriptors.
MAXFD = 1024
# The standard I/O file descriptors are redirected to /dev/null by default.
if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
def main():
storage = Storage('/var/local/tznever/latitude_credentials.dat')
credentials = storage.get()
if len(sys.argv) == 1:
if credentials is None or credentials.invalid == True:
auth_discovery = build('latitude', 'v1').auth_discovery()
flow = FlowThreeLegged(auth_discovery,
consumer_key='m-buzz.appspot.com',
consumer_secret='NQEHb4eU6GkjjFGe1MD5W6IC',
user_agent='tz-never/1.0',
domain='m-buzz.appspot.com',
scope='https://www.googleapis.com/auth/latitude',
xoauth_displayname='TZ Never Again',
location='current',
granularity='city'
)
credentials = run(flow, storage)
else:
print "You are already authorized"
else:
if credentials is None or credentials.invalid == True:
print "This app, tznever, is not authorized. Run from the command-line to re-authorize."
os.exit(1)
if len(sys.argv) > 1 and sys.argv[1] in ['hibernate', 'suspend']:
print "Hibernating"
# Kill off the possibly still running process by its pid
if os.path.isfile(PID_FILE):
f = file(PID_FILE, 'r')
pid = f.read()
f.close()
cmdline = ['/bin/kill', '-2', pid]
subprocess.Popen(cmdline)
os.unlink(PID_FILE)
elif len(sys.argv) > 1 and sys.argv[1] in ['thaw', 'resume']:
print "Resuming"
# write our pid out
f = file(PID_FILE, 'w')
f.write(str(os.getpid()))
f.close()
success = False
first_time = True
while not success:
try:
if not first_time:
time.sleep(5)
else:
first_time = False
print "Daemonizing so as not to gum up the works."
createDaemon()
# rewrite the PID file with our new PID
f = file(PID_FILE, 'w')
f.write(str(os.getpid()))
f.close()
http = httplib2.Http(CACHE)
http = credentials.authorize(http)
service = build('latitude', 'v1', http=http)
location = service.currentLocation().get(granularity='city').execute()
position = {
'lat': str(location['latitude']),
'long': str(location['longitude'])
}
http2 = httplib2.Http(CACHE)
resp, content = http2.request(uritemplate.expand(GEONAMES, position))
geodata = simplejson.loads(content)
tz = geodata['timezoneId']
f = file('/etc/timezone', 'w')
f.write(tz)
f.close()
cmdline = 'dpkg-reconfigure -f noninteractive tzdata'.split(' ')
subprocess.Popen(cmdline)
success = True
except httplib2.ServerNotFoundError, e:
print "still not connected, sleeping"
except KeyboardInterrupt, e:
if os.path.isfile(PID_FILE):
os.unlink(PID_FILE)
success = True
# clean up pid file
if os.path.isfile(PID_FILE):
os.unlink(PID_FILE)
def createDaemon():
  """Detach a process from the controlling terminal and run it in the
  background as a daemon.

  Classic double-fork daemonization (PSF-licensed recipe; see the link in
  the module header). Only the daemonized grandchild ever returns (with
  value 0); the original process and the first child both _exit().
  Raises Exception if either fork() fails.
  """
  try:
    # Fork a child process so the parent can exit. This returns control to
    # the command-line or shell. It also guarantees that the child will not
    # be a process group leader, since the child receives a new process ID
    # and inherits the parent's process group ID. This step is required
    # to insure that the next call to os.setsid is successful.
    pid = os.fork()
  except OSError, e:
    raise Exception, "%s [%d]" % (e.strerror, e.errno)
  if (pid == 0):  # The first child.
    # To become the session leader of this new session and the process group
    # leader of the new process group, we call os.setsid(). The process is
    # also guaranteed not to have a controlling terminal.
    os.setsid()
    # Is ignoring SIGHUP necessary?
    #
    # It's often suggested that the SIGHUP signal should be ignored before
    # the second fork to avoid premature termination of the process. The
    # reason is that when the first child terminates, all processes, e.g.
    # the second child, in the orphaned group will be sent a SIGHUP.
    #
    # "However, as part of the session management system, there are exactly
    # two cases where SIGHUP is sent on the death of a process:
    #
    # 1) When the process that dies is the session leader of a session that
    # is attached to a terminal device, SIGHUP is sent to all processes
    # in the foreground process group of that terminal device.
    # 2) When the death of a process causes a process group to become
    # orphaned, and one or more processes in the orphaned group are
    # stopped, then SIGHUP and SIGCONT are sent to all members of the
    # orphaned group." [2]
    #
    # The first case can be ignored since the child is guaranteed not to have
    # a controlling terminal. The second case isn't so easy to dismiss.
    # The process group is orphaned when the first child terminates and
    # POSIX.1 requires that every STOPPED process in an orphaned process
    # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
    # second child is not STOPPED though, we can safely forego ignoring the
    # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
    #
    # import signal # Set handlers for asynchronous events.
    # signal.signal(signal.SIGHUP, signal.SIG_IGN)
    try:
      # Fork a second child and exit immediately to prevent zombies. This
      # causes the second child process to be orphaned, making the init
      # process responsible for its cleanup. And, since the first child is
      # a session leader without a controlling terminal, it's possible for
      # it to acquire one by opening a terminal in the future (System V-
      # based systems). This second fork guarantees that the child is no
      # longer a session leader, preventing the daemon from ever acquiring
      # a controlling terminal.
      pid = os.fork()  # Fork a second child.
    except OSError, e:
      raise Exception, "%s [%d]" % (e.strerror, e.errno)
    if (pid == 0):  # The second child.
      # Since the current working directory may be a mounted filesystem, we
      # avoid the issue of not being able to unmount the filesystem at
      # shutdown time by changing it to the root directory.
      os.chdir(WORKDIR)
      # We probably don't want the file mode creation mask inherited from
      # the parent, so we give the child complete control over permissions.
      os.umask(UMASK)
    else:
      # exit() or _exit()? See below.
      os._exit(0)  # Exit parent (the first child) of the second child.
  else:
    # exit() or _exit()?
    # _exit is like exit(), but it doesn't call any functions registered
    # with atexit (and on_exit) or any registered signal handlers. It also
    # closes any open file descriptors. Using exit() may cause all stdio
    # streams to be flushed twice and any temporary files may be unexpectedly
    # removed. It's therefore recommended that child branches of a fork()
    # and the parent branch(es) of a daemon use _exit().
    os._exit(0)  # Exit parent of the first child.
  # Close all open file descriptors. This prevents the child from keeping
  # open any file descriptors inherited from the parent. There is a variety
  # of methods to accomplish this task. Three are listed below.
  #
  # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
  # number of open file descriptors to close. If it doesn't exists, use
  # the default value (configurable).
  #
  # try:
  # maxfd = os.sysconf("SC_OPEN_MAX")
  # except (AttributeError, ValueError):
  # maxfd = MAXFD
  #
  # OR
  #
  # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
  # maxfd = os.sysconf("SC_OPEN_MAX")
  # else:
  # maxfd = MAXFD
  #
  # OR
  #
  # Use the getrlimit method to retrieve the maximum file descriptor number
  # that can be opened by this process. If there is not limit on the
  # resource, use the default value.
  #
  import resource  # Resource usage information.
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD
  # Iterate through and close all file descriptors.
  for fd in range(0, maxfd):
    try:
      os.close(fd)
    except OSError:  # ERROR, fd wasn't open to begin with (ignored)
      pass
  # Redirect the standard I/O file descriptors to the specified file. Since
  # the daemon has no controlling terminal, most daemons redirect stdin,
  # stdout, and stderr to /dev/null. This is done to prevent side-effects
  # from reads and writes to the standard I/O file descriptors.
  # This call to open is guaranteed to return the lowest file descriptor,
  # which will be 0 (stdin), since it was closed above.
  os.open(REDIRECT_TO, os.O_RDWR)  # standard input (0)
  # Duplicate standard input to standard output and standard error.
  os.dup2(0, 1)  # standard output (1)
  os.dup2(0, 2)  # standard error (2)
  return(0)
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for Blogger.
Command-line application that retrieves the users blogs and posts.
Usage:
$ python blogger.py
You can also get help on all the command-line flags the program understands
by running:
$ python blogger.py --help
To get detailed log output run:
$ python blogger.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import httplib2
import logging
import pprint
import sys
import os
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/blogger',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('blogger.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("blogger", "v2", http=http)
try:
users = service.users()
# Retrieve this user's profile information
thisuser = users.get(userId="self").execute(http)
print "This user's display name is: %s" % thisuser['displayName']
# Retrieve the list of Blogs this user has write privileges on
thisusersblogs = users.blogs().list(userId="self").execute()
for blog in thisusersblogs['items']:
print "The blog named \"%s\" is at: %s" % (blog['name'], blog['url'])
posts = service.posts()
# List the posts for each blog this user has
for blog in thisusersblogs['items']:
print "The posts for %s:" % blog['name']
request = posts.list(blogId=blog['id'])
while request != None:
posts_doc = request.execute(http)
if 'items' in posts_doc and not (posts_doc['items'] is None):
for post in posts_doc['items']:
print " %s (%s)" % (post['title'], post['url'])
request = posts.list_next(request, posts_doc)
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with ranked results against the shopping search API"""
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
"""Get and print a histogram of the top 15 brand distribution for a search
query.
Histograms are created by using the "Facets" functionality of the API. A
Facet is a view of a certain property of products, containing a number of
buckets, one for each value of that property. Or concretely, for a parameter
such as "brand" of a product, the facets would include a facet for brand,
which would contain a number of buckets, one for each brand returned in the
result.
A bucket contains either a value and a count, or a value and a range. In the
simple case of a value and a count for our example of the "brand" property,
the value would be the brand name, eg "sony" and the count would be the
number of results in the search.
"""
client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
resource = client.products()
request = resource.list(source='public', country='US', q=u'digital camera',
facets_include='brand:15', facets_enabled=True)
response = request.execute()
# Pick the first and only facet for this query
facet = response['facets'][0]
print '\n\tHistogram for "%s":\n' % facet['property']
labels = []
values = []
for bucket in facet['buckets']:
labels.append(bucket['value'].rjust(20))
values.append(bucket['count'])
weighting = 50.0 / max(values)
for label, value in zip(labels, values):
print label, '#' * int(weighting * value), '(%s)' % value
print
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
'''Simple command-line example for The Google Search
API for Shopping.
Command-line application that does a search for products.
'''
__author__ = 'aherrman@google.com (Andy Herrman)'
from apiclient.discovery import build
# Uncomment the next line to get very detailed logging
# httplib2.debuglevel = 4
def main():
p = build('shopping', 'v1',
developerKey='AIzaSyDRRpR3GS1F1_jKNNM9HCNd2wJQyPG3oN0')
# Search over all public offers:
print 'Searching all public offers.'
res = p.products().list(
country='US',
source='public',
q='android t-shirt'
).execute()
print_items(res['items'])
# Search over a specific merchant's offers:
print
print 'Searching Google Store.'
res = p.products().list(
country='US',
source='public',
q='android t-shirt',
restrictBy='accountId:5968952',
).execute()
print_items(res['items'])
# Remember the Google Id of the last product
googleId = res['items'][0]['product']['googleId']
# Get data for the single public offer:
print
print 'Getting data for offer %s' % googleId
res = p.products().get(
source='public',
accountId='5968952',
productIdType='gid',
productId=googleId
).execute()
print_item(res)
def print_item(item):
"""Displays a single item: title, merchant, link."""
product = item['product']
print '- %s [%s] (%s)' % (product['title'],
product['author']['name'],
product['link'])
def print_items(items):
  """Prints every offer in the given list, one per line."""
  for offer in items:
    print_item(offer)
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Full text search query against the shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Prints all public US products matching the query "digital camera".

  The free-text query is passed via the 'q' parameter of the list method.
  Alternatives can be separated with "|" (e.g. q='banana|apple'), and
  phrases containing spaces can be double-quoted (e.g. q='"mp3 player"'),
  including in combination (e.g. q='"mp3 player"|ipod').
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  listing = client.products().list(source='public', country='US',
                                   q=u'digital camera')
  pprint.pprint(listing.execute())
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Basic query against the public shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Prints a feed of all public products available in the United States.

  Note that the 'source' and 'country' arguments are required by the
  list method.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  request = client.products().list(source='public', country='US')
  pprint.pprint(request.execute())
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query that is restricted by a parameter against the public shopping search
API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Prints public US products matching "Digital Camera" made by Canon.

  The 'restrictBy' parameter controls which results are returned.
  Multiple values for a single attribute are separated by "|"
  (e.g. 'brand:canon|sony|apple'); multiple restricting attributes are
  separated by commas (e.g. 'brand:sony,title:32GB').
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  query = client.products().list(source='public', country='US',
                                 restrictBy='brand:canon',
                                 q='Digital Camera')
  pprint.pprint(query.execute())
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Queries with paginated results against the shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Pages through the entire public US product feed, prompting per page.

  Pagination is driven by the 'startIndex' parameter of the list method;
  the first response supplies the page size and the total result count.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  products = client.products()
  # The first request tells us the page size and the total number of
  # items, as well as returning the first page of results.
  first = products.list(source='public', country='US',
                        q=u'digital camera').execute()
  page_size = first['itemsPerPage']
  total = first['totalItems']
  start = 1
  while start < total:
    answer = raw_input('About to display results from %s to %s, y/(n)? ' %
                       (start, start + page_size))
    if answer.strip().lower().startswith('n'):
      # Stop if the user has had enough.
      break
    page = products.list(source='public', country='US',
                         q=u'digital camera', startIndex=start).execute()
    pprint.pprint(page)
    start += page_size
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with grouping against the shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Prints public US results for "digital camera" grouped by top 8 brands.

  Grouping uses the 'crowdBy' parameter, formatted
  <attribute>:<occurrence> (e.g. 'brand:5' for the 5 top brands).
  Currently supported attributes: account_id, brand, condition, gtin and
  price.  Several rules may be joined with commas, e.g.
  crowdBy='brand:5,condition:3'.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  grouped = client.products().list(source='public', country='US',
                                   q=u'digital camera', crowdBy='brand:8')
  pprint.pprint(grouped.execute())
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with ranked results against the shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Prints public US results for "digital camera" ranked by ascending price.

  Ranking uses the 'rankBy' parameter.  Supported values: "relevancy"
  (the default when rankBy is omitted), "modificationTime:ascending",
  "modificationTime:descending", "price:ascending" and
  "price:descending"; these may be combined.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  ranked = client.products().list(source='public', country='US',
                                  q=u'digital camera',
                                  rankBy='price:ascending')
  pprint.pprint(ranked.execute())
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sample application for Python documentation of APIs.
This is running live at http://api-python-client-doc.appspot.com where it
provides a list of APIs and PyDoc documentation for all the generated API
surfaces as they appear in the google-api-python-client. In addition it also
provides a Google Gadget.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import inspect
import os
import pydoc
import re
from apiclient.anyjson import simplejson
from apiclient import discovery
from apiclient.errors import HttpError
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
DISCOVERY_URI = 'https://www.googleapis.com/discovery/v1/apis?preferred=true'
def get_directory_doc():
  """Fetches the list of preferred APIs from the Discovery service.

  The caller's IP is forwarded as userIp (when available) so quota is
  attributed per end user rather than to the app.
  """
  http = httplib2.Http(memcache)
  uri = DISCOVERY_URI
  caller_ip = os.environ.get('REMOTE_ADDR', None)
  if caller_ip:
    uri += ('&userIp=' + caller_ip)
  unused_resp, content = http.request(uri)
  return simplejson.loads(content)['items']
class MainHandler(webapp.RequestHandler):
  """Serves the main landing page listing the available APIs."""

  def get(self):
    apis = get_directory_doc()
    # Fall back to the description when an API has no title.
    for api in apis:
      api['title'] = api.get('title', api.get('description', ''))
    template_path = os.path.join(os.path.dirname(__file__), 'index.html')
    self.response.out.write(
        template.render(template_path, {'directory': apis}))
class GadgetHandler(webapp.RequestHandler):
  """Serves the Google Gadget listing the available APIs."""

  def get(self):
    apis = get_directory_doc()
    # Fall back to the description when an API has no title.
    for api in apis:
      api['title'] = api.get('title', api.get('description', ''))
    gadget_path = os.path.join(os.path.dirname(__file__), 'gadget.html')
    self.response.out.write(
        template.render(gadget_path, {'directory': apis}))
    # Gadget specs are XML, not HTML.
    self.response.headers.add_header('Content-Type', 'application/xml')
def _render(resource):
"""Use pydoc helpers on an instance to generate the help documentation.
"""
obj, name = pydoc.resolve(type(resource))
return pydoc.html.page(
pydoc.describe(obj), pydoc.html.document(obj, name))
class ResourceHandler(webapp.RequestHandler):
  """Handles serving the PyDoc for a given collection.

  URL shape: /<service>/<version>[/<collection path>].
  """

  def get(self, service_name, version, collection):
    """Renders pydoc HTML for the requested API resource, 404 on failure."""
    http = httplib2.Http(memcache)
    try:
      resource = discovery.build(service_name, version, http=http)
    except Exception:
      # BUG FIX: narrowed from a bare 'except:' so SystemExit and
      # KeyboardInterrupt are no longer swallowed; unknown services
      # still produce a 404.
      return self.error(404)
    # Descend the object path, e.g. 'a/b' -> resource.a().b().
    if collection:
      try:
        path = collection.split('/')
        if path:
          for method in path:
            resource = getattr(resource, method)()
      except Exception:
        # BUG FIX: narrowed from a bare 'except:' (see above).  An
        # invalid collection path yields a 404, not a server error.
        return self.error(404)
    page = _render(resource)
    # Collect the sub-collections of this resource: callable attributes
    # flagged with '__is_resource__' by the discovery-built client.
    collections = []
    for name in dir(resource):
      if not "_" in name and callable(getattr(resource, name)) and hasattr(
          getattr(resource, name), '__is_resource__'):
        collections.append(name)
    if collection is None:
      collection_path = ''
    else:
      collection_path = collection + '/'
    # Turn each sub-collection name in the pydoc output into a link to
    # its own documentation page.
    for name in collections:
      page = re.sub('strong>(%s)<' % name,
                    r'strong><a href="/%s/%s/%s">\1</a><' % (
                        service_name, version, collection_path + name), page)
    # TODO(jcgregorio) breadcrumbs
    # TODO(jcgregorio) sample code?
    # Insert a 'Home' link just before the first paragraph.
    page = re.sub('<p>', r'<a href="/">Home</a><p>', page, 1)
    self.response.out.write(page)
def main():
  """Wires up the WSGI routes and starts the application."""
  routes = [
      (r'/', MainHandler),
      (r'/_gadget/', GadgetHandler),
      (r'/([^\/]*)/([^\/]*)(?:/(.*))?', ResourceHandler),
  ]
  util.run_wsgi_app(webapp.WSGIApplication(routes, debug=False))
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample for the Group Settings API demonstrates get and update method.
Usage:
$ python groupsettings.py
You can also get help on all the command-line flags the program understands
by running:
$ python groupsettings.py --help
"""
__author__ = 'Shraddha Gupta <shraddhag@google.com>'
from optparse import OptionParser
import os
import pprint
import sys
from apiclient.discovery import build
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
def access_settings(service, groupId, settings):
"""Retrieves a group's settings and updates the access permissions to it.
Args:
service: object service for the Group Settings API.
groupId: string identifier of the group@domain.
settings: dictionary key-value pairs of properties of group.
"""
# Get the resource 'group' from the set of resources of the API.
# The Group Settings API has only one resource 'group'.
group = service.groups()
# Retrieve the group properties
g = group.get(groupUniqueId=groupId).execute()
print '\nGroup properties for group %s\n' % g['name']
pprint.pprint(g)
# If dictionary is empty, return without updating the properties.
if not settings.keys():
print '\nGive access parameters to update group access permissions\n'
return
body = {}
# Settings might contain null value for some keys(properties).
# Extract the properties with values and add to dictionary body.
for key in settings.iterkeys():
if settings[key] is not None:
body[key] = settings[key]
# Update the properties of group
g1 = group.update(groupUniqueId=groupId, body=body).execute()
print '\nUpdated Access Permissions to the group\n'
pprint.pprint(g1)
def main(argv):
"""Demos the setting of the access properties by the Groups Settings API."""
usage = 'usage: %prog [options]'
parser = OptionParser(usage=usage)
parser.add_option('--groupId',
help='Group email address')
parser.add_option('--whoCanInvite',
help='Possible values: ALL_MANAGERS_CAN_INVITE, '
'ALL_MEMBERS_CAN_INVITE')
parser.add_option('--whoCanJoin',
help='Possible values: ALL_IN_DOMAIN_CAN_JOIN, '
'ANYONE_CAN_JOIN, CAN_REQUEST_TO_JOIN, '
'CAN_REQUEST_TO_JOIN')
parser.add_option('--whoCanPostMessage',
help='Possible values: ALL_IN_DOMAIN_CAN_POST, '
'ALL_MANAGERS_CAN_POST, ALL_MEMBERS_CAN_POST, '
'ANYONE_CAN_POST, NONE_CAN_POST')
parser.add_option('--whoCanViewGroup',
help='Possible values: ALL_IN_DOMAIN_CAN_VIEW, '
'ALL_MANAGERS_CAN_VIEW, ALL_MEMBERS_CAN_VIEW, '
'ANYONE_CAN_VIEW')
parser.add_option('--whoCanViewMembership',
help='Possible values: ALL_IN_DOMAIN_CAN_VIEW, '
'ALL_MANAGERS_CAN_VIEW, ALL_MEMBERS_CAN_VIEW, '
'ANYONE_CAN_VIEW')
(options, args) = parser.parse_args()
if options.groupId is None:
print 'Give the groupId for the group'
parser.print_help()
return
settings = {}
if (options.whoCanInvite or options.whoCanJoin or options.whoCanPostMessage
or options.whoCanPostMessage or options.whoCanViewMembership) is None:
print 'No access parameters given in input to update access permissions'
parser.print_help()
else:
settings = {'whoCanInvite': options.whoCanInvite,
'whoCanJoin': options.whoCanJoin,
'whoCanPostMessage': options.whoCanPostMessage,
'whoCanViewGroup': options.whoCanViewGroup,
'whoCanViewMembership': options.whoCanViewMembership}
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/apps.groups.settings',
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage('groupsettings.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
print 'invalid credentials'
# Save the credentials in storage to be used in subsequent runs.
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build('groupssettings', 'v1', http=http)
access_settings(service=service, groupId=options.groupId, settings=settings)
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line sample for the Google URL Shortener API.
Simple command-line example for Google URL Shortener API that shortens
a URI then expands it.
Usage:
$ python urlshortener.py
You can also get help on all the command-line flags the program understands
by running:
$ python urlshortener.py --help
To get detailed log output run:
$ python urlshortener.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import httplib2
import logging
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the API Access tab on
# the Google APIs Console <http://code.google.com/apis/console>. When
# creating credentials for this application be sure to choose an Application
# type of "Installed application".
FLOW = OAuth2WebServerFlow(
client_id='[[CLIENT ID GOES HERE]]',
client_secret='[[CLIENT SECRET GOES HERE]]',
scope='https://www.googleapis.com/auth/urlshortener',
user_agent='urlshortener-cmdline-sample/1.0')
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('urlshortener.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("urlshortener", "v1", http=http)
try:
url = service.url()
# Create a shortened URL by inserting the URL into the url collection.
body = {"longUrl": "http://code.google.com/apis/urlshortener/" }
resp = url.insert(body=body).execute()
pprint.pprint(resp)
short_url = resp['id']
# Convert the shortened URL back into a long URL
resp = url.get(shortUrl=short_url).execute()
pprint.pprint(resp)
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Starting template for Google App Engine applications.
Use this project as a starting point if you are just beginning to build a Google
App Engine project. Remember to download the OAuth 2.0 client secrets which can
be obtained from the Developer Console <https://code.google.com/apis/console/>
and save them as 'client_secrets.json' in the project directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from oauth2client.appengine import oauth2decorator_from_clientsecrets
from oauth2client.client import AccessTokenRefreshError
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
<h1>Warning: Please configure OAuth 2.0</h1>
<p>
To make this sample run you will need to populate the client_secrets.json file
found at:
</p>
<p>
<code>%s</code>.
</p>
<p>with information found on the <a
href="https://code.google.com/apis/console">APIs Console</a>.
</p>
""" % CLIENT_SECRETS
http = httplib2.Http(memcache)
service = build("plus", "v1", http=http)
decorator = oauth2decorator_from_clientsecrets(
CLIENT_SECRETS,
'https://www.googleapis.com/auth/plus.me',
MISSING_CLIENT_SECRETS_MESSAGE)
class MainHandler(webapp.RequestHandler):
  """Shows the grant page, offering an OAuth 2.0 authorization link."""

  @decorator.oauth_aware
  def get(self):
    grant_path = os.path.join(os.path.dirname(__file__), 'grant.html')
    context = {
        'url': decorator.authorize_url(),
        'has_credentials': decorator.has_credentials()
    }
    self.response.out.write(template.render(grant_path, context))
class AboutHandler(webapp.RequestHandler):
  """Greets the authorized user with their Google+ display name."""

  @decorator.oauth_required
  def get(self):
    try:
      authorized_http = decorator.http()
      me = service.people().get(userId='me').execute(authorized_http)
      greeting = 'Hello, %s!' % me['displayName']
      welcome_path = os.path.join(os.path.dirname(__file__), 'welcome.html')
      self.response.out.write(
          template.render(welcome_path, {'text': greeting}))
    except AccessTokenRefreshError:
      # Credentials were revoked or expired; send the user back to
      # the grant page to re-authorize.
      self.redirect('/')
def main():
  """Wires up the WSGI routes and starts the application."""
  routes = [
      ('/', MainHandler),
      ('/about', AboutHandler),
  ]
  run_wsgi_app(webapp.WSGIApplication(routes, debug=True))
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for the Google Prediction API
Command-line application that trains on your input data. This sample does
the same thing as the Hello Prediction! example. You might want to run
the setup.sh script to load the sample data to Google Storage.
Usage:
$ python prediction_language_id.py --model_id="foo"
--data_file="bucket/object"
You can also get help on all the command-line flags the program understands
by running:
$ python prediction_language_id.py --help
To get detailed log output run:
$ python prediction_language_id.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build_from_document
import apiclient.errors
import gflags
import httplib2
import logging
import os
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
# Parsed command-line flags; populated by FLAGS(argv) in main().
FLAGS = gflags.FLAGS

# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'

# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)

# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
    scope='https://www.googleapis.com/auth/prediction',
    message=MISSING_CLIENT_SECRETS_MESSAGE)

# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
                   ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   'Set the level of logging detail.')
gflags.DEFINE_string('model_id',
                     None,
                     'The unique name for the predictive model (ex foo)')
gflags.DEFINE_string('data_file',
                     None,
                     'Full Google Storage path of csv data (ex bucket/object)')
# Both flags must be supplied on the command line.
gflags.MarkFlagAsRequired('model_id')
gflags.MarkFlagAsRequired('data_file')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('prediction.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("prediction", "v1.4", http=http)
try:
# Start training on a data set
train = service.trainedmodels()
body = {'id': FLAGS.model_id, 'storageDataLocation': FLAGS.data_file}
start = train.insert(body=body).execute()
print 'Started training'
pprint.pprint(start)
import time
# Wait for the training to complete
while True:
try:
# We check the training job is completed. If it is not it will return
# an error code.
status = train.get(id=FLAGS.model_id).execute()
# Job has completed.
pprint.pprint(status)
break
except apiclient.errors.HttpError as error:
# Training job not yet completed.
print 'Waiting for training to complete.'
time.sleep(10)
print 'Training is complete'
# Now make a prediction using that training
body = {'input': {'csvInstance': ["mucho bueno"]}}
prediction = train.predict(body=body, id=FLAGS.model_id).execute()
print 'The prediction is:'
pprint.pprint(prediction)
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for the Google Prediction API
Command-line application that trains on your input data. This sample does
the same thing as the Hello Prediction! example. You might want to run
the setup.sh script to load the sample data to Google Storage.
Usage:
$ python prediction.py --object_name="bucket/object"
You can also get help on all the command-line flags the program understands
by running:
$ python prediction.py --help
To get detailed log output run:
$ python prediction.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import apiclient.errors
import gflags
import httplib2
import logging
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
# Parsed command-line flags; populated by FLAGS(argv) in main().
FLAGS = gflags.FLAGS

# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the API Access tab on
# the Google APIs Console <http://code.google.com/apis/console>. When
# creating credentials for this application be sure to choose an Application
# type of "Installed application".
# NOTE(review): embedding a client secret in source is tolerated only for
# installed-app samples; keep real secrets out of source control.
FLOW = OAuth2WebServerFlow(
    client_id='433807057907.apps.googleusercontent.com',
    client_secret='jigtZpMApkRxncxikFpR+SFg',
    scope='https://www.googleapis.com/auth/prediction',
    user_agent='prediction-cmdline-sample/1.0')

# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
                   ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   'Set the level of logging detail.')
gflags.DEFINE_string('object_name',
                     None,
                     'Full Google Storage path of csv data (ex bucket/object)')
# Must be supplied on the command line.
gflags.MarkFlagAsRequired('object_name')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('prediction.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("prediction", "v1.3", http=http)
try:
# Start training on a data set
train = service.training()
body = {'id': FLAGS.object_name}
start = train.insert(body=body).execute()
print 'Started training'
pprint.pprint(start)
import time
# Wait for the training to complete
while True:
try:
# We check the training job is completed. If it is not it will return an error code.
status = train.get(data=FLAGS.object_name).execute()
# Job has completed.
pprint.pprint(status)
break
except apiclient.errors.HttpError as error:
# Training job not yet completed.
print 'Waiting for training to complete.'
time.sleep(10)
print 'Training is complete'
# Now make a prediction using that training
body = {'input': {'csvInstance': ["mucho bueno"]}}
prediction = train.predict(body=body, data=FLAGS.object_name).execute()
print 'The prediction is:'
pprint.pprint(prediction)
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for the Google Prediction API
Command-line application that trains on your input data. This sample does
the same thing as the Hello Prediction! example. You might want to run
the setup.sh script to load both the sample data and the pmml file to
Google Storage.
Usage:
$ python prediction_number.py --model_id="foo"
--data_file="data_bucket/data_object" --pmml_file="pmml_bucket/pmml_object"
You can also get help on all the command-line flags the program understands
by running:
$ python prediction_number.py --help
To get detailed log output run:
$ python prediction_number.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build_from_document
import apiclient.errors
import gflags
import httplib2
import logging
import os
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
# Parsed command-line flags; populated by FLAGS(argv) in main().
FLAGS = gflags.FLAGS

# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'

# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)

# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
    scope='https://www.googleapis.com/auth/prediction',
    message=MISSING_CLIENT_SECRETS_MESSAGE)

# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
                   ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   'Set the level of logging detail.')
gflags.DEFINE_string('model_id',
                     None,
                     'The unique name for the predictive model (ex foo)')
gflags.DEFINE_string('data_file',
                     None,
                     'Full Google Storage path of csv data (ex bucket/object)')
gflags.DEFINE_string('pmml_file',
                     None,
                     'Full Google Storage path of pmml for '
                     'preprocessing (ex bucket/object)')
# All three flags must be supplied on the command line.
gflags.MarkFlagAsRequired('model_id')
gflags.MarkFlagAsRequired('data_file')
gflags.MarkFlagAsRequired('pmml_file')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('prediction.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("prediction", "v1.4", http=http)
try:
# Start training on a data set
train = service.trainedmodels()
body = {'id': FLAGS.model_id, 'storageDataLocation': FLAGS.data_file,
'storagePMMLLocation': FLAGS.pmml_file}
start = train.insert(body=body).execute()
print 'Started training'
pprint.pprint(start)
import time
# Wait for the training to complete
while True:
try:
# We check the training job is completed. If it is not it will return
# an error code.
status = train.get(id=FLAGS.model_id).execute()
# Job has completed.
pprint.pprint(status)
break
except apiclient.errors.HttpError as error:
# Training job not yet completed.
print 'Waiting for training to complete.'
time.sleep(10)
print 'Training is complete'
# Now make a prediction using that training
body = {'input': {'csvInstance': [ 5 ]}}
prediction = train.predict(body=body, id=FLAGS.model_id).execute()
print 'The prediction is:'
pprint.pprint(prediction)
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python
from django.core.management import execute_manager
# Standard Django (pre-1.4) manage.py bootstrap: import the project's
# settings module and hand it to the management-command dispatcher.
try:
  import settings  # Assumed to be in the same directory.
except ImportError:
  import sys
  sys.stderr.write("""Error: Can't find the file 'settings.py' in the
directory containing %r. It appears you've customized things. You'll
have to run django-admin.py, passing it your settings module.
(If the file settings.py does indeed exist, it's causing an ImportError
somehow.)\n""" % __file__)
  sys.exit(1)
if __name__ == "__main__":
  execute_manager(settings)
| Python |
import pickle
import base64
from django.contrib import admin
from django.contrib.auth.models import User
from django.db import models
from oauth2client.django_orm import FlowField
from oauth2client.django_orm import CredentialsField
# The Flow could also be stored in memcache since it is short lived.
class FlowModel(models.Model):
    # One pending OAuth flow per user; the User row is the primary key.
    id = models.ForeignKey(User, primary_key=True)
    flow = FlowField()


class CredentialsModel(models.Model):
    # One stored OAuth credential per user; the User row is the primary key.
    id = models.ForeignKey(User, primary_key=True)
    credential = CredentialsField()


class CredentialsAdmin(admin.ModelAdmin):
    # Default admin options suffice for this sample.
    pass


class FlowAdmin(admin.ModelAdmin):
    # Default admin options suffice for this sample.
    pass


# Expose both models in the Django admin site.
admin.site.register(CredentialsModel, CredentialsAdmin)
admin.site.register(FlowModel, FlowAdmin)
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Sanity unit test for the app; run with "manage.py test"."""

    def test_basic_addition(self):
        """Tests that 1 + 1 always equals 2."""
        # Fix: assertEqual replaces failUnlessEqual, a deprecated unittest
        # alias slated for removal.
        self.assertEqual(1 + 1, 2)
# Named doctests picked up by Django's test runner alongside the unit tests.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
import os
import logging
import httplib2
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from oauth2client.django_orm import Storage
from oauth2client.client import OAuth2WebServerFlow
from django_sample.plus.models import CredentialsModel
from django_sample.plus.models import FlowModel
from apiclient.discovery import build
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
STEP2_URI = 'http://localhost:8000/oauth2callback'
@login_required
def index(request):
    """Shows the user's Google+ public activities, authorizing if needed.

    If no valid stored credential exists for the logged-in user, starts
    the OAuth 2.0 web-server flow and redirects to Google's authorization
    page; otherwise fetches and renders the user's public activity list.
    """
    storage = Storage(CredentialsModel, 'id', request.user, 'credential')
    credential = storage.get()
    # Fix: test truthiness directly instead of the '== True' anti-idiom.
    if credential is None or credential.invalid:
        flow = OAuth2WebServerFlow(
            client_id='[[Insert Client ID here.]]',
            client_secret='[[Insert Client Secret here.]]',
            scope='https://www.googleapis.com/auth/plus.me',
            user_agent='plus-django-sample/1.0',
        )
        authorize_url = flow.step1_get_authorize_url(STEP2_URI)
        # Persist the flow so auth_return() can complete the exchange.
        f = FlowModel(id=request.user, flow=flow)
        f.save()
        return HttpResponseRedirect(authorize_url)
    else:
        http = credential.authorize(httplib2.Http())
        service = build("plus", "v1", http=http)
        activities = service.activities()
        activitylist = activities.list(collection='public',
                                       userId='me').execute()
        logging.info(activitylist)
        return render_to_response('plus/welcome.html', {
            'activitylist': activitylist,
        })
@login_required
def auth_return(request):
    """OAuth 2.0 callback: exchanges the auth code for stored credentials.

    Completes the flow started in index(), stores the resulting
    credential, deletes the transient flow record, and redirects home.
    """
    try:
        f = FlowModel.objects.get(id=request.user)
    except FlowModel.DoesNotExist:
        # Fix: the original silently fell through and returned None, which
        # makes Django raise a confusing error; send the user back to the
        # index (which restarts the flow) instead.
        return HttpResponseRedirect("/")
    credential = f.flow.step2_exchange(request.REQUEST)
    storage = Storage(CredentialsModel, 'id', request.user, 'credential')
    storage.put(credential)
    f.delete()
    return HttpResponseRedirect("/")
| Python |
import os
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Discover admin.py modules in all INSTALLED_APPS so their models are
# registered with the admin site.
admin.autodiscover()

urlpatterns = patterns('',
    # Landing page: starts the OAuth flow / shows Google+ activities.
    (r'^$', 'django_sample.plus.views.index'),
    # OAuth 2.0 redirect URI; must match STEP2_URI in views.py.
    (r'^oauth2callback', 'django_sample.plus.views.auth_return'),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Django admin UI.
    (r'^admin/', include(admin.site.urls)),
    (r'^accounts/login/$', 'django.contrib.auth.views.login',
     {'template_name': 'plus/login.html'}),
    # Serve static assets from the local 'static' directory (dev only).
    (r'^static/(?P<path>.*)$', 'django.views.static.serve',
     {'document_root': os.path.join(os.path.dirname(__file__), 'static')
      }),
)
| Python |
# Django settings for django_sample project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# SQLite database stored next to the project; no credentials needed.
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'database.sqlite3'
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): committing a SECRET_KEY to source control is unsafe for
# anything beyond a local sample project.
SECRET_KEY = '_=9hq-$t_uv1ckf&s!y2$9g$1dm*6p1cl%*!^mg=7gr)!zj32d'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    # 'django.template.loaders.eggs.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
)

ROOT_URLCONF = 'django_sample.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates"
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # Fix: the trailing comma is required -- without it the parentheses are
    # just grouping and TEMPLATE_DIRS becomes a plain string, not the
    # tuple of directories Django expects.
    os.path.join(os.path.dirname(__file__), 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    # The sample's Google+ app (models, views, admin registrations).
    'django_sample.plus'
)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line example for Custom Search.
Command-line application that does a search.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pprint
from apiclient.discovery import build
def main():
  """Runs a single Custom Search query and pretty-prints the response."""
  # Build a service object for interacting with the API. Visit
  # the Google APIs Console <http://code.google.com/apis/console>
  # to get an API key for your own application.
  service = build("customsearch", "v1",
                  developerKey="AIzaSyDRRpR3GS1F1_jKNNM9HCNd2wJQyPG3oN0")
  search_request = service.cse().list(
      q='lectures',
      cx='017576662512468239146:omuauf_lfve',
  )
  pprint.pprint(search_request.execute())


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands to interact with the TaskQueue object of the TaskQueue API."""
__version__ = '0.0.1'
from gtaskqueue.taskqueue_cmd_base import GoogleTaskQueueCommand
from google.apputils import appcommands
import gflags as flags
FLAGS = flags.FLAGS
class GetTaskQueueCommand(GoogleTaskQueueCommand):
  """Get properties of an existing task queue."""

  def __init__(self, name, flag_values):
    """Registers the command's flag, then initializes the base command.

    Args:
      name: command name under which this command is registered.
      flag_values: FlagValues object the new flag is attached to.
    """
    # --get_stats controls whether queue statistics are requested; it must
    # be defined before the base class consumes flag_values.
    flags.DEFINE_boolean('get_stats',
                         False,
                         'Whether to get Stats',
                         flag_values=flag_values)
    super(GetTaskQueueCommand, self).__init__(name, flag_values)

  def build_request(self, taskqueue_api, flag_values):
    """Build a request to get properties of a TaskQueue.

    Args:
      taskqueue_api: The handle to the taskqueue collection API.
      flag_values: The parsed command flags.

    Returns:
      The properties of the taskqueue.
    """
    return taskqueue_api.get(project=flag_values.project_name,
                             taskqueue=flag_values.taskqueue_name,
                             getStats=flag_values.get_stats)
def add_commands():
  """Registers the 'getqueue' command with the appcommands dispatcher."""
  appcommands.AddCmd('getqueue', GetTaskQueueCommand)
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Command line tool for interacting with Google TaskQueue API."""
__version__ = '0.0.1'
import logging
from gtaskqueue import task_cmds
from gtaskqueue import taskqueue_cmds
from google.apputils import appcommands
import gflags as flags
# Logging levels the --log_level flag may select, most verbose first.
LOG_LEVELS = [logging.DEBUG,
              logging.INFO,
              logging.WARNING,
              logging.CRITICAL]
# Printable names ('DEBUG', 'INFO', ...) used as the enum of valid values.
LOG_LEVEL_NAMES = map(logging.getLevelName, LOG_LEVELS)

FLAGS = flags.FLAGS
flags.DEFINE_enum(
    'log_level',
    logging.getLevelName(logging.WARNING),
    LOG_LEVEL_NAMES,
    'Logging output level.')
def main(unused_argv):
  """Configures logging and registers the taskqueue/task subcommands.

  appcommands.Run() dispatches to the registered subcommands afterwards.
  """
  # Translate the --log_level name (e.g. 'WARNING') back to its numeric
  # logging constant.
  level_by_name = dict(zip(LOG_LEVEL_NAMES, LOG_LEVELS))
  logging.getLogger().setLevel(level_by_name[FLAGS.log_level])
  taskqueue_cmds.add_commands()
  task_cmds.add_commands()


if __name__ == '__main__':
  appcommands.Run()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log settings for taskqueue_puller module."""
import logging
import logging.config
from google.apputils import app
import gflags as flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
    'log_output_file',
    '/tmp/taskqueue-puller.log',
    'Logfile name for taskqueue_puller.')
# Shared module-level logger; configured by set_logger() below.
logger = logging.getLogger('TaskQueueClient')
def set_logger():
  """Configures the 'TaskQueueClient' logger with a rotating file handler.

  Logs at INFO level to --log_output_file, rotating at 1 MiB and keeping
  up to five backup files.
  """
  # Fix: import the handlers submodule explicitly. A bare 'import logging'
  # does not guarantee logging.handlers is available; the original only
  # worked because logging.config happens to import it as a side effect.
  import logging.handlers

  logger.setLevel(logging.INFO)
  # One timestamped line per record.
  formatter = logging.Formatter(
      '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
  # Set size of the log file and the backup count for rotated log files.
  handler = logging.handlers.RotatingFileHandler(FLAGS.log_output_file,
                                                 maxBytes=1024 * 1024,
                                                 backupCount=5)
  handler.setFormatter(formatter)
  # Attach the configured handler to the module logger. (The original
  # comment here wrongly repeated "add formatter to handler".)
  logger.addHandler(handler)


if __name__ == '__main__':
  app.run()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to pull tasks from TaskQueues and execute them.
This module does the following in an infinite loop.
1. Connects to Task API (of TaskQueues API collection) to request lease on
certain number of tasks (specified by user).
2. Spawns parallel processes to execute the leased tasks.
3. Polls all the tasks continously till they finish.
4. Deletes the tasks from taskqueue on their successful completion.
5. It lets the user specify when to invoke the lease request instead of polling
tasks status in a tight loop for better resource utilization:
a. Invoke the Lease request when runnning tasks go beyound certain
threshold (min_running_tasks)
b. Wait time becomes more than specified poll-time-out interval.
6. Repeat the steps from 1 to 5 when either all tasks have finished executing
or one of the conditions in 5) is met. """
import sys
import time
from apiclient.errors import HttpError
from gtaskqueue.client_task import ClientTask
from gtaskqueue.taskqueue_client import TaskQueueClient
from gtaskqueue.taskqueue_logger import logger
from gtaskqueue.taskqueue_logger import set_logger
from google.apputils import app
import gflags as flags
FLAGS = flags.FLAGS
# Command-line configuration for the puller; defaults suit local testing.
flags.DEFINE_string(
    'project_name',
    'default',
    'The name of the Taskqueue API project.')
flags.DEFINE_string(
    'taskqueue_name',
    'testpuller',
    'taskqueue to which client wants to connect to')
flags.DEFINE_integer(
    'lease_secs',
    30,
    'The lease for the task in seconds')
flags.DEFINE_integer(
    'num_tasks',
    10,
    'The number of tasks to lease')
flags.DEFINE_integer(
    'min_running_tasks',
    0,
    'minmum number of tasks below which lease can be invoked')
flags.DEFINE_float(
    'sleep_interval_secs',
    2,
    'sleep interval when no tasks are found in the taskqueue')
flags.DEFINE_float(
    'timeout_secs_for_next_lease_request',
    600,
    'Wait time before next poll when no tasks are found in the'
    'queue (in seconds)')
flags.DEFINE_integer(
    'taskapi_requests_per_sec',
    None,
    'limit on task_api requests per second')
flags.DEFINE_float(
    'sleep_before_next_poll_secs',
    2,
    'sleep interval before next poll')
class TaskQueuePuller(object):
"""Maintains state information for TaskQueuePuller."""
def __init__(self):
self._last_lease_time = None
self._poll_timeout_start = None
self._num_last_leased_tasks = 0
# Dictionary for running tasks's ids and their corresponding
# client_task object.
self._taskprocess_map = {}
try:
self.__tcq = TaskQueueClient()
self.task_api = self.__tcq.get_taskapi()
except HttpError, http_error:
logger.error('Could not get TaskQueue API handler and hence' \
'exiting: %s' % str(http_error))
sys.exit()
def _can_lease(self):
"""Determines if new tasks can be leased.
Determines if new taks can be leased based on
1. Number of tasks already running in the system.
2. Limit on accessing the taskqueue apirary.
Returns:
True/False.
"""
if self._num_tasks_to_lease() > 0 and not self._is_rate_exceeded():
return True
else:
return False
def _is_rate_exceeded(self):
"""Determines if requests/second to TaskQueue API has exceeded limit.
We do not access the APIs beyond the specified permissible limit.
If we have run N tasks in elapsed time since last lease, we have
already made N+1 requests to API (1 for collective lease and N for
their individual delete operations). If K reqs/sec is the limit on
accessing APIs, then we sould not invoke any request to API before
N+1/K sec approximately. The above condition is formulated in the
following method.
Returns:
True/False
"""
if not FLAGS.taskapi_requests_per_sec:
return False
if not self._last_lease_time:
return False
curr_time = time.time()
if ((curr_time - self._last_lease_time) <
((1.0 * (self._num_last_leased_tasks -
len(self._taskprocess_map)) /
FLAGS.taskapi_requests_per_sec))):
return True
else:
return False
  def _num_tasks_to_lease(self):
    """Determines how many tasks can be leased.

    FLAGS.num_tasks is the upper limit on tasks running concurrently, so
    the number of tasks which could be leased is the difference between
    that limit and the count of currently running tasks.

    Returns:
      Number of tasks to lease.
    """
    return FLAGS.num_tasks - len(self._taskprocess_map)
def _update_last_lease_info(self, result):
"""Updates the information regarding last lease.
Args:
result: Response object from TaskQueue API, containing list of
tasks.
"""
self._last_lease_time = time.time()
if result:
if result.get('items'):
self._num_last_leased_tasks = len(result.get('items'))
else:
self._num_last_leased_tasks = 0
else:
self._num_last_leased_tasks = 0
  def _update_poll_timeout_start(self):
    """Starts the poll-timeout clock, if it is not already running.

    _continue_polling() uses the recorded start time to enforce
    --timeout_secs_for_next_lease_request.
    """
    if not self._poll_timeout_start:
      self._poll_timeout_start = time.time()
  def _continue_polling(self):
    """Decides whether to keep polling instead of issuing a new lease.

    A new lease is invoked (this returns False) when either:
    1. Running tasks in the system have dropped to or below the
       min_running_tasks threshold.
    2. The wait has exceeded timeout_secs_for_next_lease_request and at
       least one task has finished since the last lease invocation.

    This batches the lease requests: otherwise, tasks finishing slightly
    one after another would each trigger a single-task lease call. The
    timeout bounds how long batching may delay the next lease.

    Returns:
      True to keep polling running tasks, False to lease again.
    """
    if len(self._taskprocess_map) <= FLAGS.min_running_tasks:
      return False
    if self._poll_timeout_start:
      elapsed_time = time.time() - self._poll_timeout_start
      if elapsed_time > FLAGS.timeout_secs_for_next_lease_request:
        # Reset the window so the next timeout is measured afresh.
        self._poll_timeout_start = None
        return False
    return True
def _get_tasks_from_queue(self):
"""Gets the available tasks from the taskqueue.
Returns:
Lease response object.
"""
try:
tasks_to_fetch = self._num_tasks_to_lease()
lease_req = self.task_api.tasks().lease(
project=FLAGS.project_name,
taskqueue=FLAGS.taskqueue_name,
leaseSecs=FLAGS.lease_secs,
numTasks=tasks_to_fetch,
body={})
result = lease_req.execute()
return result
except HttpError, http_error:
logger.error('Error during lease request: %s' % str(http_error))
return None
def _create_subprocesses_for_tasks(self, result):
    """Spawn a parallel subprocess for each newly leased task.

    Args:
        result: lease response dictionary from the TaskQueue API.
    """
    if not result:
        logger.info('Error: result is not defined')
        return None
    for task in result.get('items') or []:
        task_id = task.get('id')
        # A task may be leased more than once, so a lease response can
        # contain a task we are already executing; never spawn a
        # duplicate subprocess for it.
        if task_id in self._taskprocess_map:
            continue
        client_task = ClientTask(task)
        # Track only tasks that initialized successfully; the map is
        # used later for status polling and for deleting finished
        # tasks from the taskqueue.
        if client_task.init():
            self._taskprocess_map[client_task.get_task_id()] = client_task
def _poll_running_tasks(self):
    """Poll running tasks and drop finished ones from the tracking map.

    is_completed() posts output and deletes the task from the taskqueue
    as a side effect; here we only prune the local map and record the
    poll time for lease-batching decisions.
    """
    if self._taskprocess_map:
        # Iterate over an explicit snapshot: entries are deleted from
        # the map inside the loop. Python 2's dict.values() happened to
        # copy, but a live view (Python 3) would break mid-iteration.
        for task in list(self._taskprocess_map.values()):
            if task.is_completed(self.task_api):
                del self._taskprocess_map[task.get_task_id()]
        # Updates scheduling information for later use.
        self._update_poll_timeout_start()
def _sleep_before_next_lease(self):
    """Sleep before the next lease if the last lease found no tasks.

    Sleeps only when the previous lease request returned zero tasks,
    and only for (sleep_interval_secs - time already spent since that
    lease), so time spent polling is discounted and we never wait
    longer than necessary before asking the queue again. Never sleeps
    on the very first call (no lease has been made yet) or when the
    last lease returned tasks.
    """
    if not self._last_lease_time:
        # First invocation: no lease has ever been made, don't wait.
        return
    if self._num_last_leased_tasks <= 0:
        elapsed_since_lease = time.time() - self._last_lease_time
        sleep_secs = FLAGS.sleep_interval_secs - elapsed_since_lease
        if sleep_secs > 0:
            logger.info('No tasks found and hence sleeping for sometime')
            # Bug fix: previously slept the full sleep_interval_secs,
            # so the elapsed-time discount computed above was never
            # actually applied.
            time.sleep(sleep_secs)
def lease_tasks(self):
    """Lease tasks, spawn workers for them, and record lease info.

    First waits (sleeps) if the previous lease found an empty queue, so
    the TaskQueue API is not hammered. Then, if rate and capacity
    limits allow, issues a lease request, spawns parallel subprocesses
    for the returned tasks and records bookkeeping used by later
    scheduling decisions.

    Returns:
        True if a lease request was attempted, False otherwise.
    """
    self._sleep_before_next_lease()
    if not self._can_lease():
        return False
    response = self._get_tasks_from_queue()
    self._update_last_lease_info(response)
    self._create_subprocesses_for_tasks(response)
    return True
def poll_tasks(self):
    """Poll running tasks until there is capacity for new work.

    Repeatedly checks task status, sleeping between rounds instead of
    spinning in a tight loop, and returns only once the system can
    accommodate at least one new task (per _continue_polling()).
    """
    self._poll_running_tasks()
    while self._continue_polling():
        logger.info('Sleeping before next poll')
        time.sleep(FLAGS.sleep_before_next_poll_secs)
        self._poll_running_tasks()
def main(argv):
    """Entry point: forever lease new tasks and poll them to completion."""
    # Configure logging first so all subsequent activity is recorded.
    set_logger()
    puller = TaskQueuePuller()
    # Alternate between grabbing available work and waiting for
    # capacity to free up before leasing again.
    while True:
        puller.lease_tasks()
        puller.poll_tasks()


if __name__ == '__main__':
    app.run()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands to interact with the Task object of the TaskQueue API."""
__version__ = '0.0.1'
from gtaskqueue.taskqueue_cmd_base import GoogleTaskCommand
from google.apputils import app
from google.apputils import appcommands
import gflags as flags
FLAGS = flags.FLAGS
class GetTaskCommand(GoogleTaskCommand):
    """Fetch and display the properties of a single existing task."""

    def __init__(self, name, flag_values):
        super(GetTaskCommand, self).__init__(name, flag_values)

    def build_request(self, task_api, flag_values):
        """Build the tasks.get request for the task named by the flags.

        Args:
            task_api: The handle to the task collection API.
            flag_values: The parsed command flags.

        Returns:
            An unexecuted request for the task's properties.
        """
        return task_api.get(project=flag_values.project_name,
                            taskqueue=flag_values.taskqueue_name,
                            task=flag_values.task_name)
class LeaseTaskCommand(GoogleTaskCommand):
    """Lease pending tasks from the queue."""

    def __init__(self, name, flag_values):
        # Lease-specific flags; no --task_name is needed because the
        # queue decides which tasks are handed out.
        flags.DEFINE_integer('lease_secs',
                             None,
                             'The lease for the task in seconds',
                             flag_values=flag_values)
        flags.DEFINE_integer('num_tasks',
                             1,
                             'The number of tasks to lease',
                             flag_values=flag_values)
        flags.DEFINE_integer('payload_size_to_display',
                             2 * 1024 * 1024,
                             'Size of the payload for leased tasks to show',
                             flag_values=flag_values)
        super(LeaseTaskCommand, self).__init__(name,
                                               flag_values,
                                               need_task_flag=False)

    def build_request(self, task_api, flag_values):
        """Build the tasks.lease request.

        Args:
            task_api: The handle to the task collection API.
            flag_values: The parsed command flags.

        Returns:
            An unexecuted lease request.

        Raises:
            app.UsageError: if --lease_secs was not supplied.
        """
        if not flag_values.lease_secs:
            raise app.UsageError('lease_secs must be specified')
        return task_api.lease(project=flag_values.project_name,
                              taskqueue=flag_values.taskqueue_name,
                              leaseSecs=flag_values.lease_secs,
                              numTasks=flag_values.num_tasks,
                              body={})

    def print_result(self, result):
        """Print the result, truncating very long payloads for display."""
        items = result.get('items')
        if items:
            limit = FLAGS.payload_size_to_display
            shortened = []
            for task in items:
                payload = task['payloadBase64']
                overflow = len(payload) - limit
                if overflow > 0:
                    task['payloadBase64'] = ('%s(%d more bytes)' %
                                             (payload[:limit], overflow))
                shortened.append(task)
            result['items'] = shortened
        GoogleTaskCommand.print_result(self, result)
class DeleteTaskCommand(GoogleTaskCommand):
    """Delete an existing task from its queue."""

    def __init__(self, name, flag_values):
        super(DeleteTaskCommand, self).__init__(name, flag_values)

    def build_request(self, task_api, flag_values):
        """Build the tasks.delete request for the task named by the flags.

        Args:
            task_api: The handle to the taskqueue collection API.
            flag_values: The parsed command flags.

        Returns:
            An unexecuted delete request.
        """
        return task_api.delete(project=flag_values.project_name,
                               taskqueue=flag_values.taskqueue_name,
                               task=flag_values.task_name)
class ListTasksCommand(GoogleTaskCommand):
    """List the tasks in a queue (currently up to a max of 100)."""

    def __init__(self, name, flag_values):
        super(ListTasksCommand, self).__init__(name,
                                               flag_values,
                                               need_task_flag=False)

    def build_request(self, task_api, flag_values):
        """Build the tasks.list request for the queue named by the flags.

        Args:
            task_api: The handle to the taskqueue collection API.
            flag_values: The parsed command flags.

        Returns:
            An unexecuted list request for the queue's pending tasks.
        """
        return task_api.list(project=flag_values.project_name,
                             taskqueue=flag_values.taskqueue_name)
class ClearTaskQueueCommand(GoogleTaskCommand):
"""Deletes all tasks in a queue (default to a max of 100)."""
def __init__(self, name, flag_values):
flags.DEFINE_integer('max_delete', 100, 'How many to clear at most',
flag_values=flag_values)
super(ClearTaskQueueCommand, self).__init__(name,
flag_values,
need_task_flag=False)
def run_with_api_and_flags(self, api, flag_values):
"""Run the command, returning the result.
Args:
api: The handle to the Google TaskQueue API.
flag_values: The parsed command flags.
Returns:
The result of running the command.
"""
tasks_api = api.tasks()
self._flag_values = flag_values
self._to_delete = flag_values.max_delete
total_deleted = 0
while self._to_delete > 0:
n_deleted = self._delete_a_batch(tasks_api)
if n_deleted <= 0:
break
total_deleted += n_deleted
return {'deleted': total_deleted}
def _delete_a_batch(self, tasks):
"""Delete a batch of tasks.
Since the list method only gives us back 100 at a time, we may have
to call it several times to clear the entire queue.
Args:
tasks: The handle to the Google TaskQueue API Tasks resource.
Returns:
The number of tasks deleted.
"""
list_request = tasks.list(project=self._flag_values.project_name,
taskqueue=self._flag_values.taskqueue_name)
result = list_request.execute()
n_deleted = 0
if result:
for task in result.get('items', []):
if self._to_delete > 0:
self._to_delete -= 1
n_deleted += 1
print 'Deleting: %s' % task['id']
tasks.delete(project=self._flag_values.project_name,
taskqueue=self._flag_values.taskqueue_name,
task=task['id']).execute()
return n_deleted
def add_commands():
    """Register every task subcommand with the appcommands dispatcher."""
    for command_name, command_class in (('listtasks', ListTasksCommand),
                                        ('gettask', GetTaskCommand),
                                        ('deletetask', DeleteTaskCommand),
                                        ('leasetask', LeaseTaskCommand),
                                        ('clear', ClearTaskQueueCommand)):
        appcommands.AddCmd(command_name, command_class)
| Python |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool to get an Access Token to access an auth protected Appengine end point.
This tool talks to the appengine end point, and gets an Access Token that is
stored in a file. This token can be used by a tool to do authorized access to
an appengine end point.
"""
from google.apputils import app
import gflags as flags
import httplib2
import oauth2 as oauth
import time
FLAGS = flags.FLAGS
# Command-line flags for the access-token tool:
#   --appengine_host: target application whose OAuth endpoints we use.
#   --access_token_file: optional path where the token is persisted.
flags.DEFINE_string(
    'appengine_host',
    None,
    'Appengine Host for whom we are trying to get an access token')
flags.DEFINE_string(
    'access_token_file',
    None,
    'The file where the access token is stored')
def get_access_token():
    """Obtain an OAuth access token for the configured App Engine host.

    Walks the three-legged OAuth 1.0a flow against App Engine's built-in
    OAuth endpoints (request token -> browser-based user authorization ->
    access token), optionally saves the token to --access_token_file,
    then exercises it against a protected resource as a sanity check.
    """
    if not FLAGS.appengine_host:
        print('must supply the appengine host')
        exit(1)
    # setup
    # App Engine exposes fixed OAuth endpoints under /_ah/.
    server = FLAGS.appengine_host
    request_token_url = server + '/_ah/OAuthGetRequestToken'
    authorization_url = server + '/_ah/OAuthAuthorizeToken'
    access_token_url = server + '/_ah/OAuthGetAccessToken'
    # 'anonymous'/'anonymous' is the consumer used by unregistered apps.
    consumer = oauth.Consumer('anonymous', 'anonymous')
    signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
    # The Http client that will be used to make the requests.
    h = httplib2.Http()
    # get request token
    print '* Obtain a request token ...'
    parameters = {}
    # We dont have a callback server, we're going to use the browser to
    # authorize.
    #TODO: Add check for 401 etc
    # 'oob' = out-of-band: the user pastes the verifier code manually.
    parameters['oauth_callback'] = 'oob'
    oauth_req1 = oauth.Request.from_consumer_and_token(
        consumer, http_url=request_token_url, parameters=parameters)
    oauth_req1.sign_request(signature_method_hmac_sha1, consumer, None)
    print 'Request headers: %s' % str(oauth_req1.to_header())
    response, content = h.request(oauth_req1.to_url(), 'GET')
    token = oauth.Token.from_string(content)
    print 'GOT key: %s secret:%s' % (str(token.key), str(token.secret))
    print '* Authorize the request token ...'
    oauth_req2 = oauth.Request.from_token_and_callback(
        token=token, callback='oob', http_url=authorization_url)
    print 'Please run this URL in a browser and paste the token back here'
    print oauth_req2.to_url()
    # The verifier shown by the authorization page proves user consent.
    verification_code = raw_input('Enter verification code: ').strip()
    token.set_verifier(verification_code)
    # get access token
    print '* Obtain an access token ...'
    oauth_req3 = oauth.Request.from_consumer_and_token(
        consumer, token=token, http_url=access_token_url)
    oauth_req3.sign_request(signature_method_hmac_sha1, consumer, token)
    print 'Request headers: %s' % str(oauth_req3.to_header())
    response, content = h.request(oauth_req3.to_url(), 'GET')
    access_token = oauth.Token.from_string(content)
    print 'Access Token key: %s secret:%s' % (str(access_token.key),
                                              str(access_token.secret))
    # Save the token to a file if its specified.
    if FLAGS.access_token_file:
        fhandle = open(FLAGS.access_token_file, 'w')
        fhandle.write(access_token.to_string())
        fhandle.close()
    # Example : access some protected resources
    print '* Checking the access token against protected resources...'
    # Assumes that the server + "/" is protected.
    test_url = server + "/"
    # NOTE(review): this check signs with the *request* token (`token`)
    # rather than the freshly obtained `access_token` — confirm whether
    # that is intended.
    oauth_req4 = oauth.Request.from_consumer_and_token(consumer,
                                                      token=token,
                                                      http_url=test_url)
    oauth_req4.sign_request(signature_method_hmac_sha1, consumer, token)
    resp, content = h.request(test_url, "GET", headers=oauth_req4.to_header())
    print resp
    print content
def main(argv):
    """Command-line entry point: fetch (and optionally store) a token."""
    get_access_token()


if __name__ == '__main__':
    app.run()
| Python |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to encapsulate task related information and methods on task_puller."""
import base64
import oauth2 as oauth
import os
import subprocess
import tempfile
import time
import urllib2
from apiclient.errors import HttpError
from gtaskqueue.taskqueue_logger import logger
import gflags as flags
FLAGS = flags.FLAGS
# Command-line flags controlling task execution and output delivery.
flags.DEFINE_string(
    'executable_binary',
    '/bin/cat',
    'path of the binary to be executed')
flags.DEFINE_string(
    'output_url',
    '',
    'url to which output is posted. The url must include param name, '
    'value for which is populated with task_id from puller while posting '
    'the data. Format of output url is absolute url which handles the'
    'post request from task queue puller.'
    '(Eg: "http://taskpuller.appspot.com/taskdata?name=").'
    'The Param value is always the task_id. The handler for this post'
    'should be able to associate the task with its id and take'
    'appropriate action. Use the appengine_access_token.py tool to'
    'generate the token and store it in a file before you start.')
flags.DEFINE_string(
    'appengine_access_token_file',
    None,
    'File containing an Appengine Access token, if any. If present this'
    'token is added to the output_url request, so that the output_url can'
    'be an authenticated end-point. Use the appengine_access_token.py tool'
    'to generate the token and store it in a file before you start.')
# NOTE(review): the default is given as the string '3600'; gflags parses
# it to 3600.0 — a float literal would be clearer.
flags.DEFINE_float(
    'task_timeout_secs',
    '3600',
    'timeout to kill the task')
class ClientTaskInitError(Exception):
    """Raised when initialization of a client task fails."""

    def __init__(self, task_id, error_str):
        Exception.__init__(self)
        # Keep the failing task's id and a human-readable reason so the
        # caller can log a meaningful message.
        self.task_id = task_id
        self.error_str = error_str

    def __str__(self):
        message = ('Error initializing task "%s". Error details "%s". '
                   % (self.task_id, self.error_str))
        return message
class ClientTask(object):
"""Class to encapsulate task information pulled by taskqueue_puller module.
This class is responsible for creating an independent client task object by
taking some information from lease response task object. It encapsulates
methods responsible for spawning an independent subprocess for executing
the task, tracking the status of the task and also deleting the task from
taskqeueue when completed. It also has the functionality to give the output
back to the application by posting to the specified url.
"""
def __init__(self, task):
self._task = task
self._process = None
self._output_file = None
# Class method that caches the Appengine Access Token if any
@classmethod
def get_access_token(cls):
if not FLAGS.appengine_access_token_file:
return None
if not _access_token:
fhandle = open(FLAGS.appengine_access_token_file, 'rb')
_access_token = oauth.Token.from_string(fhandle.read())
fhandle.close()
return _access_token
def init(self):
"""Extracts information from task object and intializes processing.
Extracts id and payload from task object, decodes the payload and puts
it in input file. After this, it spawns a subprocess to execute the
task.
Returns:
True if everything till task execution starts fine.
False if anything goes wrong in initialization of task execution.
"""
try:
self.task_id = self._task.get('id')
self._payload = self._decode_base64_payload(
self._task.get('payloadBase64'))
self._payload_file = self._dump_payload_to_file()
self._start_task_execution()
return True
except ClientTaskInitError, ctie:
logger.error(str(ctie))
return False
def _decode_base64_payload(self, encoded_str):
"""Method to decode payload encoded in base64."""
try:
# If the payload is empty, do not try to decode it. Payload usually
# not expected to be empty and hence log a warning and then
# continue.
if encoded_str:
decoded_str = base64.urlsafe_b64decode(
encoded_str.encode('utf-8'))
return decoded_str
else:
logger.warn('Empty paylaod for task %s' % self.task_id)
return ''
except base64.binascii.Error, berror:
logger.error('Error decoding payload for task %s. Error details %s'
% (self.task_id, str(berror)))
raise ClientTaskInitError(self.task_id, 'Error decoding payload')
# Generic catch block to avoid crashing of puller due to some bad
# encoding issue wih payload of any task.
except:
raise ClientTaskInitError(self.task_id, 'Error decoding payload')
def _dump_payload_to_file(self):
"""Method to write input extracted from payload to a temporary file."""
try:
(fd, fname) = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
f.write(self._payload)
f.close()
return fname
except OSError:
logger.error('Error dumping payload %s. Error details %s' %
(self.task_id, str(OSError)))
raise ClientTaskInitError(self.task_id, 'Error dumping payload')
def _get_input_file(self):
return self._payload_file
def _post_output(self):
"""Posts the outback back to specified url in the form of a byte
array.
It reads the output generated by the task as a byte-array. It posts the
response to specified url appended with the taskId. The application
using the taskqueue must have a handler to handle the data being posted
from puller. Format of body of response object is byte-array to make
the it genric for any kind of output generated.
Returns:
True/False based on post status.
"""
if FLAGS.output_url:
try:
f = open(self._get_output_file(), 'rb')
body = f.read()
f.close()
url = FLAGS.output_url + self.task_id
logger.debug('Posting data to url %s' % url)
headers = {'Content-Type': 'byte-array'}
# Add an access token to the headers if specified.
# This enables the output_url to be authenticated and not open.
access_token = ClientTask.get_access_token()
if access_token:
consumer = oauth.Consumer('anonymous', 'anonymous')
oauth_req = oauth.Request.from_consumer_and_token(
consumer,
token=access_token,
http_url=url)
headers.update(oauth_req.to_header())
# TODO: Use httplib instead of urllib for consistency.
req = urllib2.Request(url, body, headers)
urllib2.urlopen(req)
except ValueError:
logger.error('Error posting data back %s. Error details %s'
% (self.task_id, str(ValueError)))
return False
except Exception:
logger.error('Exception while posting data back %s. Error'
'details %s' % (self.task_id, str(Exception)))
return False
return True
def _get_output_file(self):
"""Returns the output file if it exists, else creates it and returns
it."""
if not self._output_file:
(_, self._output_file) = tempfile.mkstemp()
return self._output_file
def get_task_id(self):
return self.task_id
def _start_task_execution(self):
"""Method to spawn subprocess to execute the tasks.
This method splits the commands/executable_binary to desired arguments
format for Popen API. It appends input and output files to the
arguments. It is assumed that commands/executable_binary expects input
and output files as first and second positional parameters
respectively.
"""
# TODO: Add code to handle the cleanly shutdown when a process is killed
# by Ctrl+C.
try:
cmdline = FLAGS.executable_binary.split(' ')
cmdline.append(self._get_input_file())
cmdline.append(self._get_output_file())
self._process = subprocess.Popen(cmdline)
self.task_start_time = time.time()
except OSError:
logger.error('Error creating subprocess %s. Error details %s'
% (self.task_id, str(OSError)))
self._cleanup()
raise ClientTaskInitError(self.task_id,
'Error creating subprocess')
except ValueError:
logger.error('Invalid arguments while executing task ',
self.task_id)
self._cleanup()
raise ClientTaskInitError(self.task_id,
'Invalid arguments while executing task')
def is_completed(self, task_api):
"""Method to check if task has finished executing.
This is responsible for checking status of task execution. If the task
has already finished executing, it deletes the task from the task
queue. If the task has been running since long time then it assumes
that there is high proabbility that it is dfunct and hence kills the
corresponding subprocess. In this case, task had not completed
successfully and hence we do not delete it form the taskqueue. In above
two cases, task completion status is returned as true since there is
nothing more to run in the task. In all other cases, task is still
running and hence we return false as completion status.
Args:
task_api: handle for taskqueue api collection.
Returns:
Task completion status (True/False)
"""
status = False
try:
task_status = self._process.poll()
if task_status == 0:
status = True
if self._post_output():
self._delete_task_from_queue(task_api)
self._cleanup()
elif self._has_timedout():
status = True
self._kill_subprocess()
except OSError:
logger.error('Error during polling status of task %s, Error '
'details %s' % (self.task_id, str(OSError)))
return status
def _cleanup(self):
"""Cleans up temporary input/output files used in task execution."""
try:
if os.path.exists(self._get_input_file()):
os.remove(self._get_input_file())
if os.path.exists(self._get_output_file()):
os.remove(self._get_output_file())
except OSError:
logger.error('Error during file cleanup for task %s. Error'
'details %s' % (self.task_id, str(OSError)))
def _delete_task_from_queue(self, task_api):
"""Method to delete the task from the taskqueue.
First, it tries to post the output back to speified url. On successful
post, the task is deleted from taskqueue since the task has produced
expected output. If the post was unsuccessful, the task is not deleted
form the tskqueue since the expected output has yet not reached the
application. In either case cleanup is performed on the task.
Args:
task_api: handle for taskqueue api collection.
Returns:
Delete status (True/False)
"""
try:
delete_request = task_api.tasks().delete(
project=FLAGS.project_name,
taskqueue=FLAGS.taskqueue_name,
task=self.task_id)
delete_request.execute()
except HttpError, http_error:
logger.error('Error deleting task %s from taskqueue.'
'Error details %s'
% (self.task_id, str(http_error)))
def _has_timedout(self):
"""Checks if task has been running since long and has timedout."""
if (time.time() - self.task_start_time) > FLAGS.task_timeout_secs:
return True
else:
return False
def _kill_subprocess(self):
"""Kills the process after cleaning up the task."""
self._cleanup()
try:
self._process.kill()
logger.info('Trying to kill task %s, since it has been running '
'for long' % self.task_id)
except OSError:
logger.error('Error killing task %s. Error details %s'
% (self.task_id, str(OSError)))
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for interacting with Google TaskQueue."""
__version__ = '0.0.1'
import os
import sys
import urlparse
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.anyjson import simplejson as json
import httplib2
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from google.apputils import app
from google.apputils import appcommands
import gflags as flags
FLAGS = flags.FLAGS
# Command-line flags shared by every TaskQueue subcommand: API endpoint
# selection, project, developer-key handling, request dumping and the
# credentials cache location.
flags.DEFINE_string(
    'service_version',
    'v1beta1',
    'Google taskqueue api version.')
flags.DEFINE_string(
    'api_host',
    'https://www.googleapis.com/',
    'API host name')
flags.DEFINE_string(
    'project_name',
    'default',
    'The name of the Taskqueue API project.')
flags.DEFINE_bool(
    'use_developer_key',
    False,
    'User wants to use the developer key while accessing taskqueue apis')
flags.DEFINE_string(
    'developer_key_file',
    '~/.taskqueue.apikey',
    'Developer key provisioned from api console')
flags.DEFINE_bool(
    'dump_request',
    False,
    'Prints the outgoing HTTP request along with headers and body.')
flags.DEFINE_string(
    'credentials_file',
    'taskqueue.dat',
    'File where you want to store the auth credentails for later user')
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the Identity tab on
# the Google APIs Console <http://code.google.com/apis/console>
# NOTE(review): for installed applications the client secret is not
# treated as confidential; these are the sample's console credentials.
FLOW = OAuth2WebServerFlow(
    client_id='157776985798.apps.googleusercontent.com',
    client_secret='tlpVCmaS6yLjxnnPu0ARIhNw',
    scope='https://www.googleapis.com/auth/taskqueue',
    user_agent='taskqueue-cmdline-sample/1.0')
class GoogleTaskQueueCommandBase(appcommands.Cmd):
    """Base class for all the Google TaskQueue client commands."""

    # Resource-path prefix for the "default" project.
    DEFAULT_PROJECT_PATH = 'projects/default'

    def __init__(self, name, flag_values):
        super(GoogleTaskQueueCommandBase, self).__init__(name, flag_values)

    def _dump_request_wrapper(self, http):
        """Dumps the outgoing HTTP request if requested.

        Also appends the developer API key to non-discovery request URLs
        when --use_developer_key is set.

        Args:
            http: An instance of httplib2.Http or something that acts like it.

        Returns:
            httplib2.Http like object.
        """
        request_orig = http.request

        def new_request(uri, method='GET', body=None, headers=None,
                        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                        connection_type=None):
            """Overrides the http.request method to add some utilities."""
            # Discovery requests do not take the developer key; every
            # other API call gets ?key=<developer_key> appended.
            if (FLAGS.api_host + "discovery/" not in uri and
                FLAGS.use_developer_key):
                developer_key_path = os.path.expanduser(
                    FLAGS.developer_key_file)
                if not os.path.isfile(developer_key_path):
                    print 'Please generate developer key from the Google APIs' \
                        'Console and store it in %s' % (FLAGS.developer_key_file)
                    sys.exit()
                developer_key_file = open(developer_key_path, 'r')
                try:
                    developer_key = developer_key_file.read().strip()
                except IOError, io_error:
                    print 'Error loading developer key from file %s' % (
                        FLAGS.developer_key_file)
                    print 'Error details: %s' % str(io_error)
                    sys.exit()
                finally:
                    developer_key_file.close()
                # Splice key=... into the URI's query string, preserving
                # any existing query parameters.
                s = urlparse.urlparse(uri)
                query = 'key=' + developer_key
                if s.query:
                    query = s.query + '&key=' + developer_key
                d = urlparse.ParseResult(s.scheme,
                                         s.netloc,
                                         s.path,
                                         s.params,
                                         query,
                                         s.fragment)
                uri = urlparse.urlunparse(d)
            # Optional debugging dump of the full outgoing request.
            if FLAGS.dump_request:
                print '--request-start--'
                print '%s %s' % (method, uri)
                if headers:
                    for (h, v) in headers.iteritems():
                        print '%s: %s' % (h, v)
                print ''
                if body:
                    print json.dumps(json.loads(body), sort_keys=True, indent=2)
                print '--request-end--'
            return request_orig(uri,
                                method,
                                body,
                                headers,
                                redirections,
                                connection_type)
        http.request = new_request
        return http

    def Run(self, argv):
        """Run the command, printing the result.

        Authenticates via OAuth2 (running the flow when no valid stored
        credentials exist), builds the TaskQueue API client from the
        discovery service, invokes the concrete subcommand and prints
        its result.

        Args:
            argv: The non-flag arguments to the command.
        """
        if not FLAGS.project_name:
            raise app.UsageError('You must specify a project name'
                                 ' using the "--project_name" flag.')
        discovery_uri = (
            FLAGS.api_host + 'discovery/v1/apis/{api}/{apiVersion}/rest')
        try:
            # If the Credentials don't exist or are invalid run through the
            # native client flow. The Storage object will ensure that if
            # successful the good Credentials will get written back to a file.
            # Setting FLAGS.auth_local_webserver to false since we can run our
            # tool on Virtual Machines and we do not want to run the webserver
            # on VMs.
            FLAGS.auth_local_webserver = False
            storage = Storage(FLAGS.credentials_file)
            credentials = storage.get()
            if credentials is None or credentials.invalid == True:
                credentials = run(FLOW, storage)
            http = credentials.authorize(self._dump_request_wrapper(
                httplib2.Http()))
            api = build('taskqueue',
                        FLAGS.service_version,
                        http=http,
                        discoveryServiceUrl=discovery_uri)
            result = self.run_with_api_and_flags_and_args(api, FLAGS, argv)
            self.print_result(result)
        except HttpError, http_error:
            print 'Error Processing request: %s' % str(http_error)

    def run_with_api_and_flags_and_args(self, api, flag_values, unused_argv):
        """Run the command given the API, flags, and args.

        The default implementation of this method discards the args and
        calls into run_with_api_and_flags.

        Args:
            api: The handle to the Google TaskQueue API.
            flag_values: The parsed command flags.
            unused_argv: The non-flag arguments to the command.

        Returns:
            The result of running the command
        """
        return self.run_with_api_and_flags(api, flag_values)

    def print_result(self, result):
        """Pretty-print the result of the command.

        The default behavior is to dump a formatted JSON encoding
        of the result.

        Args:
            result: The JSON-serializable result to print.
        """
        # We could have used the pprint module, but it produces
        # noisy output due to all of our keys and values being
        # unicode strings rather than simply ascii.
        print json.dumps(result, sort_keys=True, indent=2)
class GoogleTaskQueueCommand(GoogleTaskQueueCommandBase):
    """Base command for working with the taskqueues collection."""

    def __init__(self, name, flag_values):
        super(GoogleTaskQueueCommand, self).__init__(name, flag_values)
        # Every taskqueue-level command addresses a specific queue.
        flags.DEFINE_string('taskqueue_name',
                            'myqueue',
                            'TaskQueue name',
                            flag_values=flag_values)

    def run_with_api_and_flags(self, api, flag_values):
        """Build and execute this subcommand's taskqueues request.

        Args:
            api: The handle to the Google TaskQueue API.
            flag_values: The parsed command flags.

        Returns:
            The result of running the command.
        """
        request = self.build_request(api.taskqueues(), flag_values)
        return request.execute()
class GoogleTaskCommand(GoogleTaskQueueCommandBase):
    """Base command for working with the tasks collection."""

    def __init__(self, name, flag_values, need_task_flag=True):
        """Registers the flags shared by all task subcommands.

        Args:
            name: The command name.
            flag_values: The FlagValues object to register flags against.
            need_task_flag: Whether this command addresses a single task
                and therefore needs the --task_name flag.
        """
        super(GoogleTaskCommand, self).__init__(name, flag_values)
        # Common flags that are shared by all the Task commands.
        flags.DEFINE_string('taskqueue_name',
                            'myqueue',
                            'TaskQueue name',
                            flag_values=flag_values)
        # Not all task commands need the task_name flag.
        if need_task_flag:
            flags.DEFINE_string('task_name',
                                None,
                                'Task name',
                                flag_values=flag_values)

    def run_with_api_and_flags(self, api, flag_values):
        """Run the command, returning the result.

        Args:
            api: The handle to the Google TaskQueue API.
            flag_values: The parsed command flags.

        Returns:
            The result of running the command.
        """
        task_request = self.build_request(api.tasks(), flag_values)
        return task_request.execute()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to connect to TaskQueue API."""
import os
import sys
import urlparse
from apiclient.anyjson import simplejson as json
from apiclient.discovery import build
from apiclient.errors import HttpError
import httplib2
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from gtaskqueue.taskqueue_logger import logger
from google.apputils import app
import gflags as flags
# Command-line flags that control how the client talks to the TaskQueue API.
FLAGS = flags.FLAGS
flags.DEFINE_string(
    'service_version',
    'v1beta1',
    'Google taskqueue api version.')
flags.DEFINE_string(
    'api_host',
    'https://www.googleapis.com/',
    'API host name')
flags.DEFINE_bool(
    'use_developer_key',
    False,
    'User wants to use the developer key while accessing taskqueue apis')
flags.DEFINE_string(
    'developer_key_file',
    '~/.taskqueue.apikey',
    'Developer key provisioned from api console')
flags.DEFINE_bool(
    'dump_request',
    False,
    'Prints the outgoing HTTP request along with headers and body.')
# NOTE(review): the help string below has typos ('credentails', 'user'), but
# help text is user-visible runtime behavior, so it is left unchanged here.
flags.DEFINE_string(
    'credentials_file',
    'taskqueue.dat',
    'File where you want to store the auth credentails for later user')

# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id and client_secret are copied from the Identity tab on
# the Google APIs Console <http://code.google.com/apis/console>.
# (These sample credentials are intentionally public.)
FLOW = OAuth2WebServerFlow(
    client_id='157776985798.apps.googleusercontent.com',
    client_secret='tlpVCmaS6yLjxnnPu0ARIhNw',
    scope='https://www.googleapis.com/auth/taskqueue',
    user_agent='taskqueue-cmdline-sample/1.0')
class TaskQueueClient:
"""Class to setup connection with taskqueue API."""
def __init__(self):
if not FLAGS.project_name:
raise app.UsageError('You must specify a project name'
' using the "--project_name" flag.')
discovery_uri = (
FLAGS.api_host + 'discovery/v1/apis/{api}/{apiVersion}/rest')
logger.info(discovery_uri)
try:
# If the Credentials don't exist or are invalid run through the
# native clien flow. The Storage object will ensure that if
# successful the good Credentials will get written back to a file.
# Setting FLAGS.auth_local_webserver to false since we can run our
# tool on Virtual Machines and we do not want to run the webserver
# on VMs.
FLAGS.auth_local_webserver = False
storage = Storage(FLAGS.credentials_file)
credentials = storage.get()
if credentials is None or credentials.invalid == True:
credentials = run(FLOW, storage)
http = credentials.authorize(self._dump_request_wrapper(
httplib2.Http()))
self.task_api = build('taskqueue',
FLAGS.service_version,
http=http,
discoveryServiceUrl=discovery_uri)
except HttpError, http_error:
logger.error('Error gettin task_api: %s' % http_error)
def get_taskapi(self):
"""Returns handler for tasks API from taskqueue API collection."""
return self.task_api
def _dump_request_wrapper(self, http):
"""Dumps the outgoing HTTP request if requested.
Args:
http: An instance of httplib2.Http or something that acts like it.
Returns:
httplib2.Http like object.
"""
request_orig = http.request
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Overrides the http.request method to add some utilities."""
if (FLAGS.api_host + "discovery/" not in uri and
FLAGS.use_developer_key):
developer_key_path = os.path.expanduser(
FLAGS.developer_key_file)
if not os.path.isfile(developer_key_path):
print 'Please generate developer key from the Google API' \
'Console and store it in %s' % (FLAGS.developer_key_file)
sys.exit()
developer_key_file = open(developer_key_path, 'r')
try:
developer_key = developer_key_file.read().strip()
except IOError, io_error:
print 'Error loading developer key from file %s' % (
FLAGS.developer_key_file)
print 'Error details: %s' % str(io_error)
sys.exit()
finally:
developer_key_file.close()
s = urlparse.urlparse(uri)
query = 'key=' + developer_key
if s.query:
query = s.query + '&key=' + developer_key
d = urlparse.ParseResult(s.scheme,
s.netloc,
s.path,
s.params,
query,
s.fragment)
uri = urlparse.urlunparse(d)
if FLAGS.dump_request:
print '--request-start--'
print '%s %s' % (method, uri)
if headers:
for (h, v) in headers.iteritems():
print '%s: %s' % (h, v)
print ''
if body:
print json.dumps(json.loads(body),
sort_keys=True,
indent=2)
print '--request-end--'
return request_orig(uri,
method,
body,
headers,
redirections,
connection_type)
http.request = new_request
return http
def print_result(self, result):
"""Pretty-print the result of the command.
The default behavior is to dump a formatted JSON encoding
of the result.
Args:
result: The JSON-serializable result to print.
"""
# We could have used the pprint module, but it produces
# noisy output due to all of our keys and values being
# unicode strings rather than simply ascii.
print json.dumps(result, sort_keys=True, indent=2)
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for the Google TaskQueue API command-line tool."""
__version__ = '1.0.2'
import sys
try:
from setuptools import setup
print 'Loaded setuptools'
except ImportError:
from distutils.core import setup
print 'Loaded distutils.core'
PACKAGE_NAME = 'google-taskqueue-client'
INSTALL_REQUIRES = ['google-apputils==0.1',
'google-api-python-client',
'httplib2',
'oauth2',
'python-gflags']
setup(name=PACKAGE_NAME,
version=__version__,
description='Google TaskQueue API command-line tool and utils',
author='Google Inc.',
author_email='google-appengine@googlegroups.com',
url='http://code.google.com/appengine/docs/python/taskqueue/pull/overview.html',
install_requires=INSTALL_REQUIRES,
packages=['gtaskqueue'],
scripts=['gtaskqueue/gtaskqueue', 'gtaskqueue/gtaskqueue_puller',
'gtaskqueue/gen_appengine_access_token'],
license='Apache 2.0',
keywords='google taskqueue api client',
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Topic :: Internet :: WWW/HTTP'])
| Python |
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Pin Django 1.2 for template rendering on the App Engine Python runtime
# (must run before webapp.template is imported).
from google.appengine.dist import use_library
use_library('django', '1.2')

from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from apiclient.discovery import build
import httplib2
from oauth2client.appengine import OAuth2Decorator
import settings

# Decorator that drives the OAuth 2.0 flow for request handlers, using the
# client credentials and scope from settings.py.
decorator = OAuth2Decorator(client_id=settings.CLIENT_ID,
                            client_secret=settings.CLIENT_SECRET,
                            scope=settings.SCOPE,
                            user_agent='mytasks')
class MainHandler(webapp.RequestHandler):
    """Renders the user's default task list, or an authorization link when
    credentials are not yet available."""

    @decorator.oauth_aware
    def get(self):
        if not decorator.has_credentials():
            # Not authorized yet: render the page with an authorization URL.
            self.response.out.write(template.render(
                'templates/index.html',
                {'tasks': [], 'authorize_url': decorator.authorize_url()}))
            return
        service = build('tasks', 'v1', http=decorator.http())
        listing = service.tasks().list(tasklist='@default').execute()
        items = listing.get('items', [])
        # Add a truncated title (26 chars max) for display purposes.
        for item in items:
            item['title_short'] = truncate(item['title'], 26)
        self.response.out.write(template.render(
            'templates/index.html', {'tasks': items}))
def truncate(s, l):
    """Return s if it has at most l characters; otherwise its first l
    characters followed by '...'."""
    if len(s) > l:
        return s[:l] + '...'
    return s
# WSGI application mapping the root URL to MainHandler.
application = webapp.WSGIApplication([('/', MainHandler)], debug=True)

def main():
    # Entry point for the App Engine CGI/WSGI runtime.
    run_wsgi_app(application)
| Python |
# Set up the system so that this development
# version of google-api-python-client is run, even if
# an older version is installed on the system.
#
# To make this totally automatic add the following to
# your ~/.bash_profile:
#
# export PYTHONPATH=/path/to/where/you/checked/out/apiclient
import sys
import os

# Put this directory first on sys.path so the bundled development copy of
# the client library wins over any system-wide installation.
sys.path.insert(0, os.path.dirname(__file__))
| Python |
# -*- coding: utf-8 -*-
from micolog_plugin import *
from model import *
import re,datetime
from google.appengine.api import urlfetch
from xml.dom import minidom
# Browser-like User-Agent so feed hosts do not reject our fetches.
HEADERS = { 'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 6.0; zh-CN; rv:1.9.1.9) Gecko/20100315 Firefox/3.5.9'}

class FeedList(db.Model):
    """Datastore record describing one RSS feed to harvest (v0.3 schema)."""
    # Author name stamped on imported entries.
    author = db.StringProperty(multiline=False,default='robot')
    # Display / lookup name of this feed.
    name = db.StringProperty(multiline=False,default='Fred')
    # URL of the feed to fetch.
    feedurl = db.StringProperty(multiline=False,default='http://hi.baidu.com/429263181/rss')
    # Link of the newest entry already imported ('last' means none yet).
    latest = db.StringProperty(multiline=False,default='last')
    # Content-filter options: '0' strip <a>, '1' strip <img>, '2' apply repReg.
    abconf = db.StringListProperty()
    # 'pattern:replacement' pairs, comma separated.
    repReg = db.StringProperty(multiline=False)
    # Auto-build a slug via Google Translate.
    autoUrl = db.BooleanProperty(default=False)
    # Mirror the feed item's category locally.
    autoCategory = db.BooleanProperty(default=False)
    # strptime pattern used to sync entry dates from the feed.
    autoTimeFormat = db.StringProperty(multiline=False,default=None)
    allow_comment = db.BooleanProperty(default=True) #allow comment
class pick_rss(Plugin):
    """Micolog plugin that harvests RSS 2.0 feeds into blog entries (v0.3)."""

    def __init__(self):
        Plugin.__init__(self,__file__)
        self.author="Fred"
        self.authoruri="mailto:ad@fengsage.cn"
        self.uri="http://fengsageblog.appspot.com"
        self.description="RSS采集"
        self.name="PickRss"
        self.version="0.3"
        # Map the 'pick' URL to the harvesting entry point.
        self.register_urlmap('pick',self.getFeed)

    # List configured feeds, or delete one when 'delid' is given.
    def get(self,page):
        if page.param("delid")=='':
            listit = FeedList()
            querys = listit.all()
            return self.render_content("pick_rss.html",{'list':querys})
        else:
            # Delete the feed whose name matches 'delid'.
            listit = FeedList()
            querys=listit.all().filter('name =', page.param("delid")).fetch(1)
            for query in querys:
                query.delete()
            return u'删除成功! <a href="?">返回</a>'

    # Create a new feed record from the submitted form.
    def post(self,page):
        query = FeedList()
        query.name =page.param("name")
        query.feedurl = page.param("feedurl")
        query.abconf = page.request.get_all('abconf')
        query.author = page.param("author")
        if page.param("allowComment") == 'on':
            query.allow_comment = True
        else:
            query.allow_comment = False
        if page.param("autoUrl") == 'on':
            query.autoUrl = True
        else:
            query.autoUrl = False
        if page.param("autoCategory") == 'on':
            query.autoCategory = True
        else:
            query.autoCategory = False
        if page.param("repReg"):
            query.repReg = page.param("repReg")
        if page.param("autoTime") == 'on':
            # Combined strptime pattern, e.g. '%Y-%m-%d %H:%M'.
            query.autoTimeFormat = page.param("spDateMod") + " " + page.param("spTimeMod")
        query.put()
        return self.get(page)

    def getFeed(self,page=None,*arg1,**arg2):
        """Fetch every configured feed and import its new items as entries."""
        listit = FeedList()
        querys = listit.all()
        # Walk every stored feed record.
        for detal in querys:
            url=str(detal.feedurl)
            # Fetch the feed pretending to be a regular browser.
            result = urlfetch.fetch(url,None,urlfetch.GET,HEADERS)
            if result.status_code == 200:
                rss_source = result.content
                # Re-encode gb2312/GBK feeds as UTF-8 before XML parsing.
                if 'encoding="gb2312"' in rss_source:
                    rss_source = result.content.decode('gb2312').encode('UTF-8')
                    rss_source = rss_source.replace('encoding="gb2312"','encoding="utf-8"')
                if 'encoding="GBK"' in rss_source:
                    rss_source = result.content.decode('GBK').encode('UTF-8')
                    rss_source = rss_source.replace('encoding="GBK"','encoding="utf-8"')
                # Parse the XML and determine the RSS version.
                file_xml = minidom.parseString(rss_source)
                rssNOs=file_xml.getElementsByTagName('rss')
                rssver='2.0'
                for rssNO in rssNOs:
                    rssver=rssNO.getAttribute('version')
                if rssver=='1.0':
                    # RSS 1.0 is not supported by this version of the plugin.
                    return u'暂不支持RSS1.0 <a href="?">返回</a>'
                else:
                    # Tag names used by RSS 2.0 documents.
                    artList='item'
                    artTitle='title'
                    artLink='link'
                    artText='description'
                    artTime='pubDate'
                    artCategory = 'category'
                    # All article nodes of the feed.
                    items = file_xml.getElementsByTagName(artList)
                    flag=''
                    latestId=detal.latest
                    ifFirst=0
                    # Items are imported in document order, which is the
                    # reverse of chronological order; enabling time sync
                    # (autoTimeFormat) keeps local entry dates consistent
                    # with the remote feed.
                    for item in items:
                        entry=Entry()
                        entry.author_name = detal.author
                        entry.title=item.getElementsByTagName(artTitle)[0].firstChild.data
                        # NOTE(review): raises IndexError when an item has no
                        # <category> element -- TODO confirm all target feeds
                        # always provide one.
                        rss_category = item.getElementsByTagName(artCategory)[0].firstChild.data
                        # Mirror the feed category, creating it locally if
                        # it does not exist yet.
                        if detal.autoCategory:
                            newcates=[]
                            c = Category.all().filter('name =',rss_category)
                            if c and c.count()>0:
                                newcates.append(c[0].key())
                            else:
                                cat= Category(name=rss_category,slug=autoSlug(rss_category))
                                cat.put()
                                newcates.append(cat.key())
                            entry.categorie_keys=newcates;
                        # Build the slug automatically (via Google Translate).
                        if detal.autoUrl:
                            entry.slug = autoSlug(entry.title)
                        # Sync the entry date from the feed's pubDate.
                        if detal.autoTimeFormat:
                            format = detal.autoTimeFormat
                            article_date = item.getElementsByTagName(artTime)[0].firstChild.data
                            entry.date = formatDate(article_date, format)
                        # Comments on/off per feed configuration.
                        if detal.allow_comment:
                            entry.allow_comment = True
                        else:
                            entry.allow_comment = False
                        if rssver=='1.0':
                            flag=item.getElementsByTagName(artLink)[0].getAttribute('href')
                        else:
                            flag=item.getElementsByTagName(artLink)[0].firstChild.data
                        # Remember the newest imported link so the next run
                        # stops before re-importing old items.
                        if latestId=='last':
                            detal.latest=flag
                            latestId=flag
                            ifFirst=1
                            db.put(detal)
                        else:
                            if flag==latestId:
                                break
                            else:
                                if ifFirst==0:
                                    detal.latest=flag
                                    db.put(detal)
                                    ifFirst=1
                                db.put(detal)
                        artContent=item.getElementsByTagName(artText)[0].firstChild.data
                        # Apply the configured content filters.
                        for abc in detal.abconf:
                            # '0': strip all <a> tags.
                            if abc=='0' :
                                artContent = re.sub("<a([^>]*?)>","",artContent)
                                artContent = re.sub("</a>","",artContent)
                            # '1': strip all <img> tags.
                            if abc=='1' :
                                artContent = re.sub("<img(.*?>)","",artContent)
                            # '2': apply the user's pattern:replacement rules.
                            if abc=='2':
                                if detal.repReg:
                                    for txt in detal.repReg.split(','):
                                        rep = txt.split(':')
                                        artContent = re.sub(rep[0],rep[1],artContent)
                        # Convert newlines to <br /> for HTML display.
                        artContent=artContent.replace('\n', "<br />")
                        entry.content=artContent
                        entry.save(True)
def formatDate(date_str, date_format):
    """Parse date_str into a datetime using the strptime pattern date_format.

    Parameters were renamed from 'str'/'format', which shadowed builtins;
    all call sites in this module pass them positionally, so the rename is
    backward compatible.

    Args:
        date_str: The date/time string to parse, e.g. '2010-05-15 14:14'.
        date_format: A strptime() pattern, e.g. '%Y-%m-%d %H:%M'.

    Returns:
        The corresponding datetime.datetime.

    Raises:
        ValueError: If date_str does not match date_format.
    """
    return datetime.datetime.strptime(date_str, date_format)
def autoSlug(str):
    # Build an English slug by machine-translating the (Chinese) title via
    # the unofficial Google Translate endpoint.
    # NOTE(review): the parameter shadows the builtin 'str'; also 'logging'
    # is not imported in this module directly -- presumably it arrives via
    # the star imports above, otherwise logging.info below raises NameError.
    # Verify.
    translate_url = 'http://translate.google.com/translate_a/t?client=t&text=%s&hl=en&sl=zh-CN&tl=en&pc=0'%str.replace(' ','-').encode("utf-8")
    translate_result = urlfetch.fetch(translate_url,None,urlfetch.GET,HEADERS)
    if translate_result.status_code == 200:
        # Extract the translated text from the JSON-ish response.
        trans_reg = u'"trans":"([^"]*)"'
        translate_content = re.findall(trans_reg, translate_result.content)[0]
        translate_content = translate_content \
            .replace(' ','-') \
            .replace('\\','-')
        # Collapse anything that is not alphanumeric/underscore into '-'.
        translate_content = re.sub(u'[^a-zA-Z\d_]','-', translate_content)
        logging.info("*********"+translate_content)
        return translate_content
    # Implicitly returns None on a non-200 response.
if __name__=="__main__":
starttime = '2010-05-15 14:14'
format="%Y-%m-%d %H:%M";
# print datetime.datetime.strptime(starttime,format)
# print autoSlugTest('Django Admin Inreface's Note (admin.py)')
| Python |
from pick_rss import * | Python |
# -*- coding: utf-8 -*-
from micolog_plugin import *
from model import *
import re,datetime,logging
from google.appengine.api import urlfetch
# Regexes for scraping hi.baidu.com blog pages (HTML structure as of 2010).
# Captures the blog user name from a space URL.
REG_BAIDU_URL = u'http://hi.baidu.com/([^/]*)'
# Pagination links on the blog index.
REG_BLOG_PAGE = u'<a href=([/,"]?/[^>]*/blog/index/\d*["]?)>'
# Links to individual post pages.
REG_BLOG_TITLE = u'<div class="tit"><a href="(/[^>]*/blog/item/[^>]*.html)" target="_blank">'
# Post title text.
REG_ARTICLE_TITLE = u'<div class="tit">([^>,/,<]*)</div>'
# Post body markup.
REG_ARTICLE_CONTENT = u'<td><div id="blog_text" class="[^>]*" [\s\S]*?</div></td>'
# Post category link text.
REG_ARTICLE_CATAGORY = u'<a href="/[^>]*/blog/category/[^>]*>([^>]*)</a>'
# Post date text.
REG_ARTICLE_DATETIME = u'<div class="date">([^>,/,<]*)</div>'
class baidu_move(Plugin):
    """Micolog plugin that migrates all posts from a hi.baidu.com blog."""

    def __init__(self):
        Plugin.__init__(self,__file__)
        self.author= u"Fred"
        self.authoruri="mailto:me@fengsage.com"
        self.uri="http://fengsageblog.appspot.com"
        self.description= u"百度搬家工具"
        self.name="BaiduMove"
        self.version="0.1"

    def get(self,page):
        # Show the migration form.
        return self.render_content("baidu_move.html",{'msg':''})

    def post(self,page):
        """Crawl the submitted Baidu blog URL and import every post."""
        baidu_url = page.param("url").strip()
        if re.findall(REG_BAIDU_URL, baidu_url):
            baidu_name = re.findall(REG_BAIDU_URL, baidu_url)[0]
            baidu_url = 'http://hi.baidu.com/%s'%baidu_name
            html = self.readPage('%s/blog'%baidu_url)
            blog_page_url = re.findall(REG_BLOG_PAGE,html)
            if blog_page_url==[]:
                return self.render_content("baidu_move.html",{'msg':'未找到该空间或该文章数少于1页'})
            min_page = int(blog_page_url[0].replace('"','').replace('/%s/blog/index/'%baidu_name,'')) # the first page
            max_page = int(blog_page_url[-1].replace('"','').replace('/%s/blog/index/'%baidu_name,'')) # the last page
            # The last pagination link may be a 'next' link rather than the
            # final page; fall back to the second-to-last in that case.
            if min_page == max_page-1:
                max_page = int(blog_page_url[-2].replace('"','').replace('/%s/blog/index/'%baidu_name,'')) # the last page
            logging.info('Blog %s total page %s '%(baidu_name,max_page+1) )
            for page_index in range(0,max_page+1):
                page_url = '%s/blog/index/'%baidu_url+str(page_index)
                logging.info('*****Page %d analysis %s...'%(page_index+1,page_url))
                page_content = self.readPage(page_url)
                article_url = re.findall(REG_BLOG_TITLE, page_content)
                # NOTE(review): 'article_url' (the list) is rebound to a
                # string inside the loop; iteration still covers the
                # original list object, but the naming is confusing.
                for article in article_url:
                    article_url = 'http://hi.baidu.com/%s'%article[1:]
                    # Import one article.
                    article = self.articleBody(article_url)
                    entry=Entry()
                    entry.title = article.get('article_title')
                    entry.author_name = article.get('article_author')
                    newcates=[]
                    c = Category.all().filter('name =',article.get('article_category'))
                    # Reuse the category if it already exists, otherwise
                    # create it.
                    if c and c.count()>0:
                        newcates.append(c[0].key())
                    else:
                        cat= Category(name=article.get('article_category'))
                        cat.put()
                        newcates.append(cat.key())
                    entry.categorie_keys = newcates;
                    entry.content = article.get('article_content')
                    entry.save(True)
            return self.render_content("baidu_move.html",{'msg':u'搬家完毕!'})
        else:
            return self.render_content("baidu_move.html",{'msg':''})

    def formatDate(self,str,format):
        # NOTE(review): parameter names shadow the builtins str/format; no
        # caller is visible in this file.
        return datetime.datetime.strptime(str,format)

    def readPage(self,url):
        '''
        Page reader.

        Fetches url with browser-like headers and returns the content
        decoded from GBK (hi.baidu.com's encoding). Returns 0 if the fetch
        raises, or None (implicitly) on a non-200 status -- callers must
        treat any falsy result as failure.
        '''
        logging.info('Read page:%s'%url)
        header = { 'Host':'hi.baidu.com',\
            'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 6.0; zh-CN; rv:1.9.1.9) Gecko/20100315 Firefox/3.5.9',\
            'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\
            'Accept-Language':'zh-cn,zh;q=0.5'}
        try:
            response = urlfetch.fetch(url=url,headers=header)
        except:
            logging.error('open exception')
            return 0
        if response.status_code == 200:
            html_content = response.content.replace(' ','').decode('gbk') # baidu uses GBK encoding
            return html_content

    def articleBody(self,url):
        """Scrape one post page into a dict of url/title/date/category/content/author."""
        article_html = self.readPage(url).replace('\n','')
        # Post title (HTML numeric entities collapsed to '-').
        article_title = re.findall(REG_ARTICLE_TITLE, article_html)[0]
        article_title = re.sub('&#(\d+);', '-', article_title)
        # Post date.
        article_date = re.findall(REG_ARTICLE_DATETIME,article_html)[0]
        # Post category, with the '类别:' label stripped.
        article_category = re.findall(REG_ARTICLE_CATAGORY,article_html)[0].replace(u'类别:','')
        # Post body with the wrapper markup trimmed from both ends.
        article_content = re.findall(REG_ARTICLE_CONTENT,article_html)[0]\
            .replace('id="blog_text" class="cnt"','')[12:][:-11]
        article = {'article_url':url,\
            'article_title':article_title,\
            'article_date':article_date,\
            'article_category':article_category,\
            'article_content':article_content,\
            'article_author':self.author}
        return article
from baidu_move import * | Python |
# -*- coding: utf-8 -*-
from micolog_plugin import *
from model import *
import re,datetime
from google.appengine.api import urlfetch
from xml.dom import minidom
from HTMLParser import HTMLParser
# Browser-like User-Agent so feed hosts do not reject our fetches.
HEADERS = { 'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 6.0; zh-CN; rv:1.9.1.9) Gecko/20100315 Firefox/3.5.9'}

class FeedList(db.Model):
    """Datastore record describing one RSS feed to harvest (v0.2 schema)."""
    # Author name stamped on imported entries.
    author = db.StringProperty(multiline=False,default='robot')
    # Display / lookup name of this feed.
    name = db.StringProperty(multiline=False,default='Fred')
    # URL of the feed to fetch.
    feedurl = db.StringProperty(multiline=False,default='http://hi.baidu.com/429263181/rss')
    # Link of the newest entry already imported ('last' means none yet).
    latest = db.StringProperty(multiline=False,default='last')
    # Content-filter options: '0' strip <a>, '1' strip <img>, '2' apply repReg.
    abconf = db.StringListProperty()
    # 'pattern:replacement' pairs, comma separated.
    repReg = db.StringProperty(multiline=False)
    # Auto-build a slug via Google Translate.
    autoUrl = db.BooleanProperty(default=False)
    # strptime pattern used to sync entry dates from the feed.
    autoTimeFormat = db.StringProperty(multiline=False,default=None)
    allow_comment = db.BooleanProperty(default=True) #allow comment
class pick_rss(Plugin):
    """Micolog plugin that harvests RSS/Atom feeds into blog entries (v0.2)."""

    def __init__(self):
        Plugin.__init__(self,__file__)
        self.author="Fred"
        self.authoruri="mailto:ad@fengsage.cn"
        self.uri="http://fengsageblog.appspot.com"
        self.description="RSS采集"
        self.name="PickRss"
        self.version="0.2"
        # Map the 'pick' URL to the harvesting entry point.
        self.register_urlmap('pick',self.getFeed)

    # List configured feeds, or delete one when 'delid' is given.
    def get(self,page):
        if page.param("delid")=='':
            listit = FeedList()
            querys = listit.all()
            return self.render_content("pick_rss.html",{'list':querys})
        else:
            # Delete the feed whose name matches 'delid'.
            listit = FeedList()
            querys=listit.all().filter('name =', page.param("delid")).fetch(1)
            for query in querys:
                query.delete()
            return "Delete it successfully! <a href='?'>Click here BACK</a>"

    # Create a new feed record from the submitted form.
    def post(self,page):
        query = FeedList()
        query.name =page.param("name")
        query.feedurl = page.param("feedurl")
        query.abconf = page.request.get_all('abconf')
        query.autoTimeFormat = page.param("autoTimeFormat")
        logging.info(query.abconf)
        if page.param("allowComment") == 'on':
            query.allow_comment = True
        else:
            query.allow_comment = False
        if page.param("autoUrl") == 'on':
            query.autoUrl = True
        else:
            query.autoUrl = False
        if page.param("repReg"):
            query.repReg = page.param("repReg")
        query.put()
        return self.get(page)

    def getFeed(self,page=None,*arg1,**arg2):
        """Fetch every configured feed and import its new items as entries."""
        listit = FeedList()
        querys = listit.all()
        # Walk every stored feed record.
        for detal in querys:
            url=str(detal.feedurl)
            # Fetch the feed pretending to be a regular browser.
            result = urlfetch.fetch(url,None,urlfetch.GET,HEADERS)
            if result.status_code == 200:
                rss_source = result.content
                # Re-encode gb2312/GBK feeds as UTF-8 before XML parsing.
                if 'encoding="gb2312"' in rss_source:
                    rss_source = result.content.decode('gb2312').encode('UTF-8')
                    rss_source = rss_source.replace('encoding="gb2312"','encoding="utf-8"')
                if 'encoding="GBK"' in rss_source:
                    rss_source = result.content.decode('GBK').encode('UTF-8')
                    rss_source = rss_source.replace('encoding="GBK"','encoding="utf-8"')
                # Parse the XML and detect the feed flavor.
                file_xml = minidom.parseString(rss_source)
                rssNOs=file_xml.getElementsByTagName('rss')
                rssver='1.0'
                for rssNO in rssNOs:
                    rssver=rssNO.getAttribute('version')
                if rssver=='1.0':
                    # No versioned <rss> element found: assume Atom-style
                    # tags ('entry'/'content'/'date').
                    artList='entry'
                    artTitle='title'
                    artLink='link'
                    artText='content'
                    artTime='date'
                else:
                    # RSS 2.0 tag names.
                    artList='item'
                    artTitle='title'
                    artLink='link'
                    artText='description'
                    artTime='pubDate'
                # All article nodes of the feed.
                items = file_xml.getElementsByTagName(artList)
                flag=''
                latestId=detal.latest
                ifFirst=0
                # Items are imported in document order (the reverse of
                # chronological order); autoTimeFormat keeps local entry
                # dates in sync with the remote feed.
                for item in items:
                    entry=Entry()
                    entry.title=item.getElementsByTagName(artTitle)[0].firstChild.data
                    entry.author_name = detal.author
                    # Build the slug automatically (via Google Translate).
                    if detal.autoUrl:
                        entry.slug = autoSlug(entry.title)
                    # Sync the entry date from the feed item's timestamp.
                    if detal.autoTimeFormat:
                        format = detal.autoTimeFormat
                        article_date = item.getElementsByTagName(artTime)[0].firstChild.data
                        entry.date = formatDate(article_date, format)
                    # Comments on/off per feed configuration.
                    if detal.allow_comment:
                        entry.allow_comment = True
                    else:
                        entry.allow_comment = False
                    if rssver=='1.0':
                        flag=item.getElementsByTagName(artLink)[0].getAttribute('href')
                    else:
                        flag=item.getElementsByTagName(artLink)[0].firstChild.data
                    # Remember the newest imported link so the next run stops
                    # before re-importing old items.
                    if latestId=='last':
                        detal.latest=flag
                        latestId=flag
                        ifFirst=1
                        db.put(detal)
                    else:
                        if flag==latestId:
                            break
                        else:
                            if ifFirst==0:
                                detal.latest=flag
                                db.put(detal)
                                ifFirst=1
                            db.put(detal)
                    artContent=item.getElementsByTagName(artText)[0].firstChild.data
                    # Apply the configured content filters.
                    for abc in detal.abconf:
                        # '0': strip all <a> tags.
                        if abc=='0' :
                            artContent = re.sub("<a([^>]*?)>","",artContent)
                            artContent = re.sub("</a>","",artContent)
                        # '1': strip all <img> tags.
                        if abc=='1' :
                            artContent = re.sub("<img(.*?>)","",artContent)
                        # '2': apply the user's pattern:replacement rules.
                        if abc=='2':
                            if detal.repReg:
                                for txt in detal.repReg.split(','):
                                    rep = txt.split(':')
                                    artContent = re.sub(rep[0],rep[1],artContent)
                    # Convert newlines to <br /> for HTML display.
                    artContent=artContent.replace('\n', "<br />")
                    entry.content=artContent
                    entry.save(True)
def formatDate(date_str, date_format):
    """Parse date_str into a datetime using the strptime pattern date_format.

    Parameters were renamed from 'str'/'format', which shadowed builtins;
    all call sites in this module pass them positionally, so the rename is
    backward compatible.

    Args:
        date_str: The date/time string to parse, e.g. '2010-05-15 14:14'.
        date_format: A strptime() pattern, e.g. '%Y-%m-%d %H:%M'.

    Returns:
        The corresponding datetime.datetime.

    Raises:
        ValueError: If date_str does not match date_format.
    """
    return datetime.datetime.strptime(date_str, date_format)
def autoSlug(str):
    # Build an English slug by machine-translating the (Chinese) title via
    # the unofficial Google Translate endpoint.
    # NOTE(review): the parameter shadows the builtin 'str'; also 'logging'
    # is not imported in this module directly -- presumably it arrives via
    # the star imports above, otherwise logging.info below raises NameError.
    # Verify.
    translate_url = 'http://translate.google.com/translate_a/t?client=t&text=%s&hl=en&sl=zh-CN&tl=en&pc=0'%str.replace(' ','-').encode("utf-8")
    translate_result = urlfetch.fetch(translate_url,None,urlfetch.GET,HEADERS)
    if translate_result.status_code == 200:
        # Extract the translated text from the JSON-ish response.
        trans_reg = u'"trans":"([^"]*)"'
        translate_content = re.findall(trans_reg, translate_result.content)[0]
        translate_content = translate_content \
            .replace(' ','-') \
            .replace('\\','-')
        # Collapse anything that is not alphanumeric/underscore into '-'.
        translate_content = re.sub(u'[^a-zA-Z\d_]','-', translate_content)
        logging.info("*********"+translate_content)
        return translate_content
    # Implicitly returns None on a non-200 response.
if __name__=="__main__":
starttime = '2010-05-15 14:14'
format="%Y-%m-%d %H:%M";
print datetime.datetime.strptime(starttime,format)
| Python |
from pick_rss import * | Python |
# -*- coding: utf-8 -*-
from micolog_plugin import *
from model import *
import re
from google.appengine.api import urlfetch
from xml.dom import minidom
from HTMLParser import HTMLParser
class pick_rss(Plugin):
    """Micolog plugin that harvests RSS/Atom feeds into blog entries (v0.1)."""

    def __init__(self):
        Plugin.__init__(self,__file__)
        self.author="Fred"
        self.authoruri="mailto:ad@fengsage.cn"
        self.uri="http://fengsageblog.appspot.com"
        self.description="RSS采集"
        self.name="PickRss"
        self.version="0.1"
        # Map the 'pick' URL to the harvesting entry point.
        self.register_urlmap('pick',self.getFeed)

    # List configured feeds, or delete one when 'delid' is given.
    def get(self,page):
        if page.param("delid")=='':
            listit = FeedList()
            querys = listit.all()
            return self.render_content("pick_rss.html",{'list':querys})
        else:
            # Delete the feed whose name matches 'delid'.
            listit = FeedList()
            querys=listit.all().filter('name =', page.param("delid")).fetch(1)
            for query in querys:
                query.delete()
            return "Delete it successfully! <a href='?'>Click here BACK</a>"

    # Create a new feed record from the submitted form.
    def post(self,page):
        query = FeedList()
        query.name =page.param("name")
        query.feedurl = page.param("feedurl")
        query.abconf = page.param("abconf")
        if page.param("allowComment") == 'on':
            query.allow_comment = True
        else:
            query.allow_comment = False
        if page.param("autoUrl") == 'on':
            query.autoUrl = True
        else:
            query.autoUrl = False
        if page.param("repReg"):
            query.repReg = page.param("repReg")
        query.put()
        return self.get(page)

    def getFeed(self,page=None,*arg1,**arg2):
        """Fetch every configured feed and import its new items as entries."""
        listit = FeedList()
        querys = listit.all()
        # Walk every stored feed record.
        for detal in querys:
            url=str(detal.feedurl)
            # Browser-like headers (used for the translate request below).
            headers = { 'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 6.0; zh-CN; rv:1.9.1.9) Gecko/20100315 Firefox/3.5.9'}
            result = urlfetch.fetch(url)
            rss_source = result.content
            # Convert gb2312 feeds to UTF-8 before parsing.
            if 'encoding="gb2312"' in rss_source:
                rss_source = result.content.decode('gb2312').encode('UTF-8')
                rss_source = rss_source.replace('encoding="gb2312"','encoding="utf-8"')
            if result.status_code == 200:
                # Parse the XML and detect the feed flavor.
                file_xml = minidom.parseString(rss_source)
                rssNOs=file_xml.getElementsByTagName('rss')
                rssver='1.0'
                for rssNO in rssNOs:
                    rssver=rssNO.getAttribute('version')
                if rssver=='1.0':
                    # No versioned <rss> element: assume Atom-style tags.
                    artList='entry'
                    artTitle='title'
                    artLink='link'
                    artText='content'
#                    artTime='date'
                else:
                    # RSS 2.0 tag names.
                    artList='item'
                    artTitle='title'
                    artLink='link'
                    artText='description'
#                    artTime='pubDate'
                # All article nodes of the feed.
                items = file_xml.getElementsByTagName(artList)
                flag=''
                latestId=detal.latest
                ifFirst=0
                # Items are imported in document order, which is the reverse
                # of chronological order.
                for item in items:
                    entry=Entry()
                    entry.title=item.getElementsByTagName(artTitle)[0].firstChild.data
                    entry.author_name = 'robot'
                    # Build the slug by machine-translating the title via the
                    # unofficial Google Translate endpoint.
                    if detal.autoUrl:
                        translate_url = 'http://translate.google.com/translate_a/t?client=t&text=%s&hl=en&sl=zh-CN&tl=en&pc=0'%entry.title.replace(' ','_').encode("utf-8")
                        translate_result = urlfetch.fetch(translate_url,None,urlfetch.GET,headers)
                        if translate_result.status_code == 200:
                            translate_content = translate_result.content.split(',')[0] \
                                .replace('{"sentences":[{"trans":','') \
                                .replace('\"','') \
                                .replace('-','') \
                                .replace(' ','_') \
                                .replace(' ','_')
                            logging.info("*********"+translate_content)
                            entry.slug = translate_content
                    # Comments on/off per feed configuration.
                    if detal.allow_comment:
                        entry.allow_comment = True
                    else:
                        entry.allow_comment = False
                    if rssver=='1.0':
                        flag=item.getElementsByTagName(artLink)[0].getAttribute('href')
                    else:
                        flag=item.getElementsByTagName(artLink)[0].firstChild.data
                    # Remember the newest imported link so the next run stops
                    # before re-importing old items.
                    if latestId=='last':
                        detal.latest=flag
                        latestId=flag
                        ifFirst=1
                        db.put(detal)
                    else:
                        if flag==latestId:
                            break
                        else:
                            if ifFirst==0:
                                detal.latest=flag
                                db.put(detal)
                                ifFirst=1
                            db.put(detal)
                    # (Date syncing from the feed was sketched here but never
                    # enabled; CDATA handling was an open question.)
#                    if datal.synTime:
#                        artPubDate=item.getElementsByTagName(artTime)[0].firstChild.data
#                        entry.date = artPubDate
                    artContent=item.getElementsByTagName(artText)[0].firstChild.data
                    # abconf selects a single content filter:
                    # '1': apply the user's pattern:replacement rules.
                    if detal.abconf=='1':
                        if detal.repReg:
                            for txt in detal.repReg.split(','):
                                rep = txt.split(':')
                                artContent = re.sub(rep[0],rep[1],artContent)
                    # '2': strip all <img> tags.
                    if detal.abconf=='2' :
                        artContent = re.sub("<img(.*?>)","",artContent)
                    # '3': strip all <a> tags.
                    if detal.abconf=='3' :
                        artContent = re.sub("<a([^>]*?)>","",artContent)
                        artContent = re.sub("</a>","",artContent)
                    # '4': strip both <img> and <a> tags.
                    if detal.abconf=='4' :
                        artContent = re.sub("<img(.*?>)","",artContent)
                        artContent = re.sub("<a([^>]*?)>","",artContent)
                        artContent = re.sub("</a>","",artContent)
                    # Convert newlines to <br /> for HTML display.
                    artContent=artContent.replace('\n', "<br />")
                    entry.content=artContent
                    entry.save(True)
class FeedList(db.Model):
    """Datastore record describing one RSS feed to harvest (v0.1 schema)."""
    # Display / lookup name of this feed.
    name = db.StringProperty(multiline=False,default='Fred')
    # URL of the feed to fetch.
    feedurl = db.StringProperty(multiline=False,default='http://hi.baidu.com/429263181/rss')
    # Link of the newest entry already imported ('last' means none yet).
    latest = db.StringProperty(multiline=False,default='last')
    # Single content-filter option: '1' repReg, '2' strip <img>, '3' strip <a>, '4' both.
    abconf = db.StringProperty(multiline=False,default='0')
    # 'pattern:replacement' pairs, comma separated.
    repReg = db.StringProperty(multiline=False)
#    synTime = db.BooleanProperty(default=True)
    # Auto-build a slug via Google Translate.
    autoUrl = db.BooleanProperty(default=False)
    allow_comment = db.BooleanProperty(default=True) #allow comment
from pick_rss import * | Python |
# -*- coding: utf-8 -*-
from micolog_plugin import *
import datetime,logging,re
from google.appengine.api import urlfetch
'''
Created on 2010-7-1
@author: 火中仙
'''
class today_history(Plugin):
    """Micolog plugin that renders "today in history" events.

    Scrapes guxiang.com's "this day in history" page and exposes the
    result through the 'history' template filter.
    """

    def __init__(self):
        Plugin.__init__(self,__file__)
        self.author="Fred"
        self.authoruri="mailto:me@fengsage.com"
        self.uri="http://fengsageblog.appspot.com"
        self.description="历史上的今天"
        self.name="TodayHistory"
        self.version="0.1"
        # Expose this plugin as the 'history' template filter.
        self.register_filter('history',self.getTodayHistory)

    def getTodayHistory(self,page=None,*arg1,**arg2):
        """Fetch and render up to 10 historical events for today's date."""
        month = datetime.date.today().month
        day = datetime.date.today().day
        month_list = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
        # The site zero-pads single-digit days in its URLs.
        if day<10: day = '0%s'%day
        url = u'http://www.guxiang.com/lishi/others/jintian/%s/%s.htm'%(month_list[month-1],str(day))
        html = self.readPage(url)
        # Robustness fix: readPage returns a falsy value (0 or None) on
        # failure; previously this crashed calling .replace() on it.
        if not html:
            return self.render_content("today_history.html")
        html = html.replace('<br>','')
        REG_STR = u'<a href="../../../../([^>]*)" target="_blank">([^/]*)</a>'
        # Renamed from 'list', which shadowed the builtin.
        entries = re.findall(REG_STR, html)
        if entries:
            return self.render_content("today_history.html",{'list':entries[:10]})
        else:
            return self.render_content("today_history.html")

    def readPage(self,url):
        '''
        Page reader.

        Fetches url and returns its content decoded from gb2312. Returns 0
        if the fetch raises, or None (implicitly) on a non-200 status --
        callers must treat any falsy result as failure.
        '''
        logging.info('Read page:%s'%url)
        # NOTE(review): the Host header says hi.baidu.com although the URL
        # points at guxiang.com (seemingly copied from another plugin); left
        # unchanged to avoid altering what the server receives -- verify
        # whether it is needed at all.
        header = { 'Host':'hi.baidu.com',\
            'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 6.0; zh-CN; rv:1.9.1.9) Gecko/20100315 Firefox/3.5.9',\
            'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\
            'Accept-Language':'zh-cn,zh;q=0.5'}
        try:
            response = urlfetch.fetch(url=url,headers=header)
        except:
            logging.error('open exception')
            return 0
        if response.status_code == 200:
            html_content = response.content.replace(' ','').decode('gb2312')
            return html_content
| Python |
from today_history import * | Python |
# -*- coding: utf-8 -*-
# Django settings for the example project.
import os
# Development settings: detailed error pages on, template debugging off.
DEBUG = True
TEMPLATE_DEBUG = False
##LANGUAGE_CODE = 'zh-CN'
##LANGUAGE_CODE = 'fr'
# Directory holding the translation catalogs, relative to the project root.
LOCALE_PATHS = 'locale'
USE_I18N = True
# Template lookup order: plain files on disk first, then zip archives.
TEMPLATE_LOADERS=('django.template.loaders.filesystem.load_template_source',
              'ziploader.zip_loader.load_template_source')
# -*- coding: utf-8 -*-
import os,stat
import sys
import logging
import wsgiref.handlers
from mimetypes import types_map
from datetime import datetime, timedelta
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api import memcache
from google.appengine.ext.zipserve import *
sys.path.append('modules')
from model import *
# {{{ Handlers
cwd = os.getcwd()
theme_path = os.path.join(cwd, 'themes')  # only files under this directory may be served
file_modifieds={}  # NOTE(review): appears unused in this module -- candidate for removal
max_age = 600 #expires in 10 minutes
def Error404(handler):
	"""Render the themed 404 page into *handler*'s response."""
	response = handler.response
	response.set_status(404)
	template_path = os.path.join(cwd, 'views/404.html')
	response.out.write(template.render(template_path, {'error': 404}))
class GetFile(webapp.RequestHandler):
	"""Serves static theme assets from disk or from a theme's zip bundle.

	URL shape: /themes/<prefix>/<name>.  When the file is missing on disk
	it falls back to <prefix>.zip via zipserve's ZipHandler.  Honours
	If-Modified-Since (304) and refuses paths that escape the themes dir.
	"""
	def get(self,prefix,name):
		# Strip the leading '/themes/' (8 chars) to get the theme-relative path.
		request_path = self.request.path[8:]
		server_path = os.path.normpath(os.path.join(cwd, 'themes', request_path))
		try:
			fstat=os.stat(server_path)
		except:
			# Not on disk -- try the theme's zip bundle instead.
			theme_file=os.path.normpath(os.path.join(cwd, 'themes', prefix))
			if os.path.exists(theme_file+".zip"):
				# Zip exists: delegate to zipserve to extract the member.
				fstat=os.stat(theme_file+".zip")
				zipdo=ZipHandler()
				zipdo.initialize(self.request,self.response)
				return zipdo.get(theme_file,name)
			else:
				Error404(self)
				return
		fmtime=datetime.fromtimestamp(fstat[stat.ST_MTIME])
		# Conditional GET: answer 304 when the client's cached copy is fresh.
		if self.request.if_modified_since and self.request.if_modified_since.replace(tzinfo=None) >= fmtime:
			self.response.headers['Date'] = format_date(datetime.utcnow())
			self.response.headers['Last-Modified'] = format_date(fmtime)
			cache_expires(self.response, max_age)
			self.response.set_status(304)
			self.response.clear()
		elif server_path.startswith(theme_path):
			# Path is safely inside the themes dir; pick a MIME type by extension.
			ext = os.path.splitext(server_path)[1]
			if types_map.has_key(ext):
				mime_type = types_map[ext]
			else:
				mime_type = 'application/octet-stream'
			try:
				self.response.headers['Content-Type'] = mime_type
				self.response.headers['Last-Modified'] = format_date(fmtime)
				cache_expires(self.response, max_age)
				self.response.out.write(open(server_path, 'rb').read())
			except Exception, e:
				Error404(self)
		else:
			# normpath resolved outside the themes dir (e.g. '..' tricks) -- refuse.
			Error404(self)
class NotFound(webapp.RequestHandler):
	"""Catch-all handler: every request routed here gets the themed 404 page."""
	def get(self):
		Error404(self)
#}}}
def format_date(dt):
	"""Format *dt* as an HTTP-date string (RFC 1123 style, GMT)."""
	http_date_fmt = '%a, %d %b %Y %H:%M:%S GMT'
	return dt.strftime(http_date_fmt)
def cache_expires(response, seconds=0, **kw):
	"""Set expiration headers on *response*.

	With seconds == 0 the response is marked uncacheable (forced expiry);
	otherwise it is cacheable for *seconds* seconds.

	Adapted from webob.Response.cache_expires.  Bug fix: the copied code
	still referenced ``self``, which does not exist in this free function
	and raised NameError whenever the no-cache branch ran; it now uses
	the *response* parameter.
	"""
	if not seconds:
		# To really expire something, you have to force a
		# bunch of these cache control attributes, and IE may
		# not pay attention to those still so we also set
		# Expires.
		response.headers['Cache-Control'] = 'max-age=0, must-revalidate, no-cache, no-store'
		response.headers['Expires'] = format_date(datetime.utcnow())
		if 'Last-Modified' not in response.headers:
			response.headers['Last-Modified'] = format_date(datetime.utcnow())
		response.headers['Pragma'] = 'no-cache'
	else:
		response.headers['Cache-Control'] = 'max-age=%d' % seconds
		response.headers['Expires'] = format_date(datetime.utcnow() + timedelta(seconds=seconds))
def main():
	"""Wire up the static-theme routes and hand the WSGI app to the CGI runner."""
	routes = [
		# Never serve raw template sources.
		('/themes/[\\w\\-]+/templates/.*', NotFound),
		# Everything else under a theme directory is a candidate static file.
		('/themes/(?P<prefix>[\\w\\-]+)/(?P<name>.+)', GetFile),
		('.*', NotFound),
	]
	application = webapp.WSGIApplication(routes, debug=True)
	wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
	main()
| Python |
#!/usr/bin/env python
import optparse
import os
import sys
def compile_messages(locale=None):
basedir = None
if os.path.isdir(os.path.join('conf', 'locale')):
basedir = os.path.abspath(os.path.join('conf', 'locale'))
elif os.path.isdir('locale'):
basedir = os.path.abspath('locale')
else:
print "This script should be run from the Django SVN tree or your project or app tree."
sys.exit(1)
if locale is not None:
basedir = os.path.join(basedir, locale, 'LC_MESSAGES')
for dirpath, dirnames, filenames in os.walk(basedir):
for f in filenames:
if f.endswith('.po'):
sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
pf = os.path.splitext(os.path.join(dirpath, f))[0]
# Store the names of the .mo and .po files in an environment
# variable, rather than doing a string replacement into the
# command, so that we can take advantage of shell quoting, to
# quote any malicious characters/escaping.
# See http://cyberelk.net/tim/articles/cmdline/ar01s02.html
os.environ['djangocompilemo'] = pf + '.mo'
os.environ['djangocompilepo'] = pf + '.po'
if sys.platform == 'win32': # Different shell-variable syntax
cmd = 'msgfmt -o "%djangocompilemo%" "%djangocompilepo%"'
else:
cmd = 'msgfmt -o "$djangocompilemo" "$djangocompilepo"'
os.system(cmd)
def main():
	"""CLI entry point: parse the -l/--locale option and compile catalogs."""
	parser = optparse.OptionParser()
	parser.add_option('-l', '--locale', dest='locale',
		help="The locale to process. Default is to process all.")
	options, args = parser.parse_args()
	if args:
		parser.error("This program takes no arguments")
	compile_messages(options.locale)
if __name__ == "__main__":
	main()
| Python |
#!/usr/bin/env python
import os
import sys
def unique_messages():
basedir = None
if os.path.isdir(os.path.join('conf', 'locale')):
basedir = os.path.abspath(os.path.join('conf', 'locale'))
elif os.path.isdir('locale'):
basedir = os.path.abspath('locale')
else:
print "this script should be run from the django svn tree or your project or app tree"
sys.exit(1)
for (dirpath, dirnames, filenames) in os.walk(basedir):
for f in filenames:
if f.endswith('.po'):
sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
pf = os.path.splitext(os.path.join(dirpath, f))[0]
cmd = 'msguniq "%s.po"' % pf
stdout = os.popen(cmd)
msg = stdout.read()
open('%s.po' % pf, 'w').write(msg)
if __name__ == "__main__":
unique_messages()
| Python |
#!/usr/bin/env python
# Need to ensure that the i18n framework is enabled
from django.conf import settings
settings.configure(USE_I18N = True)
from django.utils.translation import templatize
import re
import os
import sys
import getopt
# Matches '//' line comments in JS so they can be rewritten as '#' for xgettext.
pythonize_re = re.compile(r'\n\s*//')
def make_messages():
	"""Extract translatable strings into per-language .po files.

	Usage: make-messages.py -l <lang> | -a  [-d django|djangojs] [-v]
	Walks the working tree, converts .html templates (and, for the
	djangojs domain, .js files) into temporary .py shims, runs xgettext
	over each candidate file, dedupes with msguniq and merges the result
	into the existing .po with msgmerge.
	"""
	localedir = None
	if os.path.isdir(os.path.join('conf', 'locale')):
		localedir = os.path.abspath(os.path.join('conf', 'locale'))
	elif os.path.isdir('locale'):
		localedir = os.path.abspath('locale')
	else:
		print "This script should be run from the django svn tree or your project or app tree."
		print "If you did indeed run it from the svn checkout or your project or application,"
		print "maybe you are just missing the conf/locale (in the django tree) or locale (for project"
		print "and application) directory?"
		print "make-messages.py doesn't create it automatically, you have to create it by hand if"
		print "you want to enable i18n for your project or application."
		sys.exit(1)
	# Parse command-line flags.
	(opts, args) = getopt.getopt(sys.argv[1:], 'l:d:va')
	lang = None
	domain = 'django'
	verbose = False
	all = False
	for o, v in opts:
		if o == '-l':
			lang = v
		elif o == '-d':
			domain = v
		elif o == '-v':
			verbose = True
		elif o == '-a':
			all = True
	if domain not in ('django', 'djangojs'):
		print "currently make-messages.py only supports domains 'django' and 'djangojs'"
		sys.exit(1)
	if (lang is None and not all) or domain is None:
		print "usage: make-messages.py -l <language>"
		print " or: make-messages.py -a"
		sys.exit(1)
	# Build the list of languages to process (-l gives one, -a gives all found).
	languages = []
	if lang is not None:
		languages.append(lang)
	elif all:
		languages = [el for el in os.listdir(localedir) if not el.startswith('.')]
	for lang in languages:
		print "processing language", lang
		basedir = os.path.join(localedir, lang, 'LC_MESSAGES')
		if not os.path.isdir(basedir):
			os.makedirs(basedir)
		pofile = os.path.join(basedir, '%s.po' % domain)
		potfile = os.path.join(basedir, '%s.pot' % domain)
		# Start each run from a fresh .pot template.
		if os.path.exists(potfile):
			os.unlink(potfile)
		for (dirpath, dirnames, filenames) in os.walk("."):
			for file in filenames:
				if domain == 'djangojs' and file.endswith('.js'):
					if verbose: sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
					# Rewrite JS '//' comments as '#' so xgettext's Perl
					# lexer tolerates the file, via a temporary .py shim.
					src = open(os.path.join(dirpath, file), "rb").read()
					src = pythonize_re.sub('\n#', src)
					open(os.path.join(dirpath, '%s.py' % file), "wb").write(src)
					thefile = '%s.py' % file
					cmd = 'xgettext %s -d %s -L Perl --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy --from-code UTF-8 -o - "%s"' % (
						os.path.exists(potfile) and '--omit-header' or '', domain, os.path.join(dirpath, thefile))
					(stdin, stdout, stderr) = os.popen3(cmd, 'b')
					msgs = stdout.read()
					errors = stderr.read()
					if errors:
						print "errors happened while running xgettext on %s" % file
						print errors
						sys.exit(8)
					# Point the '#:' source comments at the real .js file, not the shim.
					old = '#: '+os.path.join(dirpath, thefile)[2:]
					new = '#: '+os.path.join(dirpath, file)[2:]
					msgs = msgs.replace(old, new)
					if msgs:
						open(potfile, 'ab').write(msgs)
					os.unlink(os.path.join(dirpath, thefile))
				elif domain == 'django' and (file.endswith('.py') or file.endswith('.html')):
					thefile = file
					if file.endswith('.html'):
						# Templates go through templatize() into a .py shim first.
						src = open(os.path.join(dirpath, file), "rb").read()
						open(os.path.join(dirpath, '%s.py' % file), "wb").write(templatize(src))
						thefile = '%s.py' % file
					if verbose: sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
					cmd = 'xgettext %s -d %s -L Python --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy --from-code UTF-8 -o - "%s"' % (
						os.path.exists(potfile) and '--omit-header' or '', domain, os.path.join(dirpath, thefile))
					(stdin, stdout, stderr) = os.popen3(cmd, 'b')
					msgs = stdout.read()
					errors = stderr.read()
					if errors:
						print "errors happened while running xgettext on %s" % file
						print errors
						sys.exit(8)
					if thefile != file:
						# Point the '#:' source comments at the real template file.
						old = '#: '+os.path.join(dirpath, thefile)[2:]
						new = '#: '+os.path.join(dirpath, file)[2:]
						msgs = msgs.replace(old, new)
					if msgs:
						open(potfile, 'ab').write(msgs)
					if thefile != file:
						os.unlink(os.path.join(dirpath, thefile))
		# Dedupe the fresh template, then merge it into the language's .po.
		if os.path.exists(potfile):
			(stdin, stdout, stderr) = os.popen3('msguniq "%s"' % potfile, 'b')
			msgs = stdout.read()
			errors = stderr.read()
			if errors:
				print "errors happened while running msguniq"
				print errors
				sys.exit(8)
			open(potfile, 'w').write(msgs)
			if os.path.exists(pofile):
				(stdin, stdout, stderr) = os.popen3('msgmerge -q "%s" "%s"' % (pofile, potfile), 'b')
				msgs = stdout.read()
				errors = stderr.read()
				if errors:
					print "errors happened while running msgmerge"
					print errors
					sys.exit(8)
				open(pofile, 'wb').write(msgs)
			os.unlink(potfile)
if __name__ == "__main__":
	make_messages()
| Python |
#!/usr/bin/env python
import optparse
import os
import sys
def compile_messages(locale=None):
	"""Compile .po catalogs to binary .mo files with msgfmt.

	Looks for conf/locale (Django tree) or locale (project/app tree)
	under the current directory; *locale*, when given, restricts the
	run to that language's LC_MESSAGES directory.
	"""
	basedir = None
	if os.path.isdir(os.path.join('conf', 'locale')):
		basedir = os.path.abspath(os.path.join('conf', 'locale'))
	elif os.path.isdir('locale'):
		basedir = os.path.abspath('locale')
	else:
		print "This script should be run from the Django SVN tree or your project or app tree."
		sys.exit(1)
	if locale is not None:
		basedir = os.path.join(basedir, locale, 'LC_MESSAGES')
	for dirpath, dirnames, filenames in os.walk(basedir):
		for f in filenames:
			if f.endswith('.po'):
				sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
				pf = os.path.splitext(os.path.join(dirpath, f))[0]
				# Store the names of the .mo and .po files in an environment
				# variable, rather than doing a string replacement into the
				# command, so that we can take advantage of shell quoting, to
				# quote any malicious characters/escaping.
				# See http://cyberelk.net/tim/articles/cmdline/ar01s02.html
				os.environ['djangocompilemo'] = pf + '.mo'
				os.environ['djangocompilepo'] = pf + '.po'
				if sys.platform == 'win32': # Different shell-variable syntax
					cmd = 'msgfmt -o "%djangocompilemo%" "%djangocompilepo%"'
				else:
					cmd = 'msgfmt -o "$djangocompilemo" "$djangocompilepo"'
				os.system(cmd)
def main():
	"""CLI entry point: parse -l/--locale and compile the catalogs."""
	parser = optparse.OptionParser()
	parser.add_option('-l', '--locale', dest='locale',
		help="The locale to process. Default is to process all.")
	options, args = parser.parse_args()
	if len(args):
		parser.error("This program takes no arguments")
	compile_messages(options.locale)
if __name__ == "__main__":
	main()
| Python |
#!/usr/bin/env python
# Need to ensure that the i18n framework is enabled
from django.conf import settings
settings.configure(USE_I18N = True)
from django.utils.translation import templatize
import re
import os
import sys
import getopt
# Matches '//' line comments in JS so they can be rewritten as '#' for xgettext.
pythonize_re = re.compile(r'\n\s*//')
def make_messages():
	"""Extract translatable strings into per-language .po files.

	Usage: make-messages.py -l <lang> | -a  [-d django|djangojs] [-v]
	Walks the working tree, converts .html templates (and, for the
	djangojs domain, .js files) into temporary .py shims, runs xgettext
	over each candidate file, dedupes with msguniq and merges the result
	into the existing .po with msgmerge.
	"""
	localedir = None
	if os.path.isdir(os.path.join('conf', 'locale')):
		localedir = os.path.abspath(os.path.join('conf', 'locale'))
	elif os.path.isdir('locale'):
		localedir = os.path.abspath('locale')
	else:
		print "This script should be run from the django svn tree or your project or app tree."
		print "If you did indeed run it from the svn checkout or your project or application,"
		print "maybe you are just missing the conf/locale (in the django tree) or locale (for project"
		print "and application) directory?"
		print "make-messages.py doesn't create it automatically, you have to create it by hand if"
		print "you want to enable i18n for your project or application."
		sys.exit(1)
	# Parse command-line flags.
	(opts, args) = getopt.getopt(sys.argv[1:], 'l:d:va')
	lang = None
	domain = 'django'
	verbose = False
	all = False
	for o, v in opts:
		if o == '-l':
			lang = v
		elif o == '-d':
			domain = v
		elif o == '-v':
			verbose = True
		elif o == '-a':
			all = True
	if domain not in ('django', 'djangojs'):
		print "currently make-messages.py only supports domains 'django' and 'djangojs'"
		sys.exit(1)
	if (lang is None and not all) or domain is None:
		print "usage: make-messages.py -l <language>"
		print " or: make-messages.py -a"
		sys.exit(1)
	# Build the list of languages to process (-l gives one, -a gives all found).
	languages = []
	if lang is not None:
		languages.append(lang)
	elif all:
		languages = [el for el in os.listdir(localedir) if not el.startswith('.')]
	for lang in languages:
		print "processing language", lang
		basedir = os.path.join(localedir, lang, 'LC_MESSAGES')
		if not os.path.isdir(basedir):
			os.makedirs(basedir)
		pofile = os.path.join(basedir, '%s.po' % domain)
		potfile = os.path.join(basedir, '%s.pot' % domain)
		# Start each run from a fresh .pot template.
		if os.path.exists(potfile):
			os.unlink(potfile)
		for (dirpath, dirnames, filenames) in os.walk("."):
			for file in filenames:
				if domain == 'djangojs' and file.endswith('.js'):
					if verbose: sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
					# Rewrite JS '//' comments as '#' so xgettext's Perl
					# lexer tolerates the file, via a temporary .py shim.
					src = open(os.path.join(dirpath, file), "rb").read()
					src = pythonize_re.sub('\n#', src)
					open(os.path.join(dirpath, '%s.py' % file), "wb").write(src)
					thefile = '%s.py' % file
					cmd = 'xgettext %s -d %s -L Perl --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy --from-code UTF-8 -o - "%s"' % (
						os.path.exists(potfile) and '--omit-header' or '', domain, os.path.join(dirpath, thefile))
					(stdin, stdout, stderr) = os.popen3(cmd, 'b')
					msgs = stdout.read()
					errors = stderr.read()
					if errors:
						print "errors happened while running xgettext on %s" % file
						print errors
						sys.exit(8)
					# Point the '#:' source comments at the real .js file, not the shim.
					old = '#: '+os.path.join(dirpath, thefile)[2:]
					new = '#: '+os.path.join(dirpath, file)[2:]
					msgs = msgs.replace(old, new)
					if msgs:
						open(potfile, 'ab').write(msgs)
					os.unlink(os.path.join(dirpath, thefile))
				elif domain == 'django' and (file.endswith('.py') or file.endswith('.html')):
					thefile = file
					if file.endswith('.html'):
						# Templates go through templatize() into a .py shim first.
						src = open(os.path.join(dirpath, file), "rb").read()
						open(os.path.join(dirpath, '%s.py' % file), "wb").write(templatize(src))
						thefile = '%s.py' % file
					if verbose: sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
					cmd = 'xgettext %s -d %s -L Python --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy --from-code UTF-8 -o - "%s"' % (
						os.path.exists(potfile) and '--omit-header' or '', domain, os.path.join(dirpath, thefile))
					(stdin, stdout, stderr) = os.popen3(cmd, 'b')
					msgs = stdout.read()
					errors = stderr.read()
					if errors:
						print "errors happened while running xgettext on %s" % file
						print errors
						sys.exit(8)
					if thefile != file:
						# Point the '#:' source comments at the real template file.
						old = '#: '+os.path.join(dirpath, thefile)[2:]
						new = '#: '+os.path.join(dirpath, file)[2:]
						msgs = msgs.replace(old, new)
					if msgs:
						open(potfile, 'ab').write(msgs)
					if thefile != file:
						os.unlink(os.path.join(dirpath, thefile))
		# Dedupe the fresh template, then merge it into the language's .po.
		if os.path.exists(potfile):
			(stdin, stdout, stderr) = os.popen3('msguniq "%s"' % potfile, 'b')
			msgs = stdout.read()
			errors = stderr.read()
			if errors:
				print "errors happened while running msguniq"
				print errors
				sys.exit(8)
			open(potfile, 'w').write(msgs)
			if os.path.exists(pofile):
				(stdin, stdout, stderr) = os.popen3('msgmerge -q "%s" "%s"' % (pofile, potfile), 'b')
				msgs = stdout.read()
				errors = stderr.read()
				if errors:
					print "errors happened while running msgmerge"
					print errors
					sys.exit(8)
				open(pofile, 'wb').write(msgs)
			os.unlink(potfile)
if __name__ == "__main__":
	make_messages()
| Python |
#!/usr/bin/env python
import os
import sys
def unique_messages():
	"""Deduplicate every .po file under the locale tree, in place, via msguniq."""
	basedir = None
	if os.path.isdir(os.path.join('conf', 'locale')):
		basedir = os.path.abspath(os.path.join('conf', 'locale'))
	elif os.path.isdir('locale'):
		basedir = os.path.abspath('locale')
	else:
		print "this script should be run from the django svn tree or your project or app tree"
		sys.exit(1)
	for (dirpath, dirnames, filenames) in os.walk(basedir):
		for f in filenames:
			if f.endswith('.po'):
				sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
				pf = os.path.splitext(os.path.join(dirpath, f))[0]
				# Pipe the catalog through msguniq and write the result back.
				cmd = 'msguniq "%s.po"' % pf
				stdout = os.popen(cmd)
				msg = stdout.read()
				open('%s.po' % pf, 'w').write(msg)
if __name__ == "__main__":
	unique_messages()
| Python |
from micolog_plugin import *
import logging
from model import *
from google.appengine.api import users
class highsyntax(Plugin):
	"""Adds SyntaxHighlighter-based code highlighting to blog pages.

	Serves the bundled syntaxhighlighter.zip under /syntaxhighlighter/
	and injects a lazy loader script via the 'footer' template filter.
	The chosen highlighter theme is persisted in the OptionSet table.
	"""
	def __init__(self):
		Plugin.__init__(self,__file__)
		self.author="xuming"
		self.authoruri="http://xuming.net"
		self.uri="http://xuming.net"
		self.description="HighSyntax Plugin."
		self.name="HighSyntax plugin"
		self.version="0.1"
		self.register_filter('footer',self.footer)
		# Serve the highlighter assets straight out of the zip bundle.
		self.register_urlzip('/syntaxhighlighter/(.*)','syntaxhighlighter.zip')
		self.theme=OptionSet.getValue("highsyntax_theme",default="Default")
	def footer(self,content,blog=None,*arg1,**arg2):
		# Append a lazy loader: shCore.js is only fetched when the page
		# actually contains <pre class="brush:..."> blocks.
		return content+'''
	<script type="text/javascript">
	if ($('pre[class^=brush:]').length > 0)
	{
		$.getScript("/syntaxhighlighter/scripts/shCore.js", function() {
			SyntaxHighlighter.boot("/syntaxhighlighter/", {theme : "'''+str(self.theme)+'''", stripBrs : true}, {});
		});
	}
	</script>
	'''
	def get(self,page):
		# Plugin admin page: requirements note plus the theme-selection form.
		return '''<h3>HighSyntax Plugin</h3>
	<p>HighSyntax plugin for micolog.</p>
	<p>This plugin based on <a href="http://alexgorbatchev.com/wiki/SyntaxHighlighter" target="_blank">SyntaxHighlighter</a>
	and <a href="http://www.outofwhatbox.com/blog/syntaxhighlighter-downloads/" target="_blank">SyntaxHighlighter.boot()</a></p>
	<form action="" method="post">
	<p><B>Require:</B>
	<ol>
	<li><b>{%mf footer%} </b>in template "base.html".</li>
	<li><a href="http://jquery.org" target="_blank">Jquery</a> version 1.3.2 or new.</li>
	</ol>
	</p>
	<p><b>Theme:</b>
	</p>
	<p>
	<select name="theme" id="theme">
	<option value="Default">Default</option>
	<option value="Django">Django</option>
	<option value="Eclipse">Eclipse</option>
	<option value="Emacs">Emacs</option>
	<option value="FadeToGrey">FadeToGrey</option>
	<option value="Midnight">Midnight</option>
	<option value="RDark">RDark</option>
	</select>
	</p>
	<p>
	<input type="submit" value="submit">
	</p>
	</form>
	<script>
	$("#theme").val("'''+str(self.theme)+'''");</script>
	'''
	def post(self,page):
		# Persist the chosen theme and re-render the admin page.
		self.theme=page.param("theme")
		OptionSet.setValue("highsyntax_theme",self.theme)
		return self.get(page)
from highsyntax import * | Python |
# -*- coding: utf-8 -*-
from micolog_plugin import *
from model import *
import re,datetime,logging
from google.appengine.api import urlfetch
# Scrape patterns for hi.baidu.com blog pages.
REG_BAIDU_URL = u'http://hi.baidu.com/([^/]*)'  # captures the Baidu Space (user) name
REG_BLOG_PAGE = u'<a href=([/,"]?/[^>]*/blog/index/\d*["]?)>'  # blog index pagination links
REG_BLOG_TITLE = u'<div class="tit"><a href="(/[^>]*/blog/item/[^>]*.html)" target="_blank">'  # per-article link on an index page
REG_ARTICLE_TITLE = u'<div class="tit">([^>,/,<]*)</div>'  # article title on the article page
REG_ARTICLE_CONTENT = u'<td><div id="blog_text" class="[^>]*" [\s\S]*?</div></td>'  # article body markup
REG_ARTICLE_CATAGORY = u'<a href="/[^>]*/blog/category/[^>]*>([^>]*)</a>'  # article category link
REG_ARTICLE_DATETIME = u'<div class="date">([^>,/,<]*)</div>'  # article timestamp
class baidu_move(Plugin):
	"""Imports ("moves") all posts from a Baidu Space blog into Micolog.

	The admin submits a hi.baidu.com URL; the plugin walks every blog
	index page, scrapes each article's title/date/category/content and
	saves them as local Entry records.
	"""
	def __init__(self):
		Plugin.__init__(self,__file__)
		self.author= u"Fred"
		self.authoruri="mailto:me@fengsage.com"
		self.uri="http://fengsageblog.appspot.com"
		self.description= u"百度搬家工具"
		self.name="BaiduMove"
		self.version="0.1"
	def get(self,page):
		# Admin page: show the import form with no status message.
		return self.render_content("baidu_move.html",{'msg':''})
	def post(self,page):
		"""Handle the form submit: scrape every article and save Entry rows."""
		baidu_url = page.param("url").strip()
		# logging.info('Prepare move : %s'%baidu_url)
		if re.findall(REG_BAIDU_URL, baidu_url):
			baidu_name = re.findall(REG_BAIDU_URL, baidu_url)[0]
			baidu_url = 'http://hi.baidu.com/%s'%baidu_name
			html = self.readPage('%s/blog'%baidu_url)
			blog_page_url = re.findall(REG_BLOG_PAGE,html)
			if blog_page_url==[]:
				# Message: "space not found, or fewer than one page of articles".
				return self.render_content("baidu_move.html",{'msg':'未找到该空间或该文章数少于1页'})
			# Pagination links look like /<name>/blog/index/<n>; the first and
			# last matches bound the index-page range.
			min_page = int(blog_page_url[0].replace('"','').replace('/%s/blog/index/'%baidu_name,'')) #the first page
			max_page = int(blog_page_url[-1].replace('"','').replace('/%s/blog/index/'%baidu_name,'')) #the last page
			if min_page == max_page-1:
				# Presumably the trailing link is a "next" link rather than the
				# real last page, so fall back to the one before it -- TODO confirm.
				max_page = int(blog_page_url[-2].replace('"','').replace('/%s/blog/index/'%baidu_name,'')) #the last page
			logging.info('Blog %s total page %s '%(baidu_name,max_page+1) )
			for page_index in range(0,max_page+1):
				page_url = '%s/blog/index/'%baidu_url+str(page_index)
				logging.info('*****Page %d analysis %s...'%(page_index+1,page_url))
				page_content = self.readPage(page_url)
				article_url = re.findall(REG_BLOG_TITLE, page_content)
				# logging.info('*****Page read %s...'%article_url)
				# article_url = self.findList(page_content, REG_BLOG_TITLE)
				for article in article_url:
					article_url = 'http://hi.baidu.com/%s'%article[1:]
					# article_list.append(article_url)
					# Scrape and store one article per iteration.
					article = self.articleBody(article_url)
					# logging.info(article)
					entry=Entry()
					entry.title = article.get('article_title')
					entry.author_name = article.get('article_author')
					# entry.date = self.formatDate(article.article_date, '')
					newcates=[]
					c = Category.all().filter('name =',article.get('article_category'))
					# Reuse the category if it already exists, else create it.
					if c and c.count()>0:
						newcates.append(c[0].key())
					else:
						cat= Category(name=article.get('article_category'))
						cat.put()
						newcates.append(cat.key())
					entry.categorie_keys = newcates;
					entry.content = article.get('article_content')
					entry.save(True)
			# Message: "move finished!".
			return self.render_content("baidu_move.html",{'msg':u'搬家完毕!'})
		else:
			return self.render_content("baidu_move.html",{'msg':''})
	def formatDate(self,str,format):
		# NOTE(review): parameter names shadow the builtins str/format.
		return datetime.datetime.strptime(str,format)
	def readPage(self,url):
		"""Fetch *url*; return the GBK-decoded page, None on non-200, 0 on fetch error."""
		logging.info('Read page:%s'%url)
		header = { 'Host':'hi.baidu.com',\
				'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 6.0; zh-CN; rv:1.9.1.9) Gecko/20100315 Firefox/3.5.9',\
				'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\
				'Accept-Language':'zh-cn,zh;q=0.5'}
		try:
			response = urlfetch.fetch(url=url,headers=header)
		except:
			logging.error('open exception')
			return 0
		if response.status_code == 200:
			html_content = response.content.replace(' ','').decode('gbk') #baidu use GBK encode
			return html_content
	def articleBody(self,url):
		"""Scrape one article page into a dict of its fields."""
		article_html = self.readPage(url).replace('\n','')
		#Get blog title
		article_title = re.findall(REG_ARTICLE_TITLE, article_html)[0]
		# Replace numeric HTML entities in the title with '-'.
		article_title = re.sub('&#(\d+);', '-', article_title)
		#Get blog time
		article_date = re.findall(REG_ARTICLE_DATETIME,article_html)[0]
		#Get blog category (strip the Chinese "category:" prefix)
		article_category = re.findall(REG_ARTICLE_CATAGORY,article_html)[0].replace(u'类别:','')
		#Get blog content, stripping the wrapping <td><div ...> / </div></td> markup
		article_content = re.findall(REG_ARTICLE_CONTENT,article_html)[0]\
			.replace('id="blog_text" class="cnt"','')[12:][:-11]
		article = {'article_url':url,\
			'article_title':article_title,\
			'article_date':article_date,\
			'article_category':article_category,\
			'article_content':article_content,\
			'article_author':self.author}
		return article
from baidu_move import * | Python |
from micolog_plugin import *
from model import OptionSet
class googleAnalytics(Plugin):
	"""Injects a stored Google Analytics snippet into every page footer."""
	def __init__(self):
		Plugin.__init__(self,__file__)
		self.author="xuming"
		self.authoruri="http://xuming.net"
		self.uri="http://xuming.net"
		self.description="Plugin for put google Analytics into micolog."
		self.name="google Analytics"
		self.version="0.1"
		self.register_filter('footer',self.filter)
	def filter(self,content,*arg1,**arg2):
		# Append the saved tracking snippet to the rendered footer HTML.
		code=OptionSet.getValue("googleAnalytics_code",default="")
		return content+str(code)
	def get(self,page):
		# Admin page: textarea for pasting the Analytics tracking code.
		code=OptionSet.getValue("googleAnalytics_code",default="")
		return '''<h3>Google Anslytics</h3>
	<form action="" method="post">
	<p>Analytics Code:</p>
	<textarea name="code" style="width:500px;height:100px">%s</textarea>
	<br>
	<input type="submit" value="submit">
	</form>'''%code
	def post(self,page):
		# Persist the submitted snippet and re-render the admin page.
		code=page.param("code")
		OptionSet.setValue("googleAnalytics_code",code)
		return self.get(page)
| Python |
from xheditor import * | Python |
from micolog_plugin import *
import logging,os
from model import *
from google.appengine.api import users
class xheditor(Plugin):
	"""Swaps Micolog's post editor for xhEditor.

	Serves the editor assets from xheditor.zip and injects the loader
	script into the admin editor page via the 'editor_header' filter.
	"""
	def __init__(self):
		Plugin.__init__(self,__file__)
		self.author="xuming"
		self.authoruri="http://xuming.net"
		self.uri="http://xuming.net"
		self.description="xheditor."
		self.name="xheditor plugin"
		self.version="0.1"
		# Serve the editor assets straight out of the zip bundle.
		self.register_urlzip('/xheditor/(.*)','xheditor.zip')
		self.register_filter('editor_header',self.head)
	def head(self,content,blog=None,*arg1,**arg2):
		# Pick the editor locale from the blog's language setting.
		if blog.language=='zh_CN':
			js='xheditor-zh-cn.js'
		else:
			js='xheditor-en.js'
		# Attach xhEditor to the #content textarea, with upload endpoints
		# restricted per media type.
		sret='''<script type="text/javascript" src="/xheditor/%s"></script>
	<script type="text/javascript">
	$(function(){
	$("#content").xheditor(true,{
	upImgUrl:'!/admin/uploadex?ext=jpg|png|jpeg|gif',
	upFlashUrl:'!/admin/uploadex?ext=swf',
	upMediaUrl:'!/admin/uploadex?ext=wmv|avi|wma|mp3|mid'});
	});
	</script>'''%js
		return sret
	def get(self,page):
		# Plugin admin page: static description only.
		return '''<h3>xheditor Plugin </h3>
	<p>This is a demo for write editor plugin.</p>
	<h4>feature</h4>
	<p><ol>
	<li>Change editor as xheditor.</li>
	</ol></p>
	'''
| Python |
# -*- coding: utf-8 -*-
from micolog_plugin import *
import datetime,logging,re
from google.appengine.api import urlfetch
'''
Created on 2010-7-1
@author: 火中仙
'''
class today_history(Plugin):
	"""Sidebar widget plugin: "today in history" events scraped from time.hudong.com.

	Registers a 'history' template filter which fetches hudong's page for
	the current date, strips anchor tags, and renders the <dd> event lines
	through the today_history.html template.
	"""
	def __init__(self):
		Plugin.__init__(self,__file__)
		self.author="Fred"
		self.authoruri="mailto:me@fengsage.com"
		self.uri="http://fengsageblog.appspot.com"
		self.description="历史上的今天"
		self.name="TodayHistory"
		self.version="0.2"
		self.register_filter('history',self.getTodayHistory)
	def getTodayHistory(self,page=None,*arg1,**arg2):
		"""Render today's event list; render the empty widget when the fetch fails."""
		year = datetime.date.today().year
		month = datetime.date.today().month
		day = datetime.date.today().day
		# Zero-pad day and month to two digits for the URL.
		if day<10: day = '0%s'%day
		if month<10: month = '0%s'%month
		url = u'http://time.hudong.com/3%s%s%s'%(year,month,day)
		logging.info(url)
		html = self.readPage(url)
		# Bug fix: readPage returns 0 on fetch error and None on non-200;
		# the original called .replace() on that value unguarded and
		# crashed with AttributeError.  Fall back to the empty widget.
		if not html:
			return self.render_content("today_history.html")
		html = html.replace('—', '')
		# Drop anchor tags, keeping their inner text.
		html = re.sub("<a([^>]*?)>","",html)
		html = re.sub("</a>","",html)
		REG_STR = u'<dd>(.*)</dd>'
		events = re.findall(REG_STR, html)  # renamed from 'list' (shadowed the builtin)
		if events:
			return self.render_content("today_history.html",{'list':events})
		else:
			return self.render_content("today_history.html")
	def readPage(self,url):
		"""Fetch *url*; return the decoded page, None on non-200, 0 on fetch error."""
		logging.info('Read page:%s'%url)
		header = { 'Host':'time.hudong.com',\
				'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 6.0; zh-CN; rv:1.9.1.9) Gecko/20100315 Firefox/3.5.9',\
				'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\
				'Accept-Language':'zh-cn,zh;q=0.5'}
		try:
			response = urlfetch.fetch(url=url,headers=header)
		except:
			logging.error('open exception')
			return 0
		if response.status_code == 200:
			html_content = response.content.decode('utf-8').replace(' ','')
			# Bug fix: the original returned str(html_content), which under
			# Python 2 raises UnicodeEncodeError as soon as the page contains
			# any non-ASCII character; return the unicode string directly.
			return html_content
| Python |
from today_history import * | Python |
# -*- coding: utf-8 -*-
from micolog_plugin import *
from model import *
import re,datetime
from google.appengine.api import urlfetch
from xml.dom import minidom
# Browser-like User-Agent sent with feed fetches.
HEADERS = { 'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 6.0; zh-CN; rv:1.9.1.9) Gecko/20100315 Firefox/3.5.9'}
class FeedList(db.Model):
	"""Datastore model: one RSS feed source for the PickRss importer (v0.3 schema)."""
	author = db.StringProperty(multiline=False,default='robot')  # author name stamped on imported entries
	name = db.StringProperty(multiline=False,default='Fred')
	feedurl = db.StringProperty(multiline=False,default='http://hi.baidu.com/429263181/rss')
	latest = db.StringProperty(multiline=False,default='last')  # link of last imported item; 'last' = never run
	abconf = db.StringListProperty()  # selected content filters (admin-form checkbox values)
	repReg = db.StringProperty(multiline=False)  # regex replacements as 'pattern:replacement' pairs
	autoUrl = db.BooleanProperty(default=False)  # auto-generate entry slug
	autoCategory = db.BooleanProperty(default=False)  # mirror the feed item's category locally
	autoTimeFormat = db.StringProperty(multiline=False,default=None)  # strptime format for syncing pubDate, or None
	allow_comment = db.BooleanProperty(default=True) #allow comment
class pick_rss(Plugin):
	def __init__(self):
		"""Set plugin metadata and map the 'pick' url to the harvesting run."""
		Plugin.__init__(self,__file__)
		self.author="Fred"
		self.authoruri="mailto:ad@fengsage.cn"
		self.uri="http://fengsageblog.appspot.com"
		self.description="RSS采集"
		self.name="PickRss"
		self.version="0.3"
		# Requesting the 'pick' url triggers getFeed (the actual import).
		self.register_urlmap('pick',self.getFeed)
	# Feed list page; also handles deletion via ?delid=<name>.
	def get(self,page):
		"""Render the feed list, or delete the feed named by the delid parameter."""
		if page.param("delid")=='':
			listit = FeedList()
			querys = listit.all()
			return self.render_content("pick_rss.html",{'list':querys})
		else:
			# Delete the (single) feed whose name matches delid.
			listit = FeedList()
			querys=listit.all().filter('name =', page.param("delid")).fetch(1)
			for query in querys:
				query.delete()
			# Message: "deleted successfully" with a back link.
			return u'删除成功! <a href="?">返回</a>'
	# Create a new feed record from the submitted admin form.
	def post(self,page):
		"""Save a new FeedList row built from the admin form fields."""
		query = FeedList()
		query.name =page.param("name")
		query.feedurl = page.param("feedurl")
		query.abconf = page.request.get_all('abconf')
		query.author = page.param("author")
		# Checkbox fields arrive as 'on' when ticked.
		if page.param("allowComment") == 'on':
			query.allow_comment = True
		else:
			query.allow_comment = False
		if page.param("autoUrl") == 'on':
			query.autoUrl = True
		else:
			query.autoUrl = False
		if page.param("autoCategory") == 'on':
			query.autoCategory = True
		else:
			query.autoCategory = False
		if page.param("repReg"):
			query.repReg = page.param("repReg")
		if page.param("autoTime") == 'on':
			# strptime pattern assembled from the separate date and time fields.
			query.autoTimeFormat = page.param("spDateMod") + " " + page.param("spTimeMod")
		# logging.info(query.autoTimeFormat)
		# logging.info(query.autoCategory)
		# logging.info(query.author)
		query.put()
		return self.get(page)
 def getFeed(self,page=None,*arg1,**arg2):
  """Fetch every configured RSS feed and import its new items as Entry records.

  For each FeedList row: download the feed, normalise GB2312/GBK encodings
  to UTF-8, parse with minidom, and walk the <item> elements, stopping at
  the last previously-imported item (detal.latest) to avoid duplicates.
  """
  listit = FeedList()
  querys = listit.all()
  # Iterate over every feed stored in the datastore.
  for detal in querys:
   # Feed address.
   url=str(detal.feedurl)
   # logging.info("**"+url);
   # Fetch the RSS URL, sending browser-like HEADERS (defined elsewhere in this module).
   result = urlfetch.fetch(url,None,urlfetch.GET,HEADERS)
   # logging.info(rss_source)
   if result.status_code == 200:
    rss_source = result.content
    # Encoding conversion: re-encode GB2312/GBK payloads as UTF-8 and
    # patch the XML declaration to match.
    if 'encoding="gb2312"' in rss_source:
     rss_source = result.content.decode('gb2312').encode('UTF-8')
     rss_source = rss_source.replace('encoding="gb2312"','encoding="utf-8"')
    if 'encoding="GBK"' in rss_source:
     rss_source = result.content.decode('GBK').encode('UTF-8')
     rss_source = rss_source.replace('encoding="GBK"','encoding="utf-8"')
    # Parse the XML.
    file_xml = minidom.parseString(rss_source)
    rssNOs=file_xml.getElementsByTagName('rss')
    rssver='2.0'
    # Detect the RSS version from the <rss version="..."> attribute.
    for rssNO in rssNOs:
     rssver=rssNO.getAttribute('version')
    if rssver=='1.0':
     return u'暂不支持RSS1.0 <a href="?">返回</a>'
    else:
     # Element names used by RSS 2.0.
     artList='item'
     artTitle='title'
     artLink='link'
     artText='description'
     artTime='pubDate'
     artCategory = 'category'
     # All article nodes in the feed.
     items = file_xml.getElementsByTagName(artList)
     flag=''
     latestId=detal.latest
     ifFirst=0
     # Walk items in feed order; detal.latest remembers the newest item's
     # link so the next run stops before re-importing old posts. Optional
     # time sync keeps local entry dates equal to the remote pubDate.
     for item in items:
      entry=Entry()
      entry.author_name = detal.author
      entry.title=item.getElementsByTagName(artTitle)[0].firstChild.data
      rss_category = item.getElementsByTagName(artCategory)[0].firstChild.data
      # logging.info("***********"+rss_category)
      # Category sync: reuse an existing Category or create one.
      if detal.autoCategory:
       newcates=[]
       c = Category.all().filter('name =',rss_category)
       # Does the category already exist?
       if c and c.count()>0:
        newcates.append(c[0].key())
       else:
        cat= Category(name=rss_category,slug=autoSlug(rss_category))
        cat.put()
        newcates.append(cat.key())
       entry.categorie_keys=newcates;
      # Auto-build the slug/URL via Google Translate.
      if detal.autoUrl:
       entry.slug = autoSlug(entry.title)
      # Auto time sync: parse pubDate with the feed's configured format.
      if detal.autoTimeFormat:
       # Format the timestamp.
       format = detal.autoTimeFormat
       article_date = item.getElementsByTagName(artTime)[0].firstChild.data
       entry.date = formatDate(article_date, format)
      # Comments enabled?
      if detal.allow_comment:
       entry.allow_comment = True
      else:
       entry.allow_comment = False
      # RSS 1.0 carries the link in an href attribute; 2.0 as text.
      if rssver=='1.0':
       flag=item.getElementsByTagName(artLink)[0].getAttribute('href')
      else:
       flag=item.getElementsByTagName(artLink)[0].firstChild.data
      # Remember the newest item's link to prevent duplicate imports.
      if latestId=='last':
       detal.latest=flag
       latestId=flag
       ifFirst=1
       db.put(detal)
      else:
       if flag==latestId:
        break
       else:
        if ifFirst==0:
         detal.latest=flag
         db.put(detal)
         ifFirst=1
        db.put(detal)
      artContent=item.getElementsByTagName(artText)[0].firstChild.data
      # logging.info(artContent)
      # Content filtering, driven by the feed's abconf flags.
      for abc in detal.abconf:
       # '0': strip all <a> tags.
       if abc=='0' :
        artContent = re.sub("<a([^>]*?)>","",artContent)
        artContent = re.sub("</a>","",artContent)
       # '1': strip all <img> tags.
       if abc=='1' :
        artContent = re.sub("<img(.*?>)","",artContent)
       # '2': apply user-supplied "pattern:replacement" pairs (comma separated).
       if abc=='2':
        if detal.repReg:
         for txt in detal.repReg.split(','):
          rep = txt.split(':')
          artContent = re.sub(rep[0],rep[1],artContent)
      # Replace newlines with <br /> for HTML display.
      artContent=artContent.replace('\n', "<br />")
      entry.content=artContent
      entry.save(True)
def formatDate(str,format):
 """Parse the string *str* into a datetime using strptime pattern *format*.

 NOTE(review): the parameter names shadow the ``str``/``format`` builtins;
 they are kept unchanged for call compatibility.
 """
 parsed = datetime.datetime.strptime(str, format)
 return parsed
def autoSlug(str):
 """Build an ASCII slug for a (Chinese) title via the Google Translate API.

 Translates the title to English, then joins words with '-' and strips
 any character outside [a-zA-Z0-9_].
 NOTE(review): on a non-200 response this falls through and returns None;
 callers appear to tolerate that — confirm.
 """
 translate_url = 'http://translate.google.com/translate_a/t?client=t&text=%s&hl=en&sl=zh-CN&tl=en&pc=0'%str.replace(' ','-').encode("utf-8")
 # HEADERS is a browser-like header dict defined elsewhere in this module.
 translate_result = urlfetch.fetch(translate_url,None,urlfetch.GET,HEADERS)
 if translate_result.status_code == 200:
  # Pull the "trans":"..." field out of the JSON-ish response body.
  trans_reg = u'"trans":"([^"]*)"'
  translate_content = re.findall(trans_reg, translate_result.content)[0]
  translate_content = translate_content \
    .replace(' ','-') \
    .replace('\\','-')
  # Collapse every remaining non-word character to '-'.
  translate_content = re.sub(u'[^a-zA-Z\d_]','-', translate_content)
  logging.info("*********"+translate_content)
  return translate_content
# Developer scratchpad: manual smoke tests for formatDate/autoSlug (Python 2
# print statements kept commented out).
if __name__=="__main__":
 starttime = '2010-05-15 14:14'
 format="%Y-%m-%d %H:%M";
 # print datetime.datetime.strptime(starttime,format)
 # print autoSlugTest('Django Admin Inreface's Note (admin.py)')
| Python |
from pick_rss import * | Python |
from sys_plugin import * | Python |
# -*- coding: utf-8 -*-
from micolog_plugin import *
import logging,re
from google.appengine.api import mail
from model import *
from google.appengine.api import users
from base import BaseRequestHandler,urldecode
from google.appengine.ext.webapp import template
# Default mail body sent to the blog owner on a new comment; filled by
# %-formatting with keys: title, author, email, weburl, content, commenturl.
SBODY='''New comment on your post "%(title)s"
Author : %(author)s
E-mail : %(email)s
URL : %(weburl)s
Comment:
%(content)s
You can see all comments on this post here:
%(commenturl)s
'''
# Default mail body sent to an earlier commenter when a new comment
# references theirs (same keys, minus the email line).
BBODY='''Hi~ New reference on your comment for post "%(title)s"
Author : %(author)s
URL : %(weburl)s
Comment:
%(content)s
You can see all comments on this post here:
%(commenturl)s
'''
class NotifyHandler(BaseRequestHandler):
 """Admin page for editing the comment-notification mail templates."""
 def __init__(self):
  BaseRequestHandler.__init__(self)
  self.current="config"
  # Load saved templates, falling back to the module-level defaults.
  self.sbody=OptionSet.getValue('sys_plugin_sbody',SBODY)
  self.bbody=OptionSet.getValue('sys_plugin_bbody',BBODY)
 def get(self):
  # Render the plugin's setup form inside the admin chrome.
  self.template_vals.update({'self':self})
  content=template.render('plugins/sys_plugin/setup.html',self.template_vals)
  self.render2('views/admin/setup_base.html',{'m_id':'sysplugin_notify','content':content})
  #Also you can use:
  #self.render2('plugins/sys_plugin/setup2.html',{'m_id':'sysplugin_notify','self':self})
 def post(self):
  # Persist the edited templates and the notify on/off switch.
  self.bbody=self.param('bbody')
  self.sbody=self.param('sbody')
  self.blog.comment_notify_mail=self.parambool('comment_notify_mail')
  self.blog.put()
  OptionSet.setValue('sys_plugin_sbody',self.sbody)
  OptionSet.setValue('sys_plugin_bbody',self.bbody)
  self.get()
class sys_plugin(Plugin):
 """Built-in system plugin: generator meta tag, footer stamp, comment
 blocklist filtering, and comment notification mails."""
 def __init__(self):
  Plugin.__init__(self,__file__)
  self.author="xuming"
  self.authoruri="http://xuming.net"
  self.uri="http://xuming.net"
  self.description="System plugin for micolog"
  self.name="Sys Plugin"
  self.version="0.2"
  self.blocklist=OptionSet.getValue("sys_plugin_blocklist",default="")
  # Hook into page rendering, URL routing, the admin menu and comment events.
  self.register_filter('head',self.head)
  self.register_filter('footer',self.footer)
  self.register_urlmap('sys_plugin/setup',self.setup)
  self.register_urlhandler('/admin/sys_plugin/notify',NotifyHandler)
  self.register_setupmenu('sysplugin_notify',_('Notify'),'/admin/sys_plugin/notify')
  self.register_action('pre_comment',self.pre_comment)
  self.register_action('save_comment',self.save_comment)
  self.sbody=OptionSet.getValue('sys_plugin_sbody',SBODY)
  self.bbody=OptionSet.getValue('sys_plugin_bbody',BBODY)
 def head(self,content,blog=None,*arg1,**arg2):
  # 'head' filter: append a generator meta tag to the page head.
  content=content+'<meta name="generator" content="Micolog %s" />'%blog.version
  return content
 def footer(self,content,blog=None,*arg1,**arg2):
  # 'footer' filter: append a powered-by comment to the page footer.
  return content+'<!--Powered by micolog %s-->'%blog.version
 def setup(self,page=None,*arg1,**arg2):
  """Blocklist setup page (GET shows the form, POST saves it)."""
  if not page.is_login:
   page.redirect(users.create_login_url(page.request.uri))
  tempstr='''
		<p>blocklist:</p>
		<form action="" method="post">
		<p>
		<textarea name="ta_list" style="width:400px;height:300px">%s</textarea>
		</p>
		<input type="submit" value="submit">
		</form>'''
  if page.request.method=='GET':
   page.render2('views/admin/base.html',{'m_id':'sysplugin_block','content':tempstr%self.blocklist})
  else:
   self.blocklist=page.param("ta_list")
   OptionSet.setValue("sys_plugin_blocklist",self.blocklist)
   page.render2('views/admin/base.html',{'m_id':'sysplugin_block','content':tempstr%self.blocklist})
 def get(self,page):
  # Plugin description page shown in the admin plugin list.
  return '''<h3>Sys Plugin</h3>
    <p>This is a system plugin for micolog. <br>Also a demo for how to write plugin for micolog.</p>
    <h4>feature</h4>
    <p><ol>
    <li>Add Meta <meta name="generator" content="Micolog x.x" /></li>
    <li>Add footer "<!--Powered by micolog x.x-->"</li>
    <li>Comments Filter with blocklist <a href="/e/sys_plugin/setup">Setup</a></li>
    <li>Comment Notify <a href="/admin/sys_plugin/notify">Setup</a></li>
    </ol></p>
    '''
 def pre_comment(self,comment,*arg1,**arg2):
  # Reject the comment if it contains any blocklisted substring.
  for s in self.blocklist.splitlines():
   if comment.content.find(s)>-1:
    raise Exception
 def save_comment(self,comment,*arg1,**arg2):
  # After a comment is saved, send notification mails if enabled.
  if self.blog.comment_notify_mail:
   self.notify(comment)
 def notify(self,comment):
  """Mail the blog owner about a new comment and, if the comment references
  earlier comments via '#comment-<id>', mail those commenters too."""
  sbody=self.sbody.decode('utf-8')
  bbody=self.bbody.decode('utf-8')
  # Skip owner notification for the admin's own comments.
  if self.blog.comment_notify_mail and self.blog.owner and not users.is_current_user_admin() :
   sbody=sbody%{'title':comment.entry.title,
     'author':comment.author,
     'weburl':comment.weburl,
     'email':comment.email,
     'content':comment.content,
     'commenturl':comment.entry.fullurl+"#comment-"+str(comment.key().id())
     }
   mail.send_mail_to_admins(self.blog.owner.email(),'Comments:'+comment.entry.title, sbody,reply_to=comment.email)
  #reply comment mail notify
  refers = re.findall(r'#comment-(\d+)', comment.content)
  if len(refers)!=0:
   replyIDs=[int(a) for a in refers]
   commentlist=comment.entry.comments()
   # Only commenters who opted into reply notification, de-duplicated.
   emaillist=[c.email for c in commentlist if c.reply_notify_mail and c.key().id() in replyIDs]
   emaillist = {}.fromkeys(emaillist).keys()
   for refer in emaillist:
    if self.blog.owner and mail.is_email_valid(refer):
     emailbody = bbody%{'title':comment.entry.title,
       'author':comment.author,
       'weburl':comment.weburl,
       'email':comment.email,
       'content':comment.content,
       'commenturl':comment.entry.fullurl+"#comment-"+str(comment.key().id())
       }
     message = mail.EmailMessage(sender = self.blog.owner.email(),subject = 'Comments:'+comment.entry.title)
     message.to = refer
     message.body = emailbody
     message.send()
| Python |
from micolog_plugin import *
from google.appengine.api import memcache
from google.appengine.api.labs import taskqueue
from wp_import import *
from model import *
import logging,math
from django.utils import simplejson
from base import BaseRequestHandler,urldecode
class waphandler(BaseRequestHandler):
 """Task-queue worker + AJAX progress endpoint for the WordPress import.

 GET reports progress (or stops the import); POST pops one item from the
 memcached import_wordpress object and writes it to the datastore, then
 re-queues itself until the data is drained.
 """
 def get(self):
  if not self.is_login:
   self.redirect(users.create_login_url(self.request.uri))
  action=self.param('action')
  if action=='stop':
   # Abort the import by discarding the memcached state.
   memcache.delete("imt")
   #OptionSet.remove('wpimport_data')
   self.write('"ok"')
   return
  imt=memcache.get('imt')
  #imt=OptionSet.getValue('wpimport_data')
  if imt and imt.cur_do:
   # Percentage done; count() is what remains, total the original amount.
   process=100-math.ceil(imt.count()*100/imt.total)
   if imt.cur_do[0]=='cat':
    msg="importing category '%s'"%imt.cur_do[1]['name']
   elif imt.cur_do[0]=='entry':
    msg="importing entry '%s'"%imt.cur_do[1]['title']
   else:
    msg="start importing..."
   # JSON triple: (percent, message, still_running).
   self.write(simplejson.dumps((process,msg,not process==100)))
  else:
   self.write(simplejson.dumps((-1,"Have no data to import!",False)))
 def post(self):
  if not self.is_login:
   self.redirect(users.create_login_url(self.request.uri))
  try:
   #global imt
   imt=memcache.get("imt")
   #imt=OptionSet.getValue('wpimport_data')
   import_data=imt.pop()
   #if tdata=='men':
   memcache.set('imt',imt)
   #else:
   # OptionSet.setValue('wpimport_data',imt)
   if import_data:
    try:
     if import_data[0]=='cat':
      # Category item: create or update by slug (used as key_name).
      _cat=import_data[1]
      nicename=_cat['slug']
      cat=Category.get_by_key_name(nicename)
      if not cat:
       cat=Category(key_name=nicename)
      cat.name=_cat['name']
      cat.slug=nicename
      cat.put()
     elif import_data[0]=='entry':
      _entry=import_data[1]
      logging.debug('importing:'+_entry['title'])
      # Entries are keyed by a hash of the title, so re-imports update.
      hashkey=str(hash(_entry['title']))
      entry=Entry.get_by_key_name(hashkey)
      if not entry:
       entry=Entry(key_name=hashkey)
      entry.title=_entry['title']
      entry.author=self.login_user
      entry.is_wp=True
      #entry.date=datetime.strptime( _entry['pubDate'],"%a, %d %b %Y %H:%M:%S +0000")
      # Try RFC-822 pubDate (timezone stripped), then ISO-ish, else now.
      try:
       entry.date=datetime.strptime( _entry['pubDate'][:-6],"%a, %d %b %Y %H:%M:%S")
      except:
       try:
        entry.date=datetime.strptime( _entry['pubDate'][0:19],"%Y-%m-%d %H:%M:%S")
       except:
        entry.date=datetime.now()
      entry.entrytype=_entry['post_type']
      entry.content=_entry['content']
      entry.excerpt=_entry['excerpt']
      entry.post_id=_entry['post_id']
      entry.slug=urldecode(_entry['post_name'])
      entry.entry_parent=_entry['post_parent']
      entry.menu_order=_entry['menu_order']
      # Link to categories created earlier in the import.
      for cat in _entry['categories']:
       c=Category.get_by_key_name(cat['slug'])
       if c:
        entry.categorie_keys.append(c.key())
      entry.settags(','.join(_entry['tags']))
##      for tag in _entry['tags']:
##       entry.tags.append(tag)
      # save(True) publishes the entry; save() keeps it a draft.
      if _entry['published']:
       entry.save(True)
      else:
       entry.save()
      for com in _entry['comments']:
       try:
        date=datetime.strptime(com['date'][0:19],"%Y-%m-%d %H:%M:%S")
       except:
        date=datetime.now()
       comment=Comment(author=com['author'],
         content=com['content'],
         entry=entry,
         date=date
         )
       try:
        comment.email=com['email']
        comment.weburl=com['weburl']
       except:
        pass
       comment.save()
    finally:
     # Always chain the next task so the import keeps draining.
     queue=taskqueue.Queue("import")
     queue.add(taskqueue.Task( url="/admin/wp_import"))
  except Exception,e :
   logging.info("import error: %s"%e.message)
class wordpress(Plugin_importbase):
 """Importer plugin UI: accepts a WXR upload and kicks off the task queue."""
 def __init__(self):
  Plugin_importbase.__init__(self,__file__,"wordpress","Import posts, pages, comments, categories, and tags from a WordPress export file.")
  self.author="xuming"
  self.authoruri="http://xuming.net"
  self.uri="http://xuming.net"
  self.description="Plugin for import wxr file."
  self.name="Wordpress Import"
  self.version="0.7"
  self.register_urlhandler('/admin/wp_import',waphandler)
 def get(self,page):
  # Render the upload form.
  return self.render_content("wpimport.html",{'name':self.name})
 def post(self,page):
  """Parse the uploaded WXR file, stash it in memcache, start the queue."""
  try:
   queue=taskqueue.Queue("import")
   wpfile=page.param('wpfile')
   #global imt
   imt=import_wordpress(wpfile)
   imt.parse()
   #OptionSet.setValue('wpimport_data',imt)
   memcache.set("imt",imt)
   queue.add(taskqueue.Task( url="/admin/wp_import"))
   return self.render_content("wpimport.html",{'postback':True})
  except Exception , e:
   return self.error("Import Error:<p style='color:red;font-size:11px;font-weight:normal'>%s</p>"%e.message)
| Python |
###Import post,page,category,tag from wordpress export file
import xml.etree.ElementTree as et
import logging
###import from wxr file
class import_wordpress:
 """Parser for a WordPress eXtended RSS (WXR 1.0) export file.

 Construct with the raw XML string, call parse(), then call pop()
 repeatedly: it yields ('cat', data) pairs first, then ('entry', data),
 and finally None. __getstate__/__setstate__ keep the pickled object
 small enough for memcache.
 """
 def __init__(self,source):
  self.categories=[]
  self.tags=[]
  self.entries=[]
  self.source=source
  self.doc=et.fromstring(source)
  # WXR spreads data over three XML namespaces.
  self.wpns='{http://wordpress.org/export/1.0/}'
  self.contentns="{http://purl.org/rss/1.0/modules/content/}"
  self.excerptns="{http://wordpress.org/export/1.0/excerpt/}"
  # NOTE(review): _namespace_map is a private ElementTree global; kept as-is.
  et._namespace_map[self.wpns]='wp'
  et._namespace_map[self.contentns]='content'
  et._namespace_map[self.excerptns]='excerpt'
  self.channel=self.doc.find('channel')
  self.dict={'category':self.wpns+'category','tag':self.wpns+'tag','item':'item'}
  self.cur_do=None
 def parse(self):
  """Populate self.categories/self.tags/self.entries from the document."""
  categories=self.channel.findall(self.wpns+'category')
  #parse categories
  for cate in categories:
   slug=cate.findtext(self.wpns+'category_nicename')
   name=cate.findtext(self.wpns+'cat_name')
   self.categories.append({'slug':slug,'name':name})
  #parse tags
  tags=self.channel.findall(self.wpns+'tag')
  for tag in tags:
   slug=tag.findtext(self.wpns+'tag_slug')
   name=tag.findtext(self.wpns+'tag_name')
   self.tags.append({'slug':slug,'name':name})
  #parse entries
  items=self.channel.findall('item')
  for item in items:
   title=item.findtext('title')
   try:
    entry={}
    entry['title']=item.findtext('title')
    logging.info(title)
    entry['pubDate']=item.findtext('pubDate')
    entry['post_type']=item.findtext(self.wpns+'post_type')
    entry['content']= item.findtext(self.contentns+'encoded')
    entry['excerpt']= item.findtext(self.excerptns+'encoded')
    entry['post_id']=int(item.findtext(self.wpns+'post_id'))
    entry['post_name']=item.findtext(self.wpns+'post_name')
    entry['post_parent']=int(item.findtext(self.wpns+'post_parent'))
    entry['menu_order']=int(item.findtext(self.wpns+'menu_order'))
    entry['tags']=[]
    entry['categories']=[]
    cats=item.findall('category')
    for cat in cats:
     # 'in' replaces the removed dict.has_key (same behaviour, py3-safe).
     if 'nicename' in cat.attrib:
      nicename=cat.attrib['nicename']
      cat_type=cat.attrib['domain']
      if cat_type=='tag':
       entry['tags'].append(cat.text)
      else:
       entry['categories'].append({'slug':nicename,'name':cat.text})
    pub_status=item.findtext(self.wpns+'status')
    entry['published']= pub_status=='publish'
    entry['comments']=[]
    comments=item.findall(self.wpns+'comment')
    for com in comments:
     try:
      comment_approved=int(com.findtext(self.wpns+'comment_approved'))
     except:
      comment_approved=0
     if comment_approved:
      comment=dict(author=com.findtext(self.wpns+'comment_author'),
        content=com.findtext(self.wpns+'comment_content'),
        email=com.findtext(self.wpns+'comment_author_email'),
        weburl=com.findtext(self.wpns+'comment_author_url'),
        date=com.findtext(self.wpns+'comment_date')
        )
      # BUG FIX: the parsed comment dict was built but never stored, so
      # every comment was silently dropped during import.
      entry['comments'].append(comment)
    self.entries.append(entry)
   except:
    logging.info("parse wordpress file error")
  self.total=self.count()
  self.cur_do=("begin","begin")
  # Drop the raw XML so the pickled object stays small (memcache).
  self.source=None
  self.doc=None
 def count(self):
  """Number of categories + entries still waiting to be imported."""
  return len(self.categories)+len(self.entries)
 def pop(self):
  """Return the next ('cat'|'entry', data) pair, or None when drained."""
  if len(self.categories)>0:
   self.cur_do=('cat',self.categories.pop())
   return self.cur_do
  if len(self.entries)>0:
   self.cur_do=('entry', self.entries.pop())
   return self.cur_do
  return None
 def __getstate__(self):
  # Shrink cur_do to just a label so pickling stays cheap.
  if self.cur_do[0]=='cat':
   c=('cat',self.cur_do[1]['name'])
  elif self.cur_do[0]=='entry':
   c=('entry',self.cur_do[1]['title'])
  else:
   c=('begin','begin')
  return (c,self.total,self.categories,self.tags,self.entries)
 def __setstate__(self,data):
  c=data[0]
  if c[0]=='cat':
   self.cur_do=('cat',{'name':c[1]})
  elif c[0]=='entry':
   self.cur_do=('entry',{'title':c[1]})
  else:
   self.cur_do=c
  self.total,self.categories,self.tags,self.entries=data[1:]
# Developer smoke test: parse a local WXR file and drain it (Python 2
# print statements; hard-coded Windows path).
if __name__=='__main__':
 import sys
 #f=sys.argv[1]
 f='D:\\work\\micolog\\wordpress.xml'
 wp=import_wordpress(open(f).read())
 wp.parse()
 print wp.count()
 item=wp.pop()
 while item:
  print item[0]
  item=wp.pop()
| Python |
from wordpress import * | Python |
# -*- coding: utf-8 -*-
import os,logging
import re
from functools import wraps
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.api import memcache
from google.appengine.api import urlfetch
##import app.webapp as webapp2
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.utils.translation import activate
from django.template import TemplateDoesNotExist
from django.conf import settings
settings._target = None
#from model import g_blog,User
#activate(g_blog.language)
from google.appengine.api.labs import taskqueue
import wsgiref.handlers
from mimetypes import types_map
from datetime import datetime, timedelta
import urllib
import traceback
import micolog_template
logging.info('module base reloaded')
def urldecode(value):
 """Unquote *value* twice (handles doubly percent-escaped slugs) and decode as UTF-8."""
 once = urllib.unquote(value)
 twice = urllib.unquote(once)
 return twice.decode('utf8')
def urlencode(value):
 """UTF-8 encode *value*, then percent-escape it for use inside a URL."""
 raw = value.encode('utf8')
 return urllib.quote(raw)
def sid():
 """Generate a timestamp-based id string: YYMMDDHHMMSS plus microseconds.

 BUG FIX: this module imports the *class* via ``from datetime import
 datetime, timedelta``, so the previous ``datetime.datetime.now()``
 raised AttributeError at runtime.
 """
 now = datetime.now()
 return now.strftime('%y%m%d%H%M%S') + str(now.microsecond)
def requires_admin(method):
 """Decorator: allow the handler only for a logged-in admin or registered author.

 Anonymous visitors are redirected to the login page; logged-in users who
 are neither admins nor authors get a 403.
 """
 @wraps(method)
 def wrapper(self, *args, **kwargs):
  if not self.is_login:
   self.redirect(users.create_login_url(self.request.uri))
   return
  if self.is_admin or self.author:
   return method(self, *args, **kwargs)
  return self.error(403)
 return wrapper
def printinfo(method):
 """Debug decorator: dump the handler object, its dir() and its attribute
 names to stdout before invoking *method* (Python 2 print statements)."""
 @wraps(method)
 def wrapper(self, *args, **kwargs):
  print self #.__name__
  print dir(self)
  for x in self.__dict__:
   print x
  return method(self, *args, **kwargs)
 return wrapper
# Only XMLHttpRequest (AJAX) requests may reach the wrapped handler method.
def ajaxonly(method):
 """Decorator: respond 404 unless the X-Requested-With AJAX marker is present."""
 @wraps(method)
 def wrapper(self, *args, **kwargs):
  if self.request.headers["X-Requested-With"] == "XMLHttpRequest":
   return method(self, *args, **kwargs)
  self.error(404)
 return wrapper
# Only requests whose Referer points at our own host may pass.
def hostonly(method):
 """Decorator: respond 404 unless the Referer header names this host."""
 @wraps(method)
 def wrapper(self, *args, **kwargs):
  referer = self.request.headers['Referer']
  # Offset 7 skips the leading "http://" of the Referer URL.
  if not referer.startswith(os.environ['HTTP_HOST'], 7):
   self.error(404)
   return
  return method(self, *args, **kwargs)
 return wrapper
def format_date(dt):
 """Format *dt* as an RFC-1123 style HTTP date string (value assumed UTC)."""
 http_date_format = '%a, %d %b %Y %H:%M:%S GMT'
 return dt.strftime(http_date_format)
def cache(key="",time=3600):
 """Decorator factory: memcache a handler's full response for *time* seconds.

 Cache key is *key* + the request path+query. The cached value is a tuple
 (body, last_modified, status_code, headers). On a hit the stored response
 is replayed; on a miss the handler runs and its response is stored.
 Does nothing when g_blog.enable_memcache is off.
 """
 def _decorate(method):
  def _wrapper(*args, **kwargs):
   from model import g_blog
   if not g_blog.enable_memcache:
    method(*args, **kwargs)
    return
   request=args[0].request
   response=args[0].response
   skey=key+ request.path_qs
   #logging.info('skey:'+skey)
   html= memcache.get(skey)
   #arg[0] is BaseRequestHandler object
   if html:
    # Cache hit: replay status, headers and body from the stored tuple.
    logging.info('cache:'+skey)
    response.last_modified =html[1]
    ilen=len(html)
    if ilen>=3:
     response.set_status(html[2])
    if ilen>=4:
     # NOTE: this loop reuses the name 'skey' for header names; harmless
     # here because the hit path never touches memcache.set below.
     for skey,value in html[3].items():
      response.headers[skey]=value
    response.out.write(html[0])
   else:
    # Cache miss: run the handler, then capture the whole response.
    if 'last-modified' not in response.headers:
     response.last_modified = format_date(datetime.utcnow())
    method(*args, **kwargs)
    result=response.out.getvalue()
    # Reaches into webapp's private status field to record the code.
    status_code = response._Response__status[0]
    logging.debug("Cache:%s"%status_code)
    memcache.set(skey,(result,response.last_modified,status_code,response.headers),time)
  return _wrapper
 return _decorate
#-------------------------------------------------------------------------------
class PingbackError(Exception):
 """Raised if the remote server caused an exception while pingbacking.
 This is not raised if the pingback function is unable to locate a
 remote server.
 """
 # Map XML-RPC pingback fault codes to human-readable default messages.
 _ = lambda x: x
 default_messages = {
  16: _(u'source URL does not exist'),
  17: _(u'The source URL does not contain a link to the target URL'),
  32: _(u'The specified target URL does not exist'),
  33: _(u'The specified target URL cannot be used as a target'),
  48: _(u'The pingback has already been registered'),
  49: _(u'Access Denied')
 }
 del _
 def __init__(self, fault_code, internal_message=None):
  Exception.__init__(self)
  self.fault_code = fault_code
  self._internal_message = internal_message
 def as_fault(self):
  """Return the pingback errors XMLRPC fault."""
  # NOTE(review): 'Fault' is not among this module's visible imports —
  # presumably xmlrpclib.Fault, imported elsewhere; confirm.
  return Fault(self.fault_code, self.internal_message or
    'unknown server error')
 @property
 def ignore_silently(self):
  """If the error can be ignored silently."""
  return self.fault_code in (17, 33, 48, 49)
 @property
 def means_missing(self):
  """If the error means that the resource is missing or not
  accepting pingbacks.
  """
  return self.fault_code in (32, 33)
 @property
 def internal_message(self):
  if self._internal_message is not None:
   return self._internal_message
  return self.default_messages.get(self.fault_code) or 'server error'
 @property
 def message(self):
  # NOTE(review): '_' was deleted from the class above; this relies on a
  # module/global '_' (gettext?) being defined elsewhere — confirm.
  msg = self.default_messages.get(self.fault_code)
  if msg is not None:
   return _(msg)
  return _(u'An unknown server error (%s) occurred') % self.fault_code
class util:
 """Helpers that defer trackback/pingback delivery to the task queue."""
 @classmethod
 def do_trackback(cls, tbUrl=None, title=None, excerpt=None, url=None, blog_name=None):
  # Queue an asynchronous trackback ping, handled by /admin/do/trackback_ping.
  taskqueue.add(url='/admin/do/trackback_ping',
    params={'tbUrl': tbUrl,'title':title,'excerpt':excerpt,'url':url,'blog_name':blog_name})
 #pingback ping
 @classmethod
 def do_pingback(cls,source_uri, target_uri):
  # Queue an asynchronous pingback, handled by /admin/do/pingback_ping.
  taskqueue.add(url='/admin/do/pingback_ping',
    params={'source': source_uri,'target':target_uri})
##cache variable
class Pager(object):
 """Offset-based pagination over a db.Model class, a query, or a plain sequence."""
 def __init__(self, model=None,query=None, items_per_page=10):
  # Either paginate all entities of *model*, or the supplied *query*/sequence.
  if model:
   self.query = model.all()
  else:
   self.query=query
  self.items_per_page = items_per_page
 def fetch(self, p):
  """Return (results, links) for 1-based page *p*; out-of-range p becomes 1.

  links carries: count, page_index, prev, next (0 when past the end), last.
  """
  # Sequences expose __len__; datastore queries must be count()ed.
  if hasattr(self.query,'__len__'):
   max_offset=len(self.query)
  else:
   max_offset = self.query.count()
  # NOTE: integer division under Python 2 — the '/' here is load-bearing.
  n = max_offset / self.items_per_page
  if max_offset % self.items_per_page != 0:
   n += 1
  if p < 0 or p > n:
   p = 1
  offset = (p - 1) * self.items_per_page
  # Datastore queries use fetch(limit, offset); sequences are sliced.
  if hasattr(self.query,'fetch'):
   results = self.query.fetch(self.items_per_page, offset)
  else:
   results = self.query[offset:offset+self.items_per_page]
  links = {'count':max_offset,'page_index':p,'prev': p - 1, 'next': p + 1, 'last': n}
  if links['next'] > n:
   links['next'] = 0
  return (results, links)
class BaseRequestHandler(webapp.RequestHandler):
 """Base handler for all micolog pages: resolves the visitor's auth level
 and provides themed template rendering plus small request helpers."""
 def __init__(self):
  self.current='home'
 def initialize(self, request, response):
  """Per-request setup: bind the blog singleton and classify the visitor.

  Sets self.auth to one of 'admin' / 'author' / 'login' / 'guest' and
  auto-registers a first-time admin as an author (User entity).
  """
  webapp.RequestHandler.initialize(self, request, response)
  os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
  from model import g_blog,User
  self.blog = g_blog
  self.login_user = users.get_current_user()
  self.is_login = (self.login_user != None)
  self.loginurl=users.create_login_url(self.request.uri)
  self.logouturl=users.create_logout_url(self.request.uri)
  self.is_admin = users.is_current_user_admin()
  if self.is_admin:
   self.auth = 'admin'
   self.author=User.all().filter('email =',self.login_user.email()).get()
   if not self.author:
    # First admin visit: create the matching author record.
    self.author=User(dispname=self.login_user.nickname(),email=self.login_user.email())
    self.author.isadmin=True
    self.author.user=self.login_user
    self.author.put()
  elif self.is_login:
   self.author=User.all().filter('email =',self.login_user.email()).get()
   if self.author:
    self.auth='author'
   else:
    self.auth = 'login'
  else:
   self.auth = 'guest'
  try:
   self.referer = self.request.headers['referer']
  except:
   self.referer = None
  # Base context handed to every template render.
  self.template_vals = {'self':self,'blog':self.blog,'current':self.current}
 def __before__(self,*args):
  pass
 def __after__(self,*args):
  pass
 def error(self,errorcode,message='an error occured'):
  """Render a themed error page, falling back through errorNNN.html ->
  error.html -> default theme -> plain message."""
  if errorcode == 404:
   message = 'Sorry, we were not able to find the requested page. We have logged this error and will look into it.'
  elif errorcode == 403:
   message = 'Sorry, that page is reserved for administrators. '
  elif errorcode == 500:
   message = "Sorry, the server encountered an error. We have logged this error and will look into it."
   message+="<p><pre>"+traceback.format_exc()+"</pre><br></p>"
  self.template_vals.update( {'errorcode':errorcode,'message':message})
  if errorcode>0:
   self.response.set_status(errorcode)
  #errorfile=getattr(self.blog.theme,'error'+str(errorcode))
  #logging.debug(errorfile)
##  if not errorfile:
##   errorfile=self.blog.theme.error
  errorfile='error'+str(errorcode)+".html"
  try:
   content=micolog_template.render(self.blog.theme,errorfile, self.template_vals)
  except TemplateDoesNotExist:
   try:
    content=micolog_template.render(self.blog.theme,"error.html", self.template_vals)
   except TemplateDoesNotExist:
    content=micolog_template.render(self.blog.default_theme,"error.html", self.template_vals)
   except:
    content=message
  except:
   content=message
  self.response.out.write(content)
 def get_render(self,template_file,values):
  """Render *template_file* (no extension) from the active theme, falling
  back to the default theme, and return the HTML string."""
  template_file=template_file+".html"
  self.template_vals.update(values)
  try:
   #sfile=getattr(self.blog.theme, template_file)
   logging.debug("get_render:"+template_file)
   html = micolog_template.render(self.blog.theme, template_file, self.template_vals)
  except TemplateDoesNotExist:
   #sfile=getattr(self.blog.default_theme, template_file)
   html = micolog_template.render(self.blog.default_theme, template_file, self.template_vals)
  return html
 def render(self,template_file,values):
  """
  Helper method to render the appropriate template
  """
  html=self.get_render(template_file,values)
  self.response.out.write(html)
 def message(self,msg,returl=None,title='Infomation'):
  # Convenience page: themed 'msg' template with an optional return link.
  self.render('msg',{'message':msg,'title':title,'returl':returl})
 def render2(self,template_file,template_vals={}):
  """
  Helper method to render the appropriate template

  Renders directly from the blog root (admin views), bypassing themes.
  NOTE(review): mutable default argument; safe here since the dict is
  only read, never mutated.
  """
  self.template_vals.update(template_vals)
  path = os.path.join(self.blog.rootdir, template_file)
  self.response.out.write(template.render(path, self.template_vals))
 def param(self, name, **kw):
  # Raw request parameter accessor.
  return self.request.get(name, **kw)
 def paramint(self, name, default=0):
  # Request parameter coerced to int, with fallback.
  try:
   return int(self.request.get(name))
  except:
   return default
 def parambool(self, name, default=False):
  # Checkbox-style parameter: True when the value is 'on'.
  try:
   return self.request.get(name)=='on'
  except:
   return default
 def write(self, s):
  self.response.out.write(s)
 def chk_login(self, redirect_url='/'):
  # Returns True when logged in; otherwise redirects and returns False.
  if self.is_login:
   return True
  else:
   self.redirect(redirect_url)
   return False
 def chk_admin(self, redirect_url='/'):
  # Returns True for admins; otherwise redirects and returns False.
  if self.is_admin:
   return True
  else:
   self.redirect(redirect_url)
   return False
| Python |
# -*- coding: utf-8 -*-
import logging
from django import template
from model import *
import django.template.defaultfilters as defaultfilters
import urllib
register = template.Library()
from datetime import *
@register.filter
def datetz(date,format): #datetime with timedelta
 """Django 'date' filter shifted by the blog's configured UTC offset (hours)."""
 t=timedelta(seconds=3600*g_blog.timedelta)
 return defaultfilters.date(date+t,format)
@register.filter
def TimestampISO8601(t):
 """Seconds since epoch (1970-01-01) --> ISO 8601 time string.

 BUG FIX: this module does ``from datetime import *`` which binds the
 name ``time`` to the ``datetime.time`` class (no gmtime/strftime
 functions). Importing the stdlib time module locally makes the
 conversion actually work.
 """
 import time as _time
 return _time.strftime('%Y-%m-%dT%H:%M:%SZ', _time.gmtime(t))
@register.filter
def urlencode(value):
 """Template filter: UTF-8 encode *value* and percent-escape it for URLs."""
 raw = value.encode('utf8')
 return urllib.quote(raw)
@register.filter
def check_current(v1,v2):
 """Template filter: CSS class helper — "current" when the values match, else ""."""
 return "current" if v1 == v2 else ""
@register.filter
def excerpt_more(entry,value='..more'):
 """Template filter: entry excerpt with *value* as the "read more" link text."""
 link_text = value.decode('utf8')
 return entry.get_content_excerpt(link_text)
@register.filter
def dict_value(v1,v2):
 """Template filter: subscript lookup — return v1[v2]."""
 mapping = v1
 return mapping[v2]
from app.html_filter import html_filter
# Whitelist-based HTML sanitizer used by the do_filter template filter below.
plog_filter = html_filter()
# Allowed tags mapped to their permitted attributes (empty tuple = none).
plog_filter.allowed = {
 'a': ('href', 'target', 'name'),
 'b': (),
 'blockquote': (),
 'pre': (),
 'em': (),
 'i': (),
 'img': ('src', 'width', 'height', 'alt', 'title'),
 'strong': (),
 'u': (),
 'font': ('color', 'size'),
 'p': (),
 'h1': (),
 'h2': (),
 'h3': (),
 'h4': (),
 'h5': (),
 'h6': (),
 'table': (),
 'tr': (),
 'th': (),
 'td': (),
 'ul': (),
 'ol': (),
 'li': (),
 'br': (),
 'hr': (),
 }
# Tags that need no closing tag, and extra permitted HTML entities.
plog_filter.no_close += ('br',)
plog_filter.allowed_entities += ('nbsp','ldquo', 'rdquo', 'hellip',)
plog_filter.make_clickable_urls = False # enable this will get a bug about a and img
@register.filter
def do_filter(data):
 """Template filter: sanitize HTML through the module's whitelist filter."""
 return plog_filter.go(data)
'''
tag like {%mf header%}xxx xxx{%endmf%}
'''
# Block tag: wraps its contents in an MfNode so the rendered output can be
# passed through the named micolog filter (see MfNode.render).
@register.tag("mf")
def do_mf(parser, token):
 # Consume everything up to the matching {%endmf%}.
 nodelist = parser.parse(('endmf',))
 parser.delete_first_token()
 return MfNode(nodelist,token)
class MfNode(template.Node):
 """Template node for {%mf <name>%}...{%endmf%}: renders its children and
 pipes the result through the blog filter registered as <name>."""
 def __init__(self, nodelist,token):
  self.nodelist = nodelist
  self.token=token
 def render(self, context):
  # token looks like: mf <filter_name>
  tokens= self.token.split_contents()
  if len(tokens)<2:
   # NOTE(review): Python 2 raise syntax; TemplateSyntaxError is not among
   # this module's visible imports (likely via 'from model import *') — confirm.
   raise TemplateSyntaxError, "'mf' tag takes one argument: the filter name is needed"
  fname=tokens[1]
  output = self.nodelist.render(context)
  return g_blog.tigger_filter(fname,output)
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A simple wrapper for Django templates.
The main purpose of this module is to hide all of the package import pain
you normally have to go through to get Django to work. We expose the Django
Template and Context classes from this module, handling the import nonsense
on behalf of clients.
Typical usage:
from google.appengine.ext.webapp import template
print template.render('templates/index.html', {'foo': 'bar'})
Django uses a global setting for the directory in which it looks for templates.
This is not natural in the context of the webapp module, so our load method
takes in a complete template path, and we set these settings on the fly
automatically. Because we have to set and use a global setting on every
method call, this module is not thread safe, though that is not an issue
for applications.
Django template documentation is available at:
http://www.djangoproject.com/documentation/templates/
"""
import md5
import os,logging
try:
from django import v0_96
except ImportError:
pass
import django
import django.conf
try:
django.conf.settings.configure(
DEBUG=False,
TEMPLATE_DEBUG=False,
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.load_template_source',
),
)
except (EnvironmentError, RuntimeError):
pass
import django.template
import django.template.loader
from google.appengine.ext import webapp
def render(theme,template_file, template_dict, debug=False):
 """Renders the template at the given path with the given dict of values.

 Example usage:
  render("templates/index.html", {"name": "Bret", "values": [1, 2, 3]})

 Args:
  template_path: path to a Django template
  template_dict: dictionary of values to apply to the template
 """
 t = load(theme,template_file, debug)
 # NOTE(review): 'Context' is not among this module's visible imports —
 # presumably django.template.Context, bound elsewhere in the file; confirm.
 return t.render(Context(template_dict))
# Per-process cache of compiled templates, keyed by absolute template path.
template_cache = {}
def load(theme,template_file, debug=False):
 """Loads the Django template from the given path.

 It is better to use this function than to construct a Template using the
 class below because Django requires you to load the template with a method
 if you want imports and extends to work in the template.

 Templates come from the theme's server directory (the directory itself
 for zipped themes, its templates/ subfolder otherwise). Django's global
 TEMPLATE_DIRS setting is swapped in around the load and around every
 render of the returned template.
 """
 #template_file=os.path.join("templates",template_file)
 if theme.isZip:
  theme_path=theme.server_dir
 else:
  theme_path=os.path.join( theme.server_dir,"templates")
 abspath =os.path.join( theme_path,template_file)
 logging.debug("theme_path:%s,abspath:%s"%(theme_path,abspath))
 if not debug:
  template = template_cache.get(abspath, None)
 else:
  template = None
 if not template:
  #file_name = os.path.split(abspath)
  new_settings = {
    'TEMPLATE_DIRS': (theme_path,),
    'TEMPLATE_DEBUG': debug,
    'DEBUG': debug,
    }
  old_settings = _swap_settings(new_settings)
  try:
   template = django.template.loader.get_template(template_file)
  finally:
   _swap_settings(old_settings)
  if not debug:
   template_cache[abspath] = template
 # Wrap render() so each call re-applies the theme settings and swaps in
 # the replacement {% url %} node renderer for the duration of the render.
 def wrap_render(context, orig_render=template.render):
  URLNode = django.template.defaulttags.URLNode
  save_urlnode_render = URLNode.render
  old_settings = _swap_settings(new_settings)
  try:
   # NOTE(review): _urlnode_render_replacement is defined later in this
   # file (outside this view) — confirm.
   URLNode.render = _urlnode_render_replacement
   return orig_render(context)
  finally:
   _swap_settings(old_settings)
   URLNode.render = save_urlnode_render
 template.render = wrap_render
 return template
def _swap_settings(new):
	"""Install the given Django settings and return the displaced values.

	Each key of *new* is written onto django.conf.settings; the previous
	value (None when the setting was unset) is collected so that calling
	this function again with the returned dict restores the prior state:

		save = _swap_settings({'X': 1, 'Y': 2})
		try:
			...
		finally:
			_swap_settings(save)

	Unset settings round-trip as None, which is also a legitimate setting
	value, so no separate "missing" sentinel is used.

	Args:
		new: dict of setting name -> value to install.

	Returns:
		Dict of the same keys mapped to their previous values.
	"""
	settings = django.conf.settings
	previous = {}
	for name, value in new.iteritems():
		previous[name] = getattr(settings, name, None)
		setattr(settings, name, value)
	return previous
def create_template_register():
	"""Return a fresh django.template.Library for custom filters and tags.

	Usage: in a filter module create a module-level
	``register = webapp.template.create_template_register()``, attach
	filters/tags to it (e.g. ``register.filter(cut)``), then expose the
	module with register_template_library('module_name') from the app.
	"""
	return django.template.Library()
def register_template_library(package_name):
	"""Registers a template extension module to make it usable in templates.
	See the documentation for create_template_register for more information."""
	# add_to_builtins makes the library available to every template without
	# an explicit {% load %}; skip if the package is already registered.
	if not django.template.libraries.get(package_name, None):
		django.template.add_to_builtins(package_name)
# Re-export Django's Template and Context under this module's namespace.
Template = django.template.Template
Context = django.template.Context
def _urlnode_render_replacement(self, context):
	"""Replacement for django's {% url %} block.
	This version uses WSGIApplication's url mapping to create urls.
	Examples:
	<a href="{% url MyPageHandler "overview" %}">
	{% url MyPageHandler implicit_args=False %}
	{% url MyPageHandler "calendar" %}
	{% url MyPageHandler "jsmith","calendar" %}
	"""
	args = [arg.resolve(context) for arg in self.args]
	try:
		# Look up the handler registered under the template-supplied name
		# and let it build its own URL from the resolved arguments.
		app = webapp.WSGIApplication.active_instance
		handler = app.get_registered_handler_by_name(self.view_name)
		return handler.get_url(implicit_args=True, *args)
	except webapp.NoUrlFoundError:
		# An unknown handler name renders as an empty string.
		return ''
| Python |
# -*- coding: utf-8 -*-
import wsgiref.handlers
import xmlrpclib
from xmlrpclib import Fault
import sys
import cgi
import base64
#from datetime import datetime
import app.mktimefix as datetime
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher
from functools import wraps
from django.utils.html import strip_tags
sys.path.append('modules')
from base import *
from model import *
from micolog_plugin import *
from urlparse import urlparse
def checkauth(pos=1):
	"""Decorator factory authenticating XML-RPC calls.

	The wrapped method receives its arguments with the username/password
	pair inserted at positions *pos* and *pos+1*; the wrapper validates
	them against g_blog.rpcuser / g_blog.rpcpassword, strips them, and
	calls the real method with the remaining arguments.

	Raises:
		ValueError: when the credentials are missing or do not match.

	Fix: apply functools.wraps (already imported at module top but never
	used) so wrapped handlers keep their name/docstring for introspection.
	"""
	def _decorate(method):
		@wraps(method)
		def _wrapper(*args, **kwargs):
			username = args[pos]
			password = args[pos + 1]
			if not (username and password and g_blog.rpcuser and g_blog.rpcpassword
					and (g_blog.rpcuser == username)
					and (g_blog.rpcpassword == password)):
				raise ValueError("Authentication Failure")
			# Drop the credential pair before delegating.
			args = args[0:pos] + args[pos + 2:]
			return method(*args, **kwargs)
		return _wrapper
	return _decorate
def format_date(d):
	# Convert a datetime to an xmlrpclib.DateTime; None passes through.
	if not d: return None
	#return xmlrpclib.DateTime(d.isoformat())
	return xmlrpclib.DateTime(d)
def post_struct(entry):
	"""Build the metaWeblog post struct for an Entry.

	Raises Fault 404 when entry is None (e.g. unknown post id).
	"""
	if not entry:
		raise Fault(404, "Post does not exist")
	categories=[]
	if entry.categorie_keys:
		categories =[cate.name for cate in entry.categories]
	struct = {
		'postid': entry.key().id(),
		'title': entry.title,
		'link': entry.fullurl,
		'permaLink': entry.fullurl,
		'description': unicode(entry.content),
		'categories': categories,
		'userid': '1',
		'mt_keywords':','.join(entry.tags),
		'mt_excerpt': '',
		'mt_text_more': '',
		'mt_allow_comments': entry.allow_comment and 1 or 0,
		'mt_allow_pings': entry.allow_trackback and 1 or 0,
		'custom_fields':[],
		'post_status':entry.post_status,
		'sticky':entry.sticky,
		'wp_author_display_name': entry.get_author_user().dispname,
		'wp_author_id': str(entry.get_author_user().key().id()),
		'wp_password': entry.password,
		'wp_slug':entry.slug
	}
	if entry.date:
		# dateCreated is shifted into the blog's local time by the
		# configured hour offset; date_created_gmt stays as stored
		# (entry.date is presumably UTC -- TODO confirm against model).
		t=timedelta(seconds=3600*g_blog.timedelta)
		struct['dateCreated'] = format_date(entry.date+t)
		struct['date_created_gmt'] = format_date(entry.date)
	return struct
def page_struct(entry):
	"""Build the WordPress wp.getPage struct for a page Entry.

	Raises Fault 404 when entry is None.
	"""
	if not entry:
		raise Fault(404, "Post does not exist")
	categories=[]
	if entry.categorie_keys:
		categories =[cate.name for cate in entry.categories]
	struct = {
		'page_id': entry.key().id(),
		'title': entry.title,
		'link': entry.fullurl,
		'permaLink': entry.fullurl,
		'description': unicode(entry.content),
		'categories': categories,
		'userid': '1',
		'mt_allow_comments': entry.allow_comment and 1 or 0,
		'mt_allow_pings': entry.allow_trackback and 1 or 0,
		'custom_fields':[],
		'page_status':entry.post_status,
		'sticky':entry.sticky,
		'wp_author_display_name': entry.get_author_user().dispname,
		'wp_author_id': str(entry.get_author_user().key().id()),
		'wp_password': entry.password,
		'wp_slug':entry.slug,
		'text_more': '',
		'wp_author': 'admin',
		'wp_page_order': entry.menu_order,
		'wp_page_parent_id': 0,
		'wp_page_parent_title': '',
		'wp_page_template': 'default',
	}
	if entry.date:
		# NOTE(review): unlike post_struct, no timezone offset is applied
		# to dateCreated here -- verify whether that is intentional.
		struct['dateCreated'] = format_date(entry.date)
		struct['date_created_gmt'] = format_date(entry.date)
	return struct
def entry_title_struct(entry):
	"""Minimal struct (id/title/date) used by mt.getRecentPostTitles.

	Raises Fault 404 when entry is None.
	"""
	if not entry:
		raise Fault(404, "Post does not exist")
	result = dict(postid=str(entry.key().id()),
				  title=entry.title,
				  userid='1')
	if entry.date:
		result['dateCreated'] = format_date(entry.date)
	return result
class Logger(db.Model):
	# Datastore record of one raw XML-RPC exchange, kept for debugging
	# and shown by the /rpc/view admin page.
	request = db.TextProperty()
	response = db.TextProperty()
	date = db.DateTimeProperty(auto_now_add=True)
#-------------------------------------------------------------------------------
# blogger
#-------------------------------------------------------------------------------
@checkauth()
def blogger_getUsersBlogs(discard):
	# blogger.getUsersBlogs: this install always hosts exactly one blog.
	return [{'url' : g_blog.baseurl, 'blogid' : '1','isAdmin':True, 'blogName' : g_blog.title,'xmlrpc':g_blog.baseurl+"/rpc"}]
@checkauth(pos=2)
def blogger_deletePost(appkey, postid, publish=False):
	"""blogger.deletePost: delete the post with the given id.

	Fix: Entry.get_by_id returns None for an unknown id, which previously
	crashed with AttributeError; raise the conventional Fault 404 instead.
	"""
	post = Entry.get_by_id(int(postid))
	if post is None:
		raise Fault(404, "Post does not exist")
	post.delete()
	return True
@checkauth()
def blogger_getUserInfo(appkey):
	# blogger.getUserInfo: report the first admin user, or None if there
	# is no admin (clients then receive a "cannot marshal None" fault).
	for user in User.all():
		if user.isadmin:
			return {'email':user.email,'firstname':'','nickname':user.dispname,'userid':str(user.key().id()),
					'url':'','lastname':''}
	return None
#-------------------------------------------------------------------------------
# metaWeblog
#-------------------------------------------------------------------------------
@checkauth()
def metaWeblog_newPost(blogid, struct, publish):
	"""metaWeblog.newPost: create a post from the given struct.

	Returns the new post id as a string. Unknown category names are
	silently dropped; optional struct members (mt_keywords, wp_slug,
	mt_excerpt, dates, wp_password, sticky, wp_author_id, ...) are
	applied when present.

	Fix: the category lookup now uses fetch(1) like metaWeblog_editPost;
	previously indexing an empty query with c[0] raised IndexError for
	any category name that does not exist.
	"""
	if struct.has_key('categories'):
		cates = struct['categories']
	else:
		cates = []
	newcates=[]
	for cate in cates:
		c=Category.all().filter('name =',cate).fetch(1)
		if c:
			newcates.append(c[0].key())
	entry=Entry(title = struct['title'],
			content = struct['description'],
			categorie_keys=newcates
			)
	if struct.has_key('mt_text_more'):
		content=struct['mt_text_more']
		if content:
			entry.content=entry.content+"<!--more-->"+struct['mt_text_more']
	if struct.has_key('mt_keywords'):
		entry.settags(struct['mt_keywords'])
	if struct.has_key('wp_slug'):
		entry.slug=struct['wp_slug']
	if struct.has_key('mt_excerpt'):
		entry.excerpt=struct['mt_excerpt']
	try:
		# Prefer the GMT timestamp; fall back to local time shifted by the
		# blog's configured hour offset. Unparseable dates are ignored.
		if struct.has_key('date_created_gmt'):
			entry.date=datetime.strptime(str(struct['date_created_gmt']), "%Y%m%dT%H:%M:%S")
		elif struct.has_key('dateCreated'):
			entry.date=datetime.strptime(str(struct['dateCreated']), "%Y%m%dT%H:%M:%S")-timedelta(seconds=3600*g_blog.timedelta)
	except:
		pass
	if struct.has_key('wp_password'):
		entry.password=struct['wp_password']
	if struct.has_key('sticky'):
		entry.sticky=struct['sticky']
	if struct.has_key('wp_author_id'):
		author=User.get_by_id(int(struct['wp_author_id']))
		entry.author=author.user
		entry.author_name=author.dispname
	else:
		entry.author=g_blog.owner
		entry.author_name=g_blog.author
	if publish:
		entry.save(True)
		# Send trackbacks and fire plugin hooks only for published posts.
		if struct.has_key('mt_tb_ping_urls'):
			for url in struct['mt_tb_ping_urls']:
				util.do_trackback(url,entry.title,entry.get_content_excerpt(more='')[:60],entry.fullurl,g_blog.title)
		g_blog.tigger_action("xmlrpc_publish_post",entry)
	else:
		entry.save()
	postid =entry.key().id()
	return str(postid)
@checkauth()
def metaWeblog_newMediaObject(blogid,struct):
	"""metaWeblog.newMediaObject: store an uploaded file as a Media blob.

	Returns {'url': ...} pointing at the /media/ serving handler.
	"""
	name=struct['name']
	# Media type: explicit 'type' member wins, else fall back to the
	# file extension, else None.
	if struct.has_key('type'):
		mtype=struct['type']
	else:
		st=name.split('.')
		if len(st)>1:
			mtype=st[-1]
		else:
			mtype=None
	bits=db.Blob(str(struct['bits']))
	media=Media(name=name,mtype=mtype,bits=bits)
	media.put()
	return {'url':g_blog.baseurl+'/media/'+str(media.key())}
@checkauth()
def metaWeblog_editPost(postid, struct, publish):
	"""metaWeblog.editPost: overwrite an existing post from the struct.

	Mirrors metaWeblog_newPost's handling of optional members; always
	returns True.
	"""
	if struct.has_key('categories'):
		cates = struct['categories']
	else:
		cates = []
	newcates=[]
	for cate in cates:
		c=Category.all().filter('name =',cate).fetch(1)
		if c:
			newcates.append(c[0].key())
	entry=Entry.get_by_id(int(postid))
	if struct.has_key('mt_keywords'):
		entry.settags(struct['mt_keywords'])
	if struct.has_key('wp_slug'):
		entry.slug=struct['wp_slug']
	if struct.has_key('mt_excerpt'):
		entry.excerpt=struct['mt_excerpt']
	try:
		# Prefer the GMT timestamp; fall back to local time shifted by
		# the blog's hour offset. Unparseable dates are ignored.
		if struct.has_key('date_created_gmt'):
			entry.date=datetime.strptime(str(struct['date_created_gmt']), "%Y%m%dT%H:%M:%S")
		elif struct.has_key('dateCreated'):
			entry.date=datetime.strptime(str(struct['dateCreated']), "%Y%m%dT%H:%M:%S")-timedelta(seconds=3600*g_blog.timedelta)
	except:
		pass
	if struct.has_key('wp_password'):
		entry.password=struct['wp_password']
	if struct.has_key('sticky'):
		entry.sticky=struct['sticky']
	if struct.has_key('wp_author_id'):
		author=User.get_by_id(int(struct['wp_author_id']))
		entry.author=author.user
		entry.author_name=author.dispname
	else:
		entry.author=g_blog.owner
		entry.author_name=g_blog.author
	entry.title = struct['title']
	entry.content = struct['description']
	# Extended body is appended after a <!--more--> marker.
	if struct.has_key('mt_text_more'):
		content=struct['mt_text_more']
		if content:
			entry.content=entry.content+"<!--more-->"+struct['mt_text_more']
	entry.categorie_keys=newcates
	if publish:
		entry.save(True)
	else:
		entry.save()
	return True
@checkauth()
def metaWeblog_getCategories(blogid):
	"""metaWeblog.getCategories: every category as a flat struct list."""
	return [{'categoryDescription': '',
			 'categoryId': str(cat.ID()),
			 'parentId': '0',
			 'description': cat.name,
			 'categoryName': cat.name,
			 'htmlUrl': '',
			 'rssUrl': ''}
			for cat in Category.all()]
@checkauth()
def metaWeblog_getPost(postid):
	# metaWeblog.getPost: post_struct raises Fault 404 for unknown ids.
	entry = Entry.get_by_id(int(postid))
	return post_struct(entry)
@checkauth()
def metaWeblog_getRecentPosts(blogid, num):
	# metaWeblog.getRecentPosts: newest first, capped at 20 posts.
	entries = Entry.all().filter('entrytype =','post').order('-date').fetch(min(num, 20))
	return [post_struct(entry) for entry in entries]
#-------------------------------------------------------------------------------
# WordPress API
#-------------------------------------------------------------------------------
@checkauth(pos=0)
def wp_getUsersBlogs():
	# wp.getUsersBlogs: single-blog install; same shape as blogger variant.
	#return [{'url' : g_blog.baseurl, 'blog_id' : 1,'is_admin':True, 'blog_name' : g_blog.title,'xmlrpc_url':g_blog.baseurl+"/xmlrpc.php"}]
	return [{'url' : g_blog.baseurl, 'blogid' : '1','isAdmin':True, 'blogName' : g_blog.title,'xmlrpc':g_blog.baseurl+"/rpc"}]
@checkauth()
def wp_getTags(blog_id):
	"""wp.getTags: all tags in WordPress struct form (tag_ID is unused)."""
	return [{'tag_ID': '0',
			 'name': tag.tag,
			 'count': str(tag.tagcount),
			 'slug': tag.tag,
			 'html_url': '',
			 'rss_url': ''}
			for tag in Tag.all()]
@checkauth()
def wp_getCommentCount(blogid, postid):
	"""wp.getCommentCount: comment totals for one post.

	Fix: convert postid with int() like every other handler in this
	module; XML-RPC clients may send the id as a string, and
	Entry.get_by_id with a string id finds nothing.
	Returns None (client-side fault) when the post does not exist.
	"""
	entry = Entry.get_by_id(int(postid))
	if entry:
		return {'approved': entry.commentcount,
				'awaiting_moderation': 0,
				'spam': 0,
				'total_comments': entry.commentcount}
@checkauth()
def wp_getPostStatusList(blogid):
	# wp.getPostStatusList: fixed map of status slug -> display label.
	return {'draft': 'Draft',
			'pending': 'Pending Review',
			'private': 'Private',
			'publish': 'Published'}
@checkauth()
def wp_getPageStatusList(blogid):
	# wp.getPageStatusList: pages have no 'pending' status.
	return {'draft': 'Draft', 'private': 'Private', 'publish': 'Published'}
@checkauth()
def wp_getPageTemplates(blogid):
	# wp.getPageTemplates: custom page templates are not supported.
	return {}
@checkauth()
def wp_setOptions(blogid, options):
	"""wp.setOptions: copy recognized option values onto the blog object.

	Fix: the original iterated ``for name,value in options,options.values():``,
	i.e. over the 2-tuple (dict, values-view), unpacking dict KEYS into
	(name, value) -- it only "worked" for dicts with exactly two keys and
	then set the wrong data. Iterate the (name, value) items instead.
	Unknown option names are ignored; the options dict is echoed back.
	"""
	for name, value in options.items():
		if hasattr(g_blog, name):
			setattr(g_blog, name, value)
	return options
@checkauth()
def wp_getOptions(blogid, options):
	"""wp.getOptions: return {name: {desc, readonly, value}} for each
	requested option that exists on the blog object.

	Fix: the per-option dict used the misspelled key 'readonly:' (stray
	colon), which WordPress clients cannot read; corrected to 'readonly'.
	TODO: when options is empty/None WordPress expects ALL options back;
	this implementation returns an empty dict in that case.
	"""
	mdict = {}
	if options:
		for option in options:
			if hasattr(g_blog, option):
				mdict[option] = {'desc': option,
								 'readonly': False,
								 'value': getattr(g_blog, option)}
	return mdict
@checkauth()
def wp_newCategory(blogid,struct):
	"""wp.newCategory: create a category, or return the existing one's id.

	The slug is simply the name; no url-encoding is applied.
	"""
	name=struct['name']
	category=Category.all().filter('name =',name).fetch(1)
	if category and len(category):
		return category[0].ID()
	else:
		#category=Category(key_name=urlencode(name), name=name,slug=urlencode(name))
		category=Category(name=name,slug=name)
		category.put()
		return category.ID()
@checkauth()
def wp_newPage(blogid,struct,publish):
	"""wp.newPage: create a static page from the struct.

	Returns the new page id as a string.
	"""
	entry=Entry(title = struct['title'],
			content = struct['description'],
			)
	# Extended body goes after a <!--more--> marker.
	if struct.has_key('mt_text_more'):
		entry.content=entry.content+"<!--more-->"+struct['mt_text_more']
	try:
		# Prefer the GMT timestamp; fall back to local time shifted by
		# the blog's hour offset. Unparseable dates are ignored.
		if struct.has_key('date_created_gmt'):
			entry.date=datetime.strptime(str(struct['date_created_gmt']), "%Y%m%dT%H:%M:%S")
		elif struct.has_key('dateCreated'):
			entry.date=datetime.strptime(str(struct['dateCreated']), "%Y%m%dT%H:%M:%S")-timedelta(seconds=3600*g_blog.timedelta)
	except:
		pass
	if struct.has_key('wp_slug'):
		entry.slug=struct['wp_slug']
	if struct.has_key('wp_page_order'):
		entry.menu_order=int(struct['wp_page_order'])
	if struct.has_key('wp_password'):
		entry.password=struct['wp_password']
	if struct.has_key('wp_author_id'):
		author=User.get_by_id(int(struct['wp_author_id']))
		entry.author=author.user
		entry.author_name=author.dispname
	else:
		entry.author=g_blog.owner
		entry.author_name=g_blog.author
	entry.entrytype='page'
	if publish:
		entry.save(True)
	else:
		entry.save()
	postid =entry.key().id()
	return str(postid)
# Credentials arrive at positions 2-3 for wp.getPage (blogid, pageid, user, pass).
@checkauth(2)
def wp_getPage(blogid,pageid):
	entry = Entry.get_by_id(int(pageid))
	return page_struct(entry)
@checkauth()
def wp_getPages(blogid,num=20):
	# wp.getPages: newest first, capped at 20 pages.
	entries = Entry.all().filter('entrytype =','page').order('-date').fetch(min(num, 20))
	return [page_struct(entry) for entry in entries]
# Credentials arrive at positions 2-3 (blogid, pageid, user, pass, struct, publish).
@checkauth(2)
def wp_editPage(blogid,pageid,struct,publish):
	"""wp.editPage: overwrite an existing page; always saves as published."""
	entry=Entry.get_by_id(int(pageid))
##	if struct.has_key('mt_keywords'):
##		entry.tags=struct['mt_keywords'].split(',')
	if struct.has_key('wp_slug'):
		entry.slug=struct['wp_slug']
	if struct.has_key('wp_page_order'):
		entry.menu_order=int(struct['wp_page_order'])
	try:
		# Prefer the GMT timestamp; fall back to local time shifted by
		# the blog's hour offset. Unparseable dates are ignored.
		if struct.has_key('date_created_gmt'):
			entry.date=datetime.strptime(str(struct['date_created_gmt']), "%Y%m%dT%H:%M:%S")
		elif struct.has_key('dateCreated'):
			entry.date=datetime.strptime(str(struct['dateCreated']), "%Y%m%dT%H:%M:%S")-timedelta(seconds=3600*g_blog.timedelta)
	except:
		pass
	if struct.has_key('wp_password'):
		entry.password=struct['wp_password']
	if struct.has_key('wp_author_id'):
		author=User.get_by_id(int(struct['wp_author_id']))
		entry.author=author.user
		entry.author_name=author.dispname
	else:
		entry.author=g_blog.owner
		entry.author_name=g_blog.author
	entry.title = struct['title']
	entry.content = struct['description']
	if struct.has_key('mt_text_more'):
		entry.content=entry.content+"<!--more-->"+struct['mt_text_more']
	# NOTE(review): the `publish` flag is ignored; edits always publish.
	entry.save(True)
	return True
@checkauth()
def wp_deletePage(blogid, pageid):
	"""wp.deletePage: delete the page with the given id.

	Fix: raise Fault 404 instead of AttributeError when the id is unknown
	(Entry.get_by_id returns None), matching blogger_deletePost.
	"""
	post = Entry.get_by_id(int(pageid))
	if post is None:
		raise Fault(404, "Post does not exist")
	post.delete()
	return True
@checkauth()
def wp_getAuthors(blogid):
	"""wp.getAuthors: list every user.

	user_login is hard-coded to 'admin' for all users (original behavior).
	Fix: dropped the dead counter variable `i`, which was incremented but
	never read.
	"""
	return [{'user_id': str(user.key().id()),
			 'user_login': 'admin',
			 'display_name': user.dispname}
			for user in User.all()]
@checkauth()
def wp_deleteComment(blogid,commentid):
	# wp.deleteComment: delit() also updates the entry's comment count.
	# Returns False on any error (e.g. non-numeric id).
	try:
		comment=Comment.get_by_id(int(commentid))
		if comment:
			comment.delit()
		return True
	except:
		return False
@checkauth()
def wp_editComment(blogid,commentid,struct):
	"""wp.editComment: update author/email/url/content of a comment.

	Returns True on success, False when the comment id is unknown;
	other errors propagate as faults.
	"""
	try:
		comment=Comment.get_by_id(int(commentid))
		if comment:
			# An invalid URL value is silently dropped (property raises).
			url=struct['author_url']
			if url:
				try:
					comment.weburl=url
				except:
					comment.weburl=None
			#comment.date= format_date(datetime.now())
			comment.author=struct['author']
			#comment.weburl=struct['author_url']
			comment.email=struct['author_email']
			comment.content=struct['content']
			#comment.status=struct['status']
			comment.save()
			return True
	except:
		raise
	return False
@checkauth()
def wp_newComment(blogid, postid, struct):
	"""wp.newComment: attach a comment to a post; returns the comment id.

	Fix: convert postid with int() like the other handlers -- clients may
	send the id as a string, and Entry.get_by_id with a string finds
	nothing, yielding a spurious Fault 404.
	"""
	post = Entry.get_by_id(int(postid))
	if not post:
		raise Fault(404, "Post does not exist")
	comment = Comment(entry=post, content=struct['content'],
					  author=struct['author'],
					  email=struct['author_email'])
	# An invalid URL value is silently dropped (property raises).
	url = struct['author_url']
	if url:
		try:
			comment.weburl = url
		except:
			comment.weburl = None
	comment.save()
	return comment.key().id()
@checkauth()
def wp_getCommentStatusList(blogid):
	# wp.getCommentStatusList: no moderation/spam queues, so every
	# comment counts as approved.
	return {'hold':0,'approve':Comment.all().count(),'spam':0}
@checkauth()
def wp_getPageList(blogid, num=20):
	"""wp.getPageList: brief structs for the newest pages (max 20)."""
	pages = Entry.all().filter('entrytype =', 'page').order('-date').fetch(min(num, 20))
	return [{'page_id': str(page.key().id()),
			 'page_title': page.title,
			 'page_parent_id': 0,
			 'dateCreated': format_date(page.date),
			 'date_created_gmt': format_date(page.date)}
			for page in pages]
@checkauth()
def wp_deleteCategory(blogid,cateid):
	# wp.deleteCategory: returns False on any failure (unknown id, etc.).
	try:
		cate=Category.get_from_id(int(cateid))
		cate.delete()
		return True
	except:
		return False
@checkauth()
def wp_suggestCategories(blogid, category, max_result):
	"""wp.suggestCategories: up to max_result category id/name pairs.

	Note: the `category` prefix argument is ignored (original behavior) --
	all categories are candidates.
	"""
	suggestions = [{'categoryId': str(cate.ID()),
					'categoryName': cate.name}
				   for cate in Category.all()]
	return suggestions[:max_result]
@checkauth()
def wp_getComment(blogid,commentid):
	"""wp.getComment: one comment in WordPress struct form.

	All comments are reported with status 'approve'; there is no
	moderation queue.
	"""
	comment=Comment.get_by_id(int(commentid))
	return {
			'dateCreated':format_date(comment.date),
			'date_created_gmt':format_date(comment.date),
			'user_id':'0',
			'comment_id':str(comment.key().id()),
			'parent':'',
			'status':'approve',
			'content':unicode(comment.content),
			'link':comment.entry.link+"#comment-"+str(comment.key().id()),
			'post_id':str(comment.entry.key().id()),
			'post_title':comment.entry.title,
			'author':comment.author,
			'author_url':str(comment.weburl),
			'author_email':str(comment.email),
			'author_ip':comment.ip,
			'type':''
			}
@checkauth()
def wp_getComments(blogid,data):
	"""wp.getComments: page through comments.

	data members: 'number' (page size), optional 'offset', and 'post_id'
	(truthy -> only that post's comments, else all comments).
	"""
	def func(blogid,data):
		number=int(data['number'])
		try:
			offset=int(data['offset'])
		except:
			offset=0
		comments=[]
		if data['post_id']:
			postid=int(data['post_id'])
			post=Entry.get_by_id(postid)
			if post:
				comments=post.comments()
		else:
			comments=Comment.all()
		# Same struct shape as wp_getComment.
		for comment in comments.fetch(number,offset):
			yield {
				'dateCreated':format_date(comment.date),
				'date_created_gmt':format_date(comment.date),
				'user_id':'0',
				'comment_id':str(comment.key().id()),
				'parent':'',
				'status':'approve',
				'content':unicode(comment.content),
				'link':comment.entry.link+"#comment-"+str(comment.key().id()),
				'post_id':str(comment.entry.key().id()),
				'post_title':comment.entry.title,
				'author':comment.author,
				'author_url':str(comment.weburl),
				'author_email':str(comment.email),
				'author_ip':comment.ip,
				'type':''
				}
	return list(func(blogid,data))
@checkauth()
def mt_getPostCategories(postid):
	"""mt.getPostCategories: the categories assigned to one post.

	Every returned category is flagged as primary (original behavior).
	"""
	post = Entry.get_by_id(int(postid))
	return [{'categoryId': str(cate.ID()),
			 'categoryName': cate.name,
			 'isPrimary': True}
			for cate in post.categories]
@checkauth()
def mt_getCategoryList(blogid):
	"""mt.getCategoryList: every category as an id/name pair."""
	return [{'categoryId': str(cate.ID()),
			 'categoryName': cate.name}
			for cate in Category.all()]
@checkauth()
def mt_setPostCategories(postid, cates):
	"""mt.setPostCategories: replace a post's category assignment.

	Unknown category ids are dropped. Returns False on any error.
	Fix: removed the dead local `id` (shadowed the builtin and duplicated
	the int() conversion done on the next line).
	"""
	try:
		entry = Entry.get_by_id(int(postid))
		newcates = []
		for cate in cates:
			if cate.has_key('categoryId'):
				c = Category.get_from_id(int(cate['categoryId']))
				if c:
					newcates.append(c.key())
		entry.categorie_keys = newcates
		entry.put()
		return True
	except:
		return False
@checkauth()
def mt_publishPost(postid):
	# mt.publishPost: save(True) publishes; returns the post id, or 0 on
	# any failure (unknown/non-numeric id).
	try:
		entry=Entry.get_by_id(int(postid))
		entry.save(True)
		return entry.key().id()
	except:
		return 0
@checkauth()
def mt_getRecentPostTitles(blogid,num):
	# mt.getRecentPostTitles: id/title/date only, newest first, max 20.
	entries = Entry.all().filter('entrytype =','post').order('-date').fetch(min(num, 20))
	return [entry_title_struct(entry) for entry in entries]
#------------------------------------------------------------------------------
#pingback
#------------------------------------------------------------------------------
_title_re = re.compile(r'<title>(.*?)</title>(?i)')
_pingback_re = re.compile(r'<link rel="pingback" href="([^"]+)" ?/?>(?i)')
_chunk_re = re.compile(r'\n\n|<(?:p|div|h\d)[^>]*>')
def pingback_ping(source_uri, target_uri):
	"""pingback.ping handler: register a pingback from source to target.

	Fault codes follow the Pingback specification: 16 = source does not
	exist, 32 = target does not exist, 49 = access denied.

	Fixes: use flag-day-safe ``except Exception:`` (the old
	``except Exception ,e`` bound an unused name with Python-2-only
	syntax), and drop the meaningless try/except wrapped around a
	constant return statement.
	"""
	if not g_blog.allow_pingback:
		raise Fault(49, "Access denied.")
	# Fetch the source page to verify it exists; its content is later
	# mined for a title and excerpt.
	try:
		g_blog.tigger_action("pre_ping", source_uri, target_uri)
		response = urlfetch.fetch(source_uri)
	except Exception:
		raise Fault(16, 'The source URL does not exist.%s' % source_uri)
	# Only accept pingbacks for links below our own blog URL.
	blog_url = g_blog.baseurl
	if not blog_url.endswith('/'):
		blog_url += '/'
	if not target_uri.startswith(blog_url):
		raise Fault(32, 'The specified target URL does not exist.')
	path_info = target_uri[len(blog_url):]
	pingback_post(response, source_uri, target_uri, path_info)
	return "Micolog pingback succeed!"
def get_excerpt(response, url_hint, body_limit=1024 * 512):
	"""Get an excerpt from the given `response`. `url_hint` is the URL
	which will be used as anchor for the excerpt. The return value is a
	tuple in the form ``(title, body)``. If one of the two items could
	not be calculated it will be `None`.

	Fix: the anchor pattern carried trailing inline flags ("...(?is)"),
	deprecated since Python 3.6 and an error from 3.11; pass
	``re.I | re.S`` as compile flags instead (identical matching).
	"""
	contents = response.content[:body_limit]
	title_match = _title_re.search(contents)
	title = title_match and strip_tags(title_match.group(1)) or None
	# Find an <a> tag whose quoted href is exactly url_hint.
	link_re = re.compile(r'<a[^>]+?"\s*%s\s*"[^>]*>(.*?)</a>' %
						 re.escape(url_hint), re.I | re.S)
	# Scan block-level chunks for the link; keep up to 120 characters of
	# stripped text on each side of it.
	for chunk in _chunk_re.split(contents):
		match = link_re.search(chunk)
		if not match:
			continue
		before = chunk[:match.start()]
		after = chunk[match.end():]
		raw_body = '%s\0%s' % (strip_tags(before).replace('\0', ''),
							   strip_tags(after).replace('\0', ''))
		body_match = re.compile(r'(?:^|\b)(.{0,120})\0(.{0,120})(?:\b|$)') \
			.search(raw_body)
		if body_match:
			break
	else:
		# No chunk contained a usable link back to us.
		return title, None
	before, after = body_match.groups()
	link_text = strip_tags(match.group(1))
	if len(link_text) > 60:
		link_text = link_text[:60] + u' …'
	bits = before.split()
	bits.append(link_text)
	bits.extend(after.split())
	return title, u'[…] %s […]' % u' '.join(bits)
def pingback_post(response, source_uri, target_uri, slug):
	"""This is the pingback handler for posts.

	Looks up the published entry by link slug, extracts title/excerpt
	from the already-fetched source page, rejects duplicates, and stores
	the pingback as a Comment of type COMMENT_PINGBACK.

	Fixes: removed the unreachable ``return`` directly after the
	duplicate-pingback raise, and the dead trailing ``return`` after the
	try/except (both were never executed).
	"""
	entry = Entry.all().filter("published =", True).filter('link =', slug).get()
	# allow_trackback doubles as the pingback switch.
	if entry is None or not entry.allow_trackback:
		raise Fault(33, 'no such post')
	title, excerpt = get_excerpt(response, target_uri)
	if not title:
		raise Fault(17, 'no title provided')
	elif not excerpt:
		raise Fault(17, 'no useable link to target')
	# Only one pingback per source URL per entry.
	comment = Comment.all().filter("entry =", entry).filter("weburl =", source_uri).get()
	if comment:
		raise Fault(48, 'pingback has already been registered')
	comment = Comment(author=urlparse(source_uri).hostname,
					  content="<strong>" + title[:250] + "...</strong><br/>" +
							  excerpt[:250] + '...',
					  weburl=source_uri,
					  entry=entry)
	comment.ctype = COMMENT_PINGBACK
	try:
		comment.save()
		g_blog.tigger_action("pingback_post", comment)
		# Invalidate the cached entry page so the pingback shows up.
		memcache.delete("/" + entry.link)
		return True
	except:
		raise Fault(49, "Access denied.")
##------------------------------------------------------------------------------
class PlogXMLRPCDispatcher(SimpleXMLRPCDispatcher):
	# Thin dispatcher over a fixed method-name -> function map; the base
	# class is constructed with allow_none=True and utf-8 encoding.
	def __init__(self, funcs):
		SimpleXMLRPCDispatcher.__init__(self, True, 'utf-8')
		self.funcs = funcs
		self.register_introspection_functions()
# Wire the supported XML-RPC method names to their handlers. Note the
# aliases: wp.getCategories reuses the metaWeblog handler and
# wp.uploadFile reuses metaWeblog_newMediaObject.
dispatcher = PlogXMLRPCDispatcher({
	# Blogger API
	'blogger.getUsersBlogs' : blogger_getUsersBlogs,
	'blogger.deletePost' : blogger_deletePost,
	'blogger.getUserInfo': blogger_getUserInfo,
	# metaWeblog API
	'metaWeblog.newPost' : metaWeblog_newPost,
	'metaWeblog.editPost' : metaWeblog_editPost,
	'metaWeblog.getCategories' : metaWeblog_getCategories,
	'metaWeblog.getPost' : metaWeblog_getPost,
	'metaWeblog.getRecentPosts' : metaWeblog_getRecentPosts,
	'metaWeblog.newMediaObject':metaWeblog_newMediaObject,
	# WordPress API
	'wp.getUsersBlogs':wp_getUsersBlogs,
	'wp.getTags':wp_getTags,
	'wp.getCommentCount':wp_getCommentCount,
	'wp.getPostStatusList':wp_getPostStatusList,
	'wp.getPageStatusList':wp_getPageStatusList,
	'wp.getPageTemplates':wp_getPageTemplates,
	'wp.getOptions':wp_getOptions,
	'wp.setOptions':wp_setOptions,
	'wp.getCategories':metaWeblog_getCategories,
	'wp.newCategory':wp_newCategory,
	'wp.newPage':wp_newPage,
	'wp.getPage':wp_getPage,
	'wp.getPages':wp_getPages,
	'wp.editPage':wp_editPage,
	'wp.getPageList':wp_getPageList,
	'wp.deletePage':wp_deletePage,
	'wp.getAuthors':wp_getAuthors,
	'wp.deleteComment':wp_deleteComment,
	'wp.editComment':wp_editComment,
	'wp.newComment':wp_newComment,
	'wp.getCommentStatusList':wp_getCommentStatusList,
	'wp.deleteCategory':wp_deleteCategory,
	'wp.suggestCategories':wp_suggestCategories,
	'wp.getComment':wp_getComment,
	'wp.getComments':wp_getComments,
	'wp.uploadFile':metaWeblog_newMediaObject,
	# MovableType API
	'mt.setPostCategories':mt_setPostCategories,
	'mt.getPostCategories':mt_getPostCategories,
	'mt.getCategoryList':mt_getCategoryList,
	'mt.publishPost':mt_publishPost,
	'mt.getRecentPostTitles':mt_getRecentPostTitles,
	##pingback
	'pingback.ping':pingback_ping,
	})
# {{{ Handlers
class CallApi(BaseRequestHandler):
	"""HTTP endpoint (/rpc, /xmlrpc.php) feeding POST bodies to the dispatcher."""
	def get(self):
		# GET is not part of XML-RPC; log it and show a hint.
		Logger(request = self.request.uri, response = '----------------------------------').put()
		self.write('<h1>please use POST</h1>')
	def post(self):
		#self.response.headers['Content-Type'] = 'application/xml; charset=utf-8'
		request = self.request.body
		response = dispatcher._marshaled_dispatch(request)
		# Persist the raw exchange for the /rpc/view debug page.
		Logger(request = unicode(request, 'utf-8'), response = unicode(response, 'utf-8')).put()
		self.write(response)
class View(BaseRequestHandler):
	"""Admin-only page at /rpc/view showing the 5 most recent RPC exchanges.

	Fix: the emitted heading read "Reponse"; corrected to "Response".
	"""
	@requires_admin
	def get(self):
		self.write('<html><body><h1>Logger</h1>')
		for log in Logger.all().order('-date').fetch(5,0):
			self.write("<p>date: %s</p>" % log.date)
			self.write("<h1>Request</h1>")
			# Escape the stored XML so it displays literally.
			self.write('<pre>%s</pre>' % cgi.escape(log.request))
			self.write("<h1>Response</h1>")
			self.write('<pre>%s</pre>' % cgi.escape(log.response))
			self.write("<hr />")
		self.write('</body></html>')
class DeleteLog(BaseRequestHandler):
	"""/rpc/dellog: purge all stored RPC log records (admin only)."""
	def get(self):
		# Unlike View this checks admin status imperatively rather than
		# via the requires_admin decorator.
		if self.chk_admin():
			for log in Logger.all():
				log.delete()
			self.redirect('/rpc/view')
def main():
	# CGI entry point mapping the RPC endpoints onto webapp handlers.
	#webapp.template.register_template_library("filter")
	application = webapp.WSGIApplication(
		[
			('/rpc', CallApi),
			('/xmlrpc\.php',CallApi),
			('/rpc/view', View),
			('/rpc/dellog', DeleteLog),
		],
		debug=True)
	wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
| Python |
# -*- coding: utf-8 -*-
import cgi, os,sys,math
import wsgiref.handlers
import google.appengine.api
# Google App Engine imports.
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template, \
WSGIApplication
from google.appengine.api import users
##import app.webapp as webapp2
from google.appengine.ext import db
# Force Django to reload its settings.
from datetime import datetime ,timedelta
import base64,random
from django.utils import simplejson
import filter as myfilter
from django.template.loader import *
##settings.configure(LANGUAGE_CODE = 'zh-cn')
# Must set this env var before importing any part of Django
from app.safecode import Image
from app.gmemsess import Session
from base import *
from model import *
##os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
##from django.utils.translation import activate
##from django.conf import settings
##settings._target = None
##activate(g_blog.language)
from google.appengine.ext import zipserve
def doRequestHandle(old_handler,new_handler,**args):
	# Delegate a GET to another handler instance, reusing the current
	# request/response pair.
	new_handler.initialize(old_handler.request,old_handler.response)
	return new_handler.get(**args)
def doRequestPostHandle(old_handler,new_handler,**args):
	# Delegate a POST to another handler instance, reusing the current
	# request/response pair.
	new_handler.initialize(old_handler.request,old_handler.response)
	return new_handler.post(**args)
class BasePublicPage(BaseRequestHandler):
	"""Base class for public-facing pages; preloads sidebar data."""
	def initialize(self, request, response):
		BaseRequestHandler.initialize(self,request, response)
		# Top-level menu: published pages with no parent, in menu order.
		m_pages=Entry.all().filter('entrytype =','page')\
			.filter('published =',True)\
			.filter('entry_parent =',0)\
			.order('menu_order')
		blogroll=Link.all().filter('linktype =','blogroll')
		# Last 12 month buckets for the archive widget.
		archives=Archive.all().order('-year').order('-month').fetch(12)
		alltags=Tag.all()
		self.template_vals.update({
			'menu_pages':m_pages,
			'categories':Category.all(),
			'blogroll':blogroll,
			'archives':archives,
			'tags':Tag.all(),
			'alltags':alltags,
			'recent_comments':Comment.all().order('-date').fetch(5)
		})
	def m_list_pages(self):
		# Render the menu <li> items, highlighting the current page.
		menu_pages=None
		entry=None
		if self.template_vals.has_key('menu_pages'):
			menu_pages= self.template_vals['menu_pages']
		if self.template_vals.has_key('entry'):
			entry=self.template_vals['entry']
		ret=''
		current=''
		for page in menu_pages:
			if entry and entry.entrytype=='page' and entry.key()==page.key():
				current= 'current_page_item'
			else:
				current= 'page_item'
			#page is external page ,and page.slug is none.
			if page.is_external_page and not page.slug:
				ret+='<li class="%s"><a href="%s" target="%s" >%s</a></li>'%( current,page.link,page.target, page.title)
			else:
				ret+='<li class="%s"><a href="/%s" target="%s">%s</a></li>'%( current,page.link, page.target,page.title)
		return ret
	def sticky_entrys(self):
		# Published posts pinned to the top of the front page.
		return Entry.all().filter('entrytype =','post')\
			.filter('published =',True)\
			.filter('sticky =',True)\
			.order('-date')
class MainPage(BasePublicPage):
	"""Front page: paginated post list; ?p=<id> delegates to SinglePost."""
	def get(self,page=1):
		postid=self.param('p')
		if postid:
			# WordPress-style permalink ?p=123 -> render the single post.
			try:
				postid=int(postid)
				return doRequestHandle(self,SinglePost(),postid=postid) #singlepost.get(postid=postid)
			except:
				return self.error(404)
		self.doget(page)
	def post(self):
		# POST is only meaningful for ?p=<id> (comment/trackback on a post);
		# NOTE(review): a POST without 'p' falls through and returns nothing.
		postid=self.param('p')
		if postid:
			try:
				postid=int(postid)
				return doRequestPostHandle(self,SinglePost(),postid=postid) #singlepost.get(postid=postid)
			except:
				return self.error(404)
	@cache()
	def doget(self,page):
		page=int(page)
		entrycount=g_blog.postscount()
		# Ceiling division: number of pages at posts_per_page each.
		max_page = entrycount / g_blog.posts_per_page + ( entrycount % g_blog.posts_per_page and 1 or 0 )
		if page < 1 or page > max_page:
			return self.error(404)
		entries = Entry.all().filter('entrytype =','post').\
				filter("published =", True).order('-date').\
				fetch(self.blog.posts_per_page, offset = (page-1) * self.blog.posts_per_page)
		show_prev =entries and (not (page == 1))
		show_next =entries and (not (page == max_page))
		#print page,max_page,g_blog.entrycount,self.blog.posts_per_page
		return self.render('index',{'entries':entries,
							'show_prev' : show_prev,
							'show_next' : show_next,
							'pageindex':page,
							'ishome':True,
							'pagecount':max_page,
							'postscount':entrycount})
class entriesByCategory(BasePublicPage):
	"""Lists published posts in the category identified by its slug."""
	@cache()
	def get(self,slug=None):
		if not slug:
			self.error(404)
			return
		# ?page=N selects the result page; anything unparseable means 1.
		try:
			page_index=int(self.param('page'))
		except:
			page_index=1
		slug=urldecode(slug)
		cats=Category.all().filter('slug =',slug).fetch(1)
		if cats:
			entries=Entry.all().filter("published =", True).filter('categorie_keys =',cats[0].key()).order("-date")
			entries,links=Pager(query=entries,items_per_page=20).fetch(page_index)
			self.render('category',{'entries':entries,'category':cats[0],'pager':links})
		else:
			self.error(414,slug)
class archive_by_month(BasePublicPage):
	"""Lists published posts for a given year/month."""
	@cache()
	def get(self,year,month):
		try:
			page_index=int (self.param('page'))
		except:
			page_index=1
		# Half-open month interval [firstday, lastday); December wraps
		# into January of the next year.
		firstday=datetime(int(year),int(month),1)
		if int(month)!=12:
			lastday=datetime(int(year),int(month)+1,1)
		else:
			lastday=datetime(int(year)+1,1,1)
		entries=db.GqlQuery("SELECT * FROM Entry WHERE date > :1 AND date <:2 AND entrytype =:3 AND published = True ORDER BY date DESC",firstday,lastday,'post')
		entries,links=Pager(query=entries).fetch(page_index)
		self.render('month',{'entries':entries,'year':year,'month':month,'pager':links})
class entriesByTag(BasePublicPage):
	"""Lists published posts carrying the given tag, 20 per page.

	Fix: removed the unused function-local ``import urllib`` (decoding is
	done by the project's urldecode helper).
	"""
	@cache()
	def get(self, slug=None):
		if not slug:
			self.error(404)
			return
		# ?page=N selects the result page; anything unparseable means 1.
		try:
			page_index = int(self.param('page'))
		except:
			page_index = 1
		slug = urldecode(slug)
		entries = Entry.all().filter("published =", True).filter('tags =', slug).order("-date")
		entries, links = Pager(query=entries, items_per_page=20).fetch(page_index)
		self.render('tag', {'entries': entries, 'tag': slug, 'pager': links})
class SinglePost(BasePublicPage):
    """Render one entry (post or static page) and accept Trackback pings.

    GET looks the entry up by numeric post id or by link slug, counts the
    view, and renders the 'single' or 'page' template with its comments.
    POST implements the Trackback protocol and stores each ping as a comment.
    """

    @cache()
    def get(self, slug=None, postid=None):
        # Look the entry up by numeric id when given, otherwise by its link.
        if postid:
            entries = Entry.all().filter("published =", True).filter('post_id =', postid).fetch(1)
        else:
            slug = urldecode(slug)
            entries = Entry.all().filter("published =", True).filter('link =', slug).fetch(1)
        if not entries or len(entries) == 0:
            return self.error(404)
        # mp selects which page of the entry's comments to show.
        mp = self.paramint("mp", 1)
        entry = entries[0]
        if entry.is_external_page:
            return self.redirect(entry.external_page_address, True)
        if g_blog.allow_pingback and entry.allow_trackback:
            self.response.headers['X-Pingback'] = "%s/rpc" % str(g_blog.baseurl)
        entry.readtimes += 1
        entry.put()
        self.entry = entry
        comments = entry.get_comments_by_page(mp, self.blog.comments_per_page)
        # Restoring the commenter's identity from the cookie is disabled.
        commentuser = ['', '', '']
        comments_nav = self.get_comments_nav(mp, entry.comments().count())
        # Both templates receive an identical context; only the name differs.
        if entry.entrytype == 'post':
            template = 'single'
        else:
            template = 'page'
        self.render(template,
                    {
                        'entry': entry,
                        'relateposts': entry.relateposts,
                        'comments': comments,
                        'user_name': commentuser[0],
                        'user_email': commentuser[1],
                        'user_url': commentuser[2],
                        'checknum1': random.randint(1, 10),
                        'checknum2': random.randint(1, 10),
                        'comments_nav': comments_nav,
                    })

    def post(self, slug=None, postid=None):
        '''handle trackback'''
        error = '''
<?xml version="1.0" encoding="utf-8"?>
<response>
<error>1</error>
<message>%s</message>
</response>
'''
        success = '''
<?xml version="1.0" encoding="utf-8"?>
<response>
<error>0</error>
</response>
'''
        if not g_blog.allow_trackback:
            # Fix: used to write `self.error % ...`, applying '%' to the
            # handler's error() *method*, which raised TypeError.
            self.response.out.write(error % "Trackback denied.")
            return
        self.response.headers['Content-Type'] = "text/xml"
        if postid:
            entries = Entry.all().filter("published =", True).filter('post_id =', postid).fetch(1)
        else:
            slug = urldecode(slug)
            entries = Entry.all().filter("published =", True).filter('link =', slug).fetch(1)
        if not entries or len(entries) == 0:
            self.response.out.write(error % "empty slug/postid")
            return
        entry = entries[0]
        # The pinging side must quote this entry's key as ?code=... .
        import cgi
        param = cgi.parse_qs(self.request.uri)
        # Fix: `code` used to be left unbound (NameError at the comparison
        # below) whenever the query string carried no 'code' parameter.
        code = None
        if param.has_key('code'):
            code = param['code'][0]
        if (not str(entry.key()) == code) or entry.is_external_page or not entry.allow_trackback:
            self.response.out.write(error % "Invalid trackback url.")
            return
        coming_url = self.param('url')
        blog_name = myfilter.do_filter(self.param('blog_name'))
        excerpt = myfilter.do_filter(self.param('excerpt'))
        title = myfilter.do_filter(self.param('title'))
        if not coming_url or not blog_name or not excerpt or not title:
            self.response.out.write(error % "not enough post info")
            return
        import time
        # Wait half a second in case the other side hasn't been published yet.
        time.sleep(0.5)
        # Accept at most one ping per (entry, source url) pair.
        comment = Comment.all().filter("entry =", entry).filter("weburl =", coming_url).get()
        if comment:
            self.response.out.write(error % "has pinged before")
            return
        comment = Comment(author=blog_name,
                          content="<strong>" + title[:250] + "...</strong><br/>" +
                                  excerpt[:250] + '...',
                          weburl=coming_url,
                          entry=entry)
        comment.ip = self.request.remote_addr
        comment.ctype = COMMENT_TRACKBACK
        try:
            comment.save()
            memcache.delete("/" + entry.link)
            self.write(success)
            g_blog.tigger_action("pingback_post", comment)
        except:
            self.response.out.write(error % "unknow error")

    def get_comments_nav(self, pindex, count):
        """Build the comment pagination widget for `count` comments.

        Returns "" when everything fits on one page, otherwise a dict with
        the rendered nav html, the current page index and the page count.
        """
        maxpage = count / g_blog.comments_per_page + (count % g_blog.comments_per_page and 1 or 0)
        if maxpage == 1:
            return ""
        result = ""
        if pindex > 1:
            result = "<a class='comment_prev' href='" + self.get_comments_pagenum_link(pindex - 1) + "'>«</a>"
        # Show a window of up to 3 pages either side of the current one.
        minr = max(pindex - 3, 1)
        maxr = min(pindex + 3, maxpage)
        if minr > 2:
            result += "<a class='comment_num' href='" + self.get_comments_pagenum_link(1) + "'>1</a>"
            result += "<span class='comment_dot' >...</span>"
        for n in range(minr, maxr + 1):
            if n == pindex:
                result += "<span class='comment_current'>" + str(n) + "</span>"
            else:
                result += "<a class='comment_num' href='" + self.get_comments_pagenum_link(n) + "'>" + str(n) + "</a>"
        if maxr < maxpage - 1:
            result += "<span class='comment_dot' >...</span>"
            result += "<a class='comment_num' href='" + self.get_comments_pagenum_link(maxpage) + "'>" + str(maxpage) + "</a>"
        if pindex < maxpage:
            result += "<a class='comment_next' href='" + self.get_comments_pagenum_link(pindex + 1) + "'>»</a>"
        return {'nav': result, 'current': pindex, 'maxpage': maxpage}

    def get_comments_pagenum_link(self, pindex):
        """Link to comment page `pindex` of the current entry (#comments)."""
        url = str(self.entry.link)
        if url.find('?') >= 0:
            return "/" + url + "&mp=" + str(pindex) + "#comments"
        else:
            return "/" + url + "?mp=" + str(pindex) + "#comments"
class FeedHandler(BaseRequestHandler):
    """Atom feed of the ten most recent published posts."""

    @cache(time=600)
    def get(self, tags=None):
        entries = Entry.all().filter('entrytype =', 'post').filter('published =', True).order('-date').fetch(10)
        # Fix: last_updated was referenced below even when the blog had no
        # posts, raising NameError; fall back to "now" for an empty feed.
        last_updated = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
        if entries and entries[0]:
            last_updated = entries[0].date.strftime("%Y-%m-%dT%H:%M:%SZ")
        for e in entries:
            e.formatted_date = e.date.strftime("%Y-%m-%dT%H:%M:%SZ")
        self.response.headers['Content-Type'] = 'application/atom+xml'
        self.render2('views/atom.xml', {'entries': entries, 'last_updated': last_updated})
class CommentsFeedHandler(BaseRequestHandler):
    """Atom feed of the ten most recent comments."""

    @cache(time=600)
    def get(self, tags=None):
        comments = Comment.all().order('-date').fetch(10)
        # Fix: last_updated was referenced below even with zero comments,
        # raising NameError; fall back to "now" for an empty feed.
        last_updated = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
        if comments and comments[0]:
            last_updated = comments[0].date.strftime("%Y-%m-%dT%H:%M:%SZ")
        for e in comments:
            e.formatted_date = e.date.strftime("%Y-%m-%dT%H:%M:%SZ")
        self.response.headers['Content-Type'] = 'application/atom+xml'
        self.render2('views/comments.xml', {'comments': comments, 'last_updated': last_updated})
class SitemapHandler(BaseRequestHandler):
    """Generate sitemap.xml covering home page, entries, categories and tags."""

    @cache(time=36000)
    def get(self, tags=None):
        url_entries = []

        def record(loc, lastmod=None, changefreq=None, priority=None):
            # Collect the fields of one <url> element for the template.
            url_entries.append({
                'location': loc,
                'lastmod': lastmod,
                'changefreq': changefreq,
                'priority': priority
            })

        record(g_blog.baseurl, changefreq='daily', priority=0.9)

        published = Entry.all().filter('published =', True).order('-date').fetch(g_blog.sitemap_entries)
        for item in published:
            record("%s/%s" % (g_blog.baseurl, item.link),
                   item.mod_date or item.date, 'never', 0.6)

        if g_blog.sitemap_include_category:
            for cat in Category.all():
                record("%s/category/%s" % (g_blog.baseurl, cat.slug), None, 'weekly', 0.5)

        if g_blog.sitemap_include_tag:
            for tag in Tag.all():
                record("%s/tag/%s" % (g_blog.baseurl, urlencode(tag.tag)), None, 'weekly', 0.5)

        self.render2('views/sitemap.xml', {'urlset': url_entries})
class Error404(BaseRequestHandler):
    """Catch-all route: anything that falls through renders a 404 page."""

    @cache(time=36000)
    def get(self, slug=None):
        self.error(404)
class Post_comment(BaseRequestHandler):
    """POST target for submitting a comment on an entry.

    Validates an anti-spam check code for anonymous visitors, filters and
    stores the comment, remembers the commenter's identity in a cookie, and
    answers either as JSON (ajax) or with a redirect back to the entry.
    """
    #@printinfo
    def post(self,slug=None):
        # 'useajax' switches the response format between JSON and redirect.
        useajax=self.param('useajax')=='1'
        name=self.param('author')
        email=self.param('email')
        url=self.param('url')
        # Datastore key of the entry being commented on.
        key=self.param('key')
        content=self.param('comment')
        # Id of the comment being replied to (0 = top-level comment).
        parent_id=self.paramint('parentid',0)
        reply_notify_mail=self.parambool('reply_notify_mail')
        sess=Session(self,timeout=180)
        if not self.is_login:
            #if not (self.request.cookies.get('comment_user', '')):
            # Anonymous visitors must pass the configured check-code test.
            try:
                check_ret=True
                if g_blog.comment_check_type in (1,2) :
                    # Image / sum captcha: the answer is kept in the session.
                    checkret=self.param('checkret')
                    check_ret=(int(checkret) == sess['code'])
                elif g_blog.comment_check_type ==3:
                    # Client-side arithmetic question; normalise full-width
                    # characters before comparing the visitor's answer.
                    import app.gbtools as gb
                    checknum=self.param('checknum')
                    checkret=self.param('checkret')
                    check_ret=eval(checknum)==int(gb.stringQ2B( checkret))
                if not check_ret:
                    if useajax:
                        self.write(simplejson.dumps((False,-102,_('Your check code is invalid .'))))
                    else:
                        self.error(-102,_('Your check code is invalid .'))
                    return
            except:
                # Any parse failure counts as a wrong check code.
                if useajax:
                    self.write(simplejson.dumps((False,-102,_('Your check code is invalid .'))))
                else:
                    self.error(-102,_('Your check code is invalid .'))
                return
            # A code may be used only once.
            sess.invalidate()
        # Normalise the body and clamp the author fields to sane lengths.
        content=content.replace('\n','<br>')
        content=myfilter.do_filter(content)
        name=cgi.escape(name)[:20]
        url=cgi.escape(url)[:100]
        if not (name and email and content):
            if useajax:
                self.write(simplejson.dumps((False,-101,_('Please input name, email and comment .'))))
            else:
                self.error(-101,_('Please input name, email and comment .'))
        else:
            comment=Comment(author=name,
                            content=content,
                            email=email,
                            reply_notify_mail=reply_notify_mail,
                            entry=Entry.get(key))
            if url:
                try:
                    comment.weburl=url
                except:
                    # Invalid URLs are silently dropped rather than rejected.
                    comment.weburl=None
            #name=name.decode('utf8').encode('gb2312')
            # Remember the author's identity client-side for 100 days.
            info_str='#@#'.join([urlencode(name),urlencode(email),urlencode(url)])
            #info_str='#@#'.join([name,email,url.encode('utf8')])
            cookiestr='comment_user=%s;expires=%s;domain=%s;path=/'%( info_str,
                       (datetime.now()+timedelta(days=100)).strftime("%a, %d-%b-%Y %H:%M:%S GMT"),
                       ''
                       )
            comment.ip=self.request.remote_addr
            if parent_id:
                comment.parent=Comment.get_by_id(parent_id)
            try:
                comment.save()
                # Drop the cached entry page so the new comment shows up.
                memcache.delete("/"+comment.entry.link)
                self.response.headers.add_header( 'Set-Cookie', cookiestr)
                if useajax:
                    comment_c=self.get_render('comment',{'comment':comment})
                    self.write(simplejson.dumps((True,comment_c.decode('utf8'))))
                else:
                    self.redirect(self.referer+"#comment-"+str(comment.key().id()))
                comment.entry.removecache()
                memcache.delete("/feed/comments")
            except:
                if useajax:
                    self.write(simplejson.dumps((False,-102,_('Comment not allowed.'))))
                else:
                    self.error(-102,_('Comment not allowed .'))
class ChangeTheme(BaseRequestHandler):
    """Admin-only: switch the active theme (?t=name) and go back home."""

    @requires_admin
    def get(self, slug=None):
        g_blog.theme_name = self.param('t')
        g_blog.get_theme()
        self.redirect('/')
class do_action(BaseRequestHandler):
    """Dispatch /do/<slug> requests to a method named action_<slug>."""

    def _dispatch(self, slug):
        # Shared GET/POST dispatcher; unknown or failing actions yield 404.
        # Fix: the old get() caught BaseException (swallowing SystemExit and
        # KeyboardInterrupt); both verbs now catch Exception only.
        try:
            func = getattr(self, 'action_' + slug, None)
            if callable(func):
                func()
            else:
                self.error(404)
        except Exception:
            self.error(404)

    def get(self, slug=None):
        self._dispatch(slug)

    def post(self, slug=None):
        self._dispatch(slug)

    @ajaxonly
    def action_info_login(self):
        # JSON describing the visitor: login state, admin flag and nickname.
        if self.login_user:
            self.write(simplejson.dumps({'islogin': True,
                                         'isadmin': self.is_admin,
                                         'name': self.login_user.nickname()}))
        else:
            self.write(simplejson.dumps({'islogin': False}))

    #@hostonly
    @cache()
    def action_proxy(self):
        # Fetch a remote URL server-side and relay it with long-lived caching.
        result = urlfetch.fetch(self.param("url"), headers=self.request.headers)
        if result.status_code == 200:
            self.response.headers['Expires'] = 'Thu, 15 Apr 3010 20:00:00 GMT'
            self.response.headers['Cache-Control'] = 'max-age=3600,public'
            self.response.headers['Content-Type'] = result.headers['Content-Type']
            self.response.out.write(result.content)
        return

    def action_getcomments(self):
        # Render the comment-list fragment for one entry (ajax helper).
        key = self.param('key')
        entry = Entry.get(key)
        comments = Comment.all().filter("entry =", key)
        commentuser = self.request.cookies.get('comment_user', '')
        if commentuser:
            commentuser = commentuser.split('#@#')
        else:
            commentuser = ['', '', '']
        vals = {
            'entry': entry,
            'comments': comments,
            'user_name': commentuser[0],
            'user_email': commentuser[1],
            'user_url': commentuser[2],
            'checknum1': random.randint(1, 10),
            'checknum2': random.randint(1, 10),
        }
        html = self.get_render('comments', vals)
        self.write(simplejson.dumps(html.decode('utf8')))

    def action_test(self):
        # Smoke test for the i18n machinery.
        self.write(settings.LANGUAGE_CODE)
        self.write(_("this is a test"))
class getMedia(webapp.RequestHandler):
    """Serve a stored Media blob by key; ?a=download bumps its counter."""

    def get(self, slug):
        media = Media.get(slug)
        if not media:
            return
        self.response.headers['Expires'] = 'Thu, 15 Apr 3010 20:00:00 GMT'
        self.response.headers['Cache-Control'] = 'max-age=3600,public'
        self.response.headers['Content-Type'] = str(media.mtype)
        self.response.out.write(media.bits)
        action = self.request.get('a')
        if action and action.lower() == 'download':
            media.download += 1
            media.put()
class CheckImg(BaseRequestHandler):
    """Serve a captcha PNG and stash the expected answer in the session."""

    def get(self):
        img = Image()
        imgdata = img.create()
        # Always start from a fresh session so stale codes cannot be reused.
        sess = Session(self, timeout=900)
        if not sess.is_new():
            sess.invalidate()
            sess = Session(self, timeout=900)
        sess['code'] = img.text
        sess.save()
        self.response.headers['Content-Type'] = "image/png"
        self.response.out.write(imgdata)
class CheckCode(BaseRequestHandler):
    """Serve an arithmetic text captcha; the sum is kept in the session."""

    def get(self):
        sess = Session(self, timeout=900)
        first = random.randint(1, 10)
        second = random.randint(1, 10)
        markup = "<span style='font-size:13px;color:red'>%d + %d =</span>" % (first, second)
        sess['code'] = first + second
        sess.save()
        #self.response.headers['Content-Type'] = "text/html"
        self.response.out.write(markup)
class Other(BaseRequestHandler):
    """Fallback /e/<slug> handler: delegates to plugin-registered url maps."""

    def get(self, slug=None):
        if not g_blog.tigger_urlmap(slug, page=self):
            self.error(404)

    def post(self, slug=None):
        content = g_blog.tigger_urlmap(slug, page=self)
        if content:
            self.write(content)
        else:
            self.error(404)
def getZipHandler(**args):
    """Route xheditor static assets out of a local zip archive (dev helper)."""
    handler = zipserve.make_zip_handler('''D:\\work\\micolog\\plugins\\xheditor\\xheditor.zip''')
    return ('/xheditor/(.*)', handler)
def main():
    """Register template libraries, wire the url routes and run under CGI."""
    webapp.template.register_template_library('filter')
    webapp.template.register_template_library('app.recurse')
    # Routes are matched top-down; SinglePost's broad slug pattern must stay
    # near the end, just before the 404 catch-all.
    routes = [
        ('/media/([^/]*)/{0,1}.*', getMedia),
        ('/checkimg/', CheckImg),
        ('/checkcode/', CheckCode),
        ('/skin', ChangeTheme),
        ('/feed', FeedHandler),
        ('/feed/comments', CommentsFeedHandler),
        ('/sitemap', SitemapHandler),
        ('/post_comment', Post_comment),
        ('/page/(?P<page>\d+)', MainPage),
        ('/category/(.*)', entriesByCategory),
        ('/(\d{4})/(\d{2})', archive_by_month),
        ('/tag/(.*)', entriesByTag),
        #('/\?p=(?P<postid>\d+)',SinglePost),
        ('/', MainPage),
        ('/do/(\w+)', do_action),
        ('/e/(.*)', Other),
        ('/([\\w\\-\\./%]+)', SinglePost),
        ('.*', Error404),
    ]
    application = webapp.WSGIApplication(routes, debug=False)
    g_blog.application = application
    g_blog.plugins.register_handlerlist(application)
    wsgiref.handlers.CGIHandler().run(application)
# Standard CGI entry point for App Engine.
if __name__ == "__main__":
    main()
# Wrapper for loading templates from zipfile.
import zipfile,logging,os
from django.template import TemplateDoesNotExist
from django.conf import settings
# Emitted at import time so the log shows the zip loader is active.
logging.debug("zipload imported")
# Cache of opened ZipFile objects keyed by archive path; an archive that
# failed to open is cached as '' so it is not retried on every lookup.
zipfile_cache={}
# Folder inside each zip archive that holds the templates.
_TEMPLATES_='templates'
def get_from_zipfile(zipfilename,name):
logging.debug("get_from_zipfile(%s,%s)"%(zipfilename,name))
zipfile_object = zipfile_cache.get(zipfilename)
if zipfile_object is None:
try:
zipfile_object = zipfile.ZipFile(zipfilename)
except (IOError, RuntimeError), err:
logging.error('Can\'t open zipfile %s: %s', zipfilename, err)
zipfile_object = ''
zipfile_cache[zipfilename] = zipfile_object
if zipfile_object == '':
return None
try:
data = zipfile_object.read(name)
return data
except (KeyError, RuntimeError), err:
return None
def get_template_sources(template_dirs=None):
    """Yield each configured template directory that is a zip archive."""
    candidates = template_dirs or settings.TEMPLATE_DIRS
    for candidate in candidates:
        if candidate.endswith(".zip"):
            yield candidate
def load_template_source(template_name, template_dirs=None):
tried = []
logging.debug("zip_loader::load_template_source:"+template_name)
## spart= template_name.split('/')
## theme_name=spart[0]
##
## zipfile=theme_name+".zip"
## template_file=os.path.join(theme_name,*spart[1:])
template_file='/'.join((_TEMPLATES_, template_name))
for zipfile in get_template_sources(template_dirs):
try:
return (get_from_zipfile(zipfile,template_file), os.path.join(zipfile,template_file))
except IOError:
tried.append(zipfile)
if tried:
error_msg = "Tried %s" % tried
else:
error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
raise TemplateDoesNotExist, error_msg
load_template_source.is_usable = True
| Python |
from django.template import Library
from django.template import Node, NodeList, Template, Context
from django.template import TemplateSyntaxError, VariableDoesNotExist, BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END, SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END
register = Library()
class RecurseNode( Node ):
    """Template node behind {% recurse x in seq children="attr" ... %}.

    Renders nodelist_first for every item, recurses into the item's children
    (the iterable attribute named by `children_name`), then renders
    nodelist_second, exposing a `recurseloop` context variable carrying
    depth/counter/first/last/parent. Any extra keyword arguments have the
    form name=(initial, increment) and are bound in the context as
    initial + increment * depth.
    """
    def __init__(self, **kwargs):
        self.loopvar, self.sequence = kwargs['loopvar'], kwargs['sequence']
        self.children_name = kwargs['children_name']
        self.nodelist_first, self.nodelist_second = kwargs['nodelist_first'], kwargs['nodelist_second']
        del kwargs['nodelist_first'], kwargs['nodelist_second'], kwargs['sequence'], kwargs['children_name'], kwargs['loopvar']
        # Whatever remains are the per-depth increment variables.
        self.kwargs = kwargs

    def __repr__(self):
        # Fix: the old implementation referenced self.loopvars,
        # self.nodelist_loop and self.is_reversed — none of which exist on
        # this class — so repr() always raised AttributeError.
        return "<Recurse Node: for %s in %s, tail_len: %d>" % \
            (self.loopvar, self.sequence, len(self.nodelist_first))

    def __iter__(self):
        for node in self.nodelist_first:
            yield node
        for node in self.nodelist_second:
            yield node

    def get_nodes_by_type(self, nodetype):
        """Collect nodes of `nodetype` from self and both sub-nodelists."""
        nodes = []
        if isinstance(self, nodetype):
            nodes.append(self)
        nodes.extend( self.nodelist_first.get_nodes_by_type(nodetype) )
        nodes.extend( self.nodelist_second.get_nodes_by_type(nodetype) )
        return nodes

    def render(self, context, depth=0, values=False):
        """Render one recursion level; `values` is False only at the top."""
        nodelist = NodeList()
        if 'recurseloop' in context:
            parentloop = context['recurseloop']
        else:
            parentloop = {}
        context.push()
        # On the first recursion pass, we have no values
        if not values:
            try:
                values = self.sequence.resolve(context, True)
            except VariableDoesNotExist:
                values = []
        if values is None:
            values = []
        if not hasattr(values, '__len__'):
            values = list(values)
        len_values = len(values)
        # Create a recurseloop value in the context. We'll update counters on each iteration just below.
        loop_dict = context['recurseloop'] = {'parent': parentloop}
        loop_dict['depth'] = depth + 1
        loop_dict['depth0'] = depth
        for i, item in enumerate(values):
            # Add the additional arguments to the context
            # They come in the form of {'name':(initial,increment)}
            # As for now only numbers are supported, but also strings can be multiplied
            for k,v in self.kwargs.iteritems():
                context[k] = v[0] + v[1]*depth
            # Shortcuts for current loop iteration number.
            loop_dict['counter0'] = i
            loop_dict['counter'] = i+1
            # Boolean values designating first and last times through loop.
            loop_dict['first'] = (i == 0)
            loop_dict['last'] = (i == len_values - 1)
            context[ self.loopvar ] = item
            for node in self.nodelist_first:
                nodelist.append( node.render(context) )
            # Recurse into the item's children between the two nodelists.
            if len( getattr( item, self.children_name ) ):
                nodelist.append( self.render( context, depth+1, getattr( item, self.children_name ) ) )
            for node in self.nodelist_second:
                nodelist.append( node.render(context) )
        context.pop()
        return nodelist.render(context)
#@register.tag(name="for")
def do_recurse(parser, token):
    """
    Recursively loops over each item in an array .
    It also increments passed variables on each recursion depth.
    For example, to display a list of comments with replies given ``comment_list``:
        {% recurse comment in comments children="replies" indent=(0,20) %}
        <div style="margin-left:{{indent}}px">{{ comment.text }}</div>
        {% endrecurse %}
    ``children`` is the name of the iterable that contains the children of the current element
    ``children`` needs to be a property of comment, and is required for the recurseloop to work
    You can pass additional parameters after children in the form of:
        var_name=(intial_value, increment)
    You need to take care of creating the tree structure on your own.
    As for now there should be no spaces between the equal ``=``
    signs when assigning children or additional variables
    In addition to the variables passed, the recurse loop sets a
    number of variables available within the loop:
        ==========================  ================================================
        Variable                    Description
        ==========================  ================================================
        ``recurseloop.depth``       The current depth of the loop (1 is the top level)
        ``recurseloop.depth0``      The current depth of the loop (0 is the top level)
        ``recurseloop.counter``     The current iteration of the current level(1-indexed)
        ``recurseloop.counter0``    The current iteration of the current level(0-indexed)
        ``recurseloop.first``       True if this is the first time through the current level
        ``recurseloop.last``        True if this is the last time through the current level
        ``recurseloop.parent``      This is the loop one level "above" the current one
        ==========================  ================================================
    You can also use the tag {% yield %} inside a recursion.
    The ``yield`` tag will output the same HTML that's between the recurse and endrecurse tags
    if the current element has children. If there are no children ``yield`` will output nothing
    You must not, however wrap the ``yield`` tag inside other tags, just like you must not wrap
    the ``else`` tag inside other tags when making if-else-endif
    """
    # We will be throwing this a lot
    def tError( contents ):
        # Uniform syntax-error reporter for every malformed tag variant.
        raise TemplateSyntaxError(
            "'recurse' statements should use the format"
            "'{%% recurse x in y children=\"iterable_property_name\" "
            "arg1=(float,float) arg2=(\"str\",\"str\") %%}: %s" % contents )
    # Token layout: recurse <var> in <seq> children="name" [extra=(a,b) ...]
    bits = token.contents.split()
    quotes = ["'","\""]
    lenbits = len(bits)
    if lenbits < 5:
        tError(token.contents)
    in_index = 2
    children_index = 4
    if bits[in_index] != 'in':
        tError(token.contents)
    children_token = bits[children_index].split("=")
    if len(children_token) != 2 or children_token[0] != 'children':
        tError(token.contents)
    # The children attribute name must be consistently quoted.
    f = children_token[1][0]
    l = children_token[1][-1]
    if f != l or f not in quotes:
        tError(token.contents)
    else:
        children_token[1] = children_token[1].replace(f,"")
    def convert(val):
        # Parse one tuple element: a float, or a quoted string.
        try:
            val = float(val)
        except ValueError:
            f = val[0]
            l = val[-1]
            if f != l or f not in quotes:
                tError(token.contents)
            val = unicode( val.replace(f,"") )
        return val
    node_vars = {}
    if lenbits > 5:
        # Remaining bits are name=(initial,increment) increment variables.
        for bit in bits[5:]:
            arg = bit.split("=")
            if len(arg) != 2 :
                tError(token.contents)
            f = arg[1][0]
            l = arg[1][-1]
            if f != "(" or l != ")":
                tError(token.contents)
            try:
                argval = tuple([ convert(x) for x in arg[1].replace("(","").replace(")","").split(",") ])
            # Invalid float number, or missing comma
            except (IndexError, ValueError):
                tError(token.contents)
            node_vars[ str(arg[0]) ] = argval
    node_vars['children_name'] = children_token[1]
    node_vars['loopvar'] = bits[1]
    node_vars['sequence'] = parser.compile_filter(bits[3])
    # Split the body at an optional {% yield %}: the first part renders
    # before the recursion into children, the second part after it.
    nodelist_first = parser.parse( ('yield', 'endrecurse',) )
    token = parser.next_token()
    if token.contents == 'yield':
        nodelist_second = parser.parse( ('endrecurse', ) )
        parser.delete_first_token()
    else:
        nodelist_second = NodeList()
    node_vars['nodelist_first'] = nodelist_first
    node_vars['nodelist_second'] = nodelist_second
    return RecurseNode(**node_vars)
do_recurse = register.tag("recurse", do_recurse)
| Python |
#!/usr/bin/env python
"""Simple PNG Canvas for Python"""
__version__ = "0.8"
__author__ = "Rui Carmo (http://the.taoofmac.com)"
__copyright__ = "CC Attribution-NonCommercial-NoDerivs 2.0 Rui Carmo"
__contributors__ = ["http://collaboa.weed.rbse.com/repository/file/branches/pgsql/lib/spark_pr.rb"], ["Eli Bendersky"]
import zlib, struct
# The fixed 8-byte magic header that begins every PNG file.
signature = struct.pack("8B", 137, 80, 78, 71, 13, 10, 26, 10)
def blend(c1, c2):
    """Alpha-blend RGB colors c1 and c2, weighting by c2's alpha channel."""
    alpha = c2[3]
    mixed = []
    for channel in range(3):
        mixed.append((c1[channel] * (0xFF - alpha) + c2[channel] * alpha) >> 8)
    return mixed
def intensity(c, i):
    """Return color c with its alpha scaled by intensity i (0-0xFF)."""
    scaled_alpha = (c[3] * i) >> 8
    return [c[0], c[1], c[2], scaled_alpha]
def grayscale(c):
    """Perceptive (luma-weighted) grayscale value of RGB color c."""
    red, green, blue = c[0], c[1], c[2]
    return int(red * 0.3 + green * 0.59 + blue * 0.11)
def gradientList(start, end, steps):
    """Interpolate steps+1 RGBA colors from start to end, inclusive."""
    span = [end[k] - start[k] for k in range(4)]
    return [[start[k] + (span[k] * n) / steps for k in range(4)]
            for n in range(steps + 1)]
class PNGCanvas:
    """Minimal pure-Python RGB canvas with 8-bit truecolor PNG load/dump.

    Pixels live in self.canvas[y][x] as [r, g, b] lists; the current pen
    color (self.color) is RGBA and is alpha-blended onto the canvas by
    point(). dump()/load() speak the PNG color-type-2 (truecolor) format.
    """
    def __init__(self, width, height,bgcolor=[0xff,0xff,0xff,0xff],color=[0,0,0,0xff]):
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here only because they are sliced/copied, never mutated.
        self.canvas = []
        self.width = width
        self.height = height
        self.color = color #rgba pen color used by the drawing primitives
        bgcolor = bgcolor[0:3] # we don't need alpha for background
        for i in range(height):
            self.canvas.append([bgcolor] * width)
    def point(self,x,y,color=None):
        """Blend one pixel at (x,y); silently ignores out-of-bounds coords."""
        if x<0 or y<0 or x>self.width-1 or y>self.height-1: return
        if color == None: color = self.color
        self.canvas[y][x] = blend(self.canvas[y][x],color)
    def _rectHelper(self,x0,y0,x1,y1):
        # Normalise a rectangle: integer coords, ordered corners.
        x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
        if x0 > x1: x0, x1 = x1, x0
        if y0 > y1: y0, y1 = y1, y0
        return [x0,y0,x1,y1]
    def verticalGradient(self,x0,y0,x1,y1,start,end):
        """Fill the rectangle with a top-to-bottom gradient start->end."""
        x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
        grad = gradientList(start,end,y1-y0)
        for x in range(x0, x1+1):
            for y in range(y0, y1+1):
                self.point(x,y,grad[y-y0])
    def rectangle(self,x0,y0,x1,y1):
        """Outline the rectangle using the current pen color."""
        x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
        self.polyline([[x0,y0],[x1,y0],[x1,y1],[x0,y1],[x0,y0]])
    def filledRectangle(self,x0,y0,x1,y1):
        """Fill the rectangle with the current pen color."""
        x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
        for x in range(x0, x1+1):
            for y in range(y0, y1+1):
                self.point(x,y,self.color)
    def copyRect(self,x0,y0,x1,y1,dx,dy,destination):
        """Copy the source rectangle verbatim into `destination` at (dx,dy)."""
        x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
        for x in range(x0, x1+1):
            for y in range(y0, y1+1):
                destination.canvas[dy+y-y0][dx+x-x0] = self.canvas[y][x]
    def blendRect(self,x0,y0,x1,y1,dx,dy,destination,alpha=0xff):
        """Blend the source rectangle into `destination` with given alpha."""
        x0, y0, x1, y1 = self._rectHelper(x0,y0,x1,y1)
        for x in range(x0, x1+1):
            for y in range(y0, y1+1):
                rgba = self.canvas[y][x] + [alpha]
                destination.point(dx+x-x0,dy+y-y0,rgba)
    # draw a line using Xiaolin Wu's antialiasing technique
    def line(self,x0, y0, x1, y1):
        """Draw an antialiased line from (x0,y0) to (x1,y1) (Wu algorithm)."""
        # clean params
        x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
        # Normalise so we always step downward in y.
        if y0>y1:
            y0, y1, x0, x1 = y1, y0, x1, x0
        dx = x1-x0
        if dx < 0:
            sx = -1
        else:
            sx = 1
        dx *= sx
        dy = y1-y0
        # 'easy' cases: horizontal, vertical and perfect-diagonal lines need
        # no antialiasing.
        if dy == 0:
            for x in range(x0,x1,sx):
                self.point(x, y0)
            return
        if dx == 0:
            for y in range(y0,y1):
                self.point(x0, y)
            self.point(x1, y1)
            return
        if dx == dy:
            for x in range(x0,x1,sx):
                self.point(x, y0)
                y0 = y0 + 1
            return
        # main loop: 16.16 fixed-point error accumulator; w is the coverage
        # of the nearer pixel, 0xFF-w of the farther one.
        self.point(x0, y0)
        e_acc = 0
        if dy > dx: # vertical displacement
            e = (dx << 16) / dy
            for i in range(y0,y1-1):
                e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
                if (e_acc <= e_acc_temp):
                    x0 = x0 + sx
                w = 0xFF-(e_acc >> 8)
                self.point(x0, y0, intensity(self.color,(w)))
                y0 = y0 + 1
                self.point(x0 + sx, y0, intensity(self.color,(0xFF-w)))
            self.point(x1, y1)
            return
        # horizontal displacement
        e = (dy << 16) / dx
        for i in range(x0,x1-sx,sx):
            e_acc_temp, e_acc = e_acc, (e_acc + e) & 0xFFFF
            if (e_acc <= e_acc_temp):
                y0 = y0 + 1
            w = 0xFF-(e_acc >> 8)
            self.point(x0, y0, intensity(self.color,(w)))
            x0 = x0 + sx
            self.point(x0, y0 + 1, intensity(self.color,(0xFF-w)))
        self.point(x1, y1)
    def polyline(self,arr):
        """Draw connected line segments through the [x,y] points in arr."""
        for i in range(0,len(arr)-1):
            self.line(arr[i][0],arr[i][1],arr[i+1][0], arr[i+1][1])
    def dump(self):
        """Serialise the canvas to a complete PNG byte string."""
        raw_list = []
        for y in range(self.height):
            raw_list.append(chr(0)) # filter type 0 (None)
            for x in range(self.width):
                raw_list.append(struct.pack("!3B",*self.canvas[y][x]))
        raw_data = ''.join(raw_list)
        # 8-bit image represented as RGB tuples
        # simple transparency, alpha is pure white
        return signature + \
            self.pack_chunk('IHDR', struct.pack("!2I5B",self.width,self.height,8,2,0,0,0)) + \
            self.pack_chunk('tRNS', struct.pack("!6B",0xFF,0xFF,0xFF,0xFF,0xFF,0xFF)) + \
            self.pack_chunk('IDAT', zlib.compress(raw_data,9)) + \
            self.pack_chunk('IEND', '')
    def pack_chunk(self,tag,data):
        """Wrap `data` in a PNG chunk: length, tag, payload, CRC32."""
        to_check = tag + data
        return struct.pack("!I",len(data)) + to_check + struct.pack("!I", zlib.crc32(to_check) & 0xFFFFFFFF)
    def load(self,f):
        """Read an 8-bit truecolor PNG from file object f into the canvas.

        Raises TypeError for any other PNG flavour (bit depth, color type,
        interlacing etc.).
        """
        assert f.read(8) == signature
        self.canvas=[]
        for tag, data in self.chunks(f):
            if tag == "IHDR":
                ( width,
                  height,
                  bitdepth,
                  colortype,
                  compression, filter, interlace ) = struct.unpack("!2I5B",data)
                self.width = width
                self.height = height
                # Only 8-bit truecolor, no compression tricks, no interlace.
                if (bitdepth,colortype,compression, filter, interlace) != (8,2,0,0,0):
                    raise TypeError('Unsupported PNG format')
            # we ignore tRNS because we use pure white as alpha anyway
            elif tag == 'IDAT':
                raw_data = zlib.decompress(data)
                rows = []
                i = 0
                # Each scanline is one filter-type byte plus width*3 samples;
                # defilter each line against the previous reconstructed one.
                for y in range(height):
                    filtertype = ord(raw_data[i])
                    i = i + 1
                    cur = [ord(x) for x in raw_data[i:i+width*3]]
                    if y == 0:
                        rgb = self.defilter(cur,None,filtertype)
                    else:
                        rgb = self.defilter(cur,prev,filtertype)
                    prev = cur
                    i = i+width*3
                    row = []
                    j = 0
                    for x in range(width):
                        pixel = rgb[j:j+3]
                        row.append(pixel)
                        j = j + 3
                    self.canvas.append(row)
    def defilter(self,cur,prev,filtertype,bpp=3):
        """Reverse one scanline's PNG filter (types 0-4) in place."""
        if filtertype == 0: # No filter
            return cur
        elif filtertype == 1: # Sub
            xp = 0
            for xc in range(bpp,len(cur)):
                cur[xc] = (cur[xc] + cur[xp]) % 256
                xp = xp + 1
        elif filtertype == 2: # Up
            for xc in range(len(cur)):
                cur[xc] = (cur[xc] + prev[xc]) % 256
        elif filtertype == 3: # Average
            xp = 0
            for xc in range(len(cur)):
                cur[xc] = (cur[xc] + (cur[xp] + prev[xc])/2) % 256
                xp = xp + 1
        elif filtertype == 4: # Paeth
            xp = 0
            for i in range(bpp):
                cur[i] = (cur[i] + prev[i]) % 256
            for xc in range(bpp,len(cur)):
                # Paeth predictor: pick whichever of left/up/up-left is
                # closest to a + b - c.
                a = cur[xp]
                b = prev[xc]
                c = prev[xp]
                p = a + b - c
                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)
                if pa <= pb and pa <= pc:
                    value = a
                elif pb <= pc:
                    value = b
                else:
                    value = c
                cur[xc] = (cur[xc] + value) % 256
                xp = xp + 1
        else:
            raise TypeError('Unrecognized scanline filter type')
        return cur
    def chunks(self,f):
        """Yield [tag, data] for each PNG chunk in f, verifying each CRC."""
        while 1:
            try:
                length = struct.unpack("!I",f.read(4))[0]
                tag = f.read(4)
                data = f.read(length)
                crc = struct.unpack("!i",f.read(4))[0]
            except:
                # Truncated stream: stop iterating.
                return
            if zlib.crc32(tag + data) != crc:
                raise IOError
            yield [tag,data]
# Self-test: draw gradients and lines, write a PNG, read it back, re-write it.
if __name__ == '__main__':
    width = 128
    height = 64
    print "Creating Canvas..."
    c = PNGCanvas(width,height)
    c.color = [0xff,0,0,0xff]
    c.rectangle(0,0,width-1,height-1)
    print "Generating Gradient..."
    c.verticalGradient(1,1,width-2, height-2,[0xff,0,0,0xff],[0x20,0,0xff,0x80])
    print "Drawing Lines..."
    c.color = [0,0,0,0xff]
    c.line(0,0,width-1,height-1)
    c.line(0,0,width/2,height-1)
    c.line(0,0,width-1,height/2)
    # Copy Rect to Self
    print "Copy Rect"
    c.copyRect(1,1,width/2-1,height/2-1,0,height/2,c)
    # Blend Rect to Self
    print "Blend Rect"
    c.blendRect(1,1,width/2-1,height/2-1,width/2,0,c)
    # Write test
    print "Writing to file..."
    f = open("test.png", "wb")
    f.write(c.dump())
    f.close()
    # Read test
    print "Reading from file..."
    f = open("test.png", "rb")
    c.load(f)
    f.close()
    # Write back: round-trip sanity check of dump()/load().
    print "Writing to new file..."
    f = open("recycle.png","wb")
    f.write(c.dump())
    f.close()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright(C) 2008 SupDo.com
# Licensed under the GUN License, Version 3.0 (the "License");
#
# File: safecode.py
# Author: KuKei
# Create Date: 2008-07-16
# Description: 负责验证码生成。
# Modify Date: 2008-08-06
import md5
import random
from pngcanvas import PNGCanvas
class Image():
    """4-digit numeric captcha rendered as seven-segment-style digits."""
    text = None      # the numeric code drawn on the image
    md5Text = None   # md5 hex digest of text (filled by getMd5Text)
    img = None       # backing PNGCanvas
    width = 0        # computed canvas width in pixels
    height = 0       # computed canvas height in pixels
    # glyph layout metrics (pixels)
    textX = 10       # glyph width
    textY = 10       # glyph height
    beginX = 5       # left margin
    endX = 5         # right margin
    beginY = 5       # top margin
    endY = 5         # bottom margin
    spare = 4        # horizontal gap between glyphs
def __init__(self,text=None):
if(text==None):
self.text = self.getRandom()
else:
self.text = text
#self.getMd5Text()
self.width = len(str(self.text))*(self.spare+self.textX)+self.beginX+self.endX
self.height = self.textY + self.beginY + self.endY
def create(self):
self.img = PNGCanvas(self.width,self.height)
self.img.color = [0xff,0xff,0xff,0xff]
#self.img.color = [0x39,0x9e,0xff,0xff]
#self.img.verticalGradient(1,1,self.width-2, self.height-2,[0xff,0,0,0xff],[0x60,0,0xff,0x80])
self.img.verticalGradient(1,1,self.width-2, self.height-2,[0xff,0x45,0x45,0xff],[0xff,0xcb,0x44,0xff])
for i in range(4):
a = str(self.text)[i]
self.writeText(a,i)
return self.img.dump()
def getRandom(self):
intRand = random.randrange(1000,9999)
return intRand
def getMd5Text(self):
m = md5.new()
m.update(str(self.text))
self.md5Text = m.hexdigest()
def writeText(self,text,pos=0):
if(text=="1"):
self.writeLine(pos, "avc")
elif(text=="2"):
self.writeLine(pos, "aht")
self.writeLine(pos, "hvtr")
self.writeLine(pos, "ahc")
self.writeLine(pos, "hvbl")
self.writeLine(pos, "ahb")
elif(text=="3"):
self.writeLine(pos, "aht")
self.writeLine(pos, "ahc")
self.writeLine(pos, "ahb")
self.writeLine(pos, "avr")
elif(text=="4"):
self.writeLine(pos, "hvtl")
self.writeLine(pos, "ahc")
self.writeLine(pos, "avc")
elif(text=="5"):
self.writeLine(pos, "aht")
self.writeLine(pos, "hvtl")
self.writeLine(pos, "ahc")
self.writeLine(pos, "hvbr")
self.writeLine(pos, "ahb")
elif(text=="6"):
self.writeLine(pos, "aht")
self.writeLine(pos, "avl")
self.writeLine(pos, "ahc")
self.writeLine(pos, "hvbr")
self.writeLine(pos, "ahb")
elif(text=="7"):
self.writeLine(pos, "aht")
self.writeLine(pos, "avr")
elif(text=="8"):
self.writeLine(pos, "aht")
self.writeLine(pos, "avl")
self.writeLine(pos, "ahc")
self.writeLine(pos, "avr")
self.writeLine(pos, "ahb")
elif(text=="9"):
self.writeLine(pos, "aht")
self.writeLine(pos, "avr")
self.writeLine(pos, "ahc")
self.writeLine(pos, "ahb")
self.writeLine(pos, "hvtl")
elif(text=="0"):
self.writeLine(pos, "aht")
self.writeLine(pos, "avl")
self.writeLine(pos, "avr")
self.writeLine(pos, "ahb")
'''
type解释
a:全部,部分上下
h:一半
h:横
v:竖
l:左,上
c:中间
r:右,下
t:上
b:下
'''
def writeLine(self,pos,type):
if(type=="avl"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY,
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY
)
elif(type=="avc"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos+self.textX/2,
self.beginY,
self.beginX+(self.textX+self.spare)*pos+self.textX/2,
self.beginY+self.textY
)
elif(type=="avr"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY
)
elif(type=="aht"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY,
)
elif(type=="ahc"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY/2,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY/2
)
elif(type=="ahb"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY
)
elif(type=="hvtl"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY,
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY/2
)
elif(type=="hvtr"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY/2
)
elif(type=="hvbl"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY/2,
self.beginX+(self.textX+self.spare)*pos,
self.beginY+self.textY
)
elif(type=="hvbr"):
self.img.line(
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY/2,
self.beginX+(self.textX+self.spare)*pos+self.textX,
self.beginY+self.textY
)
| Python |
"""tblib.py: A Trackback (client) implementation in Python
"""
__author__ = "Matt Croydon <matt@ooiio.com>"
__copyright__ = "Copyright 2003, Matt Croydon"
__license__ = "GPL"
__version__ = "0.1.0"
__history__ = """
0.1.0: 1/29/03 - Code cleanup, release. It can send pings, and autodiscover a URL to ping.
0.0.9: 1/29/03 - Basic error handling and autodiscovery works!
0.0.5: 1/29/03 - Internal development version. Working on autodiscovery and error handling.
0.0.4: 1/22/03 - First public release, code cleanup.
0.0.3: 1/22/03 - Removed hard coding that was used for testing.
0.0.2: 1/21/03 - First working version.
0.0.1: 1/21/03 - Initial version. Thanks to Mark Pilgrim for helping me figure some module basics out.
"""
import httplib, urllib, urlparse, re
from google.appengine.api import urlfetch
import logging
"""Everything I needed to know about trackback I learned from the trackback tech specs page
http://www.movabletype.org/docs/mttrackback.html. All arguments are optional. This allows us to create an empty TrackBack object,
then use autodiscovery to populate its attributes.
"""
class TrackBack:
    """Minimal TrackBack client (see http://www.movabletype.org/docs/mttrackback.html).

    All constructor arguments are optional so an empty TrackBack object can
    be created and then populated via autodiscover().
    """

    def __init__(self, tbUrl=None, title=None, excerpt=None, url=None, blog_name=None):
        self.tbUrl = tbUrl              # trackback ping endpoint
        self.title = title              # title of the entry being announced
        self.excerpt = excerpt          # short excerpt of the entry
        self.url = url                  # permalink of the entry
        self.blog_name = blog_name      # name of the announcing blog
        self.tbErrorCode = None         # <error> value from the last ping, if any
        self.tbErrorMessage = None      # <message> value when the ping failed

    def ping(self):
        """POST the trackback ping to self.tbUrl.

        Returns 1 when the response contains no <error> element, otherwise
        None; error details are stored in tbErrorCode / tbErrorMessage and
        the raw HTTP result in httpResponse / tbResponse. Does nothing when
        no trackback URL has been set.
        """
        # Only execute if a trackback url has been defined.
        if self.tbUrl:
            # Create parameters and make them play nice with HTTP.
            params = urllib.urlencode({'title': self.title, 'url': self.url,
                                       'excerpt': self.excerpt, 'blog_name': self.blog_name})
            headers = {"Content-type": "application/x-www-form-urlencoded",
                       "User-Agent": "micolog"}
            logging.info("ping...%s", params)
            response = urlfetch.fetch(self.tbUrl, method=urlfetch.POST,
                                      payload=params, headers=headers)
            self.httpResponse = response.status_code
            data = response.content
            self.tbResponse = data
            logging.info("ping...%s" % data)
            # The response is a tiny XML document; regexes are enough to
            # pull out the <error> / <message> elements.
            errorpattern = r'<error>(.*?)</error>'
            reg = re.search(errorpattern, self.tbResponse)
            if reg:
                self.tbErrorCode = reg.group(1)
                if int(self.tbErrorCode) == 1:
                    errorpattern2 = r'<message>(.*?)</message>'
                    reg2 = re.search(errorpattern2, self.tbResponse)
                    if reg2:
                        self.tbErrorMessage = reg2.group(1)
            else:
                return 1

    def autodiscover(self, urlToCheck):
        """Fetch urlToCheck and store any advertised trackback:ping URL in self.tbUrl."""
        response = urlfetch.fetch(urlToCheck)
        # BUG FIX: urlfetch results expose .content (as ping() already uses);
        # the original called response.read(), which raised AttributeError.
        data = response.content
        tbpattern = r'trackback:ping="(.*?)"'
        reg = re.search(tbpattern, data)
        if reg:
            self.tbUrl = reg.group(1)
# -*- coding: utf-8 -*-
"""
A Python HTML filtering library - html_filter.py, v 1.15.4
Translated to Python by Samuel Adam <samuel.adam@gmail.com>
http://amisphere.com/contrib/python-html-filter/
Original PHP code ( lib_filter.php, v 1.15 ) by Cal Henderson <cal@iamcal.com>
http://iamcal.com/publish/articles/php/processing_html/
http://iamcal.com/publish/articles/php/processing_html_part_2/
This code is licensed under a Creative Commons Attribution-ShareAlike 2.5 License
http://creativecommons.org/licenses/by-sa/2.5/
"""
import re
from cgi import escape
from HTMLParser import HTMLParser
class html_filter:
    """
    html_filter removes HTML tags that do not belong to a white list
    closes open tags and fixes broken ones
    removes javascript injections and black listed URLs
    makes text URLs and emails clickable
    adds rel="no-follow" to links except for white list

    default settings are based on Flickr's "Some HTML is OK"
    http://www.flickr.com/html.gne

    HOWTO

    1. Basic example

    from html_filter import html_filter
    filter = html_filter()

    #change settings to meet your needs
    filter.strip_comments = False
    filter.allowed['br'] = ()
    filter.no_close += 'br',

    raw_html = '<p><strong><br><!-- Text to filter !!!<div></p>'

    # go() is a shortcut to apply the most common methods
    filtered_html = filter.go(raw_html)
    # returns <strong><br /><!-- Text to filter !!!</strong>

    2. You can only use one method at a time if you like

    from html_filter import html_filter
    filter = html_filter()

    please_dont_scream_this_is_a_pop_contest = filter.fix_case('HARD ROCK ALELUYAH!!!')
    # returns Hard rock aleluyah!!!

    filter.break_words_longer_than = 30
    wordwrap_text = filter.break_words('MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM...')
    # adds html entity "&#8203;" (zero width space) each 30 characters
    """
    # NOTE(review): several "&lt;"/"&gt;"/"&amp;"/"&quot;"/"&#8203;" string
    # literals below had been entity-decoded into no-ops by whatever produced
    # this copy of the file; they are restored here to the original
    # lib_filter semantics (see Cal Henderson's "Processing HTML" articles).

    def __init__(self):
        ### START Default Config ###
        # tags and attributes that are allowed
        self.allowed = {
            'a': ('href', 'target'),
            'b': (),
            'blockquote': (),
            'em': (),
            'i': (),
            'img': ('src', 'width', 'height', 'alt', 'title'),
            'strong': (),
            'u': (),
        }
        # tags which should always be self-closing (e.g. "<img />")
        self.no_close = (
            'img',
        )
        # tags which must always have seperate opening and closing tags (e.g. "<b></b>")
        self.always_close = (
            'a',
            'b',
            'blockquote',
            'em',
            'i',
            'strong',
            'u',
        )
        # tags which should be removed if they contain no content (e.g. "<b></b>" or "<b />")
        self.remove_blanks = (
            'a',
            'b',
            'blockquote',
            'em',
            'i',
            'strong',
            'u',
        )
        # attributes which should be checked for valid protocols
        self.protocol_attributes = (
            'src',
            'href',
        )
        # protocols which are allowed
        self.allowed_protocols = (
            'http',
            'https',
            'ftp',
            'mailto',
        )
        # forbidden urls ( regular expressions ) are replaced by #
        self.forbidden_urls = (
            r'^/delete-account',
            r'^domain.ext/delete-account',
        )
        # should we make urls clickable ?
        self.make_clickable_urls = True
        # should we add a rel="nofollow" to the links ?
        self.add_no_follow = True
        # except for those domains
        self.follow_for = (
            'allowed-domain.ext',
        )
        # should we remove comments?
        self.strip_comments = True
        # should we removes blanks from beginning and end of data ?
        self.strip_data = True
        # should we try and make a b tag out of "b>"
        self.always_make_tags = False
        # entity control options
        self.allow_numbered_entities = True
        self.allowed_entities = (
            'amp',
            'gt',
            'lt',
            'quot',
        )
        # should we "break" words longer than x chars ( 0 means "No", minimum is 8 chars )
        self.break_words_longer_than = 0
        ### END Default Config ###

        # INIT: open-tag counters used while balancing tags
        self.tag_counts = {}

        # pre-compile some regexp patterns
        self.pat_entities = re.compile(r'&([^&;]*)(?=(;|&|$))')
        self.pat_quotes = re.compile(r'(>|^)([^<]+?)(<|$)', re.DOTALL|re.IGNORECASE)
        self.pat_valid_entity = re.compile(r'^#([0-9]+)$', re.IGNORECASE)
        self.pat_decode_entities_dec = re.compile(r'(&)#(\d+);?')
        self.pat_decode_entities_hex = re.compile(r'(&)#x([0-9a-f]+);?', re.IGNORECASE)
        self.pat_decode_entities_hex2 = re.compile(r'(%)([0-9a-f]{2});?', re.IGNORECASE)
        self.pat_entities2 = re.compile(r'&([^&;]*);?', re.IGNORECASE)
        self.pat_raw_url = re.compile('(('+'|'.join(self.allowed_protocols)+')://)(([a-z0-9](?:[a-z0-9\\-]*[a-z0-9])?\\.)+(com\\b|edu\\b|biz\\b|gov\\b|in(?:t|fo)\\b|mil\\b|net\\b|org\\b|[a-z][a-z]\\b)|((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])))(:\\d+)?(/[-a-z0-9_:\\\\@&?=+,\\.!/~*\'%\\$]*)*(?<![.,?!])(?!((?!(?:<a )).)*?(?:</a>))(?!((?!(?:<!--)).)*?(?:-->))', re.IGNORECASE)

    #
    def go(self, data):
        """Apply the most common filtering steps in order and return the result."""
        data = self.strip_whitespace(data)
        data = self.escape_comments(data)
        data = self.balance_html(data)
        data = self.clickable_urls(data)
        data = self.check_tags(data)
        data = self.process_remove_blanks(data)
        data = self.validate_entities(data)
        data = self.break_words(data)
        return data

    #
    def strip_whitespace(self, data):
        """Trim leading/trailing blanks when strip_data is set."""
        if self.strip_data:
            data = data.strip()
        return data

    #
    def escape_comments(self, data):
        """HTML-escape the inside of <!-- ... --> comments."""
        pat = re.compile(r'<!--(.*?)-->', re.IGNORECASE)
        data = re.sub(pat, self.f0, data)
        return data

    def f0(self, m):
        return '<!--'+escape(m.group(1), True)+'-->'

    #
    def balance_html(self, data):
        """Escape stray angle brackets and patch up malformed tags."""
        if self.always_make_tags:
            # try and form html
            data = re.sub(r'>>+', r'>', data)
            data = re.sub(r'<<+', r'<', data)
            data = re.sub(r'^>', r'', data)
            data = re.sub(r'<([^>]*?)(?=<|$)', r'<\1>', data)
            data = re.sub(r'(^|>)([^<]*?)(?=>)', r'\1<\2', data)
        else:
            # BUG FIX: the "&lt;"/"&gt;" replacement strings below had been
            # entity-decoded into identity substitutions (no-ops); restored.
            data = data.replace('<>', '&lt;&gt;')  # <> as text
            # a "<" with no closing ">" before the next tag becomes text
            data = self.re_sub_overlap(r'<([^>]*?)(?=<|$)', r'&lt;\1', data)
            # a ">" with no opening "<" becomes text (the spurious "&lt;>"
            # introduced here is cleaned up below)
            data = self.re_sub_overlap(r'(^|>)([^<]*?)(?=>)', r'\1\2&gt;&lt;', data)
            data = re.sub(r'<(\s)+?', r'&lt;\1', data)  # consider "< a href" as "&lt; a href"
            # this filter introduces an error, so we correct it
            data = data.replace('&lt;>', '')
        return data

    # python re.sub() doesn't overlap matches
    def re_sub_overlap(self, pat, repl, data, i=0):
        data_temp = re.sub(pat, repl, data[i:])
        if data_temp != data[i:]:
            data = data[:i] + data_temp
            i += 1
            data = self.re_sub_overlap(pat, repl, data, i)
        return data

    #
    def clickable_urls(self, data):
        """Wrap bare URLs and email addresses in <a> tags."""
        if self.make_clickable_urls:
            # urls (pattern precompiled as self.pat_raw_url in __init__)
            data = re.sub(self.pat_raw_url, self.f7, data)
            # emails
            if 'mailto' in self.allowed_protocols:
                pat = re.compile(r'((([a-z]|[0-9]|!|#|$|%|&|\'|\*|\+|\-|/|=|\?|\^|_|`|\{|\||\}|~)+(\.([a-z]|[0-9]|!|#|$|%|&|\'|\*|\+|\-|/|=|\?|\^|_|`|\{|\||\}|~)+)*)@((((([a-z]|[0-9])([a-z]|[0-9]|\-){0,61}([a-z]|[0-9])\.))*([a-z]|[0-9])([a-z]|[0-9]|\-){0,61}([a-z]|[0-9])\.(com|edu|gov|int|mil|net|org|biz|info|name|pro|aero|coop|museum|arpa|[a-z]{2}))|(((([0-9]){1,3}\.){3}([0-9]){1,3}))|(\[((([0-9]){1,3}\.){3}([0-9]){1,3})\])))(?!((?!(?:<a )).)*?(?:</a>))(?!((?!(?:<!--)).)*?(?:-->))', re.IGNORECASE)
                data = re.sub(pat, self.f8, data)
        return data

    def f7(self, m):
        return '<a href="'+m.group(0)+'">'+m.group(0)+'</a>'

    def f8(self, m):
        return '<a href="mailto:'+m.group(0)+'">'+m.group(0)+'</a>'

    #
    def check_tags(self, data):
        """Filter every tag through process_tag(), then close anything left open."""
        # compile loop regexps
        self.pat_end_tag = re.compile(r'^/([a-z0-9]+)', re.DOTALL|re.IGNORECASE)
        self.pat_start_tag = re.compile(r'^([a-z0-9]+)(.*?)(/?)$', re.DOTALL|re.IGNORECASE)
        self.pat_matches_2 = re.compile(r'([a-z0-9]+)=(["\'])(.*?)\2', re.DOTALL|re.IGNORECASE)  # <foo a="b" />
        self.pat_matches_1 = re.compile(r'([a-z0-9]+)(=)([^"\s\']+)', re.DOTALL|re.IGNORECASE)  # <foo a=b />
        self.pat_matches_3 = re.compile(r'([a-z0-9]+)=(["\'])([^"\']*?)\s*$', re.DOTALL|re.IGNORECASE)  # <foo a="b />
        self.pat_comments = re.compile(r'^!--(.*)--$', re.DOTALL|re.IGNORECASE)
        self.pat_param_protocol = re.compile(r'^([^:]+):', re.DOTALL|re.IGNORECASE)

        pat = re.compile(r'<(.*?)>', re.DOTALL)
        data = re.sub(pat, self.f1, data)
        # append closing tags for everything still counted as open
        for tag in self.tag_counts:
            count = self.tag_counts[tag]
            for i in range(count):
                data += '</'+tag+'>'
        self.tag_counts = {}
        return data

    def f1(self, m):
        return self.process_tag(m.group(1))

    #
    def process_tag(self, data):
        """Return the cleaned replacement for one tag body (the text between < and >)."""
        # ending tags
        m = re.match(self.pat_end_tag, data)
        if m:
            name = m.group(1).lower()
            if name in self.allowed:
                if name not in self.no_close:
                    # only emit a closing tag when one is actually open
                    # (ported Python 2 has_key() to "in" — same semantics)
                    if name in self.tag_counts:
                        self.tag_counts[name] -= 1
                        return '</' + name + '>'
            else:
                return ''
        # starting tags
        m = re.match(self.pat_start_tag, data)
        if m:
            name = m.group(1).lower()
            body = m.group(2)
            ending = m.group(3)
            if name in self.allowed:
                params = ''
                matches_2 = re.findall(self.pat_matches_2, body)  # <foo a="b" />
                matches_1 = re.findall(self.pat_matches_1, body)  # <foo a=b />
                matches_3 = re.findall(self.pat_matches_3, body)  # <foo a="b />
                # later passes win: well-quoted values override broken ones
                matches = {}
                for match in matches_3:
                    matches[match[0].lower()] = match[2]
                for match in matches_1:
                    matches[match[0].lower()] = match[2]
                for match in matches_2:
                    matches[match[0].lower()] = match[2]
                for pname in matches:
                    if pname in self.allowed[name]:
                        value = matches[pname]
                        if pname in self.protocol_attributes:
                            processed_value = self.process_param_protocol(value)
                            # add no_follow
                            if self.add_no_follow and name == 'a' and pname == 'href' and processed_value == value:
                                processed_value = re.sub(self.pat_raw_url, self.f9, processed_value)
                            value = processed_value
                        params += ' '+pname+'="'+value+'"'
                if name in self.no_close:
                    ending = ' /'
                if name in self.always_close:
                    ending = ''
                if not ending:
                    if name in self.tag_counts:
                        self.tag_counts[name] += 1
                    else:
                        self.tag_counts[name] = 1
                if ending:
                    ending = ' /'
                return '<'+name+params+ending+'>'
            else:
                return ''
        # comments
        m = re.match(self.pat_comments, data)
        if m:
            if self.strip_comments:
                return ''
            else:
                return '<'+data+'>'
        # garbage, ignore it
        return ''

    def f9(self, m):
        # only add rel="no-follow" for domains outside the follow white list
        if m.group(3) not in self.follow_for:
            return m.group()+'" rel="no-follow'
        return m.group()

    #
    def process_param_protocol(self, data):
        """Whitelist the protocol of a src/href value and blacklist forbidden URLs."""
        data = self.decode_entities(data)
        m = re.match(self.pat_param_protocol, data)
        if m:
            if not m.group(1) in self.allowed_protocols:
                # neutralise disallowed protocols (javascript:, data:, ...)
                start = len(m.group(1)) + 1
                data = '#' + data[start:]
        # remove forbidden urls
        for pat in self.forbidden_urls:
            m = re.search(pat, data)
            if m:
                data = '#'
        return data

    #
    def process_remove_blanks(self, data):
        """Drop empty occurrences of the remove_blanks tags."""
        for tag in self.remove_blanks:
            data = re.sub(r'<'+tag+r'(\s[^>]*)?></'+tag+r'>', r'', data)
            data = re.sub(r'<'+tag+r'(\s[^>]*)?/>', r'', data)
        return data

    #
    def strip_tags(self, html):
        """Return the text content of html with every tag removed."""
        result = []
        parser = HTMLParser()
        parser.handle_data = result.append
        parser.feed(html)
        parser.close()
        return ''.join(result)

    def fix_case(self, data):
        """Lower-case shouty all-caps input, re-capitalising sentence starts."""
        # compile loop regexps
        self.pat_case_inner = re.compile(r'(^|[^\w\s\';,\\-])(\s*)([a-z])')

        data_notags = self.strip_tags(data)
        data_notags = re.sub(r'[^a-zA-Z]', r'', data_notags)
        # leave short or already-mixed-case text alone
        if len(data_notags) < 5:
            return data
        m = re.search(r'[a-z]', data_notags)
        if m:
            return data
        pat = re.compile(r'(>|^)([^<]+?)(<|$)', re.DOTALL)
        data = re.sub(pat, self.f2, data)
        return data

    def f2(self, m):
        return m.group(1)+self.fix_case_inner(m.group(2))+m.group(3)

    def fix_case_inner(self, data):
        return re.sub(self.pat_case_inner, self.f3, data.lower())

    def f3(self, m):
        return m.group(1)+m.group(2)+m.group(3).upper()

    #
    def validate_entities(self, data):
        """Escape ampersands of invalid entities and quotes outside of tags."""
        # validate entities throughout the string
        data = re.sub(self.pat_entities, self.f4, data)
        # validate quotes outside of tags
        data = re.sub(self.pat_quotes, self.f5, data)
        return data

    def f4(self, m):
        return self.check_entity(m.group(1), m.group(2))

    def f5(self, m):
        # BUG FIX: the replacement had been decoded to replace('"', '"'),
        # a no-op; quotes outside tags are meant to become &quot;
        return m.group(1)+m.group(2).replace('"', '&quot;')+m.group(3)

    #
    def check_entity(self, preamble, term):
        """Return the entity with "&" kept for valid entities, escaped otherwise."""
        # BUG FIX: the escaped "&amp;" literals had been entity-decoded back
        # to "&", which made all three branches identical and left
        # is_valid_entity() as dead code; restored.
        if term != ';':
            return '&amp;'+preamble
        if self.is_valid_entity(preamble):
            return '&'+preamble
        return '&amp;'+preamble

    def is_valid_entity(self, entity):
        m = re.match(self.pat_valid_entity, entity)
        if m:
            # numbered entities above ASCII are always kept; ASCII ones obey
            # the allow_numbered_entities switch
            if int(m.group(1)) > 127:
                return True
            return self.allow_numbered_entities
        if entity in self.allowed_entities:
            return True
        return False

    #
    # within attributes, we want to convert all hex/dec/url escape sequences into
    # their raw characters so that we can check we don't get stray quotes/brackets
    # inside strings
    def decode_entities(self, data):
        data = re.sub(self.pat_decode_entities_dec, self.decode_dec_entity, data)
        data = re.sub(self.pat_decode_entities_hex, self.decode_hex_entity, data)
        data = re.sub(self.pat_decode_entities_hex2, self.decode_hex_entity, data)
        data = self.validate_entities(data)
        return data

    def decode_hex_entity(self, m):
        return self.decode_num_entity(m.group(1), int(m.group(2), 16))

    def decode_dec_entity(self, m):
        return self.decode_num_entity(m.group(1), int(m.group(2)))

    def decode_num_entity(self, orig_type, d):
        if d < 0:
            d = 32  # space
        if d > 127:
            # keep non-ASCII code points encoded in their original style
            if orig_type == '%':
                return '%' + hex(d)[2:]
            if orig_type == '&':
                return '&#'+str(d)+';'
        return escape(chr(d))

    #
    def break_words(self, data):
        """Word-wrap helper: split over-long words in the text between tags."""
        if self.break_words_longer_than > 0:
            pat = re.compile(r'(>|^)([\s]*)([^<]+?)([\s]*)(<|$)', re.DOTALL)
            data = re.sub(pat, self.f6, data)
        return data

    def f6(self, m):
        return m.group(1)+m.group(2)+self.break_text(m.group(3))+m.group(4)+m.group(5)

    def break_text(self, text):
        ret = ''
        entity_max_length = 8
        # never break inside an entity, so the effective width is at least
        # the longest entity we may encounter
        if self.break_words_longer_than < entity_max_length:
            width = entity_max_length
        else:
            width = self.break_words_longer_than
        for word in text.split(' '):
            if len(word) > width:
                # BUG FIX: the "&#8203;" literals had been decoded to raw
                # zero-width-space characters; restored the html entity form
                # documented in the class docstring.
                word = word.replace('&#8203;', '')
                m = re.search(self.pat_entities2, word[width-entity_max_length:width+entity_max_length])
                if m:
                    width = width - entity_max_length + m.end()
                ret += word[0:width] + '&#8203;' + self.break_text(word[width:])  # insert "Zero Width" Space - helps wordwrap
            else:
                ret += word + ' '
        return ret.strip()
| Python |
from time import *
from calendar import timegm

# fix for mktime bug
# https://garage.maemo.org/tracker/index.php?func=detail&aid=4453&group_id=854&atid=3201
# BUG FIX: the original lambda referenced calendar.timegm, but only the bare
# name `timegm` was imported (`calendar` itself never was), so every call
# raised NameError. Use the imported name directly.
def mktime(time_tuple):
    """Drop-in replacement for time.mktime on platforms where it is broken.

    Treats the struct_time/9-tuple as local time by taking its UTC epoch
    value (calendar.timegm) and adding the local non-DST offset
    (time.timezone). NOTE(review): ignores DST, like the original shim.
    """
    return timegm(time_tuple) + timezone
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.