"""Tests for the :mod:`campy.util.timer` module."""
|
{
"content_hash": "eb66a42b5fc82cf04f7f901def0bdff6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 51,
"avg_line_length": 52,
"alnum_prop": 0.6538461538461539,
"repo_name": "sredmond/acmpy",
"id": "b8378c96715873de3c6a62d32fa1d13a91fe8d38",
"size": "52",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/util/test_timer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "296890"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, unicode_literals, division
import os
import argparse
import sc2reader
from sc2reader import utils
from sc2reader.exceptions import ReadError
def printReplay(filepath, arguments):
""" Prints summary information about SC2 replay file """
try:
replay = sc2reader.load_replay(filepath, debug=True)
if arguments.map:
print(" Map: {0}".format(replay.map_name))
if arguments.length:
print(" Length: {0} minutes".format(replay.game_length))
if arguments.date:
print(" Date: {0}".format(replay.start_time))
if arguments.teams:
lineups = [team.lineup for team in replay.teams]
print(" Teams: {0}".format("v".join(lineups)))
for team in replay.teams:
print(" Team {0}\t{1} ({2})".format(team.number, team.players[0].name, team.players[0].pick_race[0]))
for player in team.players[1:]:
print(" \t{0} ({1})".format(player.name, player.pick_race[0]))
if arguments.messages:
print(" Messages:")
for message in replay.messages:
print(" {0}".format(message))
if arguments.version:
print(" Version: {0}".format(replay.release_string))
        print()
    except ReadError as e:
        prev = e.game_events[-1]
        print("\nVersion {0} replay:\n\t{1}".format(e.replay.release_string, e.replay.filepath))
        print("\t{0}, Type={1:X}".format(e.msg, e.type))
        print("\tPrevious Event: {0}".format(prev.name))
        print("\t\t" + prev.bytes.encode('hex'))
        print("\tFollowing Bytes:")
        print("\t\t" + e.buffer.read_range(e.location, e.location + 30).encode('hex'))
        print("Error with '{0}': ".format(filepath))
        print(e)
        return
except Exception as e:
print("Error with '{0}': ".format(filepath))
print(e)
raise
def printGameSummary(filepath, arguments):
summary = sc2reader.load_game_summary(filepath)
if arguments.map:
print(" Map: {0}".format(summary.map_name))
if arguments.length:
print(" Length: {0} minutes".format(summary.game_length))
if arguments.date:
print(" Date: {0}".format(summary.start_time))
if arguments.teams:
lineups = [team.lineup for team in summary.teams]
print(" Teams: {0}".format("v".join(lineups)))
for team in summary.teams:
print(" Team {0}\t{1}".format(team.number, team.players[0]))
for player in team.players[1:]:
print(" \t{0}".format(player))
if arguments.builds:
for player in summary.players:
print("\n== {0} ==\n".format(player))
for order in summary.build_orders[player.pid]:
msg = " {0:0>2}:{1:0>2} {2:<35} {3:0>2}/{4}"
                print(msg.format(order.time // 60, order.time % 60, order.order, order.supply, order.total_supply))
print("")
def main():
parser = argparse.ArgumentParser(
description="""Prints basic information from Starcraft II replay and
game summary files or directories.""")
parser.add_argument('--recursive', action="store_true", default=True,
help="Recursively read through directories of Starcraft II files [default on]")
required = parser.add_argument_group('Required Arguments')
required.add_argument('paths', metavar='filename', type=str, nargs='+',
help="Paths to one or more Starcraft II files or directories")
shared_args = parser.add_argument_group('Shared Arguments')
    shared_args.add_argument('--date', action="store_true", default=True,
        help="print game date [default on]")
    shared_args.add_argument('--length', action="store_true", default=False,
        help="print game duration mm:ss in game time (not real time) [default off]")
    shared_args.add_argument('--map', action="store_true", default=True,
        help="print map name [default on]")
    shared_args.add_argument('--teams', action="store_true", default=True,
        help="print teams, their players, and the race matchup [default on]")
    replay_args = parser.add_argument_group('Replay Options')
    replay_args.add_argument('--messages', action="store_true", default=False,
        help="print in-game player chat messages [default off]")
    replay_args.add_argument('--version', action="store_true", default=True,
        help="print the release string as seen in game [default on]")
    s2gs_args = parser.add_argument_group('Game Summary Options')
    s2gs_args.add_argument('--builds', action="store_true", default=False,
        help="print player build orders (first 64 items) [default off]")
arguments = parser.parse_args()
for path in arguments.paths:
depth = -1 if arguments.recursive else 0
for filepath in utils.get_files(path, depth=depth):
name, ext = os.path.splitext(filepath)
if ext.lower() == '.sc2replay':
print("\n--------------------------------------\n{0}\n".format(filepath))
printReplay(filepath, arguments)
elif ext.lower() == '.s2gs':
print("\n--------------------------------------\n{0}\n".format(filepath))
printGameSummary(filepath, arguments)
if __name__ == '__main__':
main()
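# A minimal usage sketch: the script walks the given paths and dispatches on
# file extension, e.g. (the replay path below is a placeholder):
#
#   python sc2printer.py --length --messages ~/replays/example.SC2Replay
#
# Equivalently, printReplay can be driven programmatically with a Namespace
# that mirrors the flags defined in main():
#
#   import argparse
#   args = argparse.Namespace(map=True, length=True, date=True, teams=True,
#                             messages=False, version=True)
#   printReplay('example.SC2Replay', args)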
|
{
"content_hash": "80eb460f625162770b58035007745617",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 122,
"avg_line_length": 44.225806451612904,
"alnum_prop": 0.5931801604668125,
"repo_name": "BlackVegetable/starcraft-oracle",
"id": "52b39c1ed462dc700402df567b9606d003f1b363",
"size": "5530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sc2reader-master/sc2reader/scripts/sc2printer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8381"
},
{
"name": "Python",
"bytes": "361265"
},
{
"name": "Shell",
"bytes": "4521"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import logout, login
from django.contrib.auth.forms import UserCreationForm
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.views.generic import View
from django.db.models import Count
from courses.models import Course, Lesson
from courses.services import courses
from courses.services import lessons
from courses.services.lessons import get_user_progress_percent
from giscademy.utils.view_utils import ProtectedView
class IndexView(View):
form_class = UserCreationForm
template_name = 'index.html'
def get(self, request, *args, **kwargs):
if request.user.is_authenticated:
# No need to show the index page, go straight to learn dashboard
return HttpResponseRedirect(reverse('learn'))
form = self.form_class()
return render(request, self.template_name, {'form': form})
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save()
# Log the user in for convenience.
login(request, user)
return HttpResponseRedirect(reverse('learn'))
return render(request, self.template_name, {'form': form})
def logout_view(request):
logout(request)
return HttpResponseRedirect('/')
class RegistrationView(View):
form_class = UserCreationForm
template_name = 'register.html'
def get(self, request, *args, **kwargs):
form = self.form_class()
return render(request, self.template_name, {'form': form})
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save()
# Log the user in for convenience.
login(request, user)
return HttpResponseRedirect(reverse('learn'))
return render(request, self.template_name, {'form': form})
class LearnView(ProtectedView):
template_name = 'learn/learn.html'
def get(self, request):
user = request.user
user_courses = Course.objects.filter(enrollment__user=user)
for course in user_courses:
course.user_progress = courses.get_user_progress_percent(request.user, course)
return render(request, self.template_name, {'courses': user_courses})
class CourseDetailView(ProtectedView):
template_name = 'learn/course_detail.html'
def get(self, request, slug):
course = get_object_or_404(Course, slug=slug)
all_lessons = course.lessons.all().order_by('order').annotate(num_exercises=Count('exercise'))
for lesson in all_lessons:
lesson.user_progress = lessons.get_user_progress_percent(request.user, lesson)
return render(request, self.template_name, {'course': course, 'lessons': all_lessons})
class CatalogView(ProtectedView):
template_name = 'catalog.html'
valid_params = ['easy', 'medium', 'expert']
def get(self, request):
courses = Course.objects.all()
difficulty_filter = request.GET.get('courses')
if difficulty_filter and difficulty_filter in self.valid_params:
            courses = courses.filter(difficulty=difficulty_filter)
# Annotate the courses where the user is enrolled
user_courses = request.user.enrollment_set.all().values_list('course_id', flat=True)
for course in courses:
if course.id in user_courses:
course.user_is_enrolled = True
return render(request, self.template_name, {'courses': courses})
def post(self, request):
course_id = request.POST.get('course_id')
try:
course = Course.objects.get(id=int(course_id))
course.enroll(request.user)
return HttpResponseRedirect(reverse('learn'))
except Course.DoesNotExist:
pass
return HttpResponseRedirect(reverse('catalog'))
class SandboxView(ProtectedView):
template_name = 'sandbox.html'
def get(self, request):
return render(request, self.template_name)
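# The views above assume ProtectedView (imported from
# giscademy.utils.view_utils, not shown here) behaves like a login-required
# View. A minimal sketch of such a base class, under that assumption:
#
#   from django.http import HttpResponseRedirect
#   from django.views.generic import View
#
#   class ProtectedView(View):
#       def dispatch(self, request, *args, **kwargs):
#           if not request.user.is_authenticated:
#               return HttpResponseRedirect('/')
#           return super(ProtectedView, self).dispatch(request, *args, **kwargs)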
|
{
"content_hash": "5f8cd4a9735fb0d49809ff7da050d066",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 102,
"avg_line_length": 35.17796610169491,
"alnum_prop": 0.6646591182847507,
"repo_name": "RubenSchmidt/giscademy",
"id": "f86edbbfbc9a94130cbff0d3c01ccc0922c785c4",
"size": "4151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "base/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23390"
},
{
"name": "HTML",
"bytes": "43665"
},
{
"name": "JavaScript",
"bytes": "332861"
},
{
"name": "Python",
"bytes": "63323"
}
],
"symlink_target": ""
}
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
INTERNAL_IPS = ('127.0.0.1',
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# By default urllib, urllib2, and the like have no timeout, which can cause
# some Apache processes to hang until they are forcibly killed.
# Before Python 2.6, the only way to make them time out was to set
# the default timeout on the global socket.
import socket
socket.setdefaulttimeout(5)
import os
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'media/')
STATIC_ROOT = os.path.join(PROJECT_PATH, 'static/')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_PATH, 'common_static'),
)
GENSHI_TEMPLATE_DIR = os.path.join(PROJECT_PATH, "templates", "genshi")
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.transaction.TransactionMiddleware',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_PATH, "templates"),
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'debug_toolbar',
'djangohelpers',
'actionkit',
'main',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.request",
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
AKCODE_GIT_REPO = None
# import local settings overriding the defaults
try:
from local_settings import *
except ImportError:
try:
from mod_python import apache
apache.log_error( "local settings not available", apache.APLOG_NOTICE )
except ImportError:
import sys
sys.stderr.write( "local settings not available\n" )
else:
try:
INSTALLED_APPS += LOCAL_INSTALLED_APPS
except NameError:
pass
try:
MIDDLEWARE_CLASSES += LOCAL_MIDDLEWARE_CLASSES
except NameError:
pass
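# A minimal sketch of a local_settings.py that the import block above would
# pick up; all values below are placeholders, not project defaults:
#
#   DEBUG = False
#   LOCAL_INSTALLED_APPS = ('gunicorn',)
#   LOCAL_MIDDLEWARE_CLASSES = ()
#   AKCODE_GIT_REPO = '/var/repos/akcode.git'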
|
{
"content_hash": "8aab8a544020e904f064369dc1ed941a",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 79,
"avg_line_length": 28.78481012658228,
"alnum_prop": 0.693051890941073,
"repo_name": "boldprogressives/akcode",
"id": "df5b8c6cc10f5dbf25b00d87ac75749fcbfa0014",
"size": "4549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "761"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "DOT",
"bytes": "5910"
},
{
"name": "Dart",
"bytes": "986"
},
{
"name": "Delphi",
"bytes": "1412"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "Java",
"bytes": "396"
},
{
"name": "JavaScript",
"bytes": "16067899"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "959"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "PHP",
"bytes": "26207"
},
{
"name": "Perl",
"bytes": "678"
},
{
"name": "PowerShell",
"bytes": "418"
},
{
"name": "Python",
"bytes": "43577"
},
{
"name": "R",
"bytes": "668"
},
{
"name": "Ruby",
"bytes": "531"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "1238"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "Visual Basic",
"bytes": "916"
},
{
"name": "XQuery",
"bytes": "114"
}
],
"symlink_target": ""
}
|
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append('..')
sys.path.append('../..')
sys.path.append('../../..')
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fteproxy'
copyright = u'2013, Kevin P. Dyer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
with open('../../fte/VERSION') as fh:
FTEPROXY_RELEASE = fh.read().strip()
version = FTEPROXY_RELEASE
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Format-TransformingEncryptionFTEdoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Format-TransformingEncryptionfteproxy.tex', u'fteproxy Documentation',
u'Kevin P. Dyer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'format-transformingencryptionfte', u'fteproxy Documentation',
[u'Kevin P. Dyer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Format-TransformingEncryptionFTE', u'fteproxy Documentation',
u'Kevin P. Dyer', 'Format-TransformingEncryptionFTE', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
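# With this configuration living at doc/source/conf.py, the HTML docs are
# built with the standard Sphinx invocation (the output directory below is an
# arbitrary choice):
#
#   sphinx-build -b html doc/source doc/build/html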
|
{
"content_hash": "5a5201418eccee4e37dc34f079123cd3",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 94,
"avg_line_length": 32.84873949579832,
"alnum_prop": 0.7031210028140189,
"repo_name": "kpdyer/libfte",
"id": "5e507b09b4b8fbf03775fa662a0f9e6f8fab4993",
"size": "8265",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "23581"
},
{
"name": "Makefile",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "29192"
}
],
"symlink_target": ""
}
|
'''
Build the pipeline workflow by plumbing the stages together.
'''
from ruffus import Pipeline, output_from, regex, suffix
from stages import Stages
def make_pipeline(state):
'''Build the pipeline by constructing stages and connecting them together'''
# Build an empty pipeline
pipeline = Pipeline(name='md5')
# Get a list of paths to all the input files
input_files = state.config.get_option('files')
# Stages are dependent on the state
stages = Stages(state)
    # The original input files
# This is a dummy stage. It is useful because it makes a node in the
# pipeline graph, and gives the pipeline an obvious starting point.
pipeline.originate(
task_func=stages.original_files,
name='original_files',
output=input_files)
    # Compute an MD5 checksum for each input file
pipeline.transform(
task_func=stages.md5_checksum,
name='md5_checksum',
input=output_from('original_files'),
filter=suffix(''),
output='.md5')
return pipeline
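# A minimal sketch of the two stage functions this pipeline assumes on the
# imported Stages class; the real implementations live in stages.py and are
# not shown here:
#
#   class Stages(object):
#       def __init__(self, state):
#           self.state = state
#
#       def original_files(self, output):
#           # Dummy stage: the input files already exist on disk.
#           pass
#
#       def md5_checksum(self, input_file, output_file):
#           # e.g. shell out to: md5sum {input_file} > {output_file}
#           pass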
|
{
"content_hash": "5654a463749bf33d71ee1afbc2f73bcd",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 30.885714285714286,
"alnum_prop": 0.6799259944495837,
"repo_name": "bjpop/md5_pipeline",
"id": "6d97e1ec2d0ba760bcd5797e00198d21c67edfc1",
"size": "1081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pipeline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14062"
}
],
"symlink_target": ""
}
|
import lxml.html
import lxml.etree
import models
import urllib2
import json
import httplib
import unicodedata
"""
Abstract base class for fetchers
"""
class Fetcher(object):
"""
Base URL for fetching Artists' details
"""
__BASE_URL = ''
"""
Unique name for fetcher, used in models.Band.origin
"""
__NAME = ''
"""
Given an Artist name, searches it remotely for the genre.
It returns a list of models.Band objects or an Empty list.
The name is internally treated as case-insensitive.
This should be the only one method called by other objects
"""
def search(self, name):
raise NotImplementedError("Should have implemented this")
"""
Fetches one or more artists under the given name.
The name is internally treated as case-insensitive.
This method should always be used as helper for search(name) method
"""
def fetch(self, name):
raise NotImplementedError("Should have implemented this")
"""
Fecher for ProgArchives
"""
class ProgArchives(Fetcher):
__BASE_URL = 'http://www.progarchives.com/bands-alpha.asp?letter=*'
__NAME = 'ProgArchives'
"""
Given a band name, searches in fetched results
"""
def search(self, name):
genres = []
for band in self.fetch(name):
if band.name.lower() == name.lower():
genres.append(band.genre)
return genres
"""
Downloads __BASE_URL page and parses it, looking for band name and extracting the genre.
On HTTP error, tries again. On Unicode Errors, ignores.
"""
def fetch(self, name):
results = []
parsed_page = None
while not parsed_page:
try:
parsed_page = lxml.html.parse(ProgArchives.__BASE_URL)
except IOError:
print "HTTP Error. Trying again.."
except httplib.BadStatusLine:
print "HTTP connection error, retrying.."
pass
processed_table = parsed_page.xpath("//table")[0]
band_attributes = []
for row in processed_table[1:]:
band_attributes = []
try:
for col in row:
band_attributes.append(col.text_content().strip())
band = models.Band()
band.name = unicode(band_attributes[0])
band.genre = unicode(band_attributes[1])
band.origin = unicode(ProgArchives.__NAME)
results.append(band)
except UnicodeDecodeError, e:
print str(e)
except UnicodeEncodeError, e:
print str(e)
return results
class MetalArchives(Fetcher):
    """Fetcher for Metal Archives."""

    __BASE_URL = 'http://www.metal-archives.com/search/ajax-band-search/?field=name&exactBandMatch=1&query='
    __NAME = 'MetalArchives'

    def search(self, name):
        """Given a band name, searches in fetched results."""
        genres = []
        bands = self.fetch(name)
        for band in bands:
            genres.append(band.genre)
        return genres
"""
Downloads __BASE_URL JSON file and parses it. Retrieves one or more bands with the given name and discovers the
genre.
On HTTP error, tries again. On Unicode Errors, ignores.
"""
def fetch(self, artist):
results = []
try:
artist = urllib2.quote(artist)
except KeyError:
artist = urllib2.quote(unicodedata.normalize("NFKD",artist).encode('ascii','ignore'))
base_url = MetalArchives.__BASE_URL + artist + '&sEcho=1&iDisplayStart='
idisplaystart = 0
url = base_url + str(idisplaystart)
json_file = None
while not json_file:
try:
json_file = urllib2.urlopen(url)
except urllib2.URLError:
print "HTTP connection error, retrying.."
pass
except httplib.BadStatusLine:
print "HTTP connection error, retrying.."
pass
json_string = json.load(json_file)
#itotalrecords = int(json_string['iTotalRecords'])
bands_json = json_string[u'aaData']
for band_json in bands_json:
name = band_json[0]
genre = band_json[1]
tree = lxml.html.fromstring(name)
name_html = tree.xpath("//a")[0]
name = name_html.text_content()
band = models.Band()
try:
band.name = unicode(name)
band.genre = unicode(genre)
band.origin = unicode(MetalArchives.__NAME)
results.append(band)
except UnicodeDecodeError, e:
print str(e)
except UnicodeEncodeError, e:
print str(e)
return results
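# A minimal usage sketch (Python 2, matching this module); network access to
# the remote services is assumed:
#
#   fetcher = MetalArchives()
#   for genre in fetcher.search('Opeth'):
#       print genre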
|
{
"content_hash": "fdc4cdbd7a050dc4a92e79445e6e86b0",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 115,
"avg_line_length": 31.038709677419355,
"alnum_prop": 0.5743088754936604,
"repo_name": "dgraziotin/iProtal",
"id": "749127637de19d7882a6817911a429ebd5c81771",
"size": "4811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fetchers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13714"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import bibtexparser
import re
from datetime import datetime
import django
from django.utils import timezone
from django.utils.encoding import smart_text
django.setup()
from papers.models import Article, Profile
from django.contrib.auth.models import User
import papers.utils as utils
def remove_junk(x):
    return re.sub(r'[{}\\]', '', x)

def key2str(key, dic):
    """ Gets an entry from a dict and returns an empty string if the key does not exist. """
    if key in dic:
        return smart_text(remove_junk(dic[key]))
    else:
        return ''

def key2int(key, dic):
    """ Gets an integer entry from a dict and returns None if the key does not exist or on ValueError. """
    value = None
    if key in dic:
        try:
            value = int(dic[key])
        except ValueError:
            value = None
    return value
def import_bibtex(filename='library.bib'):
""" Reads a bibtex file and returns a list of Article instances """
with open(filename) as bibtex_file:
bibtex_str = bibtex_file.read()
bib_database = bibtexparser.loads(bibtex_str)
print("Entries read from BibTeX file %i"%len(bib_database.entries))
# packaging into django objects
data = []
for e in bib_database.entries:
title = key2str('title',e)
abstract = key2str('abstract',e)
if not key2int('year',e) or not abstract or not title: continue
pubdate = datetime(key2int('year',e),1,1)
keywords = key2str('keyword',e)
# truncate keywords
keywords = (keywords[:250]) if len(keywords) > 250 else keywords
art, created = utils.add_or_update_article(title=title,
authors=key2str('author',e),
pubdate=pubdate,
journal=key2str('journal',e),
abstract=abstract,
keywords=keywords,
url=key2str('link',e),
doi=key2str('doi',e),
pmid=key2int('pmid',e),
)
art.save()
data.append(art)
print("Entries added or updated in database %i"%len(data))
return data
if __name__ == "__main__":
# articles = import_bibtex('../goldret/ham.bib')
# utils.add_to_training_set( User.objects.all()[0], articles, 1 )
articles = import_bibtex('../../data/ham.bib')
profile,_ = Profile.objects.get_or_create(user=User.objects.get(username='zenke'))
utils.add_to_training_set( profile, articles, 1 )
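    # Note: django.setup() above requires DJANGO_SETTINGS_MODULE to be set
    # before this script runs, e.g. (the settings module name is an assumption):
    #
    #   DJANGO_SETTINGS_MODULE=morla.settings python scripts/test_import_ham.py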
|
{
"content_hash": "fc6e580a2e1b20282973632a4be5f449",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 112,
"avg_line_length": 31.174418604651162,
"alnum_prop": 0.6299888101454681,
"repo_name": "fzenke/morla",
"id": "cd1bb1f4dfb62c3b0da5aa48d8119a298d0f3982",
"size": "2700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/test_import_ham.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10456"
},
{
"name": "JavaScript",
"bytes": "1599"
},
{
"name": "Python",
"bytes": "60267"
},
{
"name": "Shell",
"bytes": "506"
}
],
"symlink_target": ""
}
|
"""Training helper that checkpoints models and creates session."""
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.util.tf_export import tf_export
def _maybe_name(obj):
"""Returns object name if it has one, or a message otherwise.
  This is useful for names that appear in error messages.
Args:
obj: Object to get the name of.
Returns:
name, "None", or a "no name" message.
"""
if obj is None:
return "None"
elif hasattr(obj, "name"):
return obj.name
else:
return "<no name for %s>" % type(obj)
def _restore_checkpoint_and_maybe_run_saved_model_initializers(
sess, saver, path):
"""Restores checkpoint values and SavedModel initializers if found."""
# NOTE: All references to SavedModel refer to SavedModels loaded from the
# load_v2 API (which does not require the `sess` argument).
# If the graph contains resources loaded from a SavedModel, they are not
# restored when calling `saver.restore`. Thus, the SavedModel initializer must
# be called with `saver.restore` to properly initialize the model.
# The SavedModel init is stored in the "saved_model_initializers" collection.
# This collection is part of the MetaGraph's default_init_op, so it is already
# called by MonitoredSession as long as the saver doesn't restore any
# checkpoints from the working dir.
saved_model_init_ops = ops.get_collection("saved_model_initializers")
if saved_model_init_ops:
sess.run(saved_model_init_ops)
# The saver must be called *after* the SavedModel init, because the SavedModel
# init will restore the variables from the SavedModel variables directory.
# Initializing/restoring twice is not ideal but there's no other way to do it.
saver.restore(sess, path)
@tf_export(v1=["train.SessionManager"])
class SessionManager(object):
"""Training helper that restores from checkpoint and creates session.
This class is a small wrapper that takes care of session creation and
  checkpoint recovery. It also provides functions to facilitate
coordination among multiple training threads or processes.
* Checkpointing trained variables as the training progresses.
* Initializing variables on startup, restoring them from the most recent
checkpoint after a crash, or wait for checkpoints to become available.
### Usage:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will checkpoint the model in '/tmp/mydir'.
sm = SessionManager()
sess = sm.prepare_session(master, init_op, saver, checkpoint_dir)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
  `prepare_session()` initializes or restores a model. It requires `init_op`
  and `saver` as arguments.
A second process could wait for the model to be ready by doing the following:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will wait for the model to become ready.
sm = SessionManager()
sess = sm.wait_for_session(master)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
`wait_for_session()` waits for a model to be initialized by other processes.
"""
def __init__(self,
local_init_op=None,
ready_op=None,
ready_for_local_init_op=None,
graph=None,
recovery_wait_secs=30,
local_init_run_options=None,
local_init_feed_dict=None):
"""Creates a SessionManager.
The `local_init_op` is an `Operation` that is run always after a new session
was created. If `None`, this step is skipped.
The `ready_op` is an `Operation` used to check if the model is ready. The
model is considered ready if that operation returns an empty 1D string
    tensor. If the operation returns a non-empty 1D string tensor, the elements
are concatenated and used to indicate to the user why the model is not
ready.
The `ready_for_local_init_op` is an `Operation` used to check if the model
is ready to run local_init_op. The model is considered ready if that
    operation returns an empty 1D string tensor. If the operation returns a
    non-empty 1D string tensor, the elements are concatenated and used to indicate
to the user why the model is not ready.
If `ready_op` is `None`, the model is not checked for readiness.
`recovery_wait_secs` is the number of seconds between checks that
the model is ready. It is used by processes to wait for a model to
be initialized or restored. Defaults to 30 seconds.
Args:
local_init_op: An `Operation` run immediately after session creation.
Usually used to initialize tables and local variables.
ready_op: An `Operation` to check if the model is initialized.
ready_for_local_init_op: An `Operation` to check if the model is ready
to run local_init_op.
graph: The `Graph` that the model will use.
recovery_wait_secs: Seconds between checks for the model to be ready.
local_init_run_options: RunOptions to be passed to session.run when
executing the local_init_op.
local_init_feed_dict: Optional session feed dictionary to use when running
the local_init_op.
Raises:
ValueError: If ready_for_local_init_op is not None but local_init_op is
None
"""
# Sets default values of arguments.
if graph is None:
graph = ops.get_default_graph()
self._local_init_op = local_init_op
self._ready_op = ready_op
self._ready_for_local_init_op = ready_for_local_init_op
self._graph = graph
self._recovery_wait_secs = recovery_wait_secs
self._target = None
self._local_init_run_options = local_init_run_options
self._local_init_feed_dict = local_init_feed_dict
if ready_for_local_init_op is not None and local_init_op is None:
raise ValueError("If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "
", ready_for_local_init_op [%s]" %
ready_for_local_init_op)
def _restore_checkpoint(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, and tries to restore a checkpoint.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, is_restored) where 'is_restored' is `True` if
the session could be restored, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
self._target = master
    # This is required so that we initialize the TPU device before
    # restoring from checkpoint, since we'll be placing variables on the device
    # and TPUInitialize wipes out the memory of the device.
strategy = distribution_strategy_context.get_strategy()
if strategy and hasattr(strategy.extended,
"_experimental_initialize_system"):
strategy.extended._experimental_initialize_system() # pylint: disable=protected-access
sess = session.Session(self._target, graph=self._graph, config=config)
if checkpoint_dir and checkpoint_filename_with_path:
raise ValueError("Can not provide both checkpoint_dir and "
"checkpoint_filename_with_path.")
# If either saver or checkpoint_* is not specified, cannot restore. Just
# return.
if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
return sess, False
if checkpoint_filename_with_path:
_restore_checkpoint_and_maybe_run_saved_model_initializers(
sess, saver, checkpoint_filename_with_path)
return sess, True
# Waits up until max_wait_secs for checkpoint to become available.
wait_time = 0
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
logging.info("Waiting for checkpoint to be available.")
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
else:
return sess, False
# Loads the checkpoint.
_restore_checkpoint_and_maybe_run_saved_model_initializers(
sess, saver, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
return sess, True
def prepare_session(self,
master,
init_op=None,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None,
init_feed_dict=None,
init_fn=None):
"""Creates a `Session`. Makes sure the model is ready to be used.
Creates a `Session` on 'master'. If a `saver` object is passed in, and
`checkpoint_dir` points to a directory containing valid checkpoint
files, then it will try to recover the model from checkpoint. If
no checkpoint files are available, and `wait_for_checkpoint` is
`True`, then the process would check every `recovery_wait_secs`,
up to `max_wait_secs`, for recovery to succeed.
If the model cannot be recovered successfully then it is initialized by
running the `init_op` and calling `init_fn` if they are provided.
The `local_init_op` is also run after init_op and init_fn, regardless of
whether the model was recovered successfully, but only if
`ready_for_local_init_op` passes.
If the model is recovered from a checkpoint it is assumed that all
global variables have been initialized, in particular neither `init_op`
nor `init_fn` will be executed.
It is an error if the model cannot be recovered and no `init_op`
or `init_fn` or `local_init_op` are passed.
Args:
master: `String` representation of the TensorFlow master to use.
init_op: Optional `Operation` used to initialize the model.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
values. This feed dictionary is passed to the session `run()` call when
running the init op.
init_fn: Optional callable used to initialize the model. Called after the
optional `init_op` is called. The callable must accept one argument,
the session being initialized.
Returns:
A `Session` object that can be used to drive the model.
Raises:
RuntimeError: If the model cannot be initialized or recovered.
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config)
if not is_loaded_from_checkpoint:
if init_op is None and not init_fn and self._local_init_op is None:
raise RuntimeError("Model is not initialized and no init_op or "
"init_fn or local_init_op was given")
if init_op is not None:
sess.run(init_op, feed_dict=init_feed_dict)
if init_fn:
init_fn(sess)
local_init_success, msg = self._try_run_local_init_op(sess)
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for local_init. "
"Init op: %s, init fn: %s, error: %s" % (_maybe_name(init_op),
init_fn,
msg))
is_ready, msg = self._model_ready(sess)
if not is_ready:
raise RuntimeError(
"Init operations did not make model ready. "
"Init op: %s, init fn: %s, local_init_op: %s, error: %s" %
(_maybe_name(init_op), init_fn, self._local_init_op, msg))
return sess
def recover_session(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, recovering if possible.
Creates a new session on 'master'. If the session is not initialized
and can be recovered from a checkpoint, recover it.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, initialized) where 'initialized' is `True` if
the session could be recovered and initialized, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config)
# Always try to run local_init_op
local_init_success, msg = self._try_run_local_init_op(sess)
if not is_loaded_from_checkpoint:
# Do not need to run checks for readiness
return sess, False
restoring_file = checkpoint_dir or checkpoint_filename_with_path
if not local_init_success:
logging.info(
"Restoring model from %s did not make model ready for local init:"
" %s", restoring_file, msg)
return sess, False
is_ready, msg = self._model_ready(sess)
if not is_ready:
logging.info("Restoring model from %s did not make model ready: %s",
restoring_file, msg)
return sess, False
logging.info("Restored model from %s", restoring_file)
return sess, is_loaded_from_checkpoint
def wait_for_session(self, master, config=None, max_wait_secs=float("Inf")):
"""Creates a new `Session` and waits for model to be ready.
Creates a new `Session` on 'master'. Waits for the model to be
initialized or recovered from a checkpoint. It's expected that
another thread or process will make the model ready, and that this
is intended to be used by threads/processes that participate in a
distributed training configuration where a different thread/process
is responsible for initializing or recovering the model being trained.
NB: The amount of time this method waits for the session is bounded
by max_wait_secs. By default, this function will wait indefinitely.
Args:
master: `String` representation of the TensorFlow master to use.
config: Optional ConfigProto proto used to configure the session.
max_wait_secs: Maximum time to wait for the session to become available.
Returns:
A `Session`. May be None if the operation exceeds the timeout
specified by config.operation_timeout_in_ms.
Raises:
tf.DeadlineExceededError: if the session is not available after
max_wait_secs.
"""
self._target = master
if max_wait_secs is None:
max_wait_secs = float("Inf")
timer = _CountDownTimer(max_wait_secs)
while True:
sess = session.Session(self._target, graph=self._graph, config=config)
not_ready_msg = None
not_ready_local_msg = None
local_init_success, not_ready_local_msg = self._try_run_local_init_op(
sess)
if local_init_success:
# Successful if local_init_op is None, or ready_for_local_init_op passes
is_ready, not_ready_msg = self._model_ready(sess)
if is_ready:
return sess
self._safe_close(sess)
# Do we have enough time left to try again?
      remaining_secs_after_wait = (
          timer.secs_remaining() - self._recovery_wait_secs)
      if remaining_secs_after_wait < 0:
raise errors.DeadlineExceededError(
None, None,
"Session was not ready after waiting %d secs." % (max_wait_secs,))
logging.info("Waiting for model to be ready. "
"Ready_for_local_init_op: %s, ready: %s",
not_ready_local_msg, not_ready_msg)
time.sleep(self._recovery_wait_secs)
def _safe_close(self, sess):
"""Closes a session without raising an exception.
Just like sess.close() but ignores exceptions.
Args:
sess: A `Session`.
"""
# pylint: disable=broad-except
try:
sess.close()
except Exception:
# Intentionally not logging to avoid user complaints that
# they get cryptic errors. We really do not care that Close
# fails.
pass
# pylint: enable=broad-except
def _model_ready(self, sess):
"""Checks if the model is ready or not.
Args:
sess: A `Session`.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready and False
otherwise, and msg is `None` if the model is ready, a `String` with the
reason why it is not ready otherwise.
"""
return _ready(self._ready_op, sess, "Model not ready")
def _model_ready_for_local_init(self, sess):
"""Checks if the model is ready to run local_init_op.
Args:
sess: A `Session`.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready to run
local_init_op and False otherwise, and msg is `None` if the model is
ready to run local_init_op, a `String` with the reason why it is not ready
otherwise.
"""
return _ready(self._ready_for_local_init_op, sess,
"Model not ready for local init")
def _try_run_local_init_op(self, sess):
"""Tries to run _local_init_op, if not None, and is ready for local init.
Args:
sess: A `Session`.
Returns:
A tuple (is_successful, msg), where is_successful is True if
_local_init_op is None, or we ran _local_init_op, and False otherwise;
and msg is a `String` with the reason why the model was not ready to run
local init.
"""
if self._local_init_op is not None:
is_ready_for_local_init, msg = self._model_ready_for_local_init(sess)
if is_ready_for_local_init:
logging.info("Running local_init_op.")
sess.run(self._local_init_op, feed_dict=self._local_init_feed_dict,
options=self._local_init_run_options)
logging.info("Done running local_init_op.")
return True, None
else:
return False, msg
return True, None
def _ready(op, sess, msg):
"""Checks if the model is ready or not, as determined by op.
Args:
op: An op, either _ready_op or _ready_for_local_init_op, which defines the
readiness of the model.
sess: A `Session`.
msg: A message to log to warning if not ready
Returns:
A tuple (is_ready, msg), where is_ready is True if ready and False
otherwise, and msg is `None` if the model is ready, a `String` with the
reason why it is not ready otherwise.
"""
if op is None:
return True, None
else:
try:
ready_value = sess.run(op)
# The model is considered ready if ready_op returns an empty 1-D tensor.
# Also compare to `None` and dtype being int32 for backward
# compatibility.
if (ready_value is None or ready_value.dtype == np.int32 or
ready_value.size == 0):
return True, None
else:
# TODO(sherrym): If a custom ready_op returns other types of tensor,
# or strings other than variable names, this message could be
# confusing.
non_initialized_varnames = ", ".join(
[i.decode("utf-8") for i in ready_value])
return False, "Variables not initialized: " + non_initialized_varnames
except errors.FailedPreconditionError as e:
if "uninitialized" not in str(e):
logging.warning("%s : error [%s]", msg, str(e))
raise e
return False, str(e)
class _CountDownTimer(object):
__slots__ = ["_start_time_secs", "_duration_secs"]
def __init__(self, duration_secs):
self._start_time_secs = time.time()
self._duration_secs = duration_secs
def secs_remaining(self):
diff = self._duration_secs - (time.time() - self._start_time_secs)
return max(0, diff)
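# A minimal usage sketch of SessionManager with a readiness check (TF1-style
# graph mode; the local master string and variable below are illustrative):
#
#   import tensorflow.compat.v1 as tf
#   with tf.Graph().as_default():
#     v = tf.get_variable('v', shape=[], initializer=tf.zeros_initializer())
#     sm = SessionManager(ready_op=tf.report_uninitialized_variables())
#     sess = sm.prepare_session('', init_op=tf.global_variables_initializer())
#     print(sess.run(v))  # v was initialized by init_op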
|
{
"content_hash": "6768cf03595b70877fc9b7e6bd31fc6e",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 93,
"avg_line_length": 39.56468531468531,
"alnum_prop": 0.6578586894083337,
"repo_name": "Intel-Corporation/tensorflow",
"id": "5cc36141f3bc65d9b84aa9b876a441e29db3c3d2",
"size": "23320",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/session_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
}
|
"""
Factory functions to prepare useful data.
"""
import pytz
import pandas as pd
import numpy as np
from datetime import timedelta
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.sources import (SpecificEquityTrades,
DataFrameSource,
DataPanelSource)
from zipline.finance.trading import (
SimulationParameters, TradingEnvironment, noop_load
)
from zipline.sources.test_source import create_trade
from zipline.data.loader import ( # For backwards compatibility
load_from_yahoo,
load_bars_from_yahoo,
)
__all__ = ['load_from_yahoo', 'load_bars_from_yahoo']
def create_simulation_parameters(year=2006, start=None, end=None,
capital_base=float("1.0e5"),
num_days=None,
data_frequency='daily',
emission_rate='daily',
env=None):
if env is None:
# Construct a complete environment with reasonable defaults
env = TradingEnvironment(load=noop_load)
if start is None:
start = pd.Timestamp("{0}-01-01".format(year), tz='UTC')
if end is None:
if num_days:
start_index = env.trading_days.searchsorted(start)
end = env.trading_days[start_index + num_days - 1]
else:
end = pd.Timestamp("{0}-12-31".format(year), tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=capital_base,
data_frequency=data_frequency,
emission_rate=emission_rate,
env=env,
)
return sim_params
def get_next_trading_dt(current, interval, env):
next_dt = pd.Timestamp(current).tz_convert(env.exchange_tz)
while True:
        # Convert the timestamp to naive before adding the interval; otherwise,
        # when stepping over EDT, an hour is added.
next_dt = pd.Timestamp(next_dt.replace(tzinfo=None))
next_dt = next_dt + interval
next_dt = pd.Timestamp(next_dt, tz=env.exchange_tz)
next_dt_utc = next_dt.tz_convert('UTC')
if env.is_market_hours(next_dt_utc):
break
next_dt = next_dt_utc.tz_convert(env.exchange_tz)
return next_dt_utc
def create_trade_history(sid, prices, amounts, interval, sim_params, env,
source_id="test_factory"):
trades = []
current = sim_params.first_open
oneday = timedelta(days=1)
use_midnight = interval >= oneday
for price, amount in zip(prices, amounts):
if use_midnight:
trade_dt = current.replace(hour=0, minute=0)
else:
trade_dt = current
trade = create_trade(sid, price, amount, trade_dt, source_id)
trades.append(trade)
current = get_next_trading_dt(current, interval, env)
assert len(trades) == len(prices)
return trades
def create_dividend(sid, payment, declared_date, ex_date, pay_date):
div = Event({
'sid': sid,
'gross_amount': payment,
'net_amount': payment,
'payment_sid': None,
'ratio': None,
'declared_date': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
return div
def create_stock_dividend(sid, payment_sid, ratio, declared_date,
ex_date, pay_date):
return Event({
'sid': sid,
'payment_sid': payment_sid,
'ratio': ratio,
'net_amount': None,
'gross_amount': None,
'dt': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
def create_split(sid, ratio, date):
return Event({
'sid': sid,
'ratio': ratio,
'dt': date.replace(hour=0, minute=0, second=0, microsecond=0),
'type': DATASOURCE_TYPE.SPLIT,
'source_id': 'MockSplitSource'
})
def create_txn(sid, price, amount, datetime):
txn = Event({
'sid': sid,
'amount': amount,
'dt': datetime,
'price': price,
'type': DATASOURCE_TYPE.TRANSACTION,
'source_id': 'MockTransactionSource'
})
return txn
def create_commission(sid, value, datetime):
txn = Event({
'dt': datetime,
'type': DATASOURCE_TYPE.COMMISSION,
'cost': value,
'sid': sid,
'source_id': 'MockCommissionSource'
})
return txn
def create_txn_history(sid, priceList, amtList, interval, sim_params, env):
txns = []
current = sim_params.first_open
for price, amount in zip(priceList, amtList):
current = get_next_trading_dt(current, interval, env)
txns.append(create_txn(sid, price, amount, current))
current = current + interval
return txns
def create_returns_from_range(sim_params):
return pd.Series(index=sim_params.trading_days,
data=np.random.rand(len(sim_params.trading_days)))
def create_returns_from_list(returns, sim_params):
return pd.Series(index=sim_params.trading_days[:len(returns)],
data=returns)
def create_daily_trade_source(sids, sim_params, env, concurrent=False):
"""
creates trade_count trades for each sid in sids list.
first trade will be on sim_params.period_start, and daily
thereafter for each sid. Thus, two sids should result in two trades per
day.
"""
return create_trade_source(
sids,
timedelta(days=1),
sim_params,
env=env,
concurrent=concurrent,
)
def create_minutely_trade_source(sids, sim_params, env, concurrent=False):
"""
creates trade_count trades for each sid in sids list.
first trade will be on sim_params.period_start, and every minute
thereafter for each sid. Thus, two sids should result in two trades per
minute.
"""
return create_trade_source(
sids,
timedelta(minutes=1),
sim_params,
env=env,
concurrent=concurrent,
)
def create_trade_source(sids, trade_time_increment, sim_params, env,
concurrent=False):
# If the sim_params define an end that is during market hours, that will be
# used as the end of the data source
if env.is_market_hours(sim_params.period_end):
end = sim_params.period_end
# Otherwise, the last_close after the period_end is used as the end of the
# data source
else:
end = sim_params.last_close
args = tuple()
kwargs = {
'sids': sids,
'start': sim_params.first_open,
'end': end,
'delta': trade_time_increment,
'filter': sids,
'concurrent': concurrent,
'env': env,
}
source = SpecificEquityTrades(*args, **kwargs)
return source
def create_test_df_source(sim_params=None, env=None, bars='daily'):
if bars == 'daily':
freq = pd.datetools.BDay()
elif bars == 'minute':
freq = pd.datetools.Minute()
else:
raise ValueError('%s bars not understood.' % bars)
if sim_params and bars == 'daily':
index = sim_params.trading_days
else:
if env is None:
env = TradingEnvironment(load=noop_load)
start = pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
days = env.days_in_range(start, end)
if bars == 'daily':
index = days
if bars == 'minute':
index = pd.DatetimeIndex([], freq=freq)
for day in days:
day_index = env.market_minutes_for_day(day)
index = index.append(day_index)
x = np.arange(1, len(index) + 1)
df = pd.DataFrame(x, index=index, columns=[0])
return DataFrameSource(df), df
def create_test_panel_source(sim_params=None, env=None, source_type=None):
start = sim_params.first_open \
if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = sim_params.last_close \
if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
if env is None:
env = TradingEnvironment(load=noop_load)
index = env.days_in_range(start, end)
price = np.arange(0, len(index))
volume = np.ones(len(index)) * 1000
arbitrary = np.ones(len(index))
df = pd.DataFrame({'price': price,
'volume': volume,
'arbitrary': arbitrary},
index=index)
if source_type:
df['type'] = source_type
panel = pd.Panel.from_dict({0: df})
return DataPanelSource(panel), panel
def create_test_panel_ohlc_source(sim_params, env):
start = sim_params.first_open \
if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = sim_params.last_close \
if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
index = env.days_in_range(start, end)
price = np.arange(0, len(index)) + 100
high = price * 1.05
low = price * 0.95
open_ = price + .1 * (price % 2 - .5)
volume = np.ones(len(index)) * 1000
arbitrary = np.ones(len(index))
df = pd.DataFrame({'price': price,
'high': high,
'low': low,
'open': open_,
'volume': volume,
'arbitrary': arbitrary},
index=index)
panel = pd.Panel.from_dict({0: df})
return DataPanelSource(panel), panel
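# A minimal usage sketch, not part of the original module: build default
# simulation parameters and a daily trade source for two hypothetical sids.
# Assumes a zipline checkout where TradingEnvironment(load=noop_load)
# needs no network access.
if __name__ == '__main__':
    env = TradingEnvironment(load=noop_load)
    params = create_simulation_parameters(year=2006, num_days=5, env=env)
    source = create_daily_trade_source([1, 2], params, env)
    for event in source:
        print(event)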
|
{
"content_hash": "2bacbbb4b337a5fbe3b6b87df4d621a0",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 79,
"avg_line_length": 29.972477064220183,
"alnum_prop": 0.5855524946434038,
"repo_name": "umuzungu/zipline",
"id": "9adb4dcb5e3b697151ab21e6aabd68436d9507c7",
"size": "10385",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zipline/utils/factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "168399"
},
{
"name": "Python",
"bytes": "1778507"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
}
|
"""
...
"""
import argparse
import sys
import os
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables import otTables as ot
from fontTools.otlLib.builder import buildStatTable, _addName
from fontTools.varLib.instancer import (
instantiateVariableFont,
sanityCheckVariableTables
)
def split_slnt(ttfont, out_dir):
"""Use varlib instance to split a variable font if it contains a
slnt or ital axis."""
sanityCheckVariableTables(ttfont)
axes = {a.axisTag: a for a in ttfont['fvar'].axes}
ital_angle = axes['ital'].maxValue
roman = instantiateVariableFont(ttfont, {"ital": 0})
italic = instantiateVariableFont(ttfont, {"ital": ital_angle})
_update_bits(italic)
_update_nametable(italic)
_update_fvar(roman)
_update_fvar(italic)
_update_roman_stat(roman)
_update_italic_stat(italic)
roman_filename = os.path.join(
out_dir,
vf_filename(roman)
)
roman.save(roman_filename)
italic_filename = os.path.join(
out_dir,
vf_filename(italic)
)
italic.save(italic_filename)
def _update_fvar(ttfont):
fvar = ttfont['fvar']
nametable = ttfont['name']
family_name = nametable.getName(16, 3, 1, 1033) or nametable.getName(1, 3, 1, 1033)
family_name = family_name.toUnicode()
font_style = "Italic" if "Italic" in nametable.getName(2, 3, 1, 1033).toUnicode() else "Roman"
ps_family_name = f"{family_name.replace(' ', '')}{font_style}"
nametable.setName(ps_family_name, 25, 3, 1, 1033)
for instance in fvar.instances:
instance_style = nametable.getName(instance.subfamilyNameID, 3, 1, 1033).toUnicode()
instance_style = instance_style.replace("Italic", "").strip().replace(" ", "")
if instance_style == "":
instance_style = "Regular"
ps_name = f"{ps_family_name}-{instance_style}"
instance.postscriptNameID = _addName(nametable, ps_name, 256)
def _update_roman_stat(ttfont):
stat = ttfont['STAT'].table
record = ot.AxisValue()
record.AxisIndex = 2
record.Flags = 2
record.ValueNameID = 296 # Roman
record.LinkedValue = 1
record.Value = 0
record.Format = 3
stat.AxisValueArray.AxisValue[-1] = record
def _update_italic_stat(ttfont):
stat = ttfont['STAT'].table
record = ot.AxisValue()
record.AxisIndex = 2
record.Flags = 0
record.ValueNameID = 258 # Italic
record.Value = 1.0
record.Format = 1
stat.AxisValueArray.AxisValue[-1] = record
def vf_filename(ttfont):
axes = sorted([a.axisTag for a in ttfont['fvar'].axes])
axes = ",".join(axes)
family_name = ttfont['name'].getName(1, 3, 1, 1033)
name = family_name.toUnicode()
if "Italic" in ttfont['name'].getName(2, 3, 1, 1033).toUnicode():
return f"{name}-Italic[{axes}].ttf"
return f"{name}[{axes}].ttf"
def _update_bits(ttfont):
"""Update bits for instantiated italic font"""
# OS/2: disable Regular bit and enable italic bit
ttfont['OS/2'].fsSelection ^= (1 << 6) | 1
# head: enable italic bit
ttfont["head"].macStyle |= (1 << 1)
ttfont["post"].italicAngle = -12
ttfont["hhea"].caretSlopeRun = 435
ttfont["hhea"].caretSlopeRise = 2048
def _update_nametable(ttfont):
nametable = ttfont['name']
dflt_axes_loc = {a.axisTag: a.defaultValue for a in ttfont['fvar'].axes}
dflt_nameid = None
for instance in ttfont['fvar'].instances:
if instance.coordinates == dflt_axes_loc:
dflt_nameid = instance.subfamilyNameID
if not dflt_nameid:
raise ValueError("Cannot name font. Default axis locations are not represented by an instance.")
dflt_name = nametable.getName(dflt_nameid, 3, 1, 1033).toUnicode()
familyname = nametable.getName(1, 3, 1, 1033).toUnicode()
# Update subfamily name
nametable.setName(dflt_name, 2, 3, 1, 1033)
# Update full font name and uniqueID
full_font_name = f"{familyname} {dflt_name}"
nametable.setName(full_font_name, 3, 3, 1, 1033)
nametable.setName(full_font_name, 4, 3, 1, 1033)
nametable.setName(full_font_name, 4, 1, 0, 0)
# Postscript name
postscript_name = f"{familyname}-{dflt_name}"
nametable.setName(postscript_name, 6, 3, 1, 1033)
nametable.setName(postscript_name, 6, 1, 0, 0)
def main():
ttfont = TTFont(sys.argv[1])
split_slnt(ttfont, sys.argv[2])
if __name__ == "__main__":
main()
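# Usage sketch (the font path and output directory are hypothetical):
#   python split_slnt_vf.py "Roboto[ital,wdth,wght].ttf" fonts/variable/
# This writes one Roman and one Italic variable font into fonts/variable/,
# each named by vf_filename() from the name table and remaining axis tags.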
|
{
"content_hash": "4bad9d767f1823a3893344752018f8bb",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 104,
"avg_line_length": 30.517241379310345,
"alnum_prop": 0.6508474576271186,
"repo_name": "googlefonts/roboto-classic",
"id": "536adf08c556da719fa9b4772b8b98cb46ddb5e1",
"size": "4425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/split_slnt_vf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "301640"
},
{
"name": "Shell",
"bytes": "4136"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
urlpatterns = patterns('',
    url(r'^$', 'django.views.generic.simple.direct_to_template',
        {'template': 'bueda_flickr_mashup/templates/bueda_flickr_mashup/demo.html'},
        name='demo'),
    url(r'^run', 'views.demo', name='run'),
)
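# Usage sketch: with this URLconf mounted at the site root, the named
# patterns above resolve as reverse('demo') -> '/' and reverse('run') -> '/run'.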
|
{
"content_hash": "10d77af0121b595be8745a161d8ebf2b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 150,
"avg_line_length": 43.666666666666664,
"alnum_prop": 0.6984732824427481,
"repo_name": "bueda/bueda-flickr-mashup",
"id": "cfb74eedde8b1813d42458b98fd6deec3e4b6acb",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bueda_flickr_mashup/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4371"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Flag to indicate that this migration is too risky to run online
    # and needs to be coordinated for offline execution.
    is_dangerous = True
def forwards(self, orm):
# Adding model 'EventTag'
db.create_table(u'tagstore_eventtag', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')()),
('environment_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')()),
('group_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')()),
('event_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')()),
('key', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['tagstore.TagKey'], db_column='key')),
('value', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['tagstore.TagValue'], db_column='value')),
('date_added', self.gf('django.db.models.fields.DateTimeField')(
db_index=True)),
))
db.send_create_signal('tagstore', ['EventTag'])
# Adding unique constraint on 'EventTag', fields ['event_id', 'key', 'value']
db.create_unique(u'tagstore_eventtag', ['event_id', 'key', 'value'])
# Adding index on 'EventTag', fields ['project_id', 'key', 'value']
db.create_index(u'tagstore_eventtag', ['project_id', 'key', 'value'])
# Adding index on 'EventTag', fields ['group_id', 'key', 'value']
db.create_index(u'tagstore_eventtag', ['group_id', 'key', 'value'])
# Adding index on 'EventTag', fields ['environment_id', 'key', 'value']
db.create_index(u'tagstore_eventtag', ['environment_id', 'key', 'value'])
# Adding model 'GroupTagKey'
db.create_table(u'tagstore_grouptagkey', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(db_index=True)),
('group_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(db_index=True)),
('environment_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True)),
('_key', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['tagstore.TagKey'], db_column='key')),
('values_seen', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0)),
))
db.send_create_signal('tagstore', ['GroupTagKey'])
# Adding unique constraint on 'GroupTagKey', fields ['project_id',
# 'group_id', 'environment_id', '_key']
db.create_unique(
u'tagstore_grouptagkey', [
'project_id', 'group_id', 'environment_id', 'key'])
# Adding model 'GroupTagValue'
db.create_table(u'tagstore_grouptagvalue', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(db_index=True)),
('group_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(db_index=True)),
('environment_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True)),
('times_seen', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0)),
('_key', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['tagstore.TagKey'], db_column='key')),
('_value', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['tagstore.TagValue'], db_column='value')),
('last_seen', self.gf('django.db.models.fields.DateTimeField')(
null=True, db_index=True)),
('first_seen', self.gf('django.db.models.fields.DateTimeField')(
null=True, db_index=True)),
))
db.send_create_signal('tagstore', ['GroupTagValue'])
# Adding unique constraint on 'GroupTagValue', fields ['project_id',
# 'group_id', 'environment_id', '_key', '_value']
db.create_unique(
u'tagstore_grouptagvalue', [
'project_id', 'group_id', 'environment_id', 'key', 'value'])
# Adding index on 'GroupTagValue', fields ['project_id', '_key', '_value', 'last_seen']
db.create_index(u'tagstore_grouptagvalue', ['project_id', 'key', 'value', 'last_seen'])
# Adding model 'TagKey'
db.create_table(u'tagstore_tagkey', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(db_index=True)),
('environment_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True)),
('key', self.gf('django.db.models.fields.CharField')(max_length=32)),
('values_seen', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0)),
('status', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0)),
))
db.send_create_signal('tagstore', ['TagKey'])
# Adding unique constraint on 'TagKey', fields ['project_id', 'environment_id', 'key']
db.create_unique(u'tagstore_tagkey', ['project_id', 'environment_id', 'key'])
# Adding model 'TagValue'
db.create_table(u'tagstore_tagvalue', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(db_index=True)),
('environment_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True)),
('_key', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['tagstore.TagKey'], db_column='key')),
('value', self.gf('django.db.models.fields.CharField')(max_length=200)),
('data', self.gf('sentry.db.models.fields.gzippeddict.GzippedDictField')(null=True, blank=True)),
('times_seen', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0)),
('last_seen', self.gf('django.db.models.fields.DateTimeField')(
null=True, db_index=True)),
('first_seen', self.gf('django.db.models.fields.DateTimeField')(
null=True, db_index=True)),
))
db.send_create_signal('tagstore', ['TagValue'])
# Adding unique constraint on 'TagValue', fields ['project_id',
# 'environment_id', '_key', 'value']
db.create_unique(u'tagstore_tagvalue', ['project_id', 'environment_id', 'key', 'value'])
# Adding index on 'TagValue', fields ['project_id', '_key', 'last_seen']
db.create_index(u'tagstore_tagvalue', ['project_id', 'key', 'last_seen'])
def backwards(self, orm):
# Removing index on 'TagValue', fields ['project_id', '_key', 'last_seen']
db.delete_index(u'tagstore_tagvalue', ['project_id', 'key', 'last_seen'])
# Removing unique constraint on 'TagValue', fields ['project_id',
# 'environment_id', '_key', 'value']
db.delete_unique(u'tagstore_tagvalue', ['project_id', 'environment_id', 'key', 'value'])
# Removing unique constraint on 'TagKey', fields ['project_id', 'environment_id', 'key']
db.delete_unique(u'tagstore_tagkey', ['project_id', 'environment_id', 'key'])
# Removing index on 'GroupTagValue', fields ['project_id', '_key', '_value', 'last_seen']
db.delete_index(u'tagstore_grouptagvalue', ['project_id', 'key', 'value', 'last_seen'])
# Removing unique constraint on 'GroupTagValue', fields ['project_id',
# 'group_id', 'environment_id', '_key', '_value']
db.delete_unique(
u'tagstore_grouptagvalue', [
'project_id', 'group_id', 'environment_id', 'key', 'value'])
# Removing unique constraint on 'GroupTagKey', fields ['project_id',
# 'group_id', 'environment_id', '_key']
db.delete_unique(
u'tagstore_grouptagkey', [
'project_id', 'group_id', 'environment_id', 'key'])
# Removing index on 'EventTag', fields ['environment_id', 'key', 'value']
db.delete_index(u'tagstore_eventtag', ['environment_id', 'key', 'value'])
# Removing index on 'EventTag', fields ['group_id', 'key', 'value']
db.delete_index(u'tagstore_eventtag', ['group_id', 'key', 'value'])
# Removing index on 'EventTag', fields ['project_id', 'key', 'value']
db.delete_index(u'tagstore_eventtag', ['project_id', 'key', 'value'])
# Removing unique constraint on 'EventTag', fields ['event_id', 'key', 'value']
db.delete_unique(u'tagstore_eventtag', ['event_id', 'key', 'value'])
# Deleting model 'EventTag'
db.delete_table(u'tagstore_eventtag')
# Deleting model 'GroupTagKey'
db.delete_table(u'tagstore_grouptagkey')
# Deleting model 'GroupTagValue'
db.delete_table(u'tagstore_grouptagvalue')
# Deleting model 'TagKey'
db.delete_table(u'tagstore_tagkey')
# Deleting model 'TagValue'
db.delete_table(u'tagstore_tagvalue')
models = {
'tagstore.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key', 'value'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key', 'value'), ('group_id', 'key', 'value'), ('environment_id', 'key', 'value'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'event_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['tagstore.TagKey']", 'db_column': "'key'"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'value': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['tagstore.TagValue']", 'db_column': "'value'"})
},
'tagstore.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'environment_id', '_key'),)", 'object_name': 'GroupTagKey'},
'_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['tagstore.TagKey']", 'db_column': "'key'"}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'tagstore.grouptagvalue': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'environment_id', '_key', '_value'),)", 'object_name': 'GroupTagValue', 'index_together': "(('project_id', '_key', '_value', 'last_seen'),)"},
'_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['tagstore.TagKey']", 'db_column': "'key'"}),
'_value': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['tagstore.TagValue']", 'db_column': "'value'"}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'tagstore.tagkey': {
'Meta': {'unique_together': "(('project_id', 'environment_id', 'key'),)", 'object_name': 'TagKey'},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'tagstore.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'environment_id', '_key', 'value'),)", 'object_name': 'TagValue', 'index_together': "(('project_id', '_key', 'last_seen'),)"},
'_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['tagstore.TagKey']", 'db_column': "'key'"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['tagstore']
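# Usage sketch (South-era command; the app label comes from complete_apps above):
#   python manage.py migrate tagstore 0001_initial
# Since is_dangerous is True, this migration is meant to be coordinated and
# run offline rather than applied during a normal online deploy.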
|
{
"content_hash": "a60b2379e8bc97c77c56f919dc4a252d",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 222,
"avg_line_length": 66.02136752136752,
"alnum_prop": 0.6083241633762703,
"repo_name": "ifduyue/sentry",
"id": "6315e23a9cfe5c1882afddf6db236289e5bcf590",
"size": "15473",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/tagstore/south_migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
'''
Created on May 17, 2012
@author: h87966
'''
import unittest
from google.appengine.ext import testbed
from unit5.blog_entry import BlogData
from unit5.blog_datastore_factory import BlogDataStoreFactory
from unit5.blog_service import BlogService
class Test(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
self.factory = BlogDataStoreFactory()
self.storage = self.factory.get_storage()
self.service = BlogService(self.factory)
def tearDown(self):
self.testbed.deactivate()
def test_create_json(self):
subject = 'Test Subject'
content = 'Test Content'
        blog = BlogData(subject=subject, content=content)
blog.put()
blog_id = blog.key().id()
# blog_data = self.service.fetch(blog_id)
json_string = self.service.create_json(blog_id)
self.assertTrue(('"subject": "%s"' % subject) in json_string, "Actual json string: " + str(json_string))
self.assertTrue(('"content": "%s"' % content) in json_string, "Actual json string: " + str(json_string))
def test_create_json_with_double_quotes(self):
subject = 'Test"s Subject'
content = 'Test"s Content'
        blog = BlogData(subject=subject, content=content)
blog.put()
blog_id = blog.key().id()
# blog_data = self.service.fetch(blog_id)
json_string = self.service.create_json(blog_id)
self.assertTrue(('"subject": "%s"' % subject) in json_string, "Actual json string: " + str(json_string))
self.assertTrue(('"content": "%s"' % content) in json_string, "Actual json string: " + str(json_string))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test']
unittest.main()
|
{
"content_hash": "5398185ab90e828c199ec6f2b4446570",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 112,
"avg_line_length": 35.50877192982456,
"alnum_prop": 0.6403162055335968,
"repo_name": "cdoremus/udacity-python_web_development-cs253",
"id": "22a9a75bf9e5244e21a92a6b1f3ecd6efd53e8bd",
"size": "2024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit5/blog_service_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "15273"
},
{
"name": "Python",
"bytes": "233912"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for coll in orm.CollectionRecord.objects.all():
for qdc in orm['dublincore.qualifieddublincoreelement'].objects.filter(object_id=coll.pk):
qdc.object_id = coll.ark
qdc.save()
def backwards(self, orm):
"Write your backwards methods here."
for coll in orm.CollectionRecord.objects.all():
for qdc in orm['dublincore.qualifieddublincoreelement'].objects.filter(object_id=coll.pk):
qdc.object_id = coll.id
qdc.save()
models = {
'dublincore.qualifieddublincoreelement': {
'Meta': {'ordering': "['term']", 'object_name': 'QualifiedDublinCoreElement'},
'content': ('django.db.models.fields.TextField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'qualifier': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'collection_record.collectionrecord': {
'Meta': {'unique_together': "(('title_filing', 'publisher'), ('local_identifier', 'publisher'))", 'object_name': 'CollectionRecord'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'accessrestrict': ('django.db.models.fields.TextField', [], {}),
'acqinfo': ('django.db.models.fields.TextField', [], {}),
'ark': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'bioghist': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_dacs': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'date_iso': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'extent': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'local_identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'online_items_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oac.Institution']"}),
'scopecontent': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'title_filing': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userestrict': ('django.db.models.fields.TextField', [], {})
},
'collection_record.publishinginstitution': {
'Meta': {'ordering': "['name']", 'object_name': 'PublishingInstitution', 'db_table': "'oac_institution'", '_ormbases': ['oac.Institution'], 'proxy': 'True'}
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oac.city': {
'Meta': {'ordering': "['name']", 'object_name': 'City'},
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oac.County']"}),
'custom_zoom_level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'oac.county': {
'Meta': {'ordering': "['name']", 'object_name': 'County'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'oac.institution': {
'Meta': {'ordering': "['name']", 'object_name': 'Institution'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'archivegrid_harvest': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'ark': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'cdlpath': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oac.City']"}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oac.County']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_zoom_level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'google_analytics_tracking_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isa_campus': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'mainagency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent_ark': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'parent_institution': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['oac.Institution']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'primary_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact_for_institution'", 'null': 'True', 'to': "orm['auth.User']"}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'show_subjects': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'worldcat_harvest': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'zip4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['collection_record']
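# Usage sketch (South data migration; the app label comes from complete_apps above):
#   python manage.py migrate collection_record 0005
# forwards() repoints each QualifiedDublinCoreElement at the collection's ark;
# backwards() restores the original pk linkage.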
|
{
"content_hash": "cbcc6207002410060ac5f31229aae965",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 188,
"avg_line_length": 79.52054794520548,
"alnum_prop": 0.5509043927648579,
"repo_name": "cdlib/RecordExpress",
"id": "ed7c8296301624d555da615f047d601c43868a16",
"size": "11628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collection_record/migrations/0005_change_qdc_id_to_arks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "26697"
},
{
"name": "HTML",
"bytes": "24386"
},
{
"name": "JavaScript",
"bytes": "11552"
},
{
"name": "Python",
"bytes": "224866"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
}
|
import ctypes
import ctypes.wintypes
import datetime
import os
import wx
try:
import winsound
except ImportError:
winsound = None
import ChatKosLookup
DIVIDER = '-' * 40
PLUS_TAG = '[+]'
MINUS_TAG = u'[\u2212]' # Unicode MINUS SIGN
# Cargo-culted from:
# http://stackoverflow.com/questions/3927259/how-do-you-get-the-exact-path-to-my-documents
def GetMyDocumentsDir():
shell32 = ctypes.windll.shell32
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH + 1)
if shell32.SHGetSpecialFolderPathW(None, buf, 0x5, False):
return buf.value
return None
def GetEveLogsDir():
home = GetMyDocumentsDir()
if not home:
return None
if os.path.isdir(os.path.join(home, 'EVE', 'logs', 'Chatlogs')):
return os.path.join(home, 'EVE', 'logs', 'Chatlogs')
if os.path.isdir(os.path.join(home, 'CCP', 'EVE', 'logs', 'Chatlogs')):
return os.path.join(home, 'CCP', 'EVE', 'logs', 'Chatlogs')
return None
class MainFrame(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.UpdateIcon()
self.working_file = self.GetWorkingFile()
if not self.working_file:
self.Close()
return
self.UpdateTitle(self.working_file)
self.checker = ChatKosLookup.KosChecker()
self.tailer = ChatKosLookup.FileTailer(self.working_file)
self.labels = []
self.text_boxes = []
for i in xrange(100):
text = wx.StaticText(self, -1, '', (5, 16 * i + 5))
self.text_boxes.append(text)
self.SetSize((150, 800))
self.SetBackgroundColour('white')
self.Show()
self.KosCheckerPoll()
def UpdateIcon(self):
"""
If running from py2exe, then the icon is implicitly obtained from the .exe
file, but when running from source, this method pulls it in from the
directory containing the python modules.
"""
try:
icon_path = os.path.join(os.path.dirname(__file__), 'icon.ico')
except NameError:
# __file__ does not exist
return
if os.path.exists(icon_path):
self.SetIcon(wx.Icon(icon_path, wx.BITMAP_TYPE_ICO))
def KosCheckerPoll(self):
entry, comment = self.tailer.poll()
if not entry:
wx.FutureCall(1000, self.KosCheckerPoll)
return
kos, not_kos, error = self.checker.koscheck_logentry(entry)
new_labels = []
if comment:
new_labels.append(('black', comment))
if kos or not_kos:
new_labels.append(('black',
'KOS: %d Not KOS: %d' % (len(kos), len(not_kos))))
if kos:
self.PlayKosAlertSound()
new_labels.extend([('red', u'%s %s (%s)' % (MINUS_TAG, p, reason))
for (p, reason) in kos])
if not_kos:
if kos:
new_labels.append(('black', ' '))
new_labels.extend([('blue', '%s %s' % (PLUS_TAG, p)) for p in not_kos])
if error:
new_labels.append(('black', 'Error: %d' % len(error)))
new_labels.extend([('black', p) for p in error])
if new_labels:
new_labels.append(('black', DIVIDER))
self.labels = new_labels + self.labels
self.labels = self.labels[:100]
self.UpdateLabels()
wx.FutureCall(100, self.KosCheckerPoll)
def PlayKosAlertSound(self):
if winsound:
winsound.PlaySound("SystemQuestion", winsound.SND_ALIAS)
def UpdateLabels(self):
for i, (color, label) in enumerate(self.labels):
self.text_boxes[i].SetForegroundColour(color)
self.text_boxes[i].SetLabel(label)
def UpdateTitle(self, working_file):
filename = os.path.basename(working_file)
name = filename.rsplit('_', 2)[0]
        # str.replace returns a new string, so assign the result back.
        name = name.replace('_', ' ')
self.SetLabel(name)
def GetWorkingFile(self):
today = datetime.date.today().strftime('%Y%m%d')
wildcards = [
'Fleet logs (today)', 'Fleet_%s_*.txt' % today,
'Fleet logs (all)', 'Fleet*.txt',
'All logs (today)', '*_%s_*.txt' % today,
'All logs', '*.txt']
dialog = wx.FileDialog(
self,
'Choose a log file',
GetEveLogsDir(),
style=wx.OPEN,
wildcard='|'.join(wildcards))
result = dialog.ShowModal()
if result != wx.ID_OK:
return None
return dialog.GetPath()
if __name__ == '__main__':
app = wx.App()
frame = MainFrame(None, -1, 'KOS Checker')
app.MainLoop()
|
{
"content_hash": "5d46cef48cb781298fe71d2ef4e29726",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 90,
"avg_line_length": 29.427586206896553,
"alnum_prop": 0.62034216076869,
"repo_name": "Mightymod/nrds-tools-tnnt",
"id": "cd6ad768cde94e70f0d87bc347b3757b7687c9aa",
"size": "4267",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "KosLookupExe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12951"
}
],
"symlink_target": ""
}
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
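# Usage sketch: because of the _LazyModule indirection above, a plain
#   from transformers import ElectraModel
# defers importing modeling_electra (and torch) until the attribute is
# actually resolved, so the package stays importable without torch installed.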
|
{
"content_hash": "22c5f17be5e481628134a3a4c0a6bf95",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 113,
"avg_line_length": 30.194805194805195,
"alnum_prop": 0.6653763440860215,
"repo_name": "huggingface/transformers",
"id": "59e3ca47794173fd8b8721d57d1e3f60c03cf0b1",
"size": "5428",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/transformers/models/electra/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
}
|
"""
To change the version of entire package, just edit this one location.
"""
__title__ = 'newspaper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
version_info = (0, 0, 6)
__version__ = ".".join(map(str, version_info))
|
{
"content_hash": "e93109ba8b4f4af859a88f931caae8bc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 69,
"avg_line_length": 27.2,
"alnum_prop": 0.6323529411764706,
"repo_name": "cantino/newspaper",
"id": "b452fa57e888f630721b5bc3b5e78f60aa2a2547",
"size": "296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newspaper/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "3336406"
},
{
"name": "Python",
"bytes": "7765307"
},
{
"name": "Shell",
"bytes": "6707"
}
],
"symlink_target": ""
}
|
from pathlib import Path
from typing import Iterator, Tuple


def all_tracefiles(input_dirs, tracepoint) -> Iterator[Tuple[str, Path]]:
    # Yield (router name, trace file path) pairs; this is a generator, which
    # the original "-> tuple" annotation obscured.
    for result_dir in input_dirs:
        for router in (result_dir / 'routers').iterdir():
            tracefile = router / 'trace' / tracepoint
            yield router.name, tracefile
def extract_messages(tracefile: Path) -> list:
    """Parse a trace file into (timestamp, message) tuples; a missing file yields []."""
    messages = []
    try:
        with tracefile.open() as f:
            for line in f:
                # The first whitespace-separated field is the timestamp;
                # the remainder is the message body.
                fields = line.split()
                time = fields[0]
                msg = ' '.join(fields[1:])
                messages.append((time, msg))
    except FileNotFoundError:
        pass
    return messages
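# Usage sketch (the result directories and tracepoint name are hypothetical):
if __name__ == '__main__':
    run_dirs = [Path('run-01'), Path('run-02')]
    for router_name, tracefile in all_tracefiles(run_dirs, 'tx.msg'):
        for time, msg in extract_messages(tracefile):
            print(router_name, time, msg)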
|
{
"content_hash": "b4799b050921df60bf6162c25ba448c3",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 53,
"avg_line_length": 28,
"alnum_prop": 0.5595238095238095,
"repo_name": "reisub-de/dmpr-simulator",
"id": "57783b1a4d1e6732b14239b8c6f2407974a85073",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dmprsim/analyze/_utils/extract_messages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1513"
},
{
"name": "Python",
"bytes": "74292"
}
],
"symlink_target": ""
}
|
from functools import wraps
from smartbotsol.singleton import Singleton
class Cache(object):
"""Cache interface"""
__metaclass__ = Singleton
STORE = None
def get(self, parameter):
raise NotImplementedError
def add(self, key, value):
raise NotImplementedError
def to_dict(self):
"""Serializer"""
raise NotImplementedError
def from_dict(self, fdict):
"""Deserializer"""
raise NotImplementedError
def _log(func):
import logging
log = logging.getLogger(__name__)
@wraps(func)
def wrap(*args, **kwargs):
result = func(*args, **kwargs)
log.debug('EXTRACT: {}'.format(result))
return result
return wrap
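# A minimal in-memory implementation sketch of the interface above (not part
# of the original module); it illustrates the contract concrete caches follow.
class DictCache(Cache):
    STORE = 'memory'

    def __init__(self):
        self._data = {}

    def get(self, parameter):
        return self._data.get(parameter)

    def add(self, key, value):
        self._data[key] = value

    def to_dict(self):
        return dict(self._data)

    def from_dict(self, fdict):
        self._data = dict(fdict)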
|
{
"content_hash": "0a40c2fe28a98f702f42a7cbbec6d012",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 47,
"avg_line_length": 22.060606060606062,
"alnum_prop": 0.6167582417582418,
"repo_name": "dqunbp/smartbotsol",
"id": "a864aa11dcd8443f75d91447c552c7ff86b5228f",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartbotsol/core/cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "28018"
}
],
"symlink_target": ""
}
|
def process_qq_history(path, skip_system=True, encoding="utf-8", strip=None, output_path=None):
"""
Process QQ chat history export text file to sentences.
:param path: Path to QQ history txt file.
:param skip_system: Skip system message if set.
:param encoding: Encoding of the txt file.
:param strip: Chars to be stripped out.
:param output_path: Path to save output.
:return: Processed result path.
"""
import re
# Generate result filename.
if not output_path:
import os
_, filename = os.path.split(path)
result_path = "sentences_in_" + filename
else:
result_path = output_path
# Open files.
with open(path, 'r') as the_file, open(result_path, 'w') as result_file:
# Skip first 7 lines. This will skip until the line before the first system message.
skip = 7
# 0 stands for the empty line before each message.
# 1 stands for the speaker information line.
# 2 stands for the actual message sent.
line_category = 0
# Iterate through the file.
for line in the_file:
# Skip lines.
if skip > 0:
skip -= 1
continue
content = line.decode(encoding=encoding)
if content == u"\r\n":
# Reset line category to 0.
line_category = 0
continue
else:
line_category += 1
# Skip system messages.
if line_category == 1:
if skip_system and "(10000)" in content:
skip += 1
continue
else:
# Strip unnecessary characters.
content = re.sub(r'\[.+\]', '', content).strip(strip)
# Write result if not empty.
if content:
result_file.write(content.encode(encoding=encoding) + "\n")
return result_path
def read_as_set(path, encoding="utf-8", skip=0, skip_prefixes=None, strip=None):
"""
Read a text file and form a set using each line in it.
:param path: Path to the file.
    :param encoding: Encoding of the text.
:param skip: Line count to skip.
:param skip_prefixes: Skip lines with this prefix.
:param strip: Chars to be stripped out.
:return: A set in which is the non-empty lines of the file.
"""
result_set = set()
skips = skip
with open(path, 'r') as the_file:
for line in the_file:
if skips > 0:
skips -= 1
continue
content = line.decode(encoding=encoding).strip(strip)
if not content:
continue
if skip_prefixes:
skip = False
for item in skip_prefixes:
if content.startswith(item.decode(encoding)):
skip = True
if skip:
continue
result_set.add(content)
return result_set
def cut_words_in(path, encoding="utf-8", skip_prefixes=None, strip=None, output_path=None, cleanup=False):
"""
Cut each line in the file into words and stores it in the same directory with a "cut_" prefix in file name.
:param path: Path to the file to cut.
    :param encoding: Encoding of the file.
:param skip_prefixes: Lines start with this prefix will be skipped.
:param strip: Chars to be stripped out.
:param output_path: Path to save output.
:param cleanup: Delete meaningless words, like "这个", if true.
:return: Path to the result file.
"""
if not output_path:
import os
_, filename = os.path.split(path)
result_path = "words_in_" + filename
else:
result_path = output_path
with open(result_path, 'w') as result_file:
for words in CutDocument(path,
cut=False,
skip_prefixes=skip_prefixes,
strip=strip,
cleanup=cleanup,
encoding=encoding):
result_line = " ".join(words)
result_file.write(result_line.encode(encoding=encoding) + "\n")
return result_path
def cut_words(line, cleanup=False):
"""
Cut a sentence in unicode.
:param line: Unicode sentence.
:param cleanup: Delete meaningless words, like "这个", if true.
:return: A list of words.
"""
import jieba
from unicodedata import category
# Delete punctuation words.
content = ''.join(ch for ch in line if category(ch)[0] != 'P')
if cleanup:
import jieba.posseg as pseg
words = []
# POS tagging.
terms = pseg.cut(content)
for term, tag in terms:
if (
tag.startswith(u"c") or tag.startswith(u"e") or
tag.startswith(u"r") or tag.startswith(u"p") or
tag.startswith(u"u") or tag.startswith(u"w") or
tag.startswith(u"y") or tag.startswith(u"v") or
tag.startswith(u"m") or tag.startswith(u"q") or
tag.startswith(u"d")
):
continue
words.append(term)
return words
else:
# Word segmentation.
terms = jieba.cut(content)
return map(unicode, terms)
class CutDocument(object):
"""
Iterate though document, generates a list of cut words per-line.
"""
def __init__(self, document, cut=True, encoding="utf-8",
skip_prefixes=None, strip=None, cleanup=False, min_length=1):
"""
Constructor.
        :param document: Path to a document that contains one sentence per line, or a list of sentences.
        :param cut: Whether the document is already cut into space-separated words.
:param encoding: Encoding of the document.
:param skip_prefixes: Lines start with this prefix will be skipped.
:param strip: Chars to be stripped out.
:param cleanup: Delete meaningless words, like "这个", if true.
"""
self.document = document
self.cut = cut
self.encoding = encoding
self.skip_prefixes = skip_prefixes
self.strip = strip
self.cleanup = cleanup
self.min_length = min_length
    def __iter__(self):
        if isinstance(self.document, list):
            # A plain list is not a context manager, so it is iterated
            # directly; only real file handles are closed afterwards.
            the_document = self.document
            should_close = False
        else:
            the_document = open(self.document, 'r')
            should_close = True
        try:
            for line in the_document:
                if self.cut:
                    yield line.decode(encoding=self.encoding).split(" ")
                else:
                    # Decode and strip.
                    content = line.decode(encoding=self.encoding).strip(self.strip)
                    # Skip empty lines and lines with a skip prefix.
                    if not content:
                        continue
                    if self.skip_prefixes:
                        skip = False
                        for item in self.skip_prefixes:
                            if content.startswith(item.decode(self.encoding)):
                                skip = True
                        if skip:
                            continue
                    if len(content) < self.min_length:
                        continue
                    yield cut_words(content, cleanup=self.cleanup)
        finally:
            if should_close:
                the_document.close()
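# Usage sketch (Python 2; the input path is hypothetical): turn a QQ export
# into one sentence per line, then iterate over the cleaned-up words of each.
if __name__ == '__main__':
    sentences_path = process_qq_history("qq_history.txt")
    for words in CutDocument(sentences_path, cut=False, cleanup=True):
        print " ".join(words).encode("utf-8")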
|
{
"content_hash": "0cdd2623573f541becf57dc2fecebf23",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 111,
"avg_line_length": 33.410714285714285,
"alnum_prop": 0.5376803848209514,
"repo_name": "dickrd/cla_tool",
"id": "a388dcea02e644660272923ca94e03c70454069d",
"size": "7511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cla/util/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "33337"
}
],
"symlink_target": ""
}
|
from airflow.contrib.hooks.aws_hook import AwsHook
class RedshiftHook(AwsHook):
"""
Interact with AWS Redshift, using the boto3 library
"""
def get_conn(self):
return self.get_client_type('redshift')
def cluster_status(self, cluster_identifier):
"""
Return status of a cluster
:param cluster_identifier: unique identifier of a cluster whose properties you are requesting
:type cluster_identifier: str
"""
# Use describe clusters
response = self.get_conn().describe_clusters(ClusterIdentifier=cluster_identifier)
# Possibly return error if cluster does not exist
return response['Clusters'][0]['ClusterStatus'] if response['Clusters'] else None
def delete_cluster(self, cluster_identifier, skip_final_cluster_snapshot=True, final_cluster_snapshot_identifier=''):
"""
Delete a cluster and optionally create a snapshot
:param cluster_identifier: unique identifier of a cluster whose properties you are requesting
:type cluster_identifier: str
:param skip_final_cluster_snapshot: determines if a final cluster snapshot is made before shut-down
:type skip_final_cluster_snapshot: bool
:param final_cluster_snapshot_identifier: name of final cluster snapshot
:type final_cluster_snapshot_identifier: str
"""
        response = self.get_conn().delete_cluster(
            ClusterIdentifier=cluster_identifier,
            SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
            FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier
        )
return response['Cluster'] if response['Cluster'] else None
def describe_cluster_snapshots(self, cluster_identifier):
"""
Gets a list of snapshots for a cluster
:param cluster_identifier: unique identifier of a cluster whose properties you are requesting
:type cluster_identifier: str
"""
        response = self.get_conn().describe_cluster_snapshots(
            ClusterIdentifier=cluster_identifier
        )
if 'Snapshots' not in response:
return None
snapshots = response['Snapshots']
snapshots = filter(lambda x: x['Status'], snapshots)
snapshots.sort(key=lambda x: x['SnapshotCreateTime'], reverse=True)
return snapshots
def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier):
"""
        Restores a cluster from its snapshot
:param cluster_identifier: unique identifier of a cluster whose properties you are requesting
:type cluster_identifier: str
:param snapshot_identifier: unique identifier for a snapshot of a cluster
:type snapshot_identifier: str
"""
        response = self.get_conn().restore_from_cluster_snapshot(
            ClusterIdentifier=cluster_identifier,
            SnapshotIdentifier=snapshot_identifier
        )
return response['Cluster'] if response['Cluster'] else None
def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier):
"""
Creates a snapshot of a cluster
:param snapshot_identifier: unique identifier for a snapshot of a cluster
:type snapshot_identifier: str
:param cluster_identifier: unique identifier of a cluster whose properties you are requesting
:type cluster_identifier: str
"""
response = self.get_conn().create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
)
return response['Snapshot'] if response['Snapshot'] else None
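# Illustrative usage sketch (assumes valid AWS credentials behind the
# 'aws_default' Airflow connection and an existing cluster; the names are
# made up):
#
#     hook = RedshiftHook(aws_conn_id='aws_default')
#     if hook.cluster_status('my-cluster') == 'available':
#         hook.create_cluster_snapshot('my-snapshot', 'my-cluster')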
|
{
"content_hash": "6ff1c71b4310099c92028447d222fe47",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 121,
"avg_line_length": 43.44186046511628,
"alnum_prop": 0.673982869379015,
"repo_name": "jfantom/incubator-airflow",
"id": "071caf2610c01199589fca851d3a03cc18ae5c6a",
"size": "4303",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/redshift_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57054"
},
{
"name": "HTML",
"bytes": "152247"
},
{
"name": "JavaScript",
"bytes": "1364571"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2665909"
},
{
"name": "Shell",
"bytes": "28054"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
class Constraint(object):
def __call__(self, p):
return p
class MaxNorm(Constraint):
def __init__(self, m=2):
self.m = m
def __call__(self, p):
norms = T.sqrt(T.sum(T.sqr(p), axis=0))
desired = T.clip(norms, 0, self.m)
p = p * (desired / (1e-7 + norms))
return p
class NonNeg(Constraint):
def __call__(self, p):
p *= T.ge(p, 0)
return p
class UnitNorm(Constraint):
def __call__(self, p):
return p / T.sqrt(T.sum(p**2, axis=-1, keepdims=True))
identity = Constraint
maxnorm = MaxNorm
nonneg = NonNeg
unitnorm = UnitNorm
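# Illustrative usage sketch: apply a constraint to a shared weight matrix,
# as a layer would after a parameter update (the shape is made up).
#
#     W = theano.shared(np.random.randn(100, 10).astype('float32'))
#     W_clipped = MaxNorm(m=2)(W)  # symbolic tensor, column norms <= 2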
|
{
"content_hash": "84d08b21a28d86eb520abd8e34e0b9de",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 62,
"avg_line_length": 22.34375,
"alnum_prop": 0.5846153846153846,
"repo_name": "kfoss/keras",
"id": "a1164126ff79060cacda13c5a21e4f66ef5d09d9",
"size": "715",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keras/constraints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "250877"
}
],
"symlink_target": ""
}
|
import socket, select
# from SocketServer import (TCPServer as TCP,
# StreamRequestHandler as SRH)
from time import ctime
import os
HOST = ''
PORT = 21567
BUFSIZ = 1024
ADDR = (HOST, PORT)
tcpSerSock = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
tcpSerSock.bind(ADDR)
tcpSerSock.listen(5)
CLIENTS = [tcpSerSock]
data = ''
while True:
try:
# print 'waiting for connection...'
read_sockets,write_sockets,error_sockets = \
select.select(CLIENTS,[],[])
        for sock in read_sockets:
            if sock == tcpSerSock:
                tcpCliSock, addr = tcpSerSock.accept()
                print '...connected from:', addr
                CLIENTS.append(tcpCliSock)
            else:
                data = sock.recv(BUFSIZ)
                if not data:
                    # An empty read means the client closed the connection.
                    CLIENTS.remove(sock)
                    sock.close()
                    continue
                for each in CLIENTS:
                    if each not in (tcpSerSock, sock):
                        each.send(data)
# tcpClliSock.close()
except KeyboardInterrupt:
tcpSerSock.close()
break
# tcpServ = TCP(ADDR, MyRequestHandler)
# print 'waiting for people joining...'
# tcpServ.serve_forever()
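# A minimal client sketch for manual testing (illustrative; assumes the
# server above is running on the same machine):
#
#     from socket import socket, AF_INET, SOCK_STREAM
#     cli = socket(AF_INET, SOCK_STREAM)
#     cli.connect(('localhost', 21567))
#     cli.send('hello')      # relayed to every other connected client
#     print cli.recv(1024)   # messages broadcast by other clients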
|
{
"content_hash": "dc3ac2bca6ecd79aed19a0a1a580c0af",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 55,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.5662959794696322,
"repo_name": "WillSkywalker/core-python-programming",
"id": "2ee311ce05707a97ca25deb8f2f04301df9f1380",
"size": "1252",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "unit_16/chatroom_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "104665"
},
{
"name": "Python",
"bytes": "40522"
}
],
"symlink_target": ""
}
|
"""
AncestryDNA genotyping data extraction.
Copyright (C) 2016 PersonalGenomes.org
This software is shared under the "MIT License" license (aka "Expat License"),
see LICENSE.TXT for full license text.
"""
import bz2
from cStringIO import StringIO
from datetime import date, datetime
import logging
import os
import re
import shutil
import urlparse
import arrow
import bcrypt
from base_source import BaseSource
from data_retrieval.sort_vcf import sort_vcf
logger = logging.getLogger(__name__)
REF_ANCESTRYDNA_FILE = os.path.join(
os.path.dirname(__file__), 'reference_b37.txt')
# Was used to generate reference genotypes in the previous file.
REFERENCE_GENOME_URL = ('http://hgdownload-test.cse.ucsc.edu/' +
'goldenPath/hg19/bigZips/hg19.2bit')
VCF_FIELDS = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER',
'INFO', 'FORMAT', 'ANCESTRYDNA_DATA']
HEADER_V1 = [
"#Below is a text version of your DNA file from Ancestry.com DNA, LLC. THIS \r\n",
"#INFORMATION IS FOR YOUR PERSONAL USE AND IS INTENDED FOR GENEALOGICAL RESEARCH \r\n",
"#ONLY. IT IS NOT INTENDED FOR MEDICAL OR HEALTH PURPOSES. THE EXPORTED DATA IS \r\n",
"#SUBJECT TO THE AncestryDNA TERMS AND CONDITIONS, BUT PLEASE BE AWARE THAT THE \r\n",
"#DOWNLOADED DATA WILL NO LONGER BE PROTECTED BY OUR SECURITY MEASURES.\r\n"
"#\r\n",
"#Genetic data is provided below as five TAB delimited columns. Each line \r\n",
"#corresponds to a SNP. Column one provides the SNP identifier (rsID where \r\n",
"#possible). Columns two and three contain the chromosome and basepair position \r\n",
"#of the SNP using human reference build 37.1 coordinates. Columns four and five \r\n",
"#contain the two alleles observed at this SNP (genotype). The genotype is reported \r\n",
"#on the forward (+) strand with respect to the human reference.\r\n",
]
HEADER_V2 = [
"#Below is a text version of your DNA file from Ancestry.com DNA, LLC. THIS \r\n",
"#INFORMATION IS FOR YOUR PERSONAL USE AND IS INTENDED FOR GENEALOGICAL RESEARCH \r\n",
"#ONLY. IT IS NOT INTENDED FOR MEDICAL OR HEALTH PURPOSES. THE EXPORTED DATA IS \r\n",
"#SUBJECT TO THE AncestryDNA TERMS AND CONDITIONS, BUT PLEASE BE AWARE THAT THE \r\n",
"#DOWNLOADED DATA WILL NO LONGER BE PROTECTED BY OUR SECURITY MEASURES.\r\n",
"#WHEN YOU DOWNLOAD YOUR RAW DNA DATA, YOU ASSUME ALL RISK OF STORING, \r\n",
"#SECURING AND PROTECTING YOUR DATA. FOR MORE INFORMATION, SEE ANCESTRYDNA FAQS. \r\n",
"#\r\n",
"#Genetic data is provided below as five TAB delimited columns. Each line \r\n",
"#corresponds to a SNP. Column one provides the SNP identifier (rsID where \r\n",
"#possible). Columns two and three contain the chromosome and basepair position \r\n",
"#of the SNP using human reference build 37.1 coordinates. Columns four and five \r\n",
"#contain the two alleles observed at this SNP (genotype). The genotype is reported \r\n",
"#on the forward (+) strand with respect to the human reference.\r\n",
]
HEADER_V3 = [
"#Below is a text version of your DNA file from Ancestry.com DNA, LLC. THIS \r\n",
"#INFORMATION IS FOR YOUR PERSONAL USE AND IS INTENDED FOR GENEALOGICAL RESEARCH \r\n",
"#ONLY. IT IS NOT INTENDED FOR MEDICAL, DIAGNOSTIC, OR HEALTH PURPOSES. THE EXPORTED DATA IS \r\n",
"#SUBJECT TO THE AncestryDNA TERMS AND CONDITIONS, BUT PLEASE BE AWARE THAT THE \r\n",
"#DOWNLOADED DATA WILL NO LONGER BE PROTECTED BY OUR SECURITY MEASURES.\r\n",
"#WHEN YOU DOWNLOAD YOUR RAW DNA DATA, YOU ASSUME ALL RISK OF STORING, \r\n",
"#SECURING AND PROTECTING YOUR DATA. FOR MORE INFORMATION, SEE ANCESTRYDNA FAQS. \r\n",
"#\r\n",
"#Genetic data is provided below as five TAB delimited columns. Each line \r\n",
"#corresponds to a SNP. Column one provides the SNP identifier (rsID where \r\n",
"#possible). Columns two and three contain the chromosome and basepair position \r\n",
"#of the SNP using human reference build 37.1 coordinates. Columns four and five \r\n",
"#contain the two alleles observed at this SNP (genotype). The genotype is reported \r\n",
"#on the forward (+) strand with respect to the human reference.\r\n",
]
# The only non-commented-out header line. We want to ignore it.
EXPECTED_COLUMNS_HEADER = 'rsid\tchromosome\tposition\tallele1\tallele2\r\n'
CHROM_MAP = {
'1': '1',
'2': '2',
'3': '3',
'4': '4',
'5': '5',
'6': '6',
'7': '7',
'8': '8',
'9': '9',
'10': '10',
'11': '11',
'12': '12',
'13': '13',
'14': '14',
'15': '15',
'16': '16',
'17': '17',
'18': '18',
'19': '19',
'20': '20',
'21': '21',
'22': '22',
'23': 'X',
'24': 'Y',
'25': 'X',
}
def vcf_header(source=None, reference=None, format_info=None):
"""Generate a VCF header."""
header = []
today = date.today()
header.append('##fileformat=VCFv4.1')
header.append('##fileDate=%s%s%s' % (str(today.year),
str(today.month).zfill(2),
str(today.day).zfill(2)))
if source:
header.append('##source=' + source)
if reference:
header.append('##reference=%s' % reference)
    for item in format_info or []:
        header.append('##FORMAT=' + item)
header.append('#' + '\t'.join(VCF_FIELDS))
return header
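# Illustrative call (argument values are made up):
#
#     vcf_header(source='my_source', reference='http://example.org/ref.2bit',
#                format_info=['<ID=GT,Number=1,Type=String,Description="Genotype">'])
#
# returns the ##fileformat/##fileDate/##source/##reference/##FORMAT lines
# followed by the '#CHROM...ANCESTRYDNA_DATA' column header.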
def vcf_from_raw_ancestrydna(raw_ancestrydna, genome_sex):
output = StringIO()
reference = dict()
with open(REF_ANCESTRYDNA_FILE) as f:
for line in f:
data = line.rstrip().split('\t')
if data[0] not in reference:
reference[data[0]] = dict()
reference[data[0]][data[1]] = data[2]
header = vcf_header(
source='open_humans_data_processing.ancestry_dna',
reference=REFERENCE_GENOME_URL,
format_info=['<ID=GT,Number=1,Type=String,Description="Genotype">']
)
for line in header:
output.write(line + '\n')
for line in raw_ancestrydna:
# Skip header
if line.startswith('#'):
continue
if line == EXPECTED_COLUMNS_HEADER:
continue
data = line.rstrip().split('\t')
        # Skip uncalled positions and genotypes without explicit base calls.
if not re.match(r'^[ACGT]$', data[3]):
continue
if not re.match(r'^[ACGT]$', data[4]):
continue
vcf_data = {x: '.' for x in VCF_FIELDS}
# Chromosome. Determine correct reporting according to genome_sex.
try:
vcf_data['REF'] = reference[data[1]][data[2]]
except KeyError:
continue
vcf_data['CHROM'] = CHROM_MAP[data[1]]
if data[1] == '24' and genome_sex == 'Female':
continue
if data[1] in ['23', '24'] and genome_sex == 'Male':
alleles = data[3]
else:
alleles = data[3] + data[4]
# Position, dbSNP ID, reference. Skip if we don't have ref.
vcf_data['POS'] = data[2]
if data[0].startswith('rs'):
vcf_data['ID'] = data[0]
# Figure out the alternate alleles.
alt_alleles = []
for alle in alleles:
if alle != vcf_data['REF'] and alle not in alt_alleles:
alt_alleles.append(alle)
if alt_alleles:
vcf_data['ALT'] = ','.join(alt_alleles)
else:
vcf_data['ALT'] = '.'
vcf_data['INFO'] = 'END=' + vcf_data['POS']
# Get allele-indexed genotype.
vcf_data['FORMAT'] = 'GT'
all_alleles = [vcf_data['REF']] + alt_alleles
genotype_indexed = '/'.join([str(all_alleles.index(x))
for x in alleles])
vcf_data['ANCESTRYDNA_DATA'] = genotype_indexed
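        # Worked example of the indexing above: with REF='A' and
        # alleles='AG', alt_alleles is ['G'], all_alleles is ['A', 'G'],
        # and the genotype becomes '0/1'; alleles='GG' would give '1/1'.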
output_line = '\t'.join([vcf_data[x] for x in VCF_FIELDS])
output.write(output_line + '\n')
return output
class AncestryDNASource(BaseSource):
"""
Create clean file in AncestryDNA format from downloaded version
Obsessively careful processing to minimize risk that AncestryDNA file
    format changes inadvertently result in unexpected leaks, e.g. names.
"""
source = 'ancestry_dna'
def check_header_lines(self, input_lines, header_lines, header_name):
        if len(input_lines) != len(header_lines):
if header_name:
logger.debug("Header line count != {}".format(header_name))
return False
matched_lines = [
header_lines[i] == input_lines[i] for i in range(len(header_lines))]
if header_name:
logger.debug("Header line matching for {}: {}".format(
header_name, matched_lines))
return all(matched_lines)
def clean_raw_ancestrydna(self):
"""
Create clean file in AncestryDNA format from downloaded version
Obsessively careful processing that ensures AncestryDNA file format changes
        won't inadvertently result in unexpected information, e.g. names.
"""
inputfile = self.open_archive()
output = StringIO()
header_l1 = inputfile.next()
expected_header_l1 = '#AncestryDNA raw data download\r\n'
if header_l1 == expected_header_l1:
output.write(header_l1)
dateline = inputfile.next()
re_datetime_string = (r'([0-1][0-9]/[0-3][0-9]/20[1-9][0-9] ' +
r'[0-9][0-9]:[0-9][0-9]:[0-9][0-9]) MDT')
if re.search(re_datetime_string, dateline):
datetime_string = re.search(re_datetime_string, dateline).groups()[0]
datetime_ancestrydna = datetime.strptime(datetime_string,
'%m/%d/%Y %H:%M:%S')
output.write(
'#This file was generated by AncestryDNA at: {}\r\n'.format(
datetime_ancestrydna.strftime('%a %b %d %H:%M:%S %Y MDT')))
re_array_version = (
r'#Data was collected using AncestryDNA array version: V\d\.\d\r\n')
header_array_version = inputfile.next()
if re.match(re_array_version, header_array_version):
output.write(header_array_version)
re_converter_version = (
r'#Data is formatted using AncestryDNA converter version: V\d\.\d\r\n')
header_converter_version = inputfile.next()
if re.match(re_converter_version, header_converter_version):
output.write(header_converter_version)
next_line = inputfile.next()
header_p_lines = []
while next_line.startswith('#'):
header_p_lines.append(next_line)
next_line = inputfile.next()
if self.check_header_lines(header_p_lines, HEADER_V1, 'HEADER_V1'):
for line in HEADER_V1:
output.write(line)
elif self.check_header_lines(header_p_lines, HEADER_V2, 'HEADER_V2'):
for line in HEADER_V2:
output.write(line)
elif self.check_header_lines(header_p_lines, HEADER_V3, 'HEADER_V3'):
for line in HEADER_V3:
output.write(line)
else:
self.sentry_log("AncestryDNA header didn't match expected formats")
data_header = next_line
if data_header == EXPECTED_COLUMNS_HEADER:
output.write(EXPECTED_COLUMNS_HEADER)
next_line = inputfile.next()
bad_format = False
# AncestryDNA always reports two alleles for all X and Y positions.
        # For XY individuals, hemizygous positions are redundantly reported.
# For XX individuals this means Y positions are "0".
# Note the above two statements are not ALWAYS true! The raw data
        # occasionally reports 'heterozygous' calls for X and Y in XY individuals,
# and Y calls in XX individuals. So our test is forgiving of these.
genome_sex = 'Female'
called_Y = 0
reported_Y = 0
LINE_RE = re.compile(
r'(rs|VGXS)[0-9]+\t[1-9][0-9]?\t[0-9]+\t[ACGTDI0]\t[ACGTDI0]')
REPORTED_Y = re.compile(r'(rs|VGXS)[0-9]+\t24\t[0-9]+\t[ACGTDI0]\t[ACGTDI0]')
CALLED_Y = re.compile(r'(rs|VGXS)[0-9]+\t24\t[0-9]+\t[ACGTDI]\t[ACGTDI]')
while next_line:
if LINE_RE.match(next_line):
if REPORTED_Y.match(next_line):
reported_Y += 1
if CALLED_Y.match(next_line):
called_Y += 1
output.write(next_line)
else:
# Only report this type of format issue once.
if not bad_format:
bad_format = True
self.sentry_log('AncestryDNA body did not conform to expected format.')
logger.warn('Bad format: "%s"', next_line)
try:
next_line = inputfile.next()
except StopIteration:
next_line = None
        if reported_Y and called_Y * 1.0 / reported_Y > 0.5:
genome_sex = 'Male'
return output, genome_sex
def should_update(self, files):
"""
Reprocess only if source file has changed.
We store a hash of the original filepath as metadata and check this.
Update is deemed unnecessary if (a) processed files exist, (b) they
have recorded orig_file_hash, (c) we verify these all match a hash of
the source file path for this task (from self.file_url).
"""
if not files:
logger.info(
'Update needed for user "{}", source "{}": no current '
'files available.'.format(self.oh_username, self.source))
return True
for file_data in files:
try:
orig_file_hash = file_data['metadata']['orig_file_hash']
except KeyError:
logger.info(
'Update needed for user "{}", source "{}": no hash stored '
'for original file.'.format(self.oh_username, self.source))
return True
if not self.same_orig_file(orig_file_hash):
logger.info(
'Update needed for user "{}", source "{}": hash mismatch '
'for original file.'.format(self.oh_username, self.source))
return True
logger.info('Update unnecessary for user "{}", source "{}".'.format(
self.oh_username, self.source))
return False
def same_orig_file(self, orig_file_hash):
"""
Check hashed self.file_url path against stored orig_file_hash.
        The path in an original source file URL is expected to be unique,
        as we store it with a UUID.
"""
if not self.file_url:
return False
url_path = str(urlparse.urlparse(self.file_url).path)
new_hash = bcrypt.hashpw(url_path, str(orig_file_hash))
return orig_file_hash == new_hash
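    # Illustrative round trip of the scheme above (the path is made up):
    #
    #     stored = bcrypt.hashpw('/uploads/some-uuid/data.txt', bcrypt.gensalt())
    #     bcrypt.hashpw('/uploads/some-uuid/data.txt', stored) == stored  # True
    #
    # bcrypt reuses the salt embedded in the stored hash, so equality holds
    # exactly when the hashed paths match.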
def create_files(self, input_file=None, file_url=None):
"""
Create Open Humans Dataset from uploaded AncestryDNA genotyping data
Optional arguments:
input_file: path to a local copy of the uploaded file
file_url: path to an online copy of the input file
"""
if not self.input_file:
raise Exception('Run with either input_file or file_url')
new_hash = ''
if self.file_url:
orig_path = urlparse.urlparse(self.file_url).path
new_hash = bcrypt.hashpw(str(orig_path), bcrypt.gensalt())
filename_base = 'AncestryDNA-genotyping'
raw_ancestrydna, genome_sex = self.clean_raw_ancestrydna()
raw_ancestrydna.seek(0)
vcf_ancestrydna_unsorted = vcf_from_raw_ancestrydna(
raw_ancestrydna, genome_sex)
# Save raw AncestryDNA genotyping to temp file.
raw_filename = filename_base + '.txt'
with open(self.temp_join(raw_filename), 'w') as raw_file:
raw_ancestrydna.seek(0)
shutil.copyfileobj(raw_ancestrydna, raw_file)
self.temp_files.append({
'temp_filename': raw_filename,
'metadata': {
'description':
'AncestryDNA full genotyping data, original format',
'tags': ['AncestryDNA', 'genotyping'],
'orig_file_hash': new_hash,
'creation_date': arrow.get().format(),
},
})
# Save VCF AncestryDNA genotyping to temp file.
vcf_ancestrydna_unsorted.seek(0)
vcf_ancestrydna_sorted = sort_vcf(vcf_ancestrydna_unsorted)
vcf_filename = filename_base + '.vcf.bz2'
with bz2.BZ2File(self.temp_join(vcf_filename), 'w') as vcf_file:
vcf_ancestrydna_sorted.seek(0)
shutil.copyfileobj(vcf_ancestrydna_sorted, vcf_file)
self.temp_files.append({
'temp_filename': vcf_filename,
'metadata': {
'description': 'AncestryDNA full genotyping data, VCF format',
'tags': ['AncestryDNA', 'genotyping', 'vcf'],
'orig_file_hash': new_hash,
'creation_date': arrow.get().format(),
},
})
|
{
"content_hash": "c5a8f3e5c57d1fe343379cc521821236",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 105,
"avg_line_length": 39.31221719457014,
"alnum_prop": 0.5843116942909761,
"repo_name": "OpenHumans/open-humans-data-processing",
"id": "90b01820bdc7688ce0cfc89e6c35ff28eb359b07",
"size": "17376",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sources/ancestry_dna/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "130408"
}
],
"symlink_target": ""
}
|
"""
Tests for attachments Api.
"""
import ddt
import mock
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import attachments as v3_attachments
from cinder import context
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
@ddt.ddt
class AttachmentsAPITestCase(test.TestCase):
"""Test Case for attachment API."""
def setUp(self):
super(AttachmentsAPITestCase, self).setUp()
self.controller = v3_attachments.AttachmentsController()
self.volume_api = volume_api.API()
self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
auth_token=True,
is_admin=True)
self.volume1 = self._create_volume(display_name='fake_volume_1',
project_id=fake.PROJECT_ID)
self.volume2 = self._create_volume(display_name='fake_volume_2',
project_id=fake.PROJECT2_ID)
self.attachment1 = self._create_attachment(
volume_uuid=self.volume1.id, instance_uuid=fake.UUID1)
self.attachment2 = self._create_attachment(
volume_uuid=self.volume1.id, instance_uuid=fake.UUID1)
self.attachment3 = self._create_attachment(
volume_uuid=self.volume1.id, instance_uuid=fake.UUID2)
self.attachment4 = self._create_attachment(
volume_uuid=self.volume2.id, instance_uuid=fake.UUID2)
self.addCleanup(self._cleanup)
def _cleanup(self):
self.attachment1.destroy()
self.attachment2.destroy()
self.attachment3.destroy()
self.attachment4.destroy()
self.volume1.destroy()
self.volume2.destroy()
def _create_volume(self, ctxt=None, display_name=None, project_id=None):
"""Create a volume object."""
ctxt = ctxt or self.ctxt
volume = objects.Volume(ctxt)
volume.display_name = display_name
volume.project_id = project_id
volume.status = 'available'
volume.attach_status = 'attached'
volume.create()
return volume
def test_create_attachment(self):
req = fakes.HTTPRequest.blank('/v3/%s/attachments' %
fake.PROJECT_ID,
version=mv.NEW_ATTACH)
body = {
"attachment":
{
"connector": None,
"instance_uuid": fake.UUID1,
"volume_uuid": self.volume1.id
},
}
attachment = self.controller.create(req, body)
self.assertEqual(self.volume1.id,
attachment['attachment']['volume_id'])
self.assertEqual(fake.UUID1,
attachment['attachment']['instance'])
@mock.patch.object(volume_rpcapi.VolumeAPI, 'attachment_update')
def test_update_attachment(self, mock_update):
fake_connector = {'fake_key': 'fake_value'}
mock_update.return_value = fake_connector
req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' %
(fake.PROJECT_ID, self.attachment1.id),
version=mv.NEW_ATTACH,
use_admin_context=True)
body = {
"attachment":
{
"connector": {'fake_key': 'fake_value'},
},
}
attachment = self.controller.update(req, self.attachment1.id, body)
self.assertEqual(fake_connector,
attachment['attachment']['connection_info'])
self.assertEqual(fake.UUID1, attachment['attachment']['instance'])
@mock.patch.object(objects.VolumeAttachment, 'get_by_id')
def test_attachment_operations_not_authorized(self, mock_get):
mock_get.return_value = {'project_id': fake.PROJECT2_ID}
req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' %
(fake.PROJECT_ID, self.attachment1.id),
version=mv.NEW_ATTACH,
use_admin_context=False)
body = {
"attachment":
{
"connector": {'fake_key': 'fake_value'},
},
}
self.assertRaises(exception.NotAuthorized,
self.controller.update, req,
self.attachment1.id, body)
self.assertRaises(exception.NotAuthorized,
self.controller.delete, req,
self.attachment1.id)
@ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER),
mv.RESOURCE_FILTER, mv.LIKE_FILTER)
@mock.patch('cinder.api.common.reject_invalid_filters')
def test_attachment_list_with_general_filter(self, version, mock_update):
url = '/v3/%s/attachments' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url,
version=version,
use_admin_context=False)
self.controller.index(req)
if version != mv.get_prior_version(mv.RESOURCE_FILTER):
            support_like = version == mv.LIKE_FILTER
mock_update.assert_called_once_with(req.environ['cinder.context'],
mock.ANY, 'attachment',
support_like)
@ddt.data('reserved', 'attached')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'attachment_delete')
def test_delete_attachment(self, status, mock_delete):
volume1 = self._create_volume(display_name='fake_volume_1',
project_id=fake.PROJECT_ID)
attachment = self._create_attachment(
volume_uuid=volume1.id, instance_uuid=fake.UUID1,
attach_status=status)
req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' %
(fake.PROJECT_ID, attachment.id),
version=mv.NEW_ATTACH,
use_admin_context=True)
self.controller.delete(req, attachment.id)
volume2 = objects.Volume.get_by_id(self.ctxt, volume1.id)
if status == 'reserved':
self.assertEqual('detached', volume2.attach_status)
self.assertRaises(
exception.VolumeAttachmentNotFound,
objects.VolumeAttachment.get_by_id, self.ctxt, attachment.id)
else:
self.assertEqual('attached', volume2.attach_status)
mock_delete.assert_called_once_with(req.environ['cinder.context'],
attachment.id, mock.ANY)
def _create_attachment(self, ctxt=None, volume_uuid=None,
instance_uuid=None, mountpoint=None,
attach_time=None, detach_time=None,
attach_status=None, attach_mode=None):
"""Create an attachment object."""
ctxt = ctxt or self.ctxt
attachment = objects.VolumeAttachment(ctxt)
attachment.volume_id = volume_uuid
attachment.instance_uuid = instance_uuid
attachment.mountpoint = mountpoint
attachment.attach_time = attach_time
attachment.detach_time = detach_time
attachment.attach_status = attach_status or 'reserved'
attachment.attach_mode = attach_mode
attachment.create()
return attachment
@ddt.data("instance_uuid", "volume_uuid")
def test_create_attachment_without_resource_uuid(self, resource_uuid):
req = fakes.HTTPRequest.blank('/v3/%s/attachments' %
fake.PROJECT_ID,
version=mv.NEW_ATTACH)
body = {
"attachment":
{
"connector": None
}
}
body["attachment"][resource_uuid] = "test_id"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
@ddt.data(False, True)
def test_list_attachments(self, is_detail):
url = '/v3/%s/attachments' % fake.PROJECT_ID
list_func = self.controller.index
if is_detail:
            url = '/v3/%s/attachments/detail' % fake.PROJECT_ID
list_func = self.controller.detail
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = list_func(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['attachments']))
self.assertEqual(self.attachment3.id,
res_dict['attachments'][0]['id'])
def test_list_attachments_with_limit(self):
url = '/v3/%s/attachments?limit=1' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(1, len(res_dict['attachments']))
def test_list_attachments_with_marker(self):
url = '/v3/%s/attachments?marker=%s' % (fake.PROJECT_ID,
self.attachment3.id)
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(2, len(res_dict['attachments']))
self.assertEqual(self.attachment2.id,
res_dict['attachments'][0]['id'])
@ddt.data("desc", "asc")
def test_list_attachments_with_sort(self, sort_dir):
url = '/v3/%s/attachments?sort_key=id&sort_dir=%s' % (fake.PROJECT_ID,
sort_dir)
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['attachments']))
order_ids = sorted([self.attachment1.id,
self.attachment2.id,
self.attachment3.id])
expect_result = order_ids[2] if sort_dir == "desc" else order_ids[0]
self.assertEqual(expect_result,
res_dict['attachments'][0]['id'])
@ddt.data({'admin': True, 'request_url': '?all_tenants=1', 'count': 4},
{'admin': False, 'request_url': '?all_tenants=1', 'count': 3},
{'admin': True, 'request_url':
'?all_tenants=1&project_id=%s' % fake.PROJECT2_ID,
'count': 1},
{'admin': False, 'request_url': '', 'count': 3},
{'admin': False, 'request_url': '?instance_id=%s' % fake.UUID1,
'count': 2},
{'admin': False, 'request_url': '?instance_id=%s' % fake.UUID2,
'count': 1})
@ddt.unpack
def test_list_attachment_with_tenants(self, admin, request_url, count):
url = '/v3/%s/attachments%s' % (fake.PROJECT_ID, request_url)
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=admin)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(count, len(res_dict['attachments']))
|
{
"content_hash": "0fc11e2c975c1d15b27102e8d4e1bd96",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 78,
"avg_line_length": 43.53113553113553,
"alnum_prop": 0.547542914843487,
"repo_name": "eharney/cinder",
"id": "f780f9d5b8f3d8faf8ba55f2afb9171e6dba5470",
"size": "12523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/api/v3/test_attachments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "19839107"
},
{
"name": "Shell",
"bytes": "6453"
}
],
"symlink_target": ""
}
|
"""Unit tests for core.domain.rte_component_registry."""
import inspect
import os
import pkgutil
import re
import string
import struct
from core.domain import obj_services
from core.domain import rte_component_registry
from core.tests import test_utils
import feconf
import schema_utils
import schema_utils_test
import utils
# File names ending in any of these suffixes will be ignored when checking for
# RTE component validity.
IGNORED_FILE_SUFFIXES = ['.pyc', '.DS_Store']
RTE_THUMBNAIL_HEIGHT_PX = 16
RTE_THUMBNAIL_WIDTH_PX = 16
_COMPONENT_CONFIG_SCHEMA = [
('backend_id', basestring), ('category', basestring),
('description', basestring), ('frontend_id', basestring),
('tooltip', basestring), ('icon_data_url', basestring),
('requires_fs', bool), ('is_block_element', bool),
('customization_arg_specs', list)]
class RteComponentUnitTests(test_utils.GenericTestBase):
"""Tests that all the default RTE components are valid."""
def _is_camel_cased(self, name):
"""Check whether a name is in CamelCase."""
return name and (name[0] in string.ascii_uppercase)
def _is_alphanumeric_string(self, input_string):
"""Check whether a string is alphanumeric."""
return bool(re.compile('^[a-zA-Z0-9_]+$').match(input_string))
def _validate_customization_arg_specs(self, customization_arg_specs):
"""Validates the given customization arg specs."""
for ca_spec in customization_arg_specs:
self.assertEqual(set(ca_spec.keys()), set([
'name', 'description', 'schema', 'default_value']))
self.assertTrue(isinstance(ca_spec['name'], basestring))
self.assertTrue(self._is_alphanumeric_string(ca_spec['name']))
self.assertTrue(isinstance(ca_spec['description'], basestring))
self.assertGreater(len(ca_spec['description']), 0)
# The default value might not pass validation checks (e.g. the
# Image component has a required field whose default value is
# empty). Thus, when checking the default value schema, we don't
# apply the custom validators.
schema_utils_test.validate_schema(ca_spec['schema'])
self.assertEqual(
ca_spec['default_value'],
schema_utils.normalize_against_schema(
ca_spec['default_value'], ca_spec['schema'],
apply_custom_validators=False))
if ca_spec['schema']['type'] == 'custom':
# Default value of SanitizedUrl obj_type may be empty. The empty
# string is not considered valid for this object, so we don't
# attempt to normalize it.
if ca_spec['schema']['obj_type'] == 'SanitizedUrl':
self.assertEqual(ca_spec['default_value'], '')
else:
obj_class = obj_services.Registry.get_object_class_by_type(
ca_spec['schema']['obj_type'])
self.assertEqual(
ca_spec['default_value'],
obj_class.normalize(ca_spec['default_value']))
def _listdir_omit_ignored(self, directory):
"""List all files and directories within 'directory', omitting the ones
whose name ends in one of the IGNORED_FILE_SUFFIXES.
"""
names = os.listdir(directory)
for suffix in IGNORED_FILE_SUFFIXES:
names = [name for name in names if not name.endswith(suffix)]
return names
def test_image_thumbnails_for_rte_components(self):
"""Test the thumbnails for the RTE component icons."""
rte_components = (
rte_component_registry.Registry.get_all_rte_components())
for (component_name, component_specs) in rte_components.iteritems():
generated_image_filepath = os.path.join(
os.getcwd(), feconf.RTE_EXTENSIONS_DIR,
component_name, '%s.png' % component_name)
relative_icon_data_url = component_specs['icon_data_url'][1:]
defined_image_filepath = os.path.join(
os.getcwd(), feconf.EXTENSIONS_DIR_PREFIX,
'extensions', relative_icon_data_url)
self.assertEqual(generated_image_filepath, defined_image_filepath)
with open(generated_image_filepath, 'rb') as f:
img_data = f.read()
width, height = struct.unpack('>LL', img_data[16:24])
self.assertEqual(int(width), RTE_THUMBNAIL_WIDTH_PX)
self.assertEqual(int(height), RTE_THUMBNAIL_HEIGHT_PX)
def test_rte_components_are_valid(self):
"""Test that the default RTE components are valid."""
rte_components = (
rte_component_registry.Registry.get_all_rte_components())
for (component_id, component_specs) in rte_components.iteritems():
# Check that the component id is valid.
self.assertTrue(self._is_camel_cased(component_id))
# Check that the component directory exists.
component_dir = os.path.join(
feconf.RTE_EXTENSIONS_DIR, component_id)
self.assertTrue(os.path.isdir(component_dir))
            # In this directory there should be a /directives directory, an
            # icon .png file, a protractor.js file, and an optional
            # preview .png file.
            # The /directives directory should contain HTML and JS files;
            # there may be multiple of each.
dir_contents = self._listdir_omit_ignored(component_dir)
self.assertLessEqual(len(dir_contents), 4)
directives_dir = os.path.join(component_dir, 'directives')
png_file = os.path.join(component_dir, '%s.png' % component_id)
protractor_file = os.path.join(component_dir, 'protractor.js')
self.assertTrue(os.path.isdir(directives_dir))
self.assertTrue(os.path.isfile(png_file))
self.assertTrue(os.path.isfile(protractor_file))
main_ts_file = os.path.join(
directives_dir, 'OppiaNoninteractive%sDirective.ts'
% component_id)
main_html_file = os.path.join(
directives_dir, '%s_directive.html' % component_id.lower())
self.assertTrue(os.path.isfile(main_ts_file))
self.assertTrue(os.path.isfile(main_html_file))
ts_file_content = utils.get_file_contents(main_ts_file)
self.assertIn(
'oppiaNoninteractive%s' % component_id, ts_file_content)
self.assertNotIn('<script>', ts_file_content)
self.assertNotIn('</script>', ts_file_content)
# Check that the configuration file contains the correct
# top-level keys, and that these keys have the correct types.
for item, item_type in _COMPONENT_CONFIG_SCHEMA:
self.assertTrue(isinstance(
component_specs[item], item_type))
# The string attributes should be non-empty.
if item_type == basestring:
self.assertTrue(component_specs[item])
self._validate_customization_arg_specs(
component_specs['customization_arg_specs']) # pylint: disable=protected-access
def test_require_file_contains_all_imports(self):
"""Test that the rich_text_components.html file contains script-imports
for all directives of all RTE components.
"""
rtc_ts_filenames = []
for component_id in feconf.ALLOWED_RTE_EXTENSIONS:
component_dir = os.path.join(
feconf.RTE_EXTENSIONS_DIR, component_id)
directives_dir = os.path.join(component_dir, 'directives')
directive_filenames = os.listdir(directives_dir)
rtc_ts_filenames.extend(
filename for filename
in directive_filenames if filename.endswith('.ts'))
rtc_ts_file = os.path.join(
feconf.RTE_EXTENSIONS_DIR, 'richTextComponentsRequires.ts')
with open(rtc_ts_file, 'r') as f:
rtc_require_file_contents = f.read()
for rtc_ts_filename in rtc_ts_filenames:
self.assertIn(rtc_ts_filename, rtc_require_file_contents)
class RteComponentRegistryUnitTests(test_utils.GenericTestBase):
"""Tests the methods in RteComponentRegistry."""
def test_get_all_rte_components(self):
"""Test get_all_rte_components method."""
obtained_components = (
rte_component_registry.Registry.get_all_rte_components().keys())
actual_components = [name for name in os.listdir(
'./extensions/rich_text_components') if os.path.isdir(os.path.join(
'./extensions/rich_text_components', name))]
self.assertEqual(set(obtained_components), set(actual_components))
def test_get_tag_list_with_attrs(self):
"""Test get_tag_list_with_attrs method."""
obtained_tag_list_with_attrs = (
rte_component_registry.Registry.get_tag_list_with_attrs())
actual_tag_list_with_attrs = {}
component_specs = (
rte_component_registry.Registry.get_all_rte_components())
for component_spec in component_specs.values():
tag_name = 'oppia-noninteractive-%s' % component_spec['frontend_id']
attr_names = [
'%s-with-value' % attr['name'] for attr in component_spec[
'customization_arg_specs']]
actual_tag_list_with_attrs[tag_name] = attr_names
self.assertEqual(
set(obtained_tag_list_with_attrs.keys()),
set(actual_tag_list_with_attrs.keys()))
for key in obtained_tag_list_with_attrs:
self.assertEqual(
set(obtained_tag_list_with_attrs[key]),
set(actual_tag_list_with_attrs[key]))
def test_get_component_types_to_component_classes(self):
"""Test get_component_types_to_component_classes method."""
component_types_to_component_classes = rte_component_registry.Registry.get_component_types_to_component_classes() # pylint: disable=line-too-long
component_specs = (
rte_component_registry.Registry.get_all_rte_components())
obtained_component_tags = component_types_to_component_classes.keys()
actual_component_tags = [
'oppia-noninteractive-%s' % component_spec['frontend_id']
for component_spec in component_specs.values()]
self.assertEqual(
set(obtained_component_tags), set(actual_component_tags))
obtained_component_class_names = [
component_class.__name__
for component_class in component_types_to_component_classes.values()
]
actual_component_class_names = []
rte_path = [feconf.RTE_EXTENSIONS_DIR]
for loader, name, _ in pkgutil.iter_modules(path=rte_path):
if name == 'components':
module = loader.find_module(name).load_module(name)
break
for name, obj in inspect.getmembers(module):
if inspect.isclass(obj) and name != 'BaseRteComponent':
actual_component_class_names.append(name)
self.assertEqual(
set(obtained_component_class_names),
set(actual_component_class_names))
def test_get_component_tag_names(self):
"""Test get_component_tag_names method."""
component_specs = (
rte_component_registry.Registry.get_all_rte_components())
keys = ['is_block_element', 'is_complex']
expected_values = [True, False]
for key in keys:
for expected_value in expected_values:
actual_component_tag_names = [
'oppia-noninteractive-%s' % component_spec['frontend_id']
for component_spec in component_specs.values()
if component_spec[key] == expected_value]
obtained_component_tag_names = (
rte_component_registry.Registry.get_component_tag_names(
key, expected_value))
self.assertEqual(
set(actual_component_tag_names),
set(obtained_component_tag_names))
def test_get_inline_component_tag_names(self):
"""Test get_inline_component_tag_names method."""
component_specs = (
rte_component_registry.Registry.get_all_rte_components())
obtained_inline_component_tag_names = (
rte_component_registry.Registry.get_inline_component_tag_names())
actual_inline_component_tag_names = [
'oppia-noninteractive-%s' % component_spec['frontend_id']
for component_spec in component_specs.values()
if not component_spec['is_block_element']]
self.assertEqual(
set(actual_inline_component_tag_names),
set(obtained_inline_component_tag_names))
def test_get_block_component_tag_names(self):
"""Test get_block_component_tag_names method."""
component_specs = (
rte_component_registry.Registry.get_all_rte_components())
obtained_block_component_tag_names = (
rte_component_registry.Registry.get_block_component_tag_names())
actual_block_component_tag_names = [
'oppia-noninteractive-%s' % component_spec['frontend_id']
for component_spec in component_specs.values()
if component_spec['is_block_element']]
self.assertEqual(
set(actual_block_component_tag_names),
set(obtained_block_component_tag_names))
def test_get_simple_component_tag_names(self):
"""Test get_simple_component_tag_names method."""
component_specs = (
rte_component_registry.Registry.get_all_rte_components())
obtained_simple_component_tag_names = (
rte_component_registry.Registry.get_simple_component_tag_names())
actual_simple_component_tag_names = [
'oppia-noninteractive-%s' % component_spec['frontend_id']
for component_spec in component_specs.values()
if not component_spec['is_complex']]
self.assertEqual(
set(actual_simple_component_tag_names),
set(obtained_simple_component_tag_names))
def test_get_complex_component_tag_names(self):
"""Test get_complex_component_tag_names method."""
component_specs = (
rte_component_registry.Registry.get_all_rte_components())
obtained_complex_component_tag_names = (
rte_component_registry.Registry.get_complex_component_tag_names())
actual_complex_component_tag_names = [
'oppia-noninteractive-%s' % component_spec['frontend_id']
for component_spec in component_specs.values()
if component_spec['is_complex']]
self.assertEqual(
set(actual_complex_component_tag_names),
set(obtained_complex_component_tag_names))
|
{
"content_hash": "639d7fe5e08f1317273ad8ad55845b06",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 153,
"avg_line_length": 44.666666666666664,
"alnum_prop": 0.6134592524105138,
"repo_name": "souravbadami/oppia",
"id": "e2b54a9c7bc94e09fbf2560b353c90fcfebb363f",
"size": "15765",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/rte_component_registry_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90864"
},
{
"name": "HTML",
"bytes": "1044569"
},
{
"name": "JavaScript",
"bytes": "606331"
},
{
"name": "Python",
"bytes": "7870122"
},
{
"name": "Shell",
"bytes": "54930"
},
{
"name": "TypeScript",
"bytes": "4922933"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__author__ = "{{ cookiecutter.full_name }}"
__version__ = "{{ cookiecutter.version }}"
__license__ = "MIT"
__email__ = "{{ cookiecutter.email }}"
__uri__ = "https://{{ cookiecutter.project_name }}.readthedocs.org"
__description__ = "{{ cookiecutter.short_desc }}"
|
{
"content_hash": "83bd3041e92ef668197f2f517a8a3c66",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 67,
"avg_line_length": 36.77777777777778,
"alnum_prop": 0.6404833836858006,
"repo_name": "econchick/cookiecutter-roguelynn",
"id": "a6c7ba8d52c05fe70af3769bc9ff4d8c90573dfb",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7887"
}
],
"symlink_target": ""
}
|
import sys
import os
import time
from optparse import OptionParser
try:
import simplejson as json
except ImportError:
import json
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "oeqa")))
from oeqa.oetest import runTests
from oeqa.utils.sshcontrol import SSHControl
# this isn't pretty but we need a fake target object
# for running the tests externally as we don't care
# about deploy/start we only care about the connection methods (run, copy)
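# Judging from how main() reads it, the exported json roughly looks like
# (illustrative skeleton; values are made up, and any extra top-level keys
# simply become attributes of the test context):
#
#     {"target": {"ip": "192.168.7.2", "server_ip": "192.168.7.1"},
#      "d": {"TEST_LOG_DIR": "/tmp/log", "DEPLOY_DIR": "/tmp/deploy",
#            "PN": "core-image-minimal"}}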
class FakeTarget(object):
def __init__(self, d):
self.connection = None
self.ip = None
self.server_ip = None
self.datetime = time.strftime('%Y%m%d%H%M%S',time.gmtime())
self.testdir = d.getVar("TEST_LOG_DIR", True)
self.pn = d.getVar("PN", True)
def exportStart(self):
self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
sshloglink = os.path.join(self.testdir, "ssh_target_log")
if os.path.islink(sshloglink):
os.unlink(sshloglink)
os.symlink(self.sshlog, sshloglink)
print("SSH log file: %s" % self.sshlog)
self.connection = SSHControl(self.ip, logfile=self.sshlog)
def run(self, cmd, timeout=None):
return self.connection.run(cmd, timeout)
def copy_to(self, localpath, remotepath):
return self.connection.copy_to(localpath, remotepath)
def copy_from(self, remotepath, localpath):
return self.connection.copy_from(remotepath, localpath)
class MyDataDict(dict):
def getVar(self, key, unused = None):
return self.get(key, "")
class TestContext(object):
def __init__(self):
self.d = None
self.target = None
def main():
usage = "usage: %prog [options] <json file>"
parser = OptionParser(usage=usage)
parser.add_option("-t", "--target-ip", dest="ip", help="The IP address of the target machine. Use this to \
overwrite the value determined from TEST_TARGET_IP at build time")
parser.add_option("-s", "--server-ip", dest="server_ip", help="The IP address of this machine. Use this to \
overwrite the value determined from TEST_SERVER_IP at build time.")
parser.add_option("-d", "--deploy-dir", dest="deploy_dir", help="Full path to the package feeds, that this \
the contents of what used to be DEPLOY_DIR on the build machine. If not specified it will use the value \
specified in the json if that directory actually exists or it will error out.")
parser.add_option("-l", "--log-dir", dest="log_dir", help="This sets the path for TEST_LOG_DIR. If not specified \
the current dir is used. This is used for usually creating a ssh log file and a scp test file.")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Incorrect number of arguments. The one and only argument should be a json file exported by the build system")
with open(args[0], "r") as f:
loaded = json.load(f)
if options.ip:
loaded["target"]["ip"] = options.ip
if options.server_ip:
loaded["target"]["server_ip"] = options.server_ip
d = MyDataDict()
for key in loaded["d"].keys():
d[key] = loaded["d"][key]
if options.log_dir:
d["TEST_LOG_DIR"] = options.log_dir
else:
d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__))
if options.deploy_dir:
d["DEPLOY_DIR"] = options.deploy_dir
else:
if not os.path.isdir(d["DEPLOY_DIR"]):
raise Exception("The path to DEPLOY_DIR does not exists: %s" % d["DEPLOY_DIR"])
target = FakeTarget(d)
for key in loaded["target"].keys():
setattr(target, key, loaded["target"][key])
tc = TestContext()
setattr(tc, "d", d)
setattr(tc, "target", target)
for key in loaded.keys():
if key != "d" and key != "target":
setattr(tc, key, loaded[key])
target.exportStart()
runTests(tc)
return 0
if __name__ == "__main__":
try:
ret = main()
except Exception:
ret = 1
import traceback
traceback.print_exc(5)
sys.exit(ret)
|
{
"content_hash": "143c8540aece639b895fbe0157ad00dc",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 131,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.6305288461538462,
"repo_name": "wwright2/dcim3-angstrom1",
"id": "e1b6642ec2a441577ecce62e7ecec03e3e6ad2df",
"size": "4771",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sources/openembedded-core/meta/lib/oeqa/runexported.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "73541"
},
{
"name": "Awk",
"bytes": "286"
},
{
"name": "Batchfile",
"bytes": "19960"
},
{
"name": "BitBake",
"bytes": "2875212"
},
{
"name": "BlitzBasic",
"bytes": "6367"
},
{
"name": "C",
"bytes": "1598095"
},
{
"name": "C++",
"bytes": "2198121"
},
{
"name": "CMake",
"bytes": "7277"
},
{
"name": "CSS",
"bytes": "28636"
},
{
"name": "Groff",
"bytes": "502999"
},
{
"name": "HTML",
"bytes": "210823"
},
{
"name": "JavaScript",
"bytes": "23100"
},
{
"name": "Lua",
"bytes": "1194"
},
{
"name": "Makefile",
"bytes": "32539"
},
{
"name": "Nginx",
"bytes": "2744"
},
{
"name": "PHP",
"bytes": "829048"
},
{
"name": "Pascal",
"bytes": "17352"
},
{
"name": "Perl",
"bytes": "66339"
},
{
"name": "Python",
"bytes": "3672452"
},
{
"name": "QMake",
"bytes": "165"
},
{
"name": "Ruby",
"bytes": "10695"
},
{
"name": "Shell",
"bytes": "820076"
},
{
"name": "SourcePawn",
"bytes": "259600"
},
{
"name": "Tcl",
"bytes": "4897"
},
{
"name": "VimL",
"bytes": "8483"
},
{
"name": "XSLT",
"bytes": "9089"
}
],
"symlink_target": ""
}
|
number1 = int(input("digite um número que servirá de base:"))
number2 = int(input("digite um número que servirá de expoente: "))
counter = 1
number = number1
while counter < number2:
number = number * number1
counter += 1
print("o número",number1,"elevado ao número",number2,"é:",number)
|
{
"content_hash": "194ff7082c8a2c40015078e2af3727b2",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 66,
"avg_line_length": 29.8,
"alnum_prop": 0.7013422818791947,
"repo_name": "jucimarjr/IPC_2017-1",
"id": "97a20f5f702458d169b677a795840cb85d5630ca",
"size": "1097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lista05/lista05_lista01_questao13.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2978"
},
{
"name": "Python",
"bytes": "525677"
}
],
"symlink_target": ""
}
|
"""
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# example summary provider for CFBinaryHeap
# the real summary is now C++ code built into LLDB
import lldb
import ctypes
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but the length for a CFBinaryHeap, so they need not
# obey the interface specification for synthetic children providers
class CFBinaryHeapRef_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
# 8 bytes on i386
# 16 bytes on x64
# most probably 2 pointers
def offset(self):
logger = lldb.formatters.Logger.Logger()
return 2 * self.sys_params.pointer_size
def length(self):
logger = lldb.formatters.Logger.Logger()
size = self.valobj.CreateChildAtOffset(
"count", self.offset(), self.sys_params.types_cache.NSUInteger)
return size.GetValueAsUnsigned(0)
class CFBinaryHeapUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def length(self):
logger = lldb.formatters.Logger.Logger()
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
num_children_vo = self.valobj.CreateValueFromExpression(
"count", "(int)CFBinaryHeapGetCount(" + stream.GetData() + " )")
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return '<variable is not CFBinaryHeap>'
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
if wrapper:
return wrapper
name_string = class_data.class_name()
actual_name = class_data.class_name()
logger >> "name string got was " + \
str(name_string) + " but actual name is " + str(actual_name)
if class_data.is_cftype():
# CFBinaryHeap does not expose an actual NSWrapper type, so we have to check that this is
# an NSCFType and then check we are a pointer-to CFBinaryHeap
valobj_type = valobj.GetType()
if valobj_type.IsValid() and valobj_type.IsPointerType():
valobj_type = valobj_type.GetPointeeType()
if valobj_type.IsValid():
actual_name = valobj_type.GetName()
if actual_name == '__CFBinaryHeap':
wrapper = CFBinaryHeapRef_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
return wrapper
wrapper = CFBinaryHeapUnknown_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
return wrapper
def CFBinaryHeap_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
return provider.message()
try:
summary = provider.length()
except:
summary = None
logger >> "summary got from provider: " + str(summary)
# for some reason, one needs to clear some bits for the count
# to be correct when using CF(Mutable)BagRef on x64
# the bit mask was derived through experimentation
# (if counts start looking weird, then most probably
# the mask needs to be changed)
if summary is None:
summary = '<variable is not CFBinaryHeap>'
elif isinstance(summary, basestring):
pass
else:
if provider.sys_params.is_64_bit:
summary = summary & ~0x1fff000000000000
if summary == 1:
return '@"1 item"'
else:
summary = '@"' + str(summary) + ' items"'
return summary
return 'Summary Unavailable'
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F CFBinaryHeap.CFBinaryHeap_SummaryProvider CFBinaryHeapRef")
|
{
"content_hash": "91f04c3068cc0809bd465fd5f083055b",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 97,
"avg_line_length": 34.86335403726708,
"alnum_prop": 0.6468911455549617,
"repo_name": "youtube/cobalt",
"id": "b22f2c5d4f7c504c929cbfb398ce5fa1295cff12",
"size": "5613",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/llvm-project/lldb/examples/summaries/cocoa/CFBinaryHeap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import hashlib
import logging
import os
import random
import signal
import socket
import string
import subprocess
import sys
import time
from twisted.internet import reactor
from twisted.internet.protocol import DatagramProtocol
from twisted.internet.error import MulticastJoinError
from twisted.web import resource, server, static
def has_option(short, long):
return short in sys.argv[1:] or long in sys.argv[1:]
_DEBUG = has_option('-d', '--debug')
_IP = has_option('-i', '--ip')
_QUIET = has_option('-q', '--quiet')
_LOGGING_FORMAT = '%(message)s'
_LOGGING_FORMAT_DEBUG = '[%(levelname)s] %(module)s.%(funcName)s: %(message)s'
_ROOT_PKG_CACHE = '/var/cache/pacman/pkg/'
_REANNOUNCE_TIMER = 60 * 3
_TIMEOUT = 1.5 * _REANNOUNCE_TIMER
_TIMEOUT_JOINERROR = 180
_WAITING_TIMER = 1
_MULTICAST_GROUP = '228.0.2.35'
_MULTICAST_PORT = 19432
_MULTICAST_ADDR = (_MULTICAST_GROUP, _MULTICAST_PORT)
_HTTP_FILE_PORT = _MULTICAST_PORT
_HTTP_PACMAN_PORT = _HTTP_FILE_PORT + 1
_ID = hashlib.sha1('{name}_{random_value}'.format(
name = os.uname()[1],
random_value = int(random.random() * 100000000),
)).hexdigest()
_DISCONNECT_MSG = 'flocon: DISCONNECT'
_HAS_MSG = 'flocon: HAS'
_NO_MSG = 'flocon: NO'
_PING_MSG = 'flocon: PING'
_PONG_MSG = 'flocon: PONG'
_YES_MSG = 'flocon: YES'
_SEPARATOR = '-'
_SEPARATOR_F = ' = '
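# Illustrative wire format, given how send_data()/send_with_filename()
# below join the pieces (the package name is made up):
#
#     '<sha1 id>-flocon: PING'
#     '<sha1 id>-flocon: HAS = linux-4.9.11-1-x86_64.pkg.tar.xz'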
_CLIENTS = dict()
def _list_clients(signum, stack_frame):
c_len = len(_CLIENTS)
    logging.info('\nThere %s %s client%s connected.',
                 'are' if c_len != 1 else 'is', c_len, 's' if c_len != 1 else '')
for client in _CLIENTS.values():
logging.info(' - %s', client.display())
logging.info('')
def _find_fallback_mirror():
with open('/etc/pacman.d/mirrorlist') as f:
for line in f.readlines():
if line.startswith("# flocon: Server"):
try:
return line.split('=')[1].strip()
except IndexError:
return None
return None
_FALLBACK_MIRROR = str(_find_fallback_mirror()) + '/$filename'
_FILE_SERVER = 'http://$ip:$port/$filename'
class Client:
def __init__(self, id, addr):
self.id, self.last, self.addr, self.connected = id, None, addr, True
self.ip, self.port, self.host = addr[0], addr[1], None
self.find_host()
self.update()
def __str__(self):
return self.display(display=False)
def display(self, display=True):
if _DEBUG or display:
return '{{id = {}; host = {}; ip = {}; port = {}}}'.format(
self.id, '[unknown]' if self.host is None else self.host,
self.ip, self.port,
)
elif _IP or self.host is None:
return '{{ip = {}; port = {}}}'.format(self.ip, self.port)
else:
return '{{host = {}; port = {}}}'.format(self.host, self.port)
def update(self):
self.last = time.time()
def is_valid(self):
return self.connected and (time.time() - self.last) < _TIMEOUT
def find_host(self):
kwargs = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}
try:
p = subprocess.Popen(['host', self.ip], **kwargs)
except OSError:
return
out, _ = p.communicate()
if p.returncode == 0:
self.host = out.split()[-1][:-1]
_REQUEST = None
class MulticastClientManager(DatagramProtocol):
def __init__(self, *args, **kwargs):
try:
DatagramProtocol.__init__(self, *args, **kwargs)
except AttributeError:
pass
self.__attempts = _TIMEOUT_JOINERROR
def startProtocol(self, signum=None, stack_frame=None):
def multicastError(_):
self.__attempts -= 1
if self.__attempts == 0:
logging.info('Impossible to connect to network. Send me '
'SIGUSR2 when you have some network available.')
signal.signal(signal.SIGUSR2, self.startProtocol)
self.__attempts = _TIMEOUT_JOINERROR
else:
logging.debug('Multicast join failed!')
reactor.callLater(5, self.startProtocol)
        def multicastJoined(_):
self.announce_presence()
if signum is not None:
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
joiner = self.transport.joinGroup(_MULTICAST_GROUP)
        joiner.addCallback(multicastJoined)
joiner.addErrback(multicastError)
return joiner
def datagramReceived(self, datagram, addr):
logging.debug('Received a new UDP message from %r', addr)
logging.debug('Message was: %s', datagram)
try:
_id, _msg = datagram.split(_SEPARATOR, 1)
except ValueError:
logging.error('I couldn\'t understand last message.')
return
if _msg == _PING_MSG or _msg == _PONG_MSG:
if _id == _ID:
return
try:
_CLIENTS[_id].update()
except KeyError:
c = Client(_id, addr)
logging.info('Client %s connected!', c)
_CLIENTS[_id] = c
if _msg == _PING_MSG:
self.send_data(_PONG_MSG, addr)
return
# From here, if we don't know the client, we just ignore the message.
try:
client = _CLIENTS[_id]
except KeyError:
logging.error('I don\'t know this client!')
return
if _msg == _DISCONNECT_MSG:
client.connected = False
timeout_clients()
return
# File message.
try:
_msg, _filename = _msg.split(_SEPARATOR_F)
        except ValueError:
logging.error('I couldn\'t understand last file message.')
return
if _msg == _HAS_MSG:
self.has_file(client, _filename)
return
if _REQUEST is None or _REQUEST.filename != _filename:
return
        if _msg == _YES_MSG:
            _REQUEST.redirect_file_server(_id)
        elif _msg == _NO_MSG:
            _REQUEST.client_answered_no()
def send_data(self, msg, addr):
try:
self.transport.write(_SEPARATOR.join([_ID, msg]), addr)
except socket.error:
pass
def send_with_filename(self, msg, filename, addr):
self.send_data(_SEPARATOR_F.join([msg, filename]), addr)
def announce_presence(self):
logging.debug('Sending presence to multicast group.')
self.send_data(_PING_MSG, _MULTICAST_ADDR)
reactor.callLater(_REANNOUNCE_TIMER, self.announce_presence)
def announce_disconnection(self):
self.send_data(_DISCONNECT_MSG, _MULTICAST_ADDR)
def ask_file(self, filename):
for _, client in _CLIENTS.iteritems():
logging.debug('Sending request for file to %s.', client)
self.send_with_filename(_HAS_MSG, filename, client.addr)
return len(_CLIENTS)
def has_file(self, client, filename):
packages = []
for _, _, files in os.walk(_ROOT_PKG_CACHE):
for _filename in files:
if _filename.endswith('.tar.xz'):
packages.append(_filename)
if filename in packages:
logging.info('%s: %s ? YES', client, filename)
self.send_with_filename(_YES_MSG, filename, client.addr)
else:
logging.info('%s: %s ? NO', client, filename)
self.send_with_filename(_NO_MSG, filename, client.addr)
_MULTICAST_OBJ = MulticastClientManager()
class Request:
def __init__(self, request):
self.request = request
_, self.repo, _, self.arch, self.filename = self.request.uri.split('/')
def init_response(self):
if not _CLIENTS:
self.redirect_fallback_mirror()
else:
self.clients = _MULTICAST_OBJ.ask_file(self.filename)
            reactor.callLater(_WAITING_TIMER, self.redirect_fallback_mirror)
def __str__(self):
return self.request.uri
def redirect_file_server(self, id):
global _REQUEST
if _REQUEST is None or _REQUEST.filename != self.filename:
return
client = _CLIENTS[id]
logging.info('%s: Redirecting to client %s', self.filename, client)
url = string.Template(_FILE_SERVER).safe_substitute({
'ip': client.ip, 'port': client.port, 'filename': self.filename,
})
logging.debug('Redirect URL: %s', url)
self.request.redirect(url)
self.request.finish()
_REQUEST = None
def redirect_fallback_mirror(self):
global _REQUEST
if _REQUEST is None or _REQUEST.filename != self.filename:
return
if _FALLBACK_MIRROR.startswith('None'):
# No fallback mirror is set in configuration, so we just return an
# error.
logging.info('%s: No fallback mirror: 404 Not Found.', self.filename)
self.request.setResponseCode(404)
self.request.finish()
else:
logging.info('%s: Redirecting to fallback mirror.', self.filename)
url = string.Template(_FALLBACK_MIRROR).safe_substitute({
'repo': self.repo, 'arch': self.arch, 'filename': self.filename,
})
logging.debug('Redirect URL: %s', url)
self.request.redirect(url)
self.request.finish()
_REQUEST = None
def client_answered_no(self):
self.clients -= 1
logging.debug('One of the clients answered no. Still %d clients '
'remaining.', self.clients)
if self.clients == 0:
self.redirect_fallback_mirror()
class LocalHttpServer(resource.Resource):
isLeaf = True
def render_GET(self, request):
global _REQUEST
        logging.debug('Received GET request for %s', request.uri)
_REQUEST = Request(request)
_REQUEST.init_response()
return server.NOT_DONE_YET
def timeout_clients():
for _id, _client in _CLIENTS.items():
if _client.is_valid():
continue
logging.info('Client %s disconnected!', _client)
del _CLIENTS[_id]
reactor.callLater(_REANNOUNCE_TIMER, timeout_clients)
def disconnect_multicast():
# Send disconnect message to everyone.
_MULTICAST_OBJ.announce_disconnection()
def main():
if _DEBUG:
logging.basicConfig(level=logging.DEBUG, format=_LOGGING_FORMAT_DEBUG)
elif _QUIET:
logging.basicConfig(level=logging.CRITICAL)
else:
logging.basicConfig(level=logging.INFO, format=_LOGGING_FORMAT)
# Hook on SIGUSR1
signal.signal(signal.SIGUSR1, _list_clients)
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
# Displaying some information about us.
logging.info('Id: %s', _ID)
logging.info('Multicast group: %s', _MULTICAST_GROUP)
logging.info('Multicast port: %s', _MULTICAST_PORT)
logging.info('Fallback mirror: %s', _FALLBACK_MIRROR)
logging.info('')
# Multicast server/client
reactor.listenMulticast(_MULTICAST_PORT, _MULTICAST_OBJ)
# HTTP Server for pacman.
reactor.listenTCP(_HTTP_PACMAN_PORT, server.Site(LocalHttpServer()))
# HTTP Server for files.
root = static.File(_ROOT_PKG_CACHE)
reactor.listenTCP(_HTTP_FILE_PORT, server.Site(root))
# Timeout clients when not reannouncing.
timeout_clients()
# End of the program.
reactor.addSystemEventTrigger('before', 'shutdown', disconnect_multicast)
reactor.run()
if __name__ == '__main__':
main()
|
{
"content_hash": "cf6d1f5c959f44074746a9a738d12470",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 81,
"avg_line_length": 32.76619718309859,
"alnum_prop": 0.5880330123796423,
"repo_name": "fmichea/flocon",
"id": "78eabc4ec3eaef2690a2b2c748fda39ca635f7a8",
"size": "11762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flocon/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12103"
},
{
"name": "Shell",
"bytes": "1174"
}
],
"symlink_target": ""
}
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# from flask_script import Manager, Shell, Server
from flask_login import LoginManager
from flask_openid import OpenID
from flask_mail import Mail, Message
from config import basedir
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
oid = OpenID(app, os.path.join(basedir, 'tmp'))
# manager = Manager(app)
mail = Mail(app)
# def make_shell_context():
# return dict(app=app, db=db, School=School, Student=Student,
# Project=Project)
# manager.add_command("shell", Shell(make_context=make_shell_context))
# manager.add_command('db', MigrateCommand)
from app import views, models
|
{
"content_hash": "7c6d697575a2982b3fc9b142d450341e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 70,
"avg_line_length": 27.06896551724138,
"alnum_prop": 0.729936305732484,
"repo_name": "gretahuang/calhacks2015",
"id": "ae2cb0bc3f28415dbd6443ff00f1d80fbea0f18f",
"size": "785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "938"
},
{
"name": "HTML",
"bytes": "1408"
},
{
"name": "Python",
"bytes": "14792"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
from TCA.administration.models import Grade
from TCA.utils.models.mixins import TimeStamped
class Stream(TimeStamped):
grade = models.OneToOneField(
Grade,
db_index=True
)
url = models.CharField(
'URL',
max_length=1024)
class Meta:
ordering = ['grade']
def __unicode__(self):
return 'Stream: %s' % unicode(self.grade)
|
{
"content_hash": "1eec697bc81ed53a35a4482bb4911c2d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 49,
"avg_line_length": 20.954545454545453,
"alnum_prop": 0.6399132321041214,
"repo_name": "JosmanPS/tsebaoth-christian-academy",
"id": "34f8529e9da600dffd68aebd038d426a79c643d8",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TCA/stream/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "155074"
},
{
"name": "HTML",
"bytes": "51352"
},
{
"name": "JavaScript",
"bytes": "302885"
},
{
"name": "Python",
"bytes": "88031"
}
],
"symlink_target": ""
}
|
import os
import sqlite3
import csv
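# Expected input: a tab-separated file with one header row and six columns per
# line (new character, old character, total strokes, grade, year added,
# reading), matching the INSERT statement below.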
def build(table_name, csv_file):
    print 'Creating table for %s.' % table_name
# Database Connect
con = sqlite3.connect(DATABASE_FILE)
con.text_factory = str
# Load File
data = []
with open(csv_file,'r') as f:
next(f) # skip headings
reader=csv.reader(f,delimiter='\t')
for row in reader:
data.append(row)
# Create Table
con.execute('CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, new_character TEXT, old_character TEXT, total_strokes INTEGER, grade TEXT, add_year INTEGER, reading TEXT);' % table_name)
# Insert Data
for row in data:
con.execute('INSERT INTO %s (new_character, old_character, total_strokes, grade, add_year, reading) VALUES (?, ?, ?, ?, ?, ?)' % table_name, (row[0],row[1],row[2],row[3],row[4],row[5]))
# Results
c = con.cursor()
c.execute('SELECT count(*) FROM %s' % table_name)
print 'Counts: %d' % c.fetchone()[0]
c = con.cursor()
c.execute('SELECT * FROM %s ORDER BY id DESC LIMIT 1' % table_name)
print 'Last Result:'
print 'ID:%d %s %s %s %s %s %s' % c.fetchone()
# Commit
con.commit()
# Database Close
con.close()
BASE_DIR = './'
DATABASE_FILE = BASE_DIR + 'kanji_reading.db'
INIT_DATABASE = True
# Remove if exists
if os.path.exists(DATABASE_FILE) and INIT_DATABASE:
os.remove(DATABASE_FILE)
build('joyo2010', BASE_DIR + 'joyo2010.txt')
print 'Done.'
|
{
"content_hash": "4f3b5d4934a68090d6a662045f9e7781",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 208,
"avg_line_length": 28.50943396226415,
"alnum_prop": 0.6108537392455328,
"repo_name": "Atrac613/KanjiReading-iOS",
"id": "66853438c27fd2bb8c50f6a03c90c349d85d79b4",
"size": "1536",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "KanjiReading/Resources/build_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10667"
},
{
"name": "C++",
"bytes": "70109"
},
{
"name": "Objective-C",
"bytes": "113782"
},
{
"name": "Python",
"bytes": "1536"
},
{
"name": "Ruby",
"bytes": "33"
},
{
"name": "Shell",
"bytes": "2006"
}
],
"symlink_target": ""
}
|
"""Tests for TFGAN's head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import head
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import training
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def dummy_loss(gan_model, add_summaries=True): # pylint:disable=unused-argument
return math_ops.reduce_sum(gan_model.discriminator_real_outputs -
gan_model.discriminator_gen_outputs)
def get_gan_model():
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
with variable_scope.variable_scope('discriminator') as dis_scope:
dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
return tfgan_tuples.GANModel(
generator_inputs=None,
generated_data=array_ops.ones([3, 4]),
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
real_data=None,
discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var,
discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
class GANHeadTest(test.TestCase):
def setUp(self):
super(GANHeadTest, self).setUp()
self.gan_head = head.gan_head(
generator_loss_fn=dummy_loss,
discriminator_loss_fn=dummy_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
get_eval_metric_ops_fn=self.get_metrics)
    self.assertIsInstance(self.gan_head, head.GANHead)
def get_metrics(self, gan_model):
    self.assertIsInstance(gan_model, tfgan_tuples.GANModel)
return {}
def _test_modes_helper(self, mode):
return self.gan_head.create_estimator_spec(
features=None,
mode=mode,
logits=get_gan_model())
def test_modes_predict(self):
spec = self._test_modes_helper(model_fn_lib.ModeKeys.PREDICT)
self.assertItemsEqual((_DEFAULT_SERVING_KEY, 'predict'),
spec.export_outputs.keys())
def test_modes_eval(self):
self._test_modes_helper(model_fn_lib.ModeKeys.EVAL)
def test_modes_train(self):
self._test_modes_helper(model_fn_lib.ModeKeys.TRAIN)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "73f52e0985746f726e816a383f503ee9",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 80,
"avg_line_length": 36.32098765432099,
"alnum_prop": 0.7127804214819851,
"repo_name": "drpngx/tensorflow",
"id": "5309d87765694fa476dae006105e842420a7c437",
"size": "3631",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/gan/python/estimator/python/head_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "304178"
},
{
"name": "C++",
"bytes": "43473091"
},
{
"name": "CMake",
"bytes": "202538"
},
{
"name": "Go",
"bytes": "1148824"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "755551"
},
{
"name": "Jupyter Notebook",
"bytes": "2211560"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48603"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "36815599"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "428510"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
from rest_framework import routers, serializers, viewsets
# Lazy
from assays.models import(
AssayStudy,
AssayDataPoint,
AssayMatrixItem,
AssayGroup,
AssayGroupCompound,
AssayGroupCell,
AssayGroupSetting,
AssayStudyAssay,
AssaySampleLocation,
AssayTarget,
AssayMethod,
PhysicalUnits
)
from django.contrib.auth.models import Group
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from rest_framework.serializers import ListSerializer
from rest_framework.exceptions import ValidationError
from rest_framework.fields import SkipField
from rest_framework.settings import api_settings
# Via https://github.com/claytondaley/drf-keyed-list
class KeyedListSerializer(ListSerializer):
default_error_messages = {
'not_a_dict': _('Expected a dict of items but got type "{input_type}".'),
'empty': _('This dict may not be empty.')
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
meta = getattr(self.child, 'Meta', None)
assert hasattr(meta, 'keyed_list_serializer_field'), \
"Must provide a field name at keyed_list_serializer_field when using KeyedListSerializer"
self._keyed_field = meta.keyed_list_serializer_field
def to_internal_value(self, data):
if not isinstance(data, dict):
message = self.error_messages['not_a_dict'].format(
input_type=type(data).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='not_a_dict')
if not self.allow_empty and len(data) == 0:
if self.parent and self.partial:
raise SkipField()
message = self.error_messages['empty']
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='empty')
data = [{**v, **{self._keyed_field: k}} for k, v in data.items()]
return super().to_internal_value(data)
def to_representation(self, data):
response = super().to_representation(data)
return {v.pop(self._keyed_field): v for v in response}
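# Behaviour sketch (illustrative values): with keyed_list_serializer_field =
# 'id', the usual list representation
#     [{'id': 1, 'target': 'LDH'}, {'id': 2, 'target': 'ALB'}]
# is rendered as
#     {1: {'target': 'LDH'}, 2: {'target': 'ALB'}}
# and to_internal_value unpacks dicts of that shape back into a list.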
# Maybe we ought to have serializers in a different file?
class AssayDataPointSerializer(serializers.ModelSerializer):
    # DRF raises an assertion error if `source` is passed with a value equal
    # to the field name: the source already defaults to the attribute name,
    # so the argument is redundant.
sample_location = serializers.StringRelatedField()
# Alias
assay_id = serializers.StringRelatedField(source='study_assay_id', read_only=True)
item_id = serializers.StringRelatedField(source='matrix_item_id', read_only=True)
time = serializers.StringRelatedField(source='get_time_string')
time_in_minutes = serializers.IntegerField(source='time')
class Meta:
model = AssayDataPoint
fields = [
'item_id',
'assay_id',
'sample_location',
'value',
'cross_reference',
'time',
'time_in_minutes',
'notes',
'assay_plate_id',
'assay_well_id',
'replicate',
'excluded',
'replaced',
'update_number',
]
# Where things get a bit messy: each of the group's related models (compound,
# cell, setting) needs its own serializer.
class AssayGroupCompoundSerializer(serializers.ModelSerializer):
compound = serializers.StringRelatedField(source='compound_instance.compound')
supplier = serializers.StringRelatedField(source='compound_instance.supplier')
lot = serializers.StringRelatedField(source='compound_instance.lot')
receipt_date = serializers.StringRelatedField(source='compound_instance.receipt_date')
concentration_unit = serializers.StringRelatedField()
addition_location = serializers.StringRelatedField()
addition_time = serializers.StringRelatedField(source='get_addition_time_string')
duration = serializers.StringRelatedField(source='get_duration_string')
addition_time_in_minutes = serializers.IntegerField(source='addition_time')
duration_in_minutes = serializers.IntegerField(source='duration')
class Meta:
model = AssayGroupCompound
fields = [
'compound',
'supplier',
'lot',
'receipt_date',
'concentration',
'concentration_unit',
'addition_time',
'addition_time_in_minutes',
'duration',
'duration_in_minutes',
'addition_location',
]
class AssayGroupCellSerializer(serializers.ModelSerializer):
    # These could become nested serializers instead of strings if another layer of detail is ever needed.
cell_sample = serializers.StringRelatedField()
biosensor = serializers.StringRelatedField()
density_unit = serializers.StringRelatedField()
addition_location = serializers.StringRelatedField()
addition_time = serializers.StringRelatedField(source='get_addition_time_string')
addition_time_in_minutes = serializers.IntegerField(source='addition_time')
class Meta:
model = AssayGroupCell
fields = [
'cell_sample',
'biosensor',
'density',
'density_unit',
'passage',
'addition_time',
'addition_time_in_minutes',
'addition_location',
]
class AssayGroupSettingSerializer(serializers.ModelSerializer):
setting = serializers.StringRelatedField()
unit = serializers.StringRelatedField()
addition_location = serializers.StringRelatedField()
addition_time = serializers.StringRelatedField(source='get_addition_time_string')
duration = serializers.StringRelatedField(source='get_duration_string')
addition_time_in_minutes = serializers.IntegerField(source='addition_time')
duration_in_minutes = serializers.IntegerField(source='duration')
class Meta:
model = AssayGroupSetting
fields = [
'setting',
'value',
'unit',
'addition_time',
'addition_time_in_minutes',
'duration',
'duration_in_minutes',
'addition_location',
]
class AssayGroupSerializer(serializers.ModelSerializer):
# Aliases
mps_model = serializers.StringRelatedField(source='organ_model')
mps_model_version = serializers.StringRelatedField(source='organ_model_protocol')
# Backwards relations
compounds = AssayGroupCompoundSerializer(source='assaygroupcompound_set', read_only=True, many=True)
cells = AssayGroupCellSerializer(source='assaygroupcell_set', read_only=True, many=True)
settings = AssayGroupSettingSerializer(source='assaygroupsetting_set', read_only=True, many=True)
class Meta:
model = AssayGroup
fields = [
# Need the id for matching
# It will be part of the representation
'id',
'name',
'mps_model',
'mps_model_version',
'compounds',
'cells',
'settings',
]
list_serializer_class = KeyedListSerializer
keyed_list_serializer_field = 'id'
class AssayStudyAssaySerializer(serializers.ModelSerializer):
target = serializers.StringRelatedField()
method = serializers.StringRelatedField()
unit = serializers.StringRelatedField()
class Meta:
model = AssayStudyAssay
fields = [
# Need the id for matching
'id',
'target',
'method',
'unit',
]
list_serializer_class = KeyedListSerializer
keyed_list_serializer_field = 'id'
class AssayMatrixItemSerializer(serializers.ModelSerializer):
# Force string ID
group_id = serializers.StringRelatedField(read_only=True)
class Meta:
model = AssayMatrixItem
fields = [
# Need the id for matching
'id',
'group_id',
'name',
# Setup date generally is the same as study's...
# Error on the side of more?
'scientist',
'notebook',
'notebook_page',
'notes',
]
list_serializer_class = KeyedListSerializer
keyed_list_serializer_field = 'id'
# Hyperlinked so list responses carry a direct URL to each study's detail view.
class AssayStudySerializer(serializers.HyperlinkedModelSerializer):
# Force string ID
id = serializers.StringRelatedField(read_only=True)
study_types = serializers.StringRelatedField(source='get_study_types_string')
data_group = serializers.StringRelatedField(source='group')
class Meta:
model = AssayStudy
fields = [
'id',
'url',
'name',
'data_group',
'study_types',
'start_date',
'description',
]
class AssayStudyDataSerializer(serializers.ModelSerializer):
# Force string ID
id = serializers.StringRelatedField(read_only=True)
study_types = serializers.StringRelatedField(source='get_study_types_string')
data_group = serializers.StringRelatedField(source='group')
data = AssayDataPointSerializer(source='assaydatapoint_set', read_only=True, many=True)
groups = AssayGroupSerializer(source='assaygroup_set', read_only=True, many=True)
assays = AssayStudyAssaySerializer(source='assaystudyassay_set', read_only=True, many=True)
# Are we sticking with this nomenclature?
items = AssayMatrixItemSerializer(source='assaymatrixitem_set', read_only=True, many=True)
class Meta:
model = AssayStudy
depth = 1
fields = [
# Redundant, but for clarity
'id',
# Study fields
# Stringification gives study types etc.
# Do we want the stringification or split up?
# '__str__',
'name',
'data_group',
'study_types',
'start_date',
'description',
'groups',
'items',
'assays',
'data',
]
# ViewSets define the view behavior.
# Mixin for differentiating list and detail
class DetailSerializerMixin(object):
def get_serializer_class(self):
if self.action == 'retrieve':
if hasattr(self, 'detail_serializer_class'):
return self.detail_serializer_class
return super(DetailSerializerMixin, self).get_serializer_class()
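# Usage sketch (hypothetical names): a viewset keeps serializer_class for list
# responses and adds detail_serializer_class for retrieve, e.g.
#     class FooViewSet(DetailSerializerMixin, viewsets.ReadOnlyModelViewSet):
#         serializer_class = FooListSerializer
#         detail_serializer_class = FooDetailSerializer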
# Maybe we ought to have ViewSets in a different file?
class AssayStudyViewSet(DetailSerializerMixin, viewsets.ReadOnlyModelViewSet):
# Contrived
queryset = AssayStudy.objects.filter(
restricted=False,
).exclude(
signed_off_by__isnull=True,
)
http_method_names = ['get']
# WE CANNOT USE THE SAME QUERYSET FOR BOTH
# We also need to use different serializers
def list(self, request):
queryset = AssayStudy.objects.filter(
restricted=False,
).exclude(
signed_off_by__isnull=True,
)
# NOTICE CONTEXT FOR HOST
serializer = AssayStudySerializer(
queryset,
many=True,
context={'request': request}
)
return Response(serializer.data)
def retrieve(self, request, pk=None):
# Only public data
queryset = AssayStudy.objects.filter(
restricted=False,
).exclude(
signed_off_by__isnull=True,
).prefetch_related(
# Subject to change!
'assaygroup_set__assaygroupcompound_set__compound_instance__compound',
'assaygroup_set__assaygroupcompound_set__compound_instance__supplier',
'assaygroup_set__assaygroupcompound_set__concentration_unit',
'assaygroup_set__assaygroupcompound_set__addition_location',
'assaygroup_set__assaygroupcell_set__cell_sample__cell_type__organ',
'assaygroup_set__assaygroupcell_set__cell_sample__cell_subtype',
'assaygroup_set__assaygroupcell_set__cell_sample__supplier',
'assaygroup_set__assaygroupcell_set__addition_location',
'assaygroup_set__assaygroupcell_set__density_unit',
'assaygroup_set__assaygroupcell_set__biosensor',
'assaygroup_set__assaygroupsetting_set__setting',
'assaygroup_set__assaygroupsetting_set__unit',
'assaygroup_set__assaygroupsetting_set__addition_location',
'assaygroup_set__organ_model__device',
'assaystudyassay_set__target',
'assaystudyassay_set__method',
'assaystudyassay_set__unit',
'assaydatapoint_set__sample_location',
'assaydatapoint_set__subtarget'
)
study = get_object_or_404(queryset, pk=pk)
serializer = AssayStudyDataSerializer(study)
return Response(serializer.data)
# Routers provide an easy way of automatically determining the URL conf.
# Be sure to import this into mps.urls
api_router = routers.DefaultRouter()
api_router.register(r'api/studies', AssayStudyViewSet)
# COMPONENTS
class AssayTargetSerializer(serializers.ModelSerializer):
class Meta:
model = AssayTarget
fields = [
'id',
'name',
'short_name',
'description',
]
class AssayTargetViewSet(DetailSerializerMixin, viewsets.ReadOnlyModelViewSet):
queryset = AssayTarget.objects.all()
serializer_class = AssayTargetSerializer
http_method_names = ['get']
api_router.register(r'api/targets', AssayTargetViewSet)
class AssayMethodSerializer(serializers.ModelSerializer):
class Meta:
model = AssayMethod
fields = [
'id',
'name',
'description',
]
class AssayMethodViewSet(DetailSerializerMixin, viewsets.ReadOnlyModelViewSet):
queryset = AssayMethod.objects.all()
serializer_class = AssayMethodSerializer
http_method_names = ['get']
api_router.register(r'api/methods', AssayMethodViewSet)
class AssaySampleLocationSerializer(serializers.ModelSerializer):
class Meta:
model = AssaySampleLocation
fields = [
'id',
'name',
'description',
]
class AssaySampleLocationViewSet(DetailSerializerMixin, viewsets.ReadOnlyModelViewSet):
queryset = AssaySampleLocation.objects.all()
serializer_class = AssaySampleLocationSerializer
http_method_names = ['get']
api_router.register(r'api/locations', AssaySampleLocationViewSet)
# NOTICE BREAK FROM CONVENTION
class PhysicalUnitsSerializer(serializers.ModelSerializer):
name = serializers.StringRelatedField(source='unit')
unit_type = serializers.StringRelatedField()
class Meta:
model = PhysicalUnits
fields = [
'id',
'name',
'unit_type',
'description',
]
class PhysicalUnitsViewSet(DetailSerializerMixin, viewsets.ReadOnlyModelViewSet):
queryset = PhysicalUnits.objects.all()
serializer_class = PhysicalUnitsSerializer
http_method_names = ['get']
api_router.register(r'api/units', PhysicalUnitsViewSet)
|
{
"content_hash": "e54d3eac608fac4d68a9c7e4fa0b6b72",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 104,
"avg_line_length": 32.51276595744681,
"alnum_prop": 0.638374451933774,
"repo_name": "UPDDI/mps-database-server",
"id": "a7c246883ffafcc0c6596816cc97411e082edecf",
"size": "15281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mps/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14194"
},
{
"name": "HTML",
"bytes": "1128291"
},
{
"name": "JavaScript",
"bytes": "701549"
},
{
"name": "Python",
"bytes": "1735408"
},
{
"name": "Shell",
"bytes": "1535"
},
{
"name": "TSQL",
"bytes": "41508"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "Integration", sigma = 0.0, exog_count = 20, ar_order = 12);
|
{
"content_hash": "f4f7d9b560fc908a7c5ba9b2e90555b7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 169,
"avg_line_length": 38.42857142857143,
"alnum_prop": 0.7100371747211895,
"repo_name": "antoinecarme/pyaf",
"id": "9a66c51e0e1e350f36a771bd1b9473bf9167e47a",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Integration/trend_LinearTrend/cycle_30/ar_12/test_artificial_32_Integration_LinearTrend_30_12_20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import matplotlib
import numpy as np
import os
import subprocess
import sys
import tempfile
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from dragonn.metrics import ClassificationResult
from keras.models import Sequential
from keras.layers.core import (Activation, Dense, Dropout, Flatten, Permute,
                               Reshape, TimeDistributedDense)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import GRU
from keras.regularizers import l1
from keras.callbacks import EarlyStopping
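# NOTE: the dragonn `Model` base class referenced below is not defined in this
# file; the minimal stand-in here is an assumption so that MotifScoreRNN and
# gkmSVM remain importable.
class Model(object):
    """Placeholder base class (assumed interface: train/predict)."""
    def train(self, X, y, validation_data):
        raise NotImplementedError
    def predict(self, X):
        raise NotImplementedError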
#class SequenceDNN(Model):
# """
# Sequence DNN models.
#
# Parameters
# ----------
# seq_length : int, optional
# length of input sequence.
# keras_model : instance of keras.models.Sequential, optional
# seq_length or keras_model must be specified.
# num_tasks : int, optional
# number of tasks. Default: 1.
# num_filters : list[int] | tuple[int]
# number of convolutional filters in each layer. Default: (15,).
# conv_width : list[int] | tuple[int]
# width of each layer's convolutional filters. Default: (15,).
# pool_width : int
# width of max pooling after the last layer. Default: 35.
# L1 : float
# strength of L1 penalty.
# dropout : float
# dropout probability in every convolutional layer. Default: 0.
# verbose: int
#        Verbosity level during training. Valid values: 0, 1, 2.
#
# Returns
# -------
# Compiled DNN model.
# """
#
# def __init__(self,
# seq_length=None,
# keras_model=None,
# use_RNN=False,
# num_tasks=1,
# num_filters=(15, 15, 15),
# conv_width=(15, 15, 15),
# pool_width=35,
# GRU_size=35,
# TDD_size=15,
# L1=0,
# dropout=0.0,
# num_epochs=100,
# verbose=1):
# self.num_tasks = num_tasks
# self.num_epochs = num_epochs
# self.verbose = verbose
# self.train_metrics = []
# self.valid_metrics = []
# if keras_model is not None and seq_length is None:
# self.model = keras_model
# self.num_tasks = keras_model.layers[-1].output_shape[-1]
# elif seq_length is not None and keras_model is None:
# self.model = Sequential()
# assert len(num_filters) == len(conv_width)
# for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
# conv_height = 4 if i == 0 else 1
# self.model.add(
# Convolution2D(
# nb_filter=nb_filter,
# nb_row=conv_height,
# nb_col=nb_col,
# activation='linear',
# init='he_normal',
# input_shape=(1, 4, seq_length),
# W_regularizer=l1(L1),
# b_regularizer=l1(L1)))
# self.model.add(Activation('relu'))
# self.model.add(Dropout(dropout))
# self.model.add(MaxPooling2D(pool_size=(1, pool_width)))
# if use_RNN:
# num_max_pool_outputs = self.model.layers[-1].output_shape[-1]
# self.model.add(Reshape((num_filters[-1], num_max_pool_outputs)))
# self.model.add(Permute((2, 1)))
# self.model.add(GRU(GRU_size, return_sequences=True))
# self.model.add(TimeDistributedDense(TDD_size, activation='relu'))
# self.model.add(Flatten())
# self.model.add(Dense(output_dim=self.num_tasks))
# self.model.add(Activation('sigmoid'))
# self.model.compile(optimizer='adam', loss='binary_crossentropy')
# else:
# raise ValueError(
# "Exactly one of seq_length or keras_model must be specified!")
#
# def train(self,
# X,
# y,
# validation_data,
# early_stopping_metric='Loss',
# early_stopping_patience=5,
# save_best_model_to_prefix=None):
# if y.dtype != bool:
# assert set(np.unique(y)) == {0, 1}
# y = y.astype(bool)
# multitask = y.shape[1] > 1
# if not multitask:
# num_positives = y.sum()
# num_sequences = len(y)
# num_negatives = num_sequences - num_positives
# if self.verbose >= 1:
# print('Training model (* indicates new best result)...')
# X_valid, y_valid = validation_data
# early_stopping_wait = 0
# best_metric = np.inf if early_stopping_metric == 'Loss' else -np.inf
# for epoch in range(1, self.num_epochs + 1):
# self.model.fit(
# X,
# y,
# batch_size=128,
# nb_epoch=1,
# class_weight={
# True: num_sequences / num_positives,
# False: num_sequences / num_negatives
# } if not multitask else None,
# verbose=self.verbose >= 2)
# epoch_train_metrics = self.test(X, y)
# epoch_valid_metrics = self.test(X_valid, y_valid)
# self.train_metrics.append(epoch_train_metrics)
# self.valid_metrics.append(epoch_valid_metrics)
# if self.verbose >= 1:
# print('Epoch {}:'.format(epoch))
# print('Train {}'.format(epoch_train_metrics))
# print('Valid {}'.format(epoch_valid_metrics), end='')
# current_metric = epoch_valid_metrics[early_stopping_metric].mean()
# if (early_stopping_metric == 'Loss') == (current_metric <= best_metric):
# if self.verbose >= 1:
# print(' *')
# best_metric = current_metric
# best_epoch = epoch
# early_stopping_wait = 0
# if save_best_model_to_prefix is not None:
# self.save(save_best_model_to_prefix)
# else:
# if self.verbose >= 1:
# print()
# if early_stopping_wait >= early_stopping_patience:
# break
# early_stopping_wait += 1
# if self.verbose >= 1:
# print('Finished training after {} epochs.'.format(epoch))
# if save_best_model_to_prefix is not None:
# print("The best model's architecture and weights (from epoch {0}) "
# 'were saved to {1}.arch.json and {1}.weights.h5'.format(
# best_epoch, save_best_model_to_prefix))
#
# def predict(self, X):
# return self.model.predict(X, batch_size=128, verbose=False)
#
# def get_sequence_filters(self):
# """
# Returns 3D array of 2D sequence filters.
# """
# return self.model.layers[0].get_weights()[0].squeeze(axis=1)
#
# def deeplift(self, X, batch_size=200):
# """
# Returns (num_task, num_samples, 1, num_bases, sequence_length) deeplift score array.
# """
# assert len(np.shape(X)) == 4 and np.shape(X)[1] == 1
# from deeplift.conversion import keras_conversion as kc
#
# # convert to deeplift model and get scoring function
# deeplift_model = kc.convert_sequential_model(self.model, verbose=False)
# score_func = deeplift_model.get_target_contribs_func(
# find_scores_layer_idx=0)
# # use a 40% GC reference
# input_references = [np.array([0.3, 0.2, 0.2, 0.3])[None, None, :, None]]
# # get deeplift scores
# deeplift_scores = np.zeros((self.num_tasks,) + X.shape)
# for i in range(self.num_tasks):
# deeplift_scores[i] = score_func(
# task_idx=i,
# input_data_list=[X],
# batch_size=batch_size,
# progress_update=None,
# input_references_list=input_references)
# return deeplift_scores
#
# def in_silico_mutagenesis(self, X):
# """
# Returns (num_task, num_samples, 1, num_bases, sequence_length) ISM score array.
# """
# mutagenesis_scores = np.empty(X.shape + (self.num_tasks,), dtype=np.float32)
# wild_type_predictions = self.predict(X)
# wild_type_predictions = wild_type_predictions[:, np.newaxis, np.newaxis,
# np.newaxis]
# for sequence_index, (sequence, wild_type_prediction) in enumerate(
# zip(X, wild_type_predictions)):
# mutated_sequences = np.repeat(
# sequence[np.newaxis], np.prod(sequence.shape), axis=0)
# # remove wild-type
# arange = np.arange(len(mutated_sequences))
# horizontal_cycle = np.tile(
# np.arange(sequence.shape[-1]), sequence.shape[-2])
# mutated_sequences[arange, :, :, horizontal_cycle] = 0
# # add mutant
# vertical_repeat = np.repeat(
# np.arange(sequence.shape[-2]), sequence.shape[-1])
# mutated_sequences[arange, :, vertical_repeat, horizontal_cycle] = 1
# # make mutant predictions
# mutated_predictions = self.predict(mutated_sequences)
# mutated_predictions = mutated_predictions.reshape(sequence.shape +
# (self.num_tasks,))
# mutagenesis_scores[
# sequence_index] = wild_type_prediction - mutated_predictions
# return np.rollaxis(mutagenesis_scores, -1)
#
# @staticmethod
# def _plot_scores(X, output_directory, peak_width, score_func, score_name):
# from dragonn.plot import plot_bases_on_ax
# scores = score_func(X).squeeze(
# axis=2) # (num_task, num_samples, num_bases, sequence_length)
# try:
# os.makedirs(output_directory)
# except OSError:
# pass
# num_tasks = len(scores)
# for task_index, task_scores in enumerate(scores):
# for sequence_index, sequence_scores in enumerate(task_scores):
# # sequence_scores is num_bases x sequence_length
# basewise_max_sequence_scores = sequence_scores.max(axis=0)
# plt.clf()
# figure, (top_axis, bottom_axis) = plt.subplots(2)
# top_axis.plot(
# range(1,
# len(basewise_max_sequence_scores) + 1),
# basewise_max_sequence_scores)
# top_axis.set_title('{} scores (motif highlighted)'.format(score_name))
# peak_position = basewise_max_sequence_scores.argmax()
# top_axis.axvspan(
# peak_position - peak_width,
# peak_position + peak_width,
# color='grey',
# alpha=0.1)
# peak_sequence_scores = sequence_scores[:, peak_position - peak_width:
# peak_position + peak_width].T
# # Set non-max letter_heights to zero
# letter_heights = np.zeros_like(peak_sequence_scores)
# letter_heights[np.arange(len(letter_heights)),
# peak_sequence_scores.argmax(axis=1)] = \
# basewise_max_sequence_scores[peak_position - peak_width :
# peak_position + peak_width]
# plot_bases_on_ax(letter_heights, bottom_axis)
# bottom_axis.set_xticklabels(
# tuple(
# map(str,
# np.arange(peak_position - peak_width,
# peak_position + peak_width + 1))))
# bottom_axis.tick_params(axis='x', labelsize='small')
# plt.xlabel('Position')
# plt.ylabel('Score')
# plt.savefig(
# os.path.join(output_directory, 'sequence_{}{}'.format(
# sequence_index, '_task_{}'.format(task_index)
# if num_tasks > 1 else '')))
# plt.close()
#
# def plot_deeplift(self, X, output_directory, peak_width=10):
# self._plot_scores(
# X,
# output_directory,
# peak_width,
# score_func=self.deeplift,
# score_name='DeepLift')
#
# def plot_in_silico_mutagenesis(self, X, output_directory, peak_width=10):
# self._plot_scores(
# X,
# output_directory,
# peak_width,
# score_func=self.in_silico_mutagenesis,
# score_name='ISM')
#
# def plot_architecture(self, output_file):
# from dragonn.visualize_util import plot as plot_keras_model
# plot_keras_model(self.model, output_file, show_shape=True)
#
# def save(self, save_best_model_to_prefix):
# arch_fname = save_best_model_to_prefix + '.arch.json'
# weights_fname = save_best_model_to_prefix + '.weights.h5'
# open(arch_fname, 'w').write(self.model.to_json())
# self.model.save_weights(weights_fname, overwrite=True)
#
# @staticmethod
# def load(arch_fname, weights_fname=None):
# model_json_string = open(arch_fname).read()
# sequence_dnn = SequenceDNN(keras_model=model_from_json(model_json_string))
# if weights_fname is not None:
# sequence_dnn.model.load_weights(weights_fname)
# return sequence_dnn
class MotifScoreRNN(Model):
def __init__(self, input_shape, gru_size=10, tdd_size=4):
self.model = Sequential()
self.model.add(
GRU(gru_size, return_sequences=True, input_shape=input_shape))
if tdd_size is not None:
self.model.add(TimeDistributedDense(tdd_size))
self.model.add(Flatten())
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
print('Compiling model...')
self.model.compile(optimizer='adam', loss='binary_crossentropy')
def train(self, X, y, validation_data):
print('Training model...')
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
self.model.fit(
X,
y,
batch_size=128,
nb_epoch=100,
validation_data=validation_data,
class_weight={
True: num_sequences / num_positives,
False: num_sequences / num_negatives
} if not multitask else None,
callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
verbose=True)
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
class gkmSVM(Model):
def __init__(self,
prefix='./gkmSVM',
word_length=11,
mismatches=3,
C=1,
threads=1,
cache_memory=100,
verbosity=4):
self.word_length = word_length
self.mismatches = mismatches
self.C = C
self.threads = threads
self.prefix = '_'.join(map(str, (prefix, word_length, mismatches, C)))
options_list = zip(
['-l', '-d', '-c', '-T', '-m', '-v'],
map(str,
(word_length, mismatches, C, threads, cache_memory, verbosity)))
self.options = ' '.join([' '.join(option) for option in options_list])
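        # With the defaults above this yields "-l 11 -d 3 -c 1 -T 1 -m 100 -v 4",
        # so train() runs roughly:
        #     gkmtrain -l 11 -d 3 -c 1 -T 1 -m 100 -v 4 pos.fa neg.fa <prefix>
        # (illustrative file names).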
@property
def model_file(self):
model_fname = '{}.model.txt'.format(self.prefix)
return model_fname if os.path.isfile(model_fname) else None
@staticmethod
def encode_sequence_into_fasta_file(sequence_iterator, ofname):
"""writes sequences into fasta file
"""
with open(ofname, "w") as wf:
for i, seq in enumerate(sequence_iterator):
print('>{}'.format(i), file=wf)
print(seq, file=wf)
def train(self, X, y, validation_data=None):
"""
Trains gkm-svm, saves model file.
"""
y = y.squeeze()
pos_sequence = X[y]
neg_sequence = X[~y]
pos_fname = "%s.pos_seq.fa" % self.prefix
neg_fname = "%s.neg_seq.fa" % self.prefix
# create temporary fasta files
self.encode_sequence_into_fasta_file(pos_sequence, pos_fname)
self.encode_sequence_into_fasta_file(neg_sequence, neg_fname)
# run command
command = ' '.join(('gkmtrain', self.options, pos_fname, neg_fname,
self.prefix))
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
process.wait() # wait for it to finish
# remove fasta files
os.system("rm %s" % pos_fname)
os.system("rm %s" % neg_fname)
def predict(self, X):
if self.model_file is None:
raise RuntimeError("GkmSvm hasn't been trained!")
# write test fasta file
test_fname = "%s.test.fa" % self.prefix
self.encode_sequence_into_fasta_file(X, test_fname)
# test gkmsvm
temp_ofp = tempfile.NamedTemporaryFile()
threads_option = '-T %s' % (str(self.threads))
command = ' '.join([
'gkmpredict', test_fname, self.model_file, temp_ofp.name, threads_option
])
process = subprocess.Popen(command, shell=True)
process.wait() # wait for it to finish
os.system("rm %s" % test_fname) # remove fasta file
# get classification results
temp_ofp.seek(0)
y = np.array([line.split()[-1] for line in temp_ofp], dtype=float)
temp_ofp.close()
return np.expand_dims(y, 1)
|
{
"content_hash": "19f9438ac31647708739f699ec28f68d",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 89,
"avg_line_length": 38.096018735362996,
"alnum_prop": 0.596606626913383,
"repo_name": "ktaneishi/deepchem",
"id": "2930b28edd70f873d8dcec5930982df541408d8b",
"size": "16267",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "contrib/dragonn/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16453"
},
{
"name": "Dockerfile",
"bytes": "794"
},
{
"name": "HTML",
"bytes": "20618"
},
{
"name": "Jupyter Notebook",
"bytes": "59756"
},
{
"name": "Python",
"bytes": "2553147"
},
{
"name": "Shell",
"bytes": "11547"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import math
from compas.geometry import quaternion_multiply
from compas.geometry import quaternion_conjugate
from compas.geometry import quaternion_unitize
from compas.geometry import quaternion_canonize
from compas.geometry import quaternion_norm
from compas.geometry import quaternion_is_unit
from compas.geometry import quaternion_from_matrix
from compas.geometry.primitives import Primitive
class Quaternion(Primitive):
r"""A quaternion is defined by 4 components, X, Y, Z, and W.
Parameters
----------
w : float
The scalar (real) part of a quaternion.
x, y, z : float
Components of the vector (complex, imaginary) part of a quaternion.
Attributes
----------
w : float
The W component of the quaternion.
x : float
The X component of the quaternion.
y : float
The Y component of the quaternion.
z : float
The Z component of the quaternion.
wxyz : list[float], read-only
Quaternion as a list of float in the 'wxyz' convention.
xyzw : list[float], read-only
Quaternion as a list of float in the 'xyzw' convention.
norm : float, read-only
The length (euclidean norm) of the quaternion.
is_unit : bool, read-only
True if the quaternion is unit-length.
False otherwise.
Notes
-----
The default convention to represent a quaternion :math:`q` in this module
is by four real values :math:`w`, :math:`x`, :math:`y`, :math:`z`.
The first value :math:`w` is the scalar (real) part,
and :math:`x`, :math:`y`, :math:`z` form the vector (complex, imaginary) part [1]_, so that:
.. math::
q = w + xi + yj + zk
where :math:`i, j, k` are basis components with the following multiplication rules [2]_:
.. math::
\begin{align}
ii &= jj = kk = ijk = -1 \\
ij &= k, \quad ji = -k \\
jk &= i, \quad kj = -i \\
ki &= j, \quad ik = -j
\end{align}
Quaternions are associative but not commutative.
**Quaternion as rotation.**
A rotation through an angle :math:`\theta` around an axis
defined by a euclidean unit vector :math:`u = u_{x}i + u_{y}j + u_{z}k`
can be represented as a quaternion:
.. math::
q = cos(\frac{\theta}{2}) + sin(\frac{\theta}{2}) [u_{x}i + u_{y}j + u_{z}k]
i.e.:
.. math::
\begin{align}
w &= cos(\frac{\theta}{2}) \\
x &= sin(\frac{\theta}{2}) u_{x} \\
y &= sin(\frac{\theta}{2}) u_{y} \\
z &= sin(\frac{\theta}{2}) u_{z}
\end{align}
For a quaternion to represent a rotation or orientation, it must be unit-length.
A quaternion representing a rotation :math:`p` resulting from applying a rotation
:math:`r` to a rotation :math:`q`, i.e.: :math:`p = rq`,
is also unit-length.
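    For example, a rotation of :math:`\pi/2` about the Z-axis:
    >>> import math
    >>> theta = math.pi / 2
    >>> q = Quaternion(math.cos(theta / 2), 0.0, 0.0, math.sin(theta / 2))
    >>> q.is_unit
    True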
References
----------
.. [1] http://mathworld.wolfram.com/Quaternion.html
.. [2] http://mathworld.wolfram.com/HamiltonsRules.html
.. [3] https://github.com/matthew-brett/transforms3d/blob/master/transforms3d/quaternions.py
Examples
--------
>>> Q = Quaternion(1.0, 1.0, 1.0, 1.0).unitized()
>>> R = Quaternion(0.0,-0.1, 0.2,-0.3).unitized()
>>> P = R*Q
>>> P.is_unit
True
"""
__slots__ = ["_w", "_x", "_y", "_z"]
def __init__(self, w, x, y, z, **kwargs):
super(Quaternion, self).__init__(**kwargs)
self._w = None
self._x = None
self._y = None
self._z = None
self.w = w
self.x = x
self.y = y
self.z = z
# ==========================================================================
# data
# ==========================================================================
@property
def DATASCHEMA(self):
""":class:`schema.Schema` : Schema of the data representation."""
from schema import Schema
return Schema({"w": float, "x": float, "y": float, "z": float})
@property
def JSONSCHEMANAME(self):
"""str : Name of the schema of the data representation in JSON format."""
return "quaternion"
@property
def data(self):
"""dict : Representation of the quaternion as a dict containing only native Python objects."""
return {"w": self.w, "x": self.x, "y": self.y, "z": self.z}
@data.setter
def data(self, data):
self.w = data["w"]
self.x = data["x"]
self.y = data["y"]
self.z = data["z"]
@classmethod
def from_data(cls, data):
"""Construct a quaternion from a data dict.
Parameters
----------
data : dict
The data dictionary.
Returns
-------
:class:`~compas.geometry.Quaternion`
The constructed quaternion.
Examples
--------
>>>
"""
return cls(data["w"], data["x"], data["y"], data["z"])
# ==========================================================================
# properties
# ==========================================================================
@property
def w(self):
return self._w
@w.setter
def w(self, w):
self._w = float(w)
@property
def x(self):
return self._x
@x.setter
def x(self, x):
self._x = float(x)
@property
def y(self):
return self._y
@y.setter
def y(self, y):
self._y = float(y)
@property
def z(self):
return self._z
@z.setter
def z(self, z):
self._z = float(z)
@property
def wxyz(self):
return [self.w, self.x, self.y, self.z]
@property
def xyzw(self):
return [self.x, self.y, self.z, self.w]
@property
def norm(self):
return quaternion_norm(self)
@property
def is_unit(self):
return quaternion_is_unit(self)
# ==========================================================================
# customization
# ==========================================================================
def __getitem__(self, key):
if key == 0:
return self.w
if key == 1:
return self.x
if key == 2:
return self.y
if key == 3:
return self.z
raise KeyError
    def __setitem__(self, key, value):
        if key == 0:
            self.w = value
            return
        if key == 1:
            self.x = value
            return
        if key == 2:
            self.y = value
            return
        if key == 3:
            self.z = value
            return
        raise KeyError
def __eq__(self, other, tol=1e-05):
if not hasattr(other, "__iter__") or not hasattr(other, "__len__") or len(self) != len(other):
return False
for v1, v2 in zip(self, other):
if math.fabs(v1 - v2) > tol:
return False
return True
def __iter__(self):
return iter(self.wxyz)
def __len__(self):
return 4
def __repr__(self):
return "Quaternion({:.{prec}f}, {:.{prec}f}, {:.{prec}f}, {:.{prec}f})".format(
self.w, self.x, self.y, self.z, prec=3
)
def __mul__(self, other):
"""Multiply operator for two quaternions.
Parameters
----------
other : [float, float, float, float] | :class:`~compas.geometry.Quaternion`
A Quaternion.
Returns
-------
:class:`~compas.geometry.Quaternion`
The product :math:`P = R * Q` of this quaternion (R) multiplied by other quaternion (Q).
Notes
-----
Multiplication of two quaternions :math:`R * Q` can be interpreted as applying rotation R to an orientation Q,
provided that both R and Q are unit-length.
The result is also unit-length.
Multiplication of quaternions is not commutative!
Examples
--------
>>> Q = Quaternion(1.0, 1.0, 1.0, 1.0).unitized()
>>> R = Quaternion(0.0,-0.1, 0.2,-0.3).unitized()
>>> P = R*Q
>>> P.is_unit
True
"""
p = quaternion_multiply(list(self), list(other))
return Quaternion(*p)
# ==========================================================================
# constructors
# ==========================================================================
@classmethod
def from_frame(cls, frame):
"""Creates a quaternion object from a frame.
Parameters
----------
frame : :class:`~compas.geometry.Frame`
Returns
-------
:class:`~compas.geometry.Quaternion`
The new quaternion.
Examples
--------
>>> from compas.geometry import allclose
>>> from compas.geometry import Frame
>>> q = [1., -2., 3., -4.]
>>> F = Frame.from_quaternion(q)
>>> Q = Quaternion.from_frame(F)
>>> allclose(Q.canonized(), quaternion_canonize(quaternion_unitize(q)))
True
"""
w, x, y, z = frame.quaternion
return cls(w, x, y, z)
@classmethod
def from_matrix(cls, M):
"""Create a Quaternion from a transformation matrix.
Parameters
----------
M : list[list[float]]
Returns
-------
:class:`~compas.geometry.Quaternion`
The new quaternion.
        Examples
        --------
>>> from compas.geometry import matrix_from_euler_angles
>>> ea = [0.2, 0.6, 0.2]
>>> M = matrix_from_euler_angles(ea)
>>> Quaternion.from_matrix(M)
Quaternion(0.949, 0.066, 0.302, 0.066)
"""
return cls(*quaternion_from_matrix(M))
@classmethod
def from_rotation(cls, R):
"""Create a Quaternion from a Rotatation.
Parameters
----------
R : :class:`~compas.geometry.Rotation`
Returns
-------
:class:`~compas.geometry.Quaternion`
The new quaternion.
        Examples
        --------
>>> from compas.geometry import Frame, Rotation
>>> R = Rotation.from_frame(Frame.worldYZ())
>>> Quaternion.from_rotation(R)
Quaternion(0.500, 0.500, 0.500, 0.500)
"""
return cls.from_matrix(R.matrix)
# ==========================================================================
# methods
# ==========================================================================
def unitize(self):
"""Scales the quaternion to make it unit-length.
Returns
-------
None
Examples
--------
>>> q = Quaternion(1.0, 1.0, 1.0, 1.0)
>>> q.is_unit
False
>>> q.unitize()
>>> q.is_unit
True
"""
qu = quaternion_unitize(self)
self.w, self.x, self.y, self.z = qu
def unitized(self):
"""Returns a quaternion with a unit-length.
Returns
-------
:class:`~compas.geometry.Quaternion`
Examples
--------
>>> q = Quaternion(1.0, 1.0, 1.0, 1.0)
>>> q.is_unit
False
>>> p = q.unitized()
>>> p.is_unit
True
"""
qu = quaternion_unitize(self)
return Quaternion(*qu)
def canonize(self):
"""Makes the quaternion canonic.
Returns
-------
None
Examples
--------
>>> from compas.geometry import Frame
>>> q = Quaternion.from_frame(Frame.worldZX())
>>> q
Quaternion(-0.500, 0.500, 0.500, 0.500)
>>> q.canonize()
>>> q
Quaternion(0.500, -0.500, -0.500, -0.500)
"""
qc = quaternion_canonize(self)
self.w, self.x, self.y, self.z = qc
def canonized(self):
"""Returns a quaternion in canonic form.
Returns
-------
:class:`~compas.geometry.Quaternion`
A quaternion in canonic form.
Examples
--------
>>> from compas.geometry import Frame
>>> q = Quaternion.from_frame(Frame.worldZX())
>>> q
Quaternion(-0.500, 0.500, 0.500, 0.500)
>>> p = q.canonized()
>>> p
Quaternion(0.500, -0.500, -0.500, -0.500)
"""
qc = quaternion_canonize(self)
return Quaternion(*qc)
def conjugate(self):
"""Conjugate the quaternion.
Returns
-------
None
Examples
--------
>>> q = Quaternion(1.0, 1.0, 1.0, 1.0)
>>> q.conjugate()
>>> q
Quaternion(1.000, -1.000, -1.000, -1.000)
"""
qc = quaternion_conjugate(self)
self.w, self.x, self.y, self.z = qc
def conjugated(self):
"""Returns a conjugate quaternion.
Returns
-------
:class:`~compas.geometry.Quaternion`
The conjugated quaternion.
Examples
--------
>>> q = Quaternion(1.0, 1.0, 1.0, 1.0)
>>> p = q.conjugated()
>>> q
Quaternion(1.000, 1.000, 1.000, 1.000)
>>> p
Quaternion(1.000, -1.000, -1.000, -1.000)
"""
qc = quaternion_conjugate(self)
return Quaternion(*qc)
|
{
"content_hash": "aa14fdc4ed656c5bd0bbf2897947e835",
"timestamp": "",
"source": "github",
"line_count": 509,
"max_line_length": 118,
"avg_line_length": 26.18860510805501,
"alnum_prop": 0.4855213803450863,
"repo_name": "compas-dev/compas",
"id": "d9b14e5dbcc6ddc2ff6e1149e8ab1697862aeaa1",
"size": "13330",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/compas/geometry/primitives/quaternion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3181804"
}
],
"symlink_target": ""
}
|
from accurate_bg_check.mixin.crud import BaseCrud
class Order(BaseCrud):
resource = 'order'
client = None
def __init__(self, client=None):
if not client:
raise Exception('accurate_bg_check client instance required')
self.client = client
|
{
"content_hash": "7ec594279a7db7e0e4d2c39aaca5e868",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 25.454545454545453,
"alnum_prop": 0.65,
"repo_name": "kaushal235/accurate_bg_check",
"id": "8f5e28778ca83cbc149174745b0e772d4ead8f90",
"size": "280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accurate_bg_check/order.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6028"
}
],
"symlink_target": ""
}
|
import json
from datetime import datetime
import argparse
import apache_beam as beam
from apache_beam.io import WriteToBigQuery
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.io.gcp.internal.clients import bigquery
import logging
from apache_beam.io import ReadFromText
import random
play_by_play_schema = {
'season': 'integer'
, 'game_id': 'string'
, 'is_home': 'integer'
, 'event_id': 'integer'
, 'is_neutral': 'string'
, 'home_pts': 'integer'
, 'away_pts': 'integer'
, 'player_id': 'integer'
, 'player_full_name': 'string'
, 'game_date': 'date'
, 'elapsed_time_sec': 'integer'
, 'game_clock': 'string'
, 'period': 'integer'
, 'team_code': 'integer'
, 'event_type': 'string'
, 'shot_made': 'boolean'
, 'shot_type': 'string'
, 'points_scored': 'integer'
, 'three_point_shot': 'boolean'
, 'rebound_type': 'string'
, 'timeout_duration': 'string'
, 'foul_type': 'string'
, 'substitution_type': 'string'
}
class Format(beam.DoFn):
def process(self, element):
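        # Imports are repeated inside process() so the DoFn stays picklable
        # when shipped to distributed workers (a common Beam pattern).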
import logging
import json
from datetime import datetime
try:
j = json.loads(element)
        except ValueError:
            # Malformed JSON: log it; the `if j` guard below skips the element.
            j = None
            logging.info('THERE_WAS_AN_ISSUE_WITH_THE_FOLLOWING: ' + str(element))
if j and j != []:
score_1_team = None
score_2_team = None
            # cast so comparisons with the integer team_code below are type-consistent
            home_code = int(j[0]['game_id'].split('-')[0])
i = 1
for p in j:
if p['actionType'] in ['2pt', '3pt', 'freethrow', 'rebound', 'block', 'assist', 'steal', 'turnover', 'foul', 'substitution', 'timeout']:
logging.info(p)
try:
team_code = int(p['teamExternalId'][2:])
except:
team_code = None
if p['actionType'] in ['2pt', '3pt', 'freethrow'] and not score_1_team and not score_2_team:
if p['score1'] > p['score2'] and team_code == home_code:
score_1_team = 'home'
score_2_team = 'away'
elif p['score1'] > p['score2'] and team_code != home_code:
score_1_team = 'away'
score_2_team = 'home'
elif p['score2'] > p['score1'] and team_code == home_code:
score_1_team = 'away'
score_2_team = 'home'
elif p['score2'] > p['score1'] and team_code != home_code:
score_1_team = 'home'
score_2_team = 'away'
game_id_split = p['game_id'].split('-')
month = str(game_id_split[3]) if len(str(game_id_split[3])) > 1 else '0' + str(game_id_split[3])
day = str(game_id_split[4]) if len(str(game_id_split[4])) > 1 else '0' + str(game_id_split[4])
game_id = str(game_id_split[0]) + '-' + str(game_id_split[1]) + '-' + str(game_id_split[2]) + '-' + month + '-' + day
game_date = str(game_id_split[2]) + '-' + month + '-' + day
player_id = p['personExternalId'] if 'personExternalId' in p.keys() else None
home_pts = 0
away_pts = 0
shot_type = None
three_point_shot = False
points_scored = 0
rebound_type = None
substitution_type = None
foul_type = None
timeout_duration = None
shot_made = False
if 'firstName' in p.keys() and 'familyName' in p.keys():
player_full_name = p['firstName'].upper() + ',' + p['familyName'].upper()
elif 'firstName' in p.keys():
player_full_name = p['firstName'].upper()
elif 'familyName' in p.keys():
player_full_name = p['familyName'].upper()
else:
player_full_name = None
gc_split = p['clock'].split(':')
mins = int(gc_split[0])
sec = int(gc_split[1])
                    if p['periodType'] == 'REGULAR':
                        period = int(p['period'])
                    elif p['periodType'] == 'OVERTIME':
                        period = 2 + int(p['period'])
                    else:
                        # guard against unexpected period types so `period` is always bound
                        period = int(p['period'])
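                    # Regulation halves are 20 minutes (1200 s); each overtime period is 5 minutes (300 s).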
if period < 3:
min_passed = 19 - mins
else:
min_passed = 4 - mins
sec_passed = 60 - sec
if period == 1:
elapsed_time_sec = (60 * min_passed) + sec_passed
elif period == 2:
elapsed_time_sec = 1200 + (60 * min_passed) + sec_passed
elif period == 3:
elapsed_time_sec = 2400 + (60 * min_passed) + sec_passed
elif period > 3:
elapsed_time_sec = 2400 + (300 * (period - 3)) + (60 * min_passed) + sec_passed
if p['actionType'] in ['2pt', '3pt', 'freethrow']:
if p['success'] == 1:
action_type = 'GOOD'
shot_made = True
elif p['success'] == 0:
action_type = 'MISS'
shot_made = False
if p['actionType'] == '3pt':
shot_type = '3PTR'
three_point_shot = True
if p['success'] == 1:
points_scored = 3
else:
points_scored = 0
elif p['actionType'] == 'freethrow':
shot_type = 'FT'
three_point_shot = False
if p['success'] == 1:
points_scored = 1
else:
points_scored = 0
elif p['actionType'] == '2pt' and p['subType'] in ['layup', 'drivinglayup']:
three_point_shot = False
shot_type = 'LAYUP'
if p['success'] == 1:
points_scored = 2
else:
points_scored = 0
elif p['actionType'] == '2pt' and p['subType'] in ['dunk']:
three_point_shot = False
shot_type = 'DUNK'
if p['success'] == 1:
points_scored = 2
else:
points_scored = 0
elif p['actionType'] == '2pt' and p['subType'] in ['tipin']:
three_point_shot = False
shot_type = 'TIPIN'
if p['success'] == 1:
points_scored = 2
else:
points_scored = 0
elif p['actionType'] == '2pt' and p['subType'] in ['jumpshot', 'floatingjumpshot', 'stepbackjumpshot', 'pullupjumpshot', 'turnaroundjumpshot', 'fadeaway']:
three_point_shot = False
shot_type = 'JUMPER'
if p['success'] == 1:
points_scored = 2
else:
points_scored = 0
elif p['actionType'] == '2pt' and p['subType'] == 'hookshot':
three_point_shot = False
shot_type = 'HOOK'
if p['success'] == 1:
points_scored = 2
else:
points_scored = 0
elif p['actionType'] == '2pt' and p['subType'] == 'alleyoop':
three_point_shot = False
shot_type = 'ALLEYOOP'
if p['success'] == 1:
points_scored = 2
else:
points_scored = 0
else:
three_point_shot = False
shot_type = 'OTHER'
if p['success'] == 1:
points_scored = 2
else:
points_scored = 0
else:
points_scored = 0
shot_made = False
three_point_shot = False
shot_type = None
if p['actionType'] == 'rebound' and p['subType'] not in ['offensivedeadball', 'defensivedeadball']:
action_type = 'REBOUND'
if p['subType'] == 'offensive':
rebound_type = 'OFF'
if p['subType'] == 'defensive':
rebound_type = 'DEF'
elif p['actionType'] == 'rebound' and p['subType'] in ['offensivedeadball', 'defensivedeadball']:
action_type = 'deadball'
rebound_type = 'DEADB'
else:
rebound_type = None
if p['actionType'] == 'block':
action_type = 'BLOCK'
elif p['actionType'] == 'assist':
action_type = 'ASSIST'
elif p['actionType'] == 'steal':
action_type = 'STEAL'
elif p['actionType'] == 'turnover':
action_type = 'TURNOVER'
if p['actionType'] == 'foul':
action_type = 'FOUL'
if p['subType'] in ['technical', 'benchTechnical', 'coachTechnical', 'adminTechnical']:
foul_type = 'TECH'
else:
foul_type = None
else:
foul_type = None
if p['actionType'] == 'substitution':
action_type = 'SUB'
if p['subType'] == 'in':
substitution_type = 'ON'
elif p['subType'] == 'out':
substitution_type = 'OFF'
else:
substitution_type = None
if p['actionType'] == 'timeout' and p['subType'] in ['media', 'commercial']:
action_type = 'TV_TIMEOUT'
timeout_duration = None
elif p['actionType'] == 'timeout':
if p['subType'] == 'full':
action_type = 'TIMEOUT'
timeout_duration = 'FULL'
elif p['subType'] == 'short':
action_type = 'TIMEOUT'
timeout_duration = '30SEC'
else:
action_type = 'TIMEOUT'
timeout_duration = None
else:
timeout_duration = None
if points_scored > 0:
if score_1_team == 'home':
home_pts = p['score1']
away_pts = p['score2']
else:
away_pts = p['score1']
home_pts = p['score2']
this_play = {
'season': p['season']
, 'event_id': i
, 'game_id': game_id
, 'is_home': 1 if team_code == home_code else 0
, 'is_neutral': p['neutral_site']
, 'home_pts': home_pts
, 'away_pts': away_pts
, 'player_id': player_id
, 'player_full_name': player_full_name
, 'game_date': game_date
, 'elapsed_time_sec': elapsed_time_sec
, 'game_clock': p['clock'][:5]
, 'period': period
, 'team_code': team_code
, 'event_type': action_type
, 'shot_made': shot_made
, 'shot_type': shot_type
, 'points_scored': points_scored
, 'three_point_shot': three_point_shot
, 'rebound_type': rebound_type
, 'timeout_duration': timeout_duration
, 'foul_type': foul_type
, 'substitution_type': substitution_type
}
yield this_play
i += 1
else:
logging.info(p['actionType'] + ' is not in NCAA data')
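# Editor sketch (not part of the original pipeline): Format can be smoke-tested
# locally by feeding it one synthetic JSON record. Every field value below is an
# illustrative assumption, not real NCAA feed data.
def _smoke_test_format():
    sample = json.dumps([{
        'actionType': '2pt', 'subType': 'layup', 'success': 1,
        'teamExternalId': 'id12345',       # -> team_code 12345
        'game_id': '12345-ncaa-2019-3-5',  # home code / league / year-month-day
        'season': 2019, 'neutral_site': 0,
        'score1': 2, 'score2': 0,
        'clock': '19:45', 'periodType': 'REGULAR', 'period': 1,
        'firstName': 'Pat', 'familyName': 'Example',
    }])
    for row in Format().process(sample):
        print(row)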
class Check(beam.DoFn):
def process(self, element):
        print('new item')
        print(element)
def run(argv=None):
import random
import datetime
currentDT = datetime.datetime.now()
# Your GCP Project ID and GCS locations
# are passed through as part of your Dataflow job command
# BigQuery output info
dataset = 'lab_dev'
table = 'play_by_play'
# Dataflow job name (don't edit)
job_name = 'play-by-play-{}'.format(currentDT.strftime("%Y-%m-%d-%H-%M-%S"))
filepath = 'gs://cloud-training-demos/ncaa/next-bootcamp/2018-19/play_by_play/*'
pipeline_args = [
# change these
'--runner=DataflowRunner',
'--project={}'.format(argv['project_id']),
'--dataset={}'.format(dataset),
'--table={}'.format(table),
'--staging_location={}'.format(argv['staging']),
'--temp_location={}'.format(argv['temp_location']),
'--num_workers=5',
'--max_num_workers=20',
'--region={}'.format(argv['region']),
'--job_name={}'.format(job_name)
]
pipeline_options = PipelineOptions(pipeline_args)
with beam.Pipeline(options=pipeline_options) as p:
files = p | ReadFromText(filepath)
keyed = files | 'Key' >> beam.Map(lambda x: (random.randint(1, 101), x))
grouped = keyed | 'GBK' >> beam.GroupByKey()
        flattened = grouped | 'Expand' >> beam.FlatMap(lambda x: x[1])
        to_insert = flattened | 'Format' >> beam.ParDo(Format())
# to_insert | beam.ParDo(Check())
table_schema = bigquery.TableSchema()
        for col, col_type in play_by_play_schema.items():
this_schema = bigquery.TableFieldSchema()
this_schema.name = col
this_schema.type = col_type
this_schema.mode = 'nullable'
table_schema.fields.append(this_schema)
to_insert | WriteToBigQuery(
table='{_project_}:{_dataset_}.{_table_}'.format(_dataset_ = dataset, _project_ = argv['project_id'], _table_ = table),
schema=table_schema,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--project_id',
help='Project ID where the job is to run',
required=True
)
parser.add_argument(
'--temp_location',
help='Bucket to store output temporarily',
required=True
)
parser.add_argument(
'--staging',
help='Folder on GCS to store ',
required=True
)
parser.add_argument(
'--region',
help='Region where to run the job',
required=True
)
args, _ = parser.parse_known_args()
hparams = args.__dict__
run(hparams)
|
{
"content_hash": "665b5835d814be542adee625adf20289",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 179,
"avg_line_length": 42.898963730569946,
"alnum_prop": 0.4093846246754031,
"repo_name": "GoogleCloudPlatform/training-data-analyst",
"id": "9a5fed7c7b1081c9f5278cb5767d82c8a00eb73c",
"size": "16559",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "blogs/ncaa/dataflow/play_by_play.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39536"
},
{
"name": "C#",
"bytes": "23445"
},
{
"name": "C++",
"bytes": "30926"
},
{
"name": "CSS",
"bytes": "53087"
},
{
"name": "Dockerfile",
"bytes": "90856"
},
{
"name": "Go",
"bytes": "93755"
},
{
"name": "HCL",
"bytes": "73891"
},
{
"name": "HTML",
"bytes": "2342167"
},
{
"name": "Java",
"bytes": "2441030"
},
{
"name": "JavaScript",
"bytes": "3957504"
},
{
"name": "Jinja",
"bytes": "257585"
},
{
"name": "Jsonnet",
"bytes": "5696"
},
{
"name": "Jupyter Notebook",
"bytes": "242016061"
},
{
"name": "Makefile",
"bytes": "12642"
},
{
"name": "PigLatin",
"bytes": "11558"
},
{
"name": "Pug",
"bytes": "457977"
},
{
"name": "Python",
"bytes": "18543833"
},
{
"name": "R",
"bytes": "68"
},
{
"name": "Scala",
"bytes": "27161"
},
{
"name": "Shell",
"bytes": "763259"
},
{
"name": "TypeScript",
"bytes": "66858"
}
],
"symlink_target": ""
}
|
from proteus import *
from proteus import (StepControl,
TimeIntegration,
NonlinearSolvers,
LinearSolvers,
LinearAlgebraTools)
from threep_navier_stokes_sed_p import *
from tank import *
if timeDiscretization=='vbdf':
timeIntegration = VBDF
timeOrder=2
stepController = Min_dt_cfl_controller
elif timeDiscretization=='flcbdf':
timeIntegration = FLCBDF
#stepController = FLCBDF_controller_sys
stepController = Min_dt_cfl_controller
time_tol = 10.0*ns_sed_nl_atol_res
atol_u = {0:time_tol,1:time_tol}
rtol_u = {0:time_tol,1:time_tol}
else:
timeIntegration = BackwardEuler_cfl
stepController = Min_dt_cfl_controller
femSpaces = {0:basis,
1:basis}
massLumping = False
numericalFluxType = None
conservativeFlux = None
numericalFluxType = RANS3PSed.NumericalFlux
subgridError = RANS3PSed.SubgridError(coefficients,nd,lag=ns_sed_lag_subgridError,hFactor=hFactor)
shockCapturing = RANS3PSed.ShockCapturing(coefficients,nd,ns_sed_shockCapturingFactor,lag=ns_sed_lag_shockCapturing)
fullNewtonFlag = True
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.Newton
nonlinearSmoother = None
linearSmoother = None
matrix = SparseMatrix
if useOldPETSc:
multilevelLinearSolver = LinearSolvers.PETSc
levelLinearSolver = LinearSolvers.PETSc
else:
multilevelLinearSolver = LinearSolvers.KSP_petsc4py
levelLinearSolver = LinearSolvers.KSP_petsc4py
if useSuperlu:
multilevelLinearSolver = LinearSolvers.LU
levelLinearSolver = LinearSolvers.LU
linear_solver_options_prefix = 'rans3p_sed_'
nonlinearSolverConvergenceTest = 'rits'
levelNonlinearSolverConvergenceTest = 'rits'
linearSolverConvergenceTest = 'r-true'
tolFac = 0.0
linTolFac = 0.01
l_atol_res = 0.01*ns_sed_nl_atol_res
nl_atol_res = ns_sed_nl_atol_res
useEisenstatWalker = False
maxNonlinearIts = 50
maxLineSearches = 0
conservativeFlux = {0:'point-eval'}
#conservativeFlux = {0:'pwl-bdm-opt'}
#auxiliaryVariables=[pointGauges,lineGauges]
auxiliaryVariables = ct.domain.auxiliaryVariables['thp']+[ct.vs_output]
|
{
"content_hash": "7a1696e317fe40bbba57241238954a87",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 116,
"avg_line_length": 30.625,
"alnum_prop": 0.7356009070294784,
"repo_name": "erdc-cm/air-water-vv",
"id": "3966270554157d8f092553b03fdb001e6fca9b36",
"size": "2205",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "2d/sediment/friction_angle_dambrek_sediment/threep_navier_stokes_sed_n.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1128"
},
{
"name": "GLSL",
"bytes": "3787"
},
{
"name": "Jupyter Notebook",
"bytes": "8264154"
},
{
"name": "M",
"bytes": "435"
},
{
"name": "Python",
"bytes": "1992474"
},
{
"name": "Shell",
"bytes": "14414"
}
],
"symlink_target": ""
}
|
from dynamodb_wrapper.client import Client
from dynamodb_wrapper.fields import Field, FieldType
from dynamodb_wrapper.models import DynamoDBModel
|
{
"content_hash": "294169fa4986f3db0807f4cafa0ece79",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 52,
"avg_line_length": 48.666666666666664,
"alnum_prop": 0.863013698630137,
"repo_name": "vicenteneto/dynamodb-wrapper",
"id": "972128980ae7dff70ce1f53542a164d9f19c68c4",
"size": "146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamodb_wrapper/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5459"
}
],
"symlink_target": ""
}
|
import datetime
from flask import g
from ..._compat import as_unicode
from mongoengine import Document
from mongoengine import DateTimeField, StringField, ReferenceField, ListField, BooleanField, IntField
def get_user_id():
    try:
        return g.user.id
    except Exception:
        return None
class Permission(Document):
name = StringField(max_length=100, required=True, unique=True)
def __unicode__(self):
return self.name
class ViewMenu(Document):
name = StringField(max_length=100, required=True, unique=True)
def __eq__(self, other):
return (isinstance(other, self.__class__)) and (self.name == other.name)
    def __ne__(self, other):
        return self.name != other.name
def __unicode__(self):
return self.name
class PermissionView(Document):
permission = ReferenceField(Permission)
view_menu = ReferenceField(ViewMenu)
def __unicode__(self):
return str(self.permission).replace('_', ' ') + ' on ' + str(self.view_menu)
def __repr__(self):
return str(self.permission).replace('_', ' ') + ' on ' + str(self.view_menu)
class Role(Document):
name = StringField(max_length=64, required=True, unique=True)
permissions = ListField(ReferenceField(PermissionView))
def __unicode__(self):
return self.name
def __repr__(self):
return self.name
class User(Document):
first_name = StringField(max_length=64, required=True)
last_name = StringField(max_length=64, required=True)
username = StringField(max_length=64, required=True, unique=True)
password = StringField(max_length=256)
active = BooleanField()
email = StringField(max_length=64, required=True, unique=True)
last_login = DateTimeField()
login_count = IntField()
fail_login_count = IntField()
roles = ListField(ReferenceField(Role))
created_on = DateTimeField(default=datetime.datetime.now)
changed_on = DateTimeField(default=datetime.datetime.now)
    # pass the callable itself so it is evaluated per document, not once at import time
    created_by = ReferenceField('self', default=get_user_id)
    changed_by = ReferenceField('self', default=get_user_id)
def is_authenticated(self):
return True
def is_active(self):
return self.active
def is_anonymous(self):
return False
def get_id(self):
return as_unicode(self.id)
def get_full_name(self):
return u'{0} {1}'.format(self.first_name, self.last_name)
def __unicode__(self):
return self.get_full_name()
class RegisterUser(Document):
first_name = StringField(max_length=64, required=True)
last_name = StringField(max_length=64, required=True)
username = StringField(max_length=64, required=True, unique=True)
password = StringField(max_length=256)
email = StringField(max_length=64, required=True)
registration_date = DateTimeField(default=datetime.datetime.now)
registration_hash = StringField(max_length=256)
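# Editor sketch, assuming mongoengine.connect(...) has been called elsewhere:
# passing get_user_id without parentheses lets mongoengine resolve the current
# user when each document is created, instead of freezing one value at import.
def _example_create_user():
    user = User(
        first_name='Ada', last_name='Lovelace', username='ada',
        email='ada@example.com', active=True,
    )
    user.save()  # created_by / changed_by are resolved here via get_user_id
    return user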
|
{
"content_hash": "f9f5382339f5f7614d98c39f8b50034b",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 101,
"avg_line_length": 29.198019801980198,
"alnum_prop": 0.6700576466598847,
"repo_name": "qpxu007/Flask-AppBuilder",
"id": "ba943972b14e68df09f34a5fd03bfccedc9b7912",
"size": "2949",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flask_appbuilder/security/mongoengine/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "47287"
},
{
"name": "HTML",
"bytes": "79465"
},
{
"name": "JavaScript",
"bytes": "159467"
},
{
"name": "Python",
"bytes": "435820"
},
{
"name": "Shell",
"bytes": "627"
}
],
"symlink_target": ""
}
|
import os
import sys
import pygame
from common import mvc
import textrect
import math
from common.constants import *
from client.constants import *
from common.util.rect import Rect
class Model(mvc.Model):
def __init__(self, theMap, isHost):
super(Model, self).__init__()
self.theMap = theMap
self.openEditor = False
self.sendNetMessage = False
self.starting = False
self.bg = pygame.Surface(SCREEN_SIZE)
self.bg.fill(CHARACTER_SELECT_BG_COLOR)
self.group = CharacterPanelGroup(self.theMap.numOfCharactersPerTeam())
self.currSelected = None
x = ((CHARACTER_SELECT_PANEL_SELECTION_BORDER_WIDTH * 2) +
CHARACTER_SELECT_PANEL_SIZE[0])
y = ((CHARACTER_SELECT_PANEL_SELECTION_BORDER_WIDTH * 2) +
CHARACTER_SELECT_PANEL_SIZE[1])
self.selectionBorder = pygame.Surface((x, y))
self.selectionBorder.fill(CHARACTER_SELECT_PANEL_SELECTION_BORDER_COLOR)
tempRect = Rect((0, 0), CHARACTER_SELECT_PLAYER_SIZE)
tempRect.bottom = SCREEN_SIZE[1] - CHARACTER_SELECT_GROUP_FROM_TOP
tempRect.centerx = SCREEN_SIZE[0] / 2
self.clientPanel = PlayerPanel(tempRect, "Client", self.group.num)
tempRect = Rect((0, 0), CHARACTER_SELECT_PLAYER_SIZE)
tempRect.bottom = (self.clientPanel.rect.top -
CHARACTER_SELECT_GROUP_SPACING)
tempRect.centerx = SCREEN_SIZE[0] / 2
self.hostPanel = PlayerPanel(tempRect, "Host", self.group.num)
if isHost:
self.myPanel = self.hostPanel
self.theirPanel = self.clientPanel
else:
self.myPanel = self.clientPanel
self.theirPanel = self.hostPanel
tempRect = Rect((0, 0), CHARACTER_SELECT_BUTTON_SIZE)
tempRect.left = self.myPanel.rect.right + CHARACTER_SELECT_GROUP_SPACING
tempRect.top = self.myPanel.rect.top
self.readyButton = Button(tempRect, "Ready")
if isHost:
tempRect = Rect((0, 0), CHARACTER_SELECT_BUTTON_SIZE)
tempRect.left = (self.theirPanel.rect.right +
CHARACTER_SELECT_GROUP_SPACING)
tempRect.top = self.theirPanel.rect.top
self.startButton = Button(tempRect, "Start")
else:
self.startButton = None
x = ((CHARACTER_SELECT_PANEL_SELECTION_BORDER_WIDTH * 2) +
CHARACTER_SELECT_BUTTON_SIZE[0])
y = ((CHARACTER_SELECT_PANEL_SELECTION_BORDER_WIDTH * 2) +
CHARACTER_SELECT_BUTTON_SIZE[1])
self.selectionBorderButton = pygame.Surface((x, y))
self.selectionBorderButton.fill(
CHARACTER_SELECT_PANEL_SELECTION_BORDER_COLOR)
self.loadingImage = INTERFACE_GRAPHICS[9]
self.loadingRect = Rect( (0, 0), self.loadingImage.get_size() )
self.loadingRect.center = ( SCREEN_SIZE[0] / 2, SCREEN_SIZE[1] / 2)
def update(self):
pass
    def mouseMoved(self, pos):
        for p in self.group.characterPanels:
            if p.rect.collidepoint(pos):
                self.currSelected = p
                return
        for b in (self.readyButton, self.startButton):
            if b is not None and b.enabled and b.rect.collidepoint(pos):
                self.currSelected = b
                return
        self.currSelected = None
def click(self):
if not self.currSelected is None:
if isinstance(self.currSelected, CharacterPanel):
if not self.myPanel.ready:
self.openEditor = True
self.sendNetMessage = True
elif (self.currSelected is self.readyButton):
if self.myPanel.ready:
self.myPanel.ready = False
self.myPanel.changeVal()
self.sendNetMessage = True
else:
if self.group.isMax():
self.myPanel.ready = True
self.myPanel.changeVal()
self.sendNetMessage = True
if not self.startButton is None:
self.startButton.setButton((self.myPanel.ready )
and (self.theirPanel.ready))
elif (self.currSelected is self.startButton):
if (self.myPanel.ready and self.theirPanel.ready):
self.starting = True
self.sendNetMessage = True
def click2(self):
if not self.currSelected is None:
if isinstance(self.currSelected, CharacterPanel):
if not self.myPanel.ready:
self.setCharacter(None)
self.sendNetMessage = True
def getSelectionBorder(self):
if isinstance(self.currSelected, Button):
border = self.selectionBorderButton
else:
border = self.selectionBorder
if self.currSelected is None:
loc = None
else:
x = (self.currSelected.rect.left -
CHARACTER_SELECT_PANEL_SELECTION_BORDER_WIDTH)
y = (self.currSelected.rect.top -
CHARACTER_SELECT_PANEL_SELECTION_BORDER_WIDTH)
loc = (x, y)
return border, loc
def setCharacter(self, c):
if not (self.alreadyUsed(c)):
self.currSelected.setCharacter(c)
num = self.group.getNumActive()
self.myPanel.changeVal(num)
self.readyButton.setButton(self.group.isMax())
def alreadyUsed(self, c):
if not c is None:
for p in self.group.characterPanels:
if not p.character is None:
if p.character.name == c.name:
return True
return False
def buildNetMessage(self):
if self.sendNetMessage:
self.sendNetMessage = False
msg = str(self.group.getNumActive())
if self.group.getNumActive() < 10:
msg = "0" + msg
if self.starting:
msg = msg + "s"
elif self.myPanel.ready:
msg = msg + "r"
else:
msg = msg + "0"
else:
msg = "-00"
        if len(msg) != CHARACTER_SELECT_NET_MESSAGE_SIZE:
            raise Exception("net message '%s' has the wrong size" % msg)
return msg
def netMessageSize(self):
return CHARACTER_SELECT_NET_MESSAGE_SIZE
def parseNetMessage(self, msg, p):
if msg[0] != "-":
try:
num = int(msg[0:2])
except:
raise
if msg[2] == "r":
ready = True
elif msg[2] == "s":
self.starting = True
return
else:
ready = False
self.theirPanel.ready = ready
self.theirPanel.changeVal(num)
if not self.startButton is None:
self.startButton.setButton((self.myPanel.ready )
and (self.theirPanel.ready))
def getCharacters(self):
chars = []
for i in self.group.characterPanels:
chars.append(i.character)
return chars
def numEnemiesExpected(self):
return self.theirPanel.maxVal
def getCurrSelectedName(self):
if ((not self.currSelected is None) and
(not self.currSelected.character is None)):
return self.currSelected.character.name
else:
return None
class PlayerPanel(object):
def __init__(self, rect, name, maxVal):
self.rect = rect
self.name = name
self.ready = False
self.maxVal = maxVal
self.backPanel = pygame.Surface(self.rect.size)
self.backPanel.fill(CHARACTER_SELECT_PANEL_COLOR_FILL)
pygame.draw.rect(self.backPanel, CHARACTER_SELECT_PANEL_COLOR_BORDER,
(0 + CHARACTER_SELECT_PANEL_BORDER_SIZE,
0 + CHARACTER_SELECT_PANEL_BORDER_SIZE,
(self.rect.width -
(CHARACTER_SELECT_PANEL_BORDER_SIZE * 2)),
(self.rect.height -
(CHARACTER_SELECT_PANEL_BORDER_SIZE * 2)) ),
CHARACTER_SELECT_PANEL_BORDER_WIDTH)
self.changeVal(0)
def changeVal(self, val=None):
if not val is None:
self.val = val
self.panel = pygame.Surface(self.rect.size)
self.panel.blit(self.backPanel, (0,0))
font = CHARACTER_SELECTION_FONT
textHeight = font.get_linesize() + 2
tempRect = Rect( (0, 0), (self.rect.width, textHeight) )
text = textrect.render_textrect(self.name, font, tempRect,
CHARACTER_SELECTION_FONT_COLOR,
ALMOST_BLACK, 0, True)
tempRect = Rect((0, 0), text.get_size())
tempRect.top = ((CHARACTER_SELECT_PANEL_SIZE[1] / 2) -
(tempRect.height / 2))
loc = (tempRect.left + CHARACTER_SELECT_PANEL_BORDER_WIDTH +
CHARACTER_SELECT_PANEL_BORDER_SIZE,
tempRect.top)
self.panel.blit(text, loc)
if self.ready:
msg = "READY"
color = CHARACTER_SELECTION_READY_COLOR
else:
msg = str(self.val) + "/" + str(self.maxVal)
color = CHARACTER_SELECTION_FONT_COLOR
text = textrect.render_textrect(msg, font, tempRect,
color, ALMOST_BLACK, 2, True)
loc = (tempRect.left - CHARACTER_SELECT_PANEL_BORDER_WIDTH -
CHARACTER_SELECT_PANEL_BORDER_SIZE,
tempRect.top)
self.panel.blit(text, loc)
def draw(self, screen):
screen.blit(self.panel, self.rect.topleft)
class Button(object):
def __init__(self, rect, msg):
self.rect = rect
self.msg = msg
self.enabled = True
self.backPanel = pygame.Surface(self.rect.size)
self.backPanel.fill(CHARACTER_SELECT_PANEL_COLOR_FILL)
self.setButton(False)
def setButton(self, val):
if not (val == self.enabled):
self.enabled = val
self.panel = pygame.Surface(self.rect.size)
self.panel.blit(self.backPanel, (0,0))
if self.enabled:
color = CHARACTER_SELECTION_BUTTON_COLOR_ON
else:
color = CHARACTER_SELECTION_BUTTON_COLOR_OFF
font = CHARACTER_SELECTION_FONT
textHeight = font.get_linesize() + 2
tempRect = Rect( (0, 0), (self.rect.width, textHeight) )
text = textrect.render_textrect(self.msg, font, tempRect,
color,
ALMOST_BLACK, 1, True)
tempRect = Rect((0, 0), text.get_size())
tempRect.top = ((CHARACTER_SELECT_PANEL_SIZE[1] / 2) -
(tempRect.height / 2))
self.panel.blit(text, tempRect.topleft)
def draw(self, screen):
screen.blit(self.panel, self.rect.topleft)
class CharacterPanel(object):
def __init__(self, rect, character):
self.rect = rect
self.backPanel = pygame.Surface(self.rect.size)
self.backPanel.fill(CHARACTER_SELECT_PANEL_COLOR_FILL)
pygame.draw.rect(self.backPanel, CHARACTER_SELECT_PANEL_COLOR_BORDER,
(0 + CHARACTER_SELECT_PANEL_BORDER_SIZE,
0 + CHARACTER_SELECT_PANEL_BORDER_SIZE,
(self.rect.width -
(CHARACTER_SELECT_PANEL_BORDER_SIZE * 2)),
(self.rect.height -
(CHARACTER_SELECT_PANEL_BORDER_SIZE * 2)) ),
CHARACTER_SELECT_PANEL_BORDER_WIDTH)
self.setCharacter(None)
def setCharacter(self, c):
self.character = c
self.panel = pygame.Surface(CHARACTER_SELECT_PANEL_SIZE)
self.panel.blit(self.backPanel, (0, 0))
if not self.character is None:
font = CHARACTER_SELECTION_FONT
msg = self.character.name
textHeight = font.get_linesize() + 2
tempRect = Rect( (0, 0),
(CHARACTER_SELECT_PANEL_SIZE[0],
textHeight) )
text = textrect.render_textrect(msg, font, tempRect,
CHARACTER_SELECTION_FONT_COLOR,
ALMOST_BLACK, 1, True)
tempRect = Rect((0, 0), text.get_size())
tempRect.left = ((CHARACTER_SELECT_PANEL_SIZE[0] / 2) -
(tempRect.width / 2))
tempRect.top = ((CHARACTER_SELECT_PANEL_SIZE[1] / 2) -
(tempRect.height / 2))
self.panel.blit(text, tempRect.topleft)
class CharacterPanelGroup(object):
def __init__(self, num):
self.num = num
numOfCols = int(math.ceil(float(num) /
float(CHARACTER_SELECT_PANELS_PER_COL)))
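        # distribute the panels round-robin so column counts differ by at most one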
numOfPanelsInCol = []
for i in range(numOfCols):
numOfPanelsInCol.append(0)
count = 0
currCol = 0
while count < num:
numOfPanelsInCol[currCol] += 1
count += 1
currCol += 1
if currCol == numOfCols:
currCol = 0
self.characterPanels = []
for i in range(len(numOfPanelsInCol)):
colMid = (SCREEN_SIZE[0] / (len(numOfPanelsInCol) + 1)) * (i+1)
for j in range(numOfPanelsInCol[i]):
rect = Rect((0, 0), CHARACTER_SELECT_PANEL_SIZE)
rect.centerx = colMid
rect.top = (CHARACTER_SELECT_GROUP_FROM_TOP +
((CHARACTER_SELECT_PANEL_SIZE[1] +
CHARACTER_SELECT_GROUP_SPACING)
* j))
self.characterPanels.append(CharacterPanel(rect, None))
def getNumActive(self):
count = 0
for i in self.characterPanels:
if not i.character is None:
count += 1
return count
def isMax(self):
return (self.getNumActive() == self.num)
def draw(self, screen):
for p in self.characterPanels:
screen.blit(p.panel, p.rect.topleft)
|
{
"content_hash": "206d53a8cb2e8b3a3c0689caf82a75f2",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 80,
"avg_line_length": 35.111638954869356,
"alnum_prop": 0.5305100798268164,
"repo_name": "Wopple/fimbulvetr",
"id": "a00f98e6b3dc77966962a214ec2f7049ff1e54f8",
"size": "14782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/client/characterSelect_m.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "621329"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
}
|
BASE_LANGUAGE = 'fr'
TARGET_LANGUAGE = 'de'
FILENAME = 'algorea.json'
TARGET_FILENAME = 'algorea.json.new'
NEW_TRANSLATIONS_FILE = 'translations_german.json'
import json, re
# Read target language translations
try:
available_translations = json.load(open('%s/%s' % (TARGET_LANGUAGE, FILENAME)))
except (IOError, ValueError):
print("Warning: Couldn't read `%s/%s`." % (TARGET_LANGUAGE, FILENAME))
available_translations = {}
print("Loaded %d available_translations." % len(available_translations.keys()))
# Read new translations
new_translations = json.load(open(NEW_TRANSLATIONS_FILE, 'r'))
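# Each entry of new_translations[TARGET_LANGUAGE] is a 3-element list; only
# item[0] (the key) and item[2] (the translation) are used below.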
ntnb = 0
for item in new_translations[TARGET_LANGUAGE]:
if len(item) != 3:
print("Warning: item with wrong length: %s" % item)
continue
available_translations[item[0]] = item[2]
ntnb += 1
print("Loaded %d new_translations, %d total translations." % (ntnb, len(available_translations.keys())))
# Read original translations file and write new one
tr_regexp = re.compile(r'^\s*"([^"]+)"\s*:\s*')
target_file = open('%s/%s' % (TARGET_LANGUAGE, TARGET_FILENAME), 'w')
write_buffer = None # Buffer to check if we should put a comma or not
for l in open('%s/%s' % (BASE_LANGUAGE, FILENAME), 'r'):
tr_m = tr_regexp.match(l)
if tr_m:
# It's a translation line
line_base = tr_m.group(0)
key = tr_m.group(1)
if key in available_translations:
if write_buffer is not None:
# Complicated things just to know if we need a comma...
target_file.write(',\n')
target_file.write(write_buffer)
write_buffer = ''
new_line = '%s"%s"' % (line_base, available_translations[key].replace('"', '\\"'))
target_file.write(new_line)
write_buffer = ''
else:
print("Warning: key `%s` not found in available_translations." % key)
else:
# It's another line
l_strip = l.strip()
if l_strip == '{':
target_file.write(l)
elif l_strip == '}':
target_file.write('\n')
target_file.write(write_buffer)
write_buffer = ''
target_file.write(l)
else:
if l_strip != '':
print("Warning: line `%s` unrecognized; copying as-is." % l[:-1])
if write_buffer is None: write_buffer = ''
write_buffer += l
target_file.write(write_buffer)
target_file.close()
print("Done!")
|
{
"content_hash": "9a59afa1b20016b80e6be9e873c9fe62",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 104,
"avg_line_length": 35.15714285714286,
"alnum_prop": 0.5900040633888664,
"repo_name": "France-ioi/commonFramework",
"id": "4751aaefd71dc91e6dc4f566593d664271722ca7",
"size": "2751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i18n/write-translations.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10952"
},
{
"name": "JavaScript",
"bytes": "95582"
},
{
"name": "PHP",
"bytes": "131612"
},
{
"name": "Python",
"bytes": "5575"
}
],
"symlink_target": ""
}
|
""" module containing command line interface implementation
and utilities.
"""
import argparse
from subprocess import call
import os
import json
from clamm import config, get_config_path, config_template
from clamm import util
def create_library_parsers(subps):
"""creates library sub-parsers
"""
lib_p = subps.add_parser(
"library",
help="""Commands for acting on each audio file in the library,
or a specified directory under the library.""")
lib_p.add_argument(
"-d", "--dir", type=str, default=config["path"]["library"],
help="""
the target directory (default: config['path']['library'])
""")
lib_subps = lib_p.add_subparsers(dest="sub_cmd")
# ACTION
lib_act_p = lib_subps.add_parser(
"action",
help="""
Apply one of the many small(er) library actions.
Actions can be chained together, as in
$ clamm library action --prune_artist_tags --synchronize_artist
""")
lib_act_p.add_argument("-k", "--key", help="tag key")
lib_act_p.add_argument("-v", "--val", help="tag value")
lib_act_p.add_argument(
"--prune_artist_tags", action="store_true",
help="""
Conform artist/albumartist tag key names by applying
config['library']['tags']['prune_artist'] rule.
e.g., ALBUMARTIST instead of ALBUM_ARTIST
""")
lib_act_p.add_argument(
"--recently_added", action="store_true",
help="""
Generate a recently_added playlist by looking at the
date of the parent directory.
""")
lib_act_p.add_argument(
"--remove_junk_tags", action="store_true",
help="""
Similar to prune_artist_tags, but indiscriminately
removes tags in config['library']['tags']['junk'].
""")
lib_act_p.add_argument(
"--change_tag_by_name", action="store_true",
help="""
globally change a single tag field, applied to a
directory or library. Can also be used to delete
a tag by name.
""")
lib_act_p.add_argument(
"--handle_composer_as_artist", action="store_true",
help="""
Test for and handle composer embedded in artist fields.
Background:
Many taggers/publishers deal with classical music
tags by lumping the composer in with the artist,
as in ARTIST=JS Bach; Glenn Gould
""")
lib_act_p.add_argument(
"--synchronize_artist", action="store_true",
help="""
Verify there is a corresponding entry in tags.json for
each artist found in the tag file. If an entry is not
found, user is prompted to add a new artist.
Find the arrangement that is best fit for a given file.
Finally, synchronize the file tags to the database.
""")
lib_act_p.add_argument(
"--synchronize_composer", action="store_true",
help="""
Verify there is a corresponding entry in tags.json for
the composer found in the tag file.
If an entry is not found, user is prompted to add a
new composer.
Finally, synchronize the file tags to the database.
""")
lib_act_p.add_argument(
"--get_artist_counts", action="store_true",
help="""
        Update the occurrence count of each artist in tags.json.
These are then used for ordering new arrangements.
""")
lib_act_p.add_argument(
"--get_arrangement_set", action="store_true",
help="""
get the set and counts of all instrumental groupings
via sorted arrangements
""")
lib_subps.add_parser(
"initialize",
help="""
Initialize a new folder / library by applying a sequence
of library actions.
""")
lib_subps.add_parser(
"synchronize",
help="""
synchronize the library file tags with the tags
database""")
lib_play_p = lib_subps.add_parser("playlist", help="")
lib_play_p.add_argument(
"-q", '--query', type=str, nargs='+',
help="""structure --> TAG_KEY TRACK_RELATION TAG_VALUE SET_OPERATOR
        example --> ARRANGEMENT contains guitar AND COMPOSER contains
BACH""")
def create_config_parsers(subps):
""" creates config sub-parsers
"""
config_p = subps.add_parser(
"config",
help="""
commands providing access to the configuration
""")
config_subps = config_p.add_subparsers(dest="sub_cmd")
config_subps.add_parser(
"init",
help="Copy the config template to config location")
config_subps.add_parser(
"edit",
help="edit the config.json file in $EDITOR")
config_subps.add_parser(
"show",
help="pretty print the current configuration to stdout")
def create_database_parsers(subps):
""" creates tag database sub-parsers
"""
db_p = subps.add_parser(
"tags", help="commands providing access to tag database")
db_subps = db_p.add_subparsers(dest="sub_cmd")
db_subps.add_parser(
"edit", help="edit the tags.json file in $EDITOR")
db_subps.add_parser(
"show", help="pretty print the tags.json file to stdout")
def create_stream_parsers(subps):
"""creates streams subparsers
"""
strm_p = subps.add_parser(
"streams",
help="""
commands for working with streams of audio data
""")
strm_subps = strm_p.add_subparsers(dest="sub_cmd")
strm_init_p = strm_subps.add_parser(
"listing",
help="""
utilize a listing.json file to create a batch of new streams
""")
strm_init_p.add_argument(
"-l", "--listing", type=str, default="templates/listing.json",
help="Path to listing.json specification.")
strm_trck_p = strm_subps.add_parser(
"tracks",
help="""
process a raw pcm stream to tagged album tracks
""")
strm_trck_p.add_argument(
"-s", "--streampath", type=str, default="",
help=" path to a raw pcm stream file ")
strm_strm_p = strm_subps.add_parser(
"stream",
help="""
combination of batch listing pcm stream generation and
iterative conversion of pcm streams to tagged tracks
""")
strm_strm_p.add_argument(
"-l", "--listing", type=str, default="listing.json",
help="Path to listing.json specification.")
strm_strm_p.add_argument(
"-s", "--streamfolder", type=str,
default=config["path"]["pcm"],
help="""
path to directory containing 1 or more pcm streams,
defaults to path given in config.json
""")
def parse_inputs():
"""populate a heirarchical argument parser
"""
# top-level
p = argparse.ArgumentParser(
prog="CLAMM",
description="""
CLAssical Music Manager
""")
subps = p.add_subparsers(dest="cmd")
# sub-levels
create_database_parsers(subps)
create_config_parsers(subps)
create_stream_parsers(subps)
create_library_parsers(subps)
return p
def tags_show(args):
"""Dump tags database to ``STDOUT``
"""
with open(config["path"]["database"]) as db:
tags = json.load(db)
print(json.dumps(tags, ensure_ascii=False, indent=4))
def tags_edit(args):
"""Open tag database in ``$EDITOR``
"""
call([os.environ["EDITOR"], config["path"]["database"]])
def config_init(args):
"""copy config template to ``$HOME/.config/clamm/config.json``
"""
call(['cp', config_template, get_config_path()])
def config_show(args):
"""Dump config.json to ``STDOUT``
"""
print(json.dumps(config, ensure_ascii=False, indent=4))
def config_edit(args):
"""Open config.json in ``$EDITOR``.
"""
call([os.environ["EDITOR"], get_config_path()])
def streams_tracks(args):
""" Calls :func:`~streams.stream2tracks` with ``streampath`` provided
at command line.
.. code-block:: bash
$ clamm streams initialize
"""
import clamm.streams
clamm.streams.stream2tracks(args.streampath)
def streams_listing(args):
""" Calls :func:`~streams.listing2streams` with ``listing`` provided
at command line.
.. code-block:: bash
$ clamm library initialize
"""
import clamm.streams
clamm.streams.listing2streams(args.listing)
def streams_stream(args):
""" Calls :func:`~streams.main`
"""
import clamm.streams
clamm.streams.main(args)
def library_action(args):
""" calls :func:`~clamm.audiolib.AudioLib.walker` with ``args`` provided
at command line.
Example
.. code-block:: bash
$ clamm library action --recently_added
"""
import clamm.audiolib
alib = clamm.audiolib.AudioLib(args)
funcdict = {q[0]: q[1] for q in args._get_kwargs()
if isinstance(q[1], bool)}
for funcname, flag in funcdict.items():
if flag:
util.printr(funcname)
alib.func = funcname
func = eval("alib.ltfa.{}".format(funcname))
alib.walker(func)
def library_initialize(args):
""" calls :func:`~clamm.audiolib.AudioLib.initialize` with ``args`` provided
at command line.
Example
.. code-block:: bash
$ clamm library initialize
"""
import clamm.audiolib
clamm.audiolib.AudioLib(args).initialize()
def library_synchronize(args):
""" calls :func:`~clamm.audiolib.AudioLib.synchronize` with ``args``
provided at command line.
Example
.. code-block:: bash
$ clamm library synchronize
"""
import clamm.audiolib
clamm.audiolib.AudioLib(args).synchronize()
def library_playlist(args):
""" calls :func:`~clamm.audiolib.AudioLib.playlist` with ``args``
provided at command line.
Example
.. code-block:: bash
$ clamm library playlist
"""
import clamm.audiolib
clamm.audiolib.AudioLib(args).playlist()
def main():
"""clamm entrance point.
Parses and executes the action specified by the command line inputs.
"""
args = parse_inputs().parse_args()
# retrieve the parsed cmd/sub/... and evaluate
full_cmd = "{}_{}".format(args.cmd, args.sub_cmd)
try:
functor = eval(full_cmd)
except NameError as ne:
util.printr("failed to parse the command {}...".format(full_cmd))
raise ne
util.printr("parsed and executing {}...".format(full_cmd))
functor(args)
|
{
"content_hash": "51456a2f85c4f3b474194fa3f48b9703",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 80,
"avg_line_length": 28.825065274151434,
"alnum_prop": 0.5782608695652174,
"repo_name": "p5a0u9l/clamm",
"id": "42d100345c0554827c9bf585b7fffc63e7a58c4d",
"size": "11040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clamm/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "688"
},
{
"name": "Jupyter Notebook",
"bytes": "1863872"
},
{
"name": "Matlab",
"bytes": "2169"
},
{
"name": "Python",
"bytes": "81435"
},
{
"name": "Ruby",
"bytes": "18339"
},
{
"name": "Shell",
"bytes": "1315"
}
],
"symlink_target": ""
}
|
import re
import copy
from collections.abc import Iterable
import zipfile
import numpy as np
import skrf
def debug_counter(n=-1):
count = 0
while count != n:
count += 1
yield count
def is_numeric(val):
try:
float(val)
return True
except ValueError:
return False
def has_duplicate_value(value, values, index):
"""
convenience function to check if there is another value of the current index in the list
Parameters
----------
value :
any value in a list
values : Iterable
the iterable containing the values
index : int
the index of the current item we are checking for
Returns
-------
    bool or int
        False if no duplicate is found, otherwise the index of the first duplicate
"""
for i, val in enumerate(values):
if i == index:
continue
if value == val:
return i
return False
def unique_name(name, names, exclude=-1):
"""
pass in a name and a list of names, and increment with _## as necessary to ensure a unique name
Parameters
----------
name : str
the chosen name, to be modified if necessary
names : list
list of names (str)
exclude : int
the index of an item to be excluded from the search
"""
if not has_duplicate_value(name, names, exclude):
return name
else:
if re.match(r"_\d\d", name[-3:]):
name_base = name[:-3]
suffix = int(name[-2:])
else:
name_base = name
suffix = 1
for num in range(suffix, 100, 1):
name = f"{name_base:s}_{num:02d}"
if not has_duplicate_value(name, names, exclude):
break
return name
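# Editor sketch: unique_name("S11", ["S11", "S21"]) returns "S11_01", and
# unique_name("S11_01", ["S11_01"]) rolls the suffix forward to "S11_02".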
def trace_color_cycle(n=1000):
"""
:type n: int
:return:
"""
# TODO: make this list longer
lime_green = "#00FF00"
cyan = "#00FFFF"
magenta = "#FF00FF"
yellow = "#FFFF00"
pink = "#C04040"
blue = "#0000FF"
    lavender = "#FF40FF"
turquoise = "#00FFFF"
count = 0
    colors = [yellow, cyan, magenta, lime_green, pink, blue, lavender, turquoise]
num = len(colors)
while count < n:
yield colors[count % num]
count += 1
def snp_string(ntwk, comments=None):
"""
get the RI .snp touchstone file string, which we will use for saving with zip files
*** ONLY SUPPORTS 1 and 2-port networks
Parameters
----------
ntwk : skrf.Network
a one or two-port Network
comments : str, list
any comments to append to the top of the file
Returns
-------
str
"""
if type(comments) == str:
lines = comments.splitlines()
elif type(comments) == list:
lines = comments
elif type(comments) == tuple:
lines = list(comments)
elif comments:
raise TypeError("Must provide either a string, or a list of strings")
else:
lines = None
if lines:
for i, line in enumerate(lines):
if not line.startswith("!"):
lines[i] = "! " + line
s2p_comments = "\n".join(lines) + "\n"
else:
s2p_comments = ""
if ntwk.nports == 1:
S11 = ntwk.s[:, 0, 0]
rows = np.vstack((ntwk.f, S11.real, S11.imag)).T.tolist()
elif ntwk.nports == 2:
S11 = ntwk.s[:, 0, 0]
S21 = ntwk.s[:, 1, 0]
S12 = ntwk.s[:, 0, 1]
S22 = ntwk.s[:, 1, 1]
rows = np.vstack(
(ntwk.f, S11.real, S11.imag, S21.real, S21.imag, S12.real, S12.imag, S22.real, S22.imag)).T.tolist()
else:
raise ValueError("Network must be a two-port network")
return s2p_comments + "# Hz S RI R 50\n" + \
"\n".join(" ".join(f"{val:0.8g}" for val in row) for row in rows)
def network_from_zip(zipfid):
"""
convenience function to read zipinfo file into a network
Parameters
----------
zipfid : zipfile.ZipExtFile
Returns
-------
skrf.Network
"""
ntwk = skrf.Network()
ntwk.read_touchstone(zipfid)
return ntwk
def read_zipped_touchstones(ziparchive):
"""
similar to skrf.io.read_all_networks, which works for directories but only for touchstones in ziparchives
Parameters
----------
ziparchive : zipfile.ZipFile
Returns
-------
dict
"""
networks = dict()
fnames = [f.filename for f in ziparchive.filelist]
for fname in fnames:
if fname[-4:].lower() in (".s1p", ".s2p", ".s3p", ".s4p"):
with ziparchive.open(fname) as zfid:
network = network_from_zip(zfid)
networks[network.name] = network
return networks
|
{
"content_hash": "438dc6b7cba4edab0c663e3043a1a42e",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 112,
"avg_line_length": 24.278350515463917,
"alnum_prop": 0.5605095541401274,
"repo_name": "scikit-rf/scikit-rf",
"id": "3f70fca65d659b1fcfefe69bdb4e04833d9d759f",
"size": "4710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skrf_qtapps/skrf_qtwidgets/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6908"
},
{
"name": "Python",
"bytes": "1998676"
},
{
"name": "TypeScript",
"bytes": "1286336"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("feedback", "0002_auto_20150913_1904"),
]
operations = [
migrations.AlterField(
model_name="scheduleitemchoicefeedback",
name="value",
field=models.SmallIntegerField(db_index=True),
preserve_default=True,
),
migrations.AlterIndexTogether(
name="scheduleitemchoicefeedback",
index_together=set(
[("schedule_item", "value"), ("device", "schedule_item")]
),
),
]
|
{
"content_hash": "a3c0520573ae7f1345ea4207ffdd7982",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 26.36,
"alnum_prop": 0.575113808801214,
"repo_name": "pythonindia/junction",
"id": "7d7cbbebf001425df4c9a192cf338947af06b8c7",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "junction/feedback/migrations/0003_auto_20150913_2203.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190844"
},
{
"name": "HTML",
"bytes": "161794"
},
{
"name": "JavaScript",
"bytes": "49000"
},
{
"name": "Python",
"bytes": "379163"
},
{
"name": "Shell",
"bytes": "595"
}
],
"symlink_target": ""
}
|
from django.contrib.gis.gdal import SpatialReference, CoordTransform, SRSException
from django.contrib.gis.geos import Point, GEOSGeometry
import math
class MapTools():
def __init__(self, mapTileSize=256):
EARTH_RADIUS = 6378137
# as in WGS84
        # resolution at zoom 0:
        # 2*pi*r = the earth's circumference in meters; dividing by the
        # tile size in pixels gives the resolution in meters per pixel
        self.init_resolution = 2 * math.pi * EARTH_RADIUS / mapTileSize
self.mapTileSize = mapTileSize
        # shift the mercator origin to the bottom left
# bottomleft of lnglat
BottomLeft_4326 = Point(-180, -85.05112878, srid=4326)
# bottomleft of mercator projection
BottomLeft_Mercator = self.point_ToMercator(BottomLeft_4326)
self.originshift_mercator = BottomLeft_Mercator
self.DEBUG = False
def point_ToMercator(self, point):
source_srid = point.srid
sourcecoord = SpatialReference(source_srid)
try:
mercator = SpatialReference("3857")
except SRSException:
try:
mercator = SpatialReference("900913")
except SRSException:
mercator = SpatialReference("3785")
trans = CoordTransform(sourcecoord, mercator)
point.transform(trans)
return point
def point_ToLatLng(self, point):
source_srid = SpatialReference(point.srid)
wsg84 = SpatialReference("4326")
trans = CoordTransform(source_srid, wsg84)
point.transform(trans)
return point
# "World" in this case means mercator values with shifted origin (to bottom left)
# the shift produces coordinates that are always > 0
# origin shifted to bottomleft
# unit is meters
def point_MercatorToWorld(self, point):
# set the origin of the mercator projected COordinate SYstem to bottom
# left
point.x = point.x - self.originshift_mercator.x
point.y = point.y - self.originshift_mercator.y
# google shifts to topleft. "google world coordinates" would be:
# wy = ORIGINSHIFT - merc_coords[1]
# and then calculating world to pixels at zoom 0 gives googles world coordinates
# as it is more common, we use bottomleft as cosy origin
return point
# meters/pixel depending on zoom level, 2**zoom gives the number of tiles in a row/column per zoom level:
# 2^0 = 1 tile, 2^1 = 2 tiles, 2^2 = 4tiles per row...
def resolution(self, zoom):
res = (self.init_resolution / (2 ** zoom))
return res
# convert meters into pixel count
def point_WorldToPixels(self, point, zoom):
# receives point in world coordinates and calculates floating point
# pixel coordinates
# resolution in meters/pixel
res = self.resolution(zoom)
point.x = point.x / res
point.y = point.y / res
if self.DEBUG:
print('resolution: %s , zoom: %s \n' % (res, zoom))
return point
def point_AnyToAny(self, point, source_srid, target_srid):
source_srid = SpatialReference(source_srid)
target_srid = SpatialReference(target_srid)
trans = CoordTransform(source_srid, target_srid)
point.transform(trans)
return point
def bounds_PixelToMercator(self, bounds, zoom):
res = self.resolution(zoom)
mleft = bounds['left'] * res + self.originshift_mercator.x
mtop = bounds['top'] * res + self.originshift_mercator.y
mright = bounds['right'] * res + self.originshift_mercator.x
mbottom = bounds['bottom'] * res + self.originshift_mercator.y
mbounds = {
'left': mleft, 'top': mtop, 'right': mright, 'bottom': mbottom}
return mbounds
# receives a point in pixel coordinates and returns the cellid according
# to the tilesize
def point_PixelToCellID(self, point, gridSize):
if self.DEBUG:
print('gridsize: %i' % gridSize)
print('x: %f' % point.x)
cellX = int(math.ceil(point.x / float(gridSize)) - 1)
cellY = int(math.ceil(point.y / float(gridSize)) - 1)
# cellX,cellY = self.maptiles.PixelsToTile(point.x, point.y)
if self.DEBUG:
print('cellX: %s' % cellX)
return [cellX, cellY]
# returns tile bounds in pixels
def cellIDToTileBounds(self, cellID, gridSize):
# x, y = cellID.split(',')
x = cellID[0]
y = cellID[1]
left = int(x) * gridSize # minx
bottom = int(y) * gridSize # miny
right = (int(x) + 1) * gridSize # maxx
top = (int(y) + 1) * gridSize # maxy
if self.DEBUG:
print('pixelbounds for cell %s%s: left: %s, top: %s, right: %s, bottom: %s' % (
cellID[0], cellID[1], left, top, right, bottom))
pixelbounds = {
'left': left, 'top': top, 'right': right, 'bottom': bottom}
return pixelbounds
'''
VIEWPORT
--------------- ----------------
| A| |cdefghijklmnoA|
| | -----> |pqrstuvwxyz123|
|B | |B4567890CDEFGH|
--------------- ---------------
Given the topright and bottom left Cell IDs, for example A(10,2) and B(15,1) calculate all cells spanned by A and B
Possibility:
---------------
|xxxA xM|
|xxxx xx|
|Lxxx Bx|
---------------
convert this into two rectangles AL + MB
'''
def calculate_ClusterCells(self, rectangleList):
clusterCells = []
for rect in rectangleList:
max_x = max(rect["topright"][0], rect["bottomleft"][0])
max_y = max(rect["topright"][1], rect["bottomleft"][1])
min_x = min(rect["topright"][0], rect["bottomleft"][0])
min_y = min(rect["topright"][1], rect["bottomleft"][1])
for x in range(min_x, max_x + 1, 1):
for y in range(min_y, max_y + 1, 1):
cell = (x,y)
# cell = '%s,%s' % (x, y)
clusterCells.append(cell)
return clusterCells
def getClusterCells(self, toprightCellID, bottomleftCellID, zoom):
if toprightCellID[0] >= bottomleftCellID[0]:
clusterCells = self.calculate_ClusterCells(
[{"topright": toprightCellID, "bottomleft": bottomleftCellID}])
else:
# topright is left of bottomleft
bottomleftEdgeCellID = [0, bottomleftCellID[1]]
cellMax = (2 ** zoom) - 1
toprightEdgeCellID = [cellMax, toprightCellID[1]]
clusterCells = self.calculate_ClusterCells([{"topright": toprightCellID, "bottomleft": bottomleftEdgeCellID}, {
"topright": toprightEdgeCellID, "bottomleft": bottomleftCellID}])
return clusterCells
def clusterCellToBounds(self, cell, zoom, gridSize, srid):
pixelbounds = self.cellIDToTileBounds(cell, gridSize)
mercatorbounds = self.bounds_PixelToMercator(pixelbounds, zoom)
# convert mercatorbounds to latlngbounds
cell_topright = Point(mercatorbounds['right'], mercatorbounds['top'], srid=3857)
cell_bottomleft = Point(mercatorbounds['left'], mercatorbounds['bottom'], srid=3857)
# if it is not a latlng database, convert the polygons
if srid != 3857:
self.point_AnyToAny(cell_topright, 3857, srid)
self.point_AnyToAny(cell_bottomleft, 3857, srid)
poly_string = self.bounds_ToPolyString({'top': cell_topright.y, 'right': cell_topright.x,
'bottom': cell_bottomleft.y, 'left': cell_bottomleft.x})
return poly_string
def getClusterCellsAsPolyList(self, toprightCellID, bottomleftCellID, zoom, gridSize, srid):
cells_as_poly = []
cells_as_keys = self.getClusterCells(toprightCellID, bottomleftCellID, zoom)
for cell in cells_as_keys:
poly_string = self.clusterCellToBounds(cell, zoom, gridSize, srid)
poly = GEOSGeometry(poly_string, srid=srid)
cells_as_poly.append(poly)
return cells_as_poly
# bounds -> points -> poly (5 points as starting and end point are the
# same)
def bounds_ToPolyString(self, bounds):
# a->b->c->d->a
poly = 'POLYGON((%f %f, %f %f, %f %f, %f %f, %f %f))' % (bounds['right'], bounds['top'],
bounds['right'], bounds[
'bottom'],
bounds['left'], bounds[
'bottom'],
bounds['left'], bounds[
'top'],
bounds['right'], bounds['top'])
return poly
# this one needs points in 3758, 900913 or 3857
def points_calcPixelDistance(self, pointA, pointB, zoom):
usable_srids = [3758, 900913, 3857]
if pointA.srid not in usable_srids:
pointA.transform(3857)
if pointB.srid not in usable_srids:
pointB.transform(3857)
# calc distance in meters
distance_m = math.sqrt(
(pointA.x - pointB.x) ** 2 + (pointA.y - pointB.y) ** 2)
# convert this to pixeldistance
res = self.resolution(zoom)
distance_p = distance_m / res
return int(distance_p)
def getCellIDForPoint(self, point_lnglat, zoom, gridSize):
if point_lnglat.srid != 4326:
point_lnglat.transform(4326)
point_mercator = self.point_ToMercator(point_lnglat)
point_world = self.point_MercatorToWorld(point_mercator)
point_pixels = self.point_WorldToPixels(point_world, zoom)
cellid = self.point_PixelToCellID(point_pixels, gridSize)
return cellid
def getCellForPointAsGeos(self, point, zoom, gridSize, srid):
point = point.clone()
cell_id = self.getCellIDForPoint(point, zoom, gridSize)
poly_string = self.clusterCellToBounds(cell_id, zoom, gridSize, srid)
poly = GEOSGeometry(poly_string, srid=srid)
return poly
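# Editor sketch (requires GDAL/GEOS via GeoDjango; the coordinates are
# illustrative): map a lon/lat point to its grid cell at zoom 10.
def _example_cell_lookup():
    tools = MapTools(mapTileSize=256)
    berlin = Point(13.4050, 52.5200, srid=4326)
    return tools.getCellIDForPoint(berlin, zoom=10, gridSize=256)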
|
{
"content_hash": "59887d3c698c292c1bbf3c7f6cd7c500",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 123,
"avg_line_length": 33.733333333333334,
"alnum_prop": 0.5653115000941088,
"repo_name": "biodiv/anycluster",
"id": "3e2f99c7567921604af5219994a7b5892295c349",
"size": "10626",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "anycluster/MapTools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9381"
},
{
"name": "CSS",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "148"
},
{
"name": "JavaScript",
"bytes": "54882"
},
{
"name": "Makefile",
"bytes": "645"
},
{
"name": "Perl",
"bytes": "1437"
},
{
"name": "Python",
"bytes": "131121"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_block_diag as block_diag
from tensorflow.python.ops.linalg import linear_operator_lower_triangular as lower_triangular
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(0)
def _block_diag_dense(expected_shape, blocks):
"""Convert a list of blocks, into a dense block diagonal matrix."""
rows = []
num_cols = 0
for block in blocks:
# Get the batch shape for the block.
batch_row_shape = array_ops.shape(block)[:-1]
zeros_to_pad_before_shape = array_ops.concat(
[batch_row_shape, [num_cols]], axis=-1)
zeros_to_pad_before = array_ops.zeros(
shape=zeros_to_pad_before_shape, dtype=block.dtype)
num_cols += array_ops.shape(block)[-1]
zeros_to_pad_after_shape = array_ops.concat(
[batch_row_shape, [expected_shape[-2] - num_cols]], axis=-1)
zeros_to_pad_after = array_ops.zeros(
zeros_to_pad_after_shape, dtype=block.dtype)
rows.append(array_ops.concat(
[zeros_to_pad_before, block, zeros_to_pad_after], axis=-1))
return array_ops.concat(rows, axis=-2)
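# e.g. blocks with shapes (2, 2) and (3, 3) assemble into a dense (5, 5)
# matrix that is zero off the block diagonal.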
class SquareLinearOperatorBlockDiagTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
# Increase from 1e-6 to 1e-4
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
@property
def _operator_build_infos(self):
build_info = linear_operator_test_util.OperatorBuildInfo
return [
build_info((0, 0)),
build_info((1, 1)),
build_info((1, 3, 3)),
build_info((5, 5), blocks=[(2, 2), (3, 3)]),
build_info((3, 7, 7), blocks=[(1, 2, 2), (3, 2, 2), (1, 3, 3)]),
build_info((2, 1, 5, 5), blocks=[(2, 1, 2, 2), (1, 3, 3)]),
]
def _operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
expected_blocks = (
build_info.__dict__["blocks"] if "blocks" in build_info.__dict__
else [shape])
matrices = [
linear_operator_test_util.random_positive_definite_matrix(
block_shape, dtype, force_well_conditioned=True)
for block_shape in expected_blocks
]
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [
array_ops.placeholder_with_default(
matrix, shape=None) for matrix in matrices]
operator = block_diag.LinearOperatorBlockDiag(
[linalg.LinearOperatorFullMatrix(
l,
is_square=True,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
for l in lin_op_matrices])
# Should be auto-set.
self.assertTrue(operator.is_square)
# Broadcast the shapes.
expected_shape = list(build_info.shape)
matrices = linear_operator_util.broadcast_matrix_batch_dims(matrices)
block_diag_dense = _block_diag_dense(expected_shape, matrices)
if not use_placeholder:
block_diag_dense.set_shape(
expected_shape[:-2] + [expected_shape[-1], expected_shape[-1]])
return operator, block_diag_dense
def test_is_x_flags(self):
    # Matrix with two positive eigenvalues, 1 and 1.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = block_diag.LinearOperatorBlockDiag(
[linalg.LinearOperatorFullMatrix(matrix)],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_block_diag_cholesky_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = block_diag.LinearOperatorBlockDiag(
[
linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_self_adjoint=True,
),
linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_self_adjoint=True,
),
],
is_positive_definite=True,
is_self_adjoint=True,
)
cholesky_factor = operator.cholesky()
self.assertTrue(isinstance(
cholesky_factor,
block_diag.LinearOperatorBlockDiag))
self.assertEqual(2, len(cholesky_factor.operators))
self.assertTrue(
isinstance(
cholesky_factor.operators[0],
lower_triangular.LinearOperatorLowerTriangular)
)
self.assertTrue(
isinstance(
cholesky_factor.operators[1],
lower_triangular.LinearOperatorLowerTriangular)
)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator = block_diag.LinearOperatorBlockDiag(
[operator_1, operator_2],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegexp(ValueError, "always non-singular"):
block_diag.LinearOperatorBlockDiag(
[operator_1, operator_2], is_non_singular=False)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, name="left")
operator_2 = linalg.LinearOperatorFullMatrix(matrix, name="right")
operator = block_diag.LinearOperatorBlockDiag([operator_1, operator_2])
self.assertEqual("left_ds_right", operator.name)
def test_different_dtypes_raises(self):
operators = [
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))
]
with self.assertRaisesRegexp(TypeError, "same dtype"):
block_diag.LinearOperatorBlockDiag(operators)
def test_non_square_operator_raises(self):
operators = [
linalg.LinearOperatorFullMatrix(rng.rand(3, 4), is_square=False),
linalg.LinearOperatorFullMatrix(rng.rand(3, 3))
]
with self.assertRaisesRegexp(ValueError, "square matrices"):
block_diag.LinearOperatorBlockDiag(operators)
def test_empty_operators_raises(self):
with self.assertRaisesRegexp(ValueError, "non-empty"):
block_diag.LinearOperatorBlockDiag([])
if __name__ == "__main__":
test.main()
|
{
"content_hash": "5d55afa7fa7a9c4b7ac7e1a693a8d7a3",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 93,
"avg_line_length": 35.40096618357488,
"alnum_prop": 0.6615720524017468,
"repo_name": "hehongliang/tensorflow",
"id": "f0cc5d709f9bfec2e3dcfadecc8f949bb6ce6e6d",
"size": "8018",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "194748"
},
{
"name": "C++",
"bytes": "26947133"
},
{
"name": "CMake",
"bytes": "174938"
},
{
"name": "Go",
"bytes": "908627"
},
{
"name": "Java",
"bytes": "323804"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37293"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Protocol Buffer",
"bytes": "249901"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "22872386"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "336334"
}
],
"symlink_target": ""
}
|
"""Run doctests for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import textwrap
import numpy as np
from absl import flags
from absl.testing import absltest
import tensorflow_estimator.python.estimator.estimator_lib as tfe
import tensorflow.compat.v2 as tf
tf.estimator = tfe
tf.compat.v1.enable_v2_behavior()
# We put doctest after absltest so that it picks up the unittest monkeypatch.
# Otherwise doctest tests aren't runnable at all.
import doctest # pylint: disable=g-import-not-at-top, g-bad-import-order
FLAGS = flags.FLAGS
flags.DEFINE_string('module', None, 'A specific module to run doctest on.')
flags.DEFINE_boolean('list', None,
'List all the modules in the core package imported.')
flags.DEFINE_string('file', None, 'A specific file to run doctest on.')
flags.mark_flags_as_mutual_exclusive(['module', 'file'])
flags.mark_flags_as_mutual_exclusive(['list', 'file'])
PACKAGE = 'tensorflow_estimator.python.'
def find_modules():
"""Finds all the modules in the core package imported.
Returns:
    A list containing all the modules under tensorflow_estimator.python.
"""
tf_modules = []
for name, module in sys.modules.items():
if name.startswith(PACKAGE):
tf_modules.append(module)
return tf_modules
def filter_on_submodules(all_modules, submodule):
"""Filters all the modules based on the module flag.
The module flag has to be relative to the core package imported.
For example, if `submodule=keras.layers` then, this function will return
all the modules in the submodule.
Args:
all_modules: All the modules in the core package.
submodule: Submodule to filter from all the modules.
Returns:
All the modules in the submodule.
"""
filtered_modules = [
mod for mod in all_modules if PACKAGE + submodule in mod.__name__
]
return filtered_modules
def get_module_and_inject_docstring(file_path):
"""Replaces the docstring of the module with the changed file's content.
Args:
file_path: Path to the file
Returns:
A list containing the module changed by the file.
"""
file_path = os.path.abspath(file_path)
mod_index = file_path.find(PACKAGE.replace('.', os.sep))
file_mod_name, _ = os.path.splitext(file_path[mod_index:])
file_module = sys.modules[file_mod_name.replace(os.sep, '.')]
with open(file_path, 'r') as f:
content = f.read()
file_module.__doc__ = content
return [file_module]
class TfTestCase(tf.test.TestCase):
def set_up(self, test):
self.setUp()
def tear_down(self, test):
self.tearDown()
class CustomOutputChecker(doctest.OutputChecker):
"""Changes the `want` and `got` strings.
  This allows them to be customized before they are compared.
"""
ID_RE = re.compile(r'\bid=(\d+)\b')
ADDRESS_RE = re.compile(r'\bat 0x[0-9a-f]*?>')
def check_output(self, want, got, optionflags):
# Replace tf.Tensor's id with ellipsis(...) because tensor's id can change
# on each execution. Users may forget to use ellipsis while writing
# examples in docstrings, so replacing the id with `...` makes it safe.
want = self.ID_RE.sub('id=...', want)
want = self.ADDRESS_RE.sub('at ...>', want)
return doctest.OutputChecker.check_output(self, want, got, optionflags)
_MESSAGE = textwrap.dedent("""\n
#############################################################
Check the documentation
(go/testable-docstrings) on how to write testable docstrings.
#############################################################""")
def output_difference(self, example, got, optionflags):
got = got + self._MESSAGE
return doctest.OutputChecker.output_difference(self, example, got,
optionflags)
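# Example (illustrative) of what the two regexes above normalize:
#   "<tf.Tensor: id=42, shape=(), dtype=int32>" -> "<tf.Tensor: id=..., shape=(), dtype=int32>"
#   "<function foo at 0x7f3a2c0d5e18>"          -> "<function foo at ...>"
# so docstring examples do not fail on values that change from run to run.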
def load_tests(unused_loader, tests, unused_ignore):
"""Loads all the tests in the docstrings and runs them."""
tf_modules = find_modules()
if FLAGS.module:
tf_modules = filter_on_submodules(tf_modules, FLAGS.module)
if FLAGS.list:
print('**************************************************')
for mod in tf_modules:
print(mod.__name__)
print('**************************************************')
return tests
if FLAGS.file:
tf_modules = get_module_and_inject_docstring(FLAGS.file)
for module in tf_modules:
testcase = TfTestCase()
tests.addTests(
doctest.DocTestSuite(
module,
test_finder=doctest.DocTestFinder(exclude_empty=False),
extraglobs={
'tf': tf,
'np': np,
'os': os
},
setUp=testcase.set_up,
tearDown=testcase.tear_down,
checker=CustomOutputChecker(),
optionflags=(doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
| doctest.IGNORE_EXCEPTION_DETAIL
| doctest.DONT_ACCEPT_BLANKLINE),
))
return tests
if __name__ == '__main__':
absltest.main()
|
{
"content_hash": "1060d44ef4f8bc6e2fca034ad8c23f93",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 78,
"avg_line_length": 29.039772727272727,
"alnum_prop": 0.6325572295049893,
"repo_name": "tensorflow/estimator",
"id": "a3cecab84cdd0822b117abb022679a3ddf615ce7",
"size": "5800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_estimator/python/estimator/tf_estimator_doctest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11293"
},
{
"name": "Python",
"bytes": "3919795"
},
{
"name": "Shell",
"bytes": "4038"
},
{
"name": "Starlark",
"bytes": "86773"
}
],
"symlink_target": ""
}
|
try:
from urllib.parse import urlparse # Py3
except ImportError:
from urlparse import urlparse
from holmes.utils import is_valid, _
from holmes.validators.base import Validator
class LinkWithRelCanonicalValidator(Validator):
@classmethod
def get_violation_definitions(cls):
return {
'absent.meta.canonical': {
'title': _('Link with rel="canonical" not found'),
'description': _(
'As can be seen in this page '
'<a href="https://support.google.com/webmasters/answer/'
'139394?hl=en">About rel="canonical"</a>, it\'s a good '
'practice to include rel="canonical" urls in the pages '
'for your website.'
),
'category': _('SEO'),
                'generic_description': _(
                    'Validates the absence of a link with rel="canonical" '
                    'in the head of a page. This link indicates the '
                    'preferred URL to use to access the page, so that '
                    'search results are more likely to show users that '
                    'URL structure.'
                ),
'unit': 'boolean'
}
}
@classmethod
def get_default_violations_values(cls, config):
return {
'absent.meta.canonical': {
'value': config.FORCE_CANONICAL,
'description': config.get_description('FORCE_CANONICAL')
}
}
def validate(self):
force_canonical = self.get_violation_pref('absent.meta.canonical')
if not force_canonical:
# Only pages with query string parameters
if self.page_url:
if not is_valid(self.page_url):
return
if not urlparse(self.page_url).query:
return
head = self.get_head()
if head:
canonical = [item for item in head if item.get('rel') == 'canonical']
if not canonical:
self.add_violation(
key='absent.meta.canonical',
value=None,
points=30
)
def get_head(self):
return self.review.data.get('page.head', None)
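# Example (illustrative): `page.head` is expected to be a list of parsed tag
# attribute dicts. A head such as
#   [{'rel': 'canonical', 'href': 'http://example.com/post'}]
# passes this validator, while a head with no rel="canonical" entry adds the
# 'absent.meta.canonical' violation worth 30 points.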
|
{
"content_hash": "978142d7abc6f63bb4ff9ee0e3cd5c05",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 81,
"avg_line_length": 33.95652173913044,
"alnum_prop": 0.5121638924455826,
"repo_name": "holmes-app/holmes-api",
"id": "437eff4e0c924617949e1f397c3da6537877ed06",
"size": "2405",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "holmes/validators/link_with_rel_canonical.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "212454"
},
{
"name": "Makefile",
"bytes": "11334"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "809395"
}
],
"symlink_target": ""
}
|
import logging
import os
from django.core.management import BaseCommand
from django.utils.text import slugify
from emgapi import models as emg_models
from ..lib.genome_util import sanity_check_genome_output, \
sanity_check_catalogue_dir, find_genome_results, \
get_genome_result_path, \
read_tsv_w_headers, read_json, \
apparent_accession_of_genome_dir
logger = logging.getLogger(__name__)
cog_cache = {}
ipr_cache = {}
class Command(BaseCommand):
obj_list = list()
results_directory = None
genome_folders = None
catalogue_obj = None
catalogue_dir = None
database = None
def add_arguments(self, parser):
parser.add_argument('results_directory', action='store', type=str, )
parser.add_argument('catalogue_directory', action='store', type=str,
help='The folder within `results_directory` where the results files are. '
'e.g. "genomes/skin/1.0/"')
parser.add_argument('catalogue_name', action='store', type=str,
help='The name of this catalogue (without any version label), e.g. "Human Skin"')
parser.add_argument('catalogue_version', action='store', type=str,
help='The version label. E.g. "1.0" or "2021-01"')
parser.add_argument('gold_biome', action='store', type=str,
help="Primary biome for the catalogue, as a GOLD lineage. "
"E.g. root:Host-Associated:Human:Digestive\\ System:Large\\ intestine")
parser.add_argument('--database', type=str,
default='default')
def handle(self, *args, **options):
self.results_directory = os.path.realpath(options.get('results_directory').strip())
if not os.path.exists(self.results_directory):
raise FileNotFoundError('Results dir {} does not exist'
.format(self.results_directory))
catalogue_name = options['catalogue_name'].strip()
version = options['catalogue_version'].strip()
catalogue_dir = options['catalogue_directory'].strip()
gold_biome = options['gold_biome'].strip()
self.catalogue_dir = os.path.join(self.results_directory, catalogue_dir)
self.database = options['database']
self.catalogue_obj = self.get_catalogue(catalogue_name, version, gold_biome, catalogue_dir)
logger.info("CLI %r" % options)
genome_dirs = find_genome_results(self.catalogue_dir)
logger.info(
'Found {} genome dirs to upload'.format(len(genome_dirs)))
[sanity_check_genome_output(d) for d in genome_dirs]
sanity_check_catalogue_dir(self.catalogue_dir)
for d in genome_dirs:
self.upload_dir(d)
self.upload_catalogue_files()
self.catalogue_obj.calculate_genome_count()
self.catalogue_obj.save()
def get_catalogue(self, catalogue_name, catalogue_version, gold_biome, catalogue_dir):
        logger.warning('GOLD biome: %s', gold_biome)
biome = self.get_gold_biome(gold_biome)
catalogue, _ = emg_models.GenomeCatalogue.objects \
.using(self.database) \
.get_or_create(
catalogue_id=slugify('{0}-v{1}'.format(catalogue_name, catalogue_version).replace('.', '-')),
defaults={
'version': catalogue_version,
'name': '{0} v{1}'.format(catalogue_name, catalogue_version),
'biome': biome,
'result_directory': catalogue_dir
})
return catalogue
def upload_dir(self, directory):
logger.info('Uploading dir: {}'.format(directory))
genome, has_pangenome = self.create_genome(directory)
self.upload_cog_results(genome, directory)
self.upload_kegg_class_results(genome, directory)
self.upload_kegg_module_results(genome, directory)
self.upload_antismash_geneclusters(genome, directory)
self.upload_genome_files(genome, directory, has_pangenome)
def get_gold_biome(self, lineage):
biome = emg_models.Biome.objects.using(self.database).filter(lineage__iexact=lineage).first()
if not biome:
raise emg_models.Biome.DoesNotExist()
return biome
def get_or_create_genome_set(self, setname):
return emg_models.GenomeSet.objects.using(self.database).get_or_create(name=setname)[0]
def prepare_genome_data(self, genome_dir):
d = read_json(os.path.join(genome_dir, f'{apparent_accession_of_genome_dir(genome_dir)}.json'))
has_pangenome = 'pangenome' in d
d['biome'] = self.get_gold_biome(d['gold_biome'])
d['genome_set'] = self.get_or_create_genome_set(d.get('genome_set', 'NCBI'))
if has_pangenome:
d.update(d['pangenome'])
del d['pangenome']
d.setdefault('num_genomes_total', 1)
if 'num_genomes_non_redundant' in d:
del d['num_genomes_non_redundant']
if 'geographic_origin' in d:
d['geo_origin'] = self.get_geo_location(d['geographic_origin'])
del d['geographic_origin']
del d['gold_biome']
return d, has_pangenome
def get_geo_location(self, location):
return emg_models.GeographicLocation \
.objects.using(self.database).get_or_create(name=location)[0]
def attach_geo_location(self, genome, location):
genome.pangenome_geographic_range.add(self.get_geo_location(location))
def create_genome(self, genome_dir):
data, has_pangenome = self.prepare_genome_data(genome_dir)
geo_locations = data.get('geographic_range')
data.pop('geographic_range', None)
data.pop('genome_accession', None)
data['result_directory'] = get_genome_result_path(genome_dir)
data['catalogue'] = self.catalogue_obj
g, created = emg_models.Genome.objects.using(self.database).update_or_create(
accession=data['accession'],
defaults=data)
g.save(using=self.database)
if geo_locations:
[self.attach_geo_location(g, l) for l in geo_locations]
return g, has_pangenome
def upload_cog_results(self, genome, d):
genome_cogs = os.path.join(d, 'genome', f'{genome.accession}_cog_summary.tsv')
self.upload_cog_result(genome, genome_cogs)
logger.info('Loaded Genome COG for {}'.format(genome.accession))
def upload_cog_result(self, genome, f):
counts = read_tsv_w_headers(f)
for cc in counts:
self.upload_cog_count(genome, cc)
logger.info('Loaded Genome COG for {}'.format(genome.accession))
def upload_cog_count(self, genome, cog_count):
c_name = cog_count['COG_category']
cog = self.get_cog_cat(c_name)
count_val = int(cog_count['Counts'])
defaults = {'genome_count': 0}
count, created = emg_models.GenomeCogCounts.objects \
.using(self.database) \
.get_or_create(genome=genome,
cog=cog,
defaults=defaults)
count.genome_count = count_val
count.save(using=self.database)
def get_cog_cat(self, c_name):
return emg_models.CogCat.objects.using(self.database) \
.get_or_create(name=c_name)[0]
def upload_kegg_class_results(self, genome, d):
genome_kegg_classes = os.path.join(d, 'genome', f'{genome.accession}_kegg_classes.tsv')
self.upload_kegg_class_result(genome, genome_kegg_classes)
logger.info(
'Loaded Genome KEGG classes for {}'.format(genome.accession))
def upload_kegg_class_result(self, genome, f):
kegg_matches = read_tsv_w_headers(f)
for kegg_match in kegg_matches:
self.upload_kegg_class_count(genome, kegg_match)
def get_kegg_class(self, kegg_cls_id):
return emg_models.KeggClass.objects.using(self.database) \
.get_or_create(class_id=kegg_cls_id)[0]
def upload_kegg_class_count(self, genome, kegg_match):
kegg_id = kegg_match['KEGG_class']
kegg_class = self.get_kegg_class(kegg_id)
count_val = int(kegg_match['Counts'])
defaults = {'genome_count': 0}
count, created = emg_models.GenomeKeggClassCounts.objects \
.using(self.database) \
.get_or_create(genome=genome,
kegg_class=kegg_class,
defaults=defaults)
count.genome_count = count_val
count.save(using=self.database)
def upload_kegg_module_results(self, genome, d):
genome_kegg_modules = os.path.join(d, 'genome', f'{genome.accession}_kegg_modules.tsv')
self.upload_kegg_module_result(genome, genome_kegg_modules)
logger.info(
'Loaded Genome KEGG modules for {}'.format(genome.accession))
def upload_kegg_module_result(self, genome, f):
kegg_matches = read_tsv_w_headers(f)
for kegg_match in kegg_matches:
self.upload_kegg_module_count(genome, kegg_match)
def get_kegg_module(self, name):
return emg_models.KeggModule.objects.using(self.database) \
.get_or_create(name=name)[0]
def upload_kegg_module_count(self, genome, kegg_match):
kegg_module_id = kegg_match['KEGG_module']
kegg_module = self.get_kegg_module(kegg_module_id)
count_val = int(kegg_match['Counts'])
defaults = {'genome_count': 0}
count, created = emg_models.GenomeKeggModuleCounts.objects \
.using(self.database) \
.get_or_create(genome=genome,
kegg_module=kegg_module,
defaults=defaults)
count.genome_count = count_val
count.save(using=self.database)
def upload_antismash_geneclusters(self, genome, directory):
"""Upload AS results in the DB
"""
file = os.path.join(directory, 'genome', 'geneclusters.txt')
if not os.path.exists(file):
logger.warning('Genome {} does not have antiSMASH geneclusters'.format(genome.accession))
return
with open(file, 'rt') as tsv:
for row in tsv:
*_, cluster, features, _ = row.split('\t')
as_cluster, _ = emg_models.AntiSmashGC.objects \
.using(self.database) \
.get_or_create(name=cluster)
count_val = len(features.split(';')) if len(features) else 0
model, _ = emg_models.GenomeAntiSmashGCCounts.objects \
.using(self.database) \
.get_or_create(genome=genome, antismash_genecluster=as_cluster,
genome_count=count_val)
model.save(using=self.database)
logger.info(
'Loaded Genome AntiSMASH geneclusters for {}'.format(genome.accession))
def upload_genome_files(self, genome, directory, has_pangenome):
logger.info('Uploading genome files...')
self.upload_genome_file(genome, directory, 'Predicted CDS (aa)', 'fasta',
genome.accession + '.faa', 'Genome analysis', 'genome', True)
self.upload_genome_file(genome, directory, 'Nucleic Acid Sequence', 'fasta',
genome.accession + '.fna', 'Genome analysis', 'genome', True)
self.upload_genome_file(genome, directory, 'Nucleic Acid Sequence index', 'fai',
genome.accession + '.fna.fai', 'Genome analysis', 'genome', True, )
self.upload_genome_file(genome, directory, 'Genome Annotation', 'gff',
genome.accession + '.gff', 'Genome analysis', 'genome', True)
self.upload_genome_file(genome, directory, 'Genome antiSMASH Annotation', 'gff',
genome.accession + '_antismash.gff', 'Genome analysis', 'genome', False)
self.upload_genome_file(genome, directory, 'EggNog annotation', 'tsv',
genome.accession + '_eggNOG.tsv', 'Genome analysis', 'genome', False)
self.upload_genome_file(genome, directory, 'InterProScan annotation', 'tsv',
genome.accession + '_InterProScan.tsv', 'Genome analysis', 'genome', False)
self.upload_genome_file(genome, directory, 'Genome rRNA Sequence', 'fasta',
genome.accession + '_rRNAs.fasta', 'Genome analysis', 'genome', False)
if has_pangenome:
self.upload_genome_file(genome, directory, 'Pangenome core genes list', 'tab',
'core_genes.txt', 'Pan-Genome analysis', 'pan-genome', False)
self.upload_genome_file(genome, directory, 'Pangenome DNA sequence', 'fasta',
'pan-genome.fna', 'Pan-Genome analysis', 'pan-genome', False)
self.upload_genome_file(genome, directory,
'Gene Presence / Absence matrix',
'tsv', 'gene_presence_absence.Rtab', 'Pan-Genome analysis', 'pan-genome', False)
self.upload_genome_file(genome, directory,
'Pairwise Mash distances of conspecific genomes',
'nwk', 'mashtree.nwk', 'Pan-Genome analysis', 'pan-genome', False)
def prepare_file_upload(self, desc_label, file_format, filename, group_name=None, subdir_name=None):
obj = {}
desc = emg_models.DownloadDescriptionLabel \
.objects.using(self.database) \
.filter(description_label__iexact=desc_label) \
.first()
obj['description'] = desc
if desc is None:
logger.error('Desc_label missing: {0}'.format(desc_label))
quit()
fmt = emg_models.FileFormat \
.objects.using(self.database) \
.filter(format_extension=file_format, compression=False) \
.first()
obj['file_format'] = fmt
name = os.path.basename(filename)
obj['realname'] = name
obj['alias'] = name
if group_name:
group = emg_models.DownloadGroupType \
.objects.using(self.database) \
.filter(group_type__iexact=group_name) \
.first()
obj['group_type'] = group
if subdir_name:
subdir = emg_models.DownloadSubdir \
.objects.using(self.database) \
.filter(subdir=subdir_name) \
.first()
obj['subdir'] = subdir
return obj
def upload_genome_file(self, genome, directory, desc_label, file_format, filename, group_type, subdir,
require_existent_and_non_empty):
defaults = self.prepare_file_upload(desc_label, file_format, filename, group_type, subdir)
path = os.path.join(directory, subdir, filename)
if not (os.path.isfile(path) and os.path.getsize(path) > 0):
if require_existent_and_non_empty:
raise FileNotFoundError(f"Required file at {path} either missing or empty")
else:
logger.warning(f"File not found or empty at {path}. This is allowable, but will not be uploaded.")
return
emg_models.GenomeDownload.objects.using(self.database).update_or_create(genome=genome,
alias=defaults['alias'],
defaults=defaults)
def upload_catalogue_files(self):
self.upload_catalogue_file(self.catalogue_obj,
'Phylogenetic tree of catalogue genomes',
'json',
'phylo_tree.json')
def upload_catalogue_file(self, catalogue, desc_label, file_format, filename):
defaults = self.prepare_file_upload(desc_label, file_format, filename, None, None)
emg_models.GenomeCatalogueDownload.objects.using(self.database).update_or_create(
genome_catalogue=catalogue,
alias=defaults['alias'],
defaults=defaults)
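# Example (illustrative) invocation of this management command; the paths and
# catalogue names below are hypothetical:
#   python manage.py import_genomes /nfs/results genomes/skin/1.0 \
#       "Human Skin" 1.0 root:Host-Associated:Human:Skin --database default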
|
{
"content_hash": "a7762b5b5161fe1cf72eb532d4f333ed",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 116,
"avg_line_length": 43.630319148936174,
"alnum_prop": 0.5869551965864066,
"repo_name": "EBI-Metagenomics/emgapi",
"id": "09c8babb87de845302b37bd7a356fc8b4544f605",
"size": "16405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emgapianns/management/commands/import_genomes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1971"
},
{
"name": "Dockerfile",
"bytes": "1461"
},
{
"name": "Go",
"bytes": "96319"
},
{
"name": "HTML",
"bytes": "50709"
},
{
"name": "JavaScript",
"bytes": "152539"
},
{
"name": "Python",
"bytes": "1247363"
},
{
"name": "Shell",
"bytes": "1189"
}
],
"symlink_target": ""
}
|
import json
import requests
from collections import OrderedDict
def get_server_csm_stats(server):
good = 0
bad = 0
indeterminate = 0
# may not exist, use try/except to fail through
try:
deadvar = server.issues['sca']['findings']
except:
return({'good':good,
'bad':bad,
'indeterminate':indeterminate})
for issue in server.issues['sca']['findings']:
for entry in issue['details']:
if entry['status'] == 'bad':
bad += 1
elif entry['status'] == 'indeterminate':
indeterminate += 1
elif entry['status'] == 'good':
good += 1
retval = {'good':good,
'bad':bad,
'indeterminate':indeterminate}
return(retval)
def get_server_fim_stats(server):
infected = 0
safe = 0
unknown = 0
config = {}
server.infected = []
server.positives = 0
#send hashes to VirusTotal and get results #OLD Code. useful if opening files instead.
# file_to_send = open(server.vtfile, "rb").read()
# csv_format = file_to_send.replace("\n",",").strip()
# params = {'apikey': server.vtkey, 'resource': csv_format}
# print params
# print server.vtfile
#send hashes to VirusTotal and get results
vt_hashes = ""
for x in server.new_hashes:
vt_hashes = vt_hashes + server.new_hashes[x]
# print vt_hashes
params = {'apikey': server.vtkey, 'resource': vt_hashes}
response = requests.get('https://www.virustotal.com/vtapi/v2/file/report', params=params)
vt = response.json()
# print "in cruncher, printing vt results:"
# print json.dumps(vt, indent = 2)
# print "after print of vt results"
#process output of VirusTotal; seek positives and add infected hashes to the list
for i in range( 0, len(vt) ):
if vt[i]['response_code'] == 1:
try:
if vt[i]['positives'] > 0:
# print "infected file found"
server.infected.append( vt[i]['resource'] )
else:
unknown = unknown + 1 #how to tell safe from unknown?
except:
print("error in cruncher.get_server_fim_stats")
# print "num infected:"
# print len(server.infected)
server.vt = vt #all results from VirusTotal for output
infected = len(server.infected)
server.infected = OrderedDict.fromkeys(server.infected) #remove duplicate hashes
safe = len(vt) - infected
unknown = 1 #not sure how to set this field
retval = {'known_virus':infected,
'known_safe':safe,
'unknown':unknown}
return(retval)
def get_server_sva_stats(server):
critical = 0
non_critical = 0
    # Here, if we can't set deadvar, it's assumed that there are no SVA results for that host. So we fail through.
try:
deadvar = server.issues['svm']['findings']
except:
retval = {'critical':critical,'non_critical':non_critical}
return(retval)
for issue in server.issues['svm']['findings']:
if issue['status'] == 'bad':
if issue['critical'] == True:
critical += 1
elif issue['critical'] == False:
non_critical += 1
retval = {'critical':critical,
'non_critical':non_critical}
return(retval)
def all_server_stats(servers):
all_cves = []
all_crit_pkgs = []
all_noncrit_pkgs = []
retval_cve = {}
retval_crit_pkg = {}
retval_noncrit_pkg = {}
for s in servers:
try:
deadvar = s.issues['svm']['findings']
except:
continue
for issue in s.issues['svm']['findings']:
if issue['status'] == 'bad':
for entry in issue['cve_entries']:
if entry['suppressed'] == False:
all_cves.append(entry['cve_entry'])
cve_consolidate = set(all_cves)
    cve_consolidated = sorted(cve_consolidate)
for u_cve in cve_consolidated:
retval_cve[str(u_cve)] = all_cves.count(str(u_cve))
for t in servers:
try:
deadvar = t.issues['svm']['findings']
except:
continue
for issue in t.issues['svm']['findings']:
if issue['status'] == 'bad':
if issue['critical'] == True:
all_crit_pkgs.append(str(issue['package_name'] + issue['package_version']))
elif issue['critical'] == False:
all_noncrit_pkgs.append(str(issue['package_name'] + issue['package_version']))
noncrit_pkgs_consolidated = set(all_noncrit_pkgs)
noncrit_pkgs_consolidated = sorted(list(noncrit_pkgs_consolidated))
crit_pkgs_consolidated = set(all_crit_pkgs)
crit_pkgs_consolidated = sorted(list(crit_pkgs_consolidated))
for u_cpkg in crit_pkgs_consolidated:
retval_crit_pkg[str(u_cpkg)] = all_crit_pkgs.count(str(u_cpkg))
for u_ncpkg in noncrit_pkgs_consolidated:
retval_noncrit_pkg[str(u_ncpkg)] = all_noncrit_pkgs.count(str(u_ncpkg))
return(retval_cve, retval_noncrit_pkg, retval_crit_pkg)
|
{
"content_hash": "c61569b469cffd3297393119dc445e08",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 113,
"avg_line_length": 36.0958904109589,
"alnum_prop": 0.5827324478178368,
"repo_name": "david-sackmary/whack-a-mole",
"id": "2faa1a1d4253a444eded8dba1e04fe5f953f1008",
"size": "5332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cruncher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "37327"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt  # required by the plt.figure()/plt.scatter() calls below
from sklearn.datasets import make_circles
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
X, y = make_circles(n_samples=1500,
factor=.4,
noise=.05)
km = KMeans(n_clusters=2)
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=km.fit_predict(X))
ac = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='complete')
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=ac.fit_predict(X))
db = DBSCAN(eps=0.2)
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=db.fit_predict(X));
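# Note (illustrative): on these concentric circles only DBSCAN recovers the
# two rings; k-means and complete-linkage agglomerative clustering partition
# the points by distance and cut across the rings.
# Uncomment to display the figures when running outside a notebook:
# plt.show()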
|
{
"content_hash": "26359f5d968d1c545c00688dd493ae90",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 84,
"avg_line_length": 29.944444444444443,
"alnum_prop": 0.6363636363636364,
"repo_name": "RTHMaK/RPGOne",
"id": "0ed7269ef64d68dcfadf14039c9d68c2ad3d6eb9",
"size": "539",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scipy-2017-sklearn-master/notebooks/solutions/21_clustering_comparison.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "1C Enterprise",
"bytes": "36"
},
{
"name": "Batchfile",
"bytes": "15029"
},
{
"name": "CSS",
"bytes": "41709"
},
{
"name": "Erlang",
"bytes": "39438"
},
{
"name": "Go",
"bytes": "287"
},
{
"name": "HTML",
"bytes": "633076"
},
{
"name": "JavaScript",
"bytes": "1128791"
},
{
"name": "Jupyter Notebook",
"bytes": "927247"
},
{
"name": "Makefile",
"bytes": "31756"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Matlab",
"bytes": "9454"
},
{
"name": "PHP",
"bytes": "708541"
},
{
"name": "PowerShell",
"bytes": "68503"
},
{
"name": "Python",
"bytes": "2278740"
},
{
"name": "Ruby",
"bytes": "1136"
},
{
"name": "Shell",
"bytes": "62555"
},
{
"name": "Smarty",
"bytes": "5752"
},
{
"name": "TeX",
"bytes": "34544"
}
],
"symlink_target": ""
}
|
import os
import sys
# Exported functions
class mzb_atom(str):
pass
def notify(metric, value):
_mzbench_pipe.write("M {{{0}, {1}}}.\n".format(_encode_metric(metric), value))
def get_metric_value(name):
return _call(mzb_atom('mzb_metrics'), mzb_atom('get_value'), [name])
def _call(module, function, args):
_mzbench_pipe.write("C {0}.\n".format(_encode_term((module, function, args))))
return _read_call_result()
# Internal functions
def _instruction_end(result):
_mzbench_pipe.write("T {0}.\n".format(_encode_term(result)))
def _instruction_failed((t, o, st)):
_mzbench_pipe.write("E {0} {1}.\n".format(t, o))
def _module_funcs(module_name):
return dir(module_name)
def _read_call_result():
res = sys.stdin.readline().rstrip()
line_num = int(sys.stdin.readline().rstrip())
content = []
while line_num > 0:
content.append(sys.stdin.readline().rstrip())
line_num -= 1
if (res == "OK"):
return eval("\n".join(content)) if content else None
else:
raise Exception("\n".join(content))
def _encode_term(term):
T = type(term)
if (term is None): return "undefined"
elif (list == T): return _encode_list(term)
elif (tuple == T): return _encode_tuple(term)
elif (dict == T): return _encode_dict(term)
elif (int == T): return _encode_num(term)
elif (float == T): return _encode_num(term)
elif (mzb_atom == T): return _encode_atom(term)
elif (str == T): return _encode_str(term)
elif (unicode == T): return _encode_str(term)
else: return _encode_str("<unknown python term: {0}>".format(term))
def _encode_atom(a):
return "'{0}'".format(a)
def _encode_list(l):
return '[' + ', '.join([_encode_term(e) for e in l]) + ']'
def _encode_tuple(l):
return '{' + ', '.join([_encode_term(e) for e in l]) + '}'
def _encode_dict(d):
return '#{' + ', '.join([ _encode_term(k) + '=>' + _encode_term(d[k]) for k in d]) + '}'
def _encode_num(n):
return str(n)
def _encode_str(s):
return '"{0}"'.format(s)
def _encode_funcs_list(func_list):
return '[' + ', '.join(['"{0}"'.format(e) for e in func_list]) + ']'
def _encode_metric(metric):
return '{{"{0}", {1}}}'.format(_encode_string_for_erlang(metric[0]), _encode_string_for_erlang(metric[1]))
# May fail in some complicated cases
def _encode_string_for_erlang(string):
    return string.replace('\\', '\\\\').replace('"', '\\"')
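# Example (illustrative) of the term encoding implemented above:
#   _encode_term({mzb_atom('ok'): [1, 2.5, "x"]})
# produces the Erlang term string
#   #{'ok'=>[1, 2.5, "x"]}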
# MZBench communication initialization
if 'MZ_PYTHON_WORKER_FIFO_NAME' not in os.environ:
sys.exit("MZ_PYTHON_WORKER_FIFO_NAME environment variable must be defined!")
_mzbench_pipe = open(os.environ['MZ_PYTHON_WORKER_FIFO_NAME'], 'r+', 0)
|
{
"content_hash": "15209550458e990d63d99f91afea57e6",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 110,
"avg_line_length": 26.184466019417474,
"alnum_prop": 0.6091954022988506,
"repo_name": "ethercrow/mzbench",
"id": "ee984afd44009716527cb30bedf645fffd1daaa0",
"size": "2697",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "worker_templates/python_empty/src/mzbench.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4250"
},
{
"name": "CSS",
"bytes": "5352"
},
{
"name": "Erlang",
"bytes": "443920"
},
{
"name": "Gnuplot",
"bytes": "480"
},
{
"name": "HTML",
"bytes": "3280"
},
{
"name": "JavaScript",
"bytes": "58006"
},
{
"name": "Lua",
"bytes": "300"
},
{
"name": "Makefile",
"bytes": "14344"
},
{
"name": "Python",
"bytes": "81745"
},
{
"name": "Shell",
"bytes": "22841"
}
],
"symlink_target": ""
}
|
import warnings
import tensorflow as tf
from tensorflow.python import debug as tf_debug
import numpy as np
import pickle
import getopt
import time
import sys
import os
import csv
import spacy
import re
import math
import random
from dnc.dnc import *
from recurrent_controller import *
from post_controller import *
from moviepy.video.io.VideoFileClip import VideoFileClip
from PIL import Image
from termcolor import colored
from train_until import *
from dataset_generator import VideoCaptionDataset
anno_file = './dataset/MSR_en.csv'
dict_file = './dataset/MSR_BBC_EN_dict.csv'
# dict_file = './dataset/MSR_en_dict.csv'
w2v_dict_file = './dataset/MSR_enW2V_dict.csv'
video_dir = './dataset/YouTubeClips/'
word2v_emb_file = './dataset/MSR_enW2V.npy'
if __name__ == '__main__':
dirname = os.path.dirname(__file__)
ckpts_dir = os.path.join(dirname, 'checkpoints')
data_dir = os.path.join(dirname, 'data', 'en-10k')
tb_logs_dir = os.path.join(dirname, 'logs')
feat_files = [re.match('features_(\d+)_(\d+)\.npy', f) for f in os.listdir(path='./dataset/')]
feat_files_tup = []
for f in feat_files:
if f is not None:
feat_files_tup.append((f.string, int(f.group(1)), int(f.group(2)))) # (file_name, start_id, end_id)
feat_files_tup.sort(key=lambda x: x[1]) # sort by start data id.
llprint("Loading Data ... ")
# w2v_emb = np.load(word2v_emb_file) * 10 # [word_num, vector_len]
w2v_emb = None
# Hamming = 64
Hamming = None
mul_onehot = (256, 2)
# mul_onehot_map = mul_onehot_remap(config=mul_onehot_map)
if w2v_emb is not None:
data, lexicon_dict = load(anno_file, w2v_dict_file)
word_space_size = output_size = w2v_emb.shape[1]
elif Hamming is not None:
data, lexicon_dict = load(anno_file, dict_file)
word_space_size = output_size = Hamming
elif mul_onehot is not None:
data, lexicon_dict = load(anno_file, dict_file)
word_space_size = output_size = mul_onehot[0] * mul_onehot[1]
else:
data, lexicon_dict = load(anno_file, dict_file)
word_space_size = output_size = len(lexicon_dict)
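    # Example (illustrative): with mul_onehot = (256, 2) every target word is
    # encoded as two concatenated 256-way one-hot vectors, so word_space_size
    # and output_size are 256 * 2 = 512; with a w2v table of shape
    # [word_num, vector_len] they would equal vector_len instead.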
bbc_croups = load_json('dataset/bbc_croups.json')
sequence_max_length = 500
llprint("Done!\n")
batch_size = 1
input_size = 2048
words_count = 512
word_size = 512
read_heads = 4
learning_rate = 1e-4
momentum = 0.8
from_checkpoint = None
iterations = len(data)
data_size = 30000
start_step = 0
last_sum = 1
last_log = 1
mis_data_offset = 0
# execution mode
single_repeat = False
feedback = True
DEBUG = False
TEST = False
show_sentence = False
options, _ = getopt.getopt(sys.argv[1:], '', ['checkpoint=', 'iterations=', 'start=', 'sig_vertifi=', 'debug=', 'test=', 'show_sentence='])
"""
༼ つ ◕_◕ ༽つ ❤ ☀ ☆ ☂ ☻ ♞ ☯
"""
for opt in options:
if opt[0] == '--checkpoint':
from_checkpoint = opt[1]
elif opt[0] == '--iterations':
iterations = int(opt[1])
elif opt[0] == '--start':
last_sum = last_log = start_step = int(opt[1])
elif opt[0] == '--sig_vertifi':
lowerc = opt[1].lower()
single_repeat = lowerc == 't' or lowerc == 'true' or lowerc == '1'
elif opt[0] == '--debug':
lowerc = opt[1].lower()
DEBUG = lowerc == 't' or lowerc == 'true' or lowerc == '1'
elif opt[0] == '--test':
lowerc = opt[1].lower()
TEST = lowerc == 't' or lowerc == 'true' or lowerc == '1'
elif opt[0] == '--show_sentence':
lowerc = opt[1].lower()
show_sentence = lowerc == 't' or lowerc == 'true' or lowerc == '1'
graph = tf.Graph()
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with graph.as_default():
with tf.Session(graph=graph, config=config) as session:
llprint("Building Computational Graph ... ")
llprint("Done!")
llprint("Building DNC ... ")
# ncomputer = DNC(
# L2RecurrentController,
# input_size,
# output_size,
# sequence_max_length,
# words_count,
# word_size,
# read_heads,
# batch_size,
# output_feedback=feedback
# )
ncomputer = DNCDuo(
MemRNNController,
input_size,
output_size,
sequence_max_length,
words_count,
word_size,
read_heads,
batch_size,
testing=TEST,
output_feedback=feedback
)
output, memory_view = ncomputer.get_outputs()
softmax_output = tf.nn.softmax(output)
memory_states = ncomputer.get_memoory_states()
# optimizer = tf.train.RMSPropOptimizer(learning_rate, momentum=momentum)
optimizer = tf.train.AdamOptimizer(learning_rate)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate)
loss_weights = tf.placeholder(tf.float32, [batch_size, None])
target_range = tf.placeholder_with_default([[1, 1] for _ in range(batch_size)], [batch_size, 2]) # start index, target len
zero = tf.constant(0, dtype=tf.float32)
# output tensors will containing all output from both input steps and output steps.
"""
Loss functions
"""
loss_decode = None
if w2v_emb is not None:
loss = tf.losses.absolute_difference(output * tf.expand_dims(loss_weights, axis=2), ncomputer.target_output)
loss /= tf.reduce_sum(loss_weights)
# flat_read_vectors = tf.reshape(new_read_vectors, (-1, word_size * read_heads))
elif Hamming is not None:
# TODO: code binary CE loss
loss = tf.contrib.keras.backend.binary_crossentropy(
output,
ncomputer.target_output,
from_logits=True
)
loss = tf.reduce_sum(loss)
total_size = tf.reduce_sum(loss_weights)
total_size += 1e-12 # to avoid division by 0 for all-0 weights
loss /= total_size
elif mul_onehot is not None:
target_output_mul_id = tf.placeholder(tf.int32, [batch_size, None, mul_onehot[1]], name='targets_mul_id')
softmax_slice = []
softout_slice = []
for i in range(mul_onehot[1]):
st = i * mul_onehot[0]
ed = (i + 1) * mul_onehot[0]
seq_loss = tf.contrib.seq2seq.sequence_loss(output[:, :, st:ed], target_output_mul_id[:, :, i], loss_weights)
softmax_slice.append(seq_loss)
softout_slice.append(tf.nn.softmax(output[:, :, st:ed]))
loss = tf.reduce_sum(tf.stack(softmax_slice))
softmax_output = tf.concat(softout_slice, 2)
else: # using one-hot embedding
loss = tf.contrib.seq2seq.sequence_loss(output, ncomputer.target_output_id, loss_weights)
"""
loss for DNCAuto's write_vecotr autoencoder.
"""
if type(ncomputer) is DNCAuto:
loss_decode = tf.losses.absolute_difference(
ncomputer.input_data,
ncomputer.get_decoder_output()
)
loss += loss_decode
summeries = []
gradients = optimizer.compute_gradients(loss)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
                    # TODO: add grad noise based on "saddle point condition"
noise = tf.random_normal(tf.shape(var), stddev=1e-3)
gradients[i] = (tf.clip_by_value(grad, -5, 5) + noise, var)
for (grad, var) in gradients:
if grad is not None:
summeries.append(tf.summary.histogram(var.name + '/grad', grad))
trainable_var = tf.trainable_variables()
for v in trainable_var:
summeries.append(tf.summary.histogram(v.name + '/values', v))
apply_gradients = optimizer.apply_gradients(gradients)
summeries.append(tf.summary.scalar("Loss", loss))
summerize_op = tf.summary.merge(summeries)
no_summerize = tf.no_op()
summerizer = tf.summary.FileWriter(tb_logs_dir, graph)
llprint("Done!")
llprint("Initializing Variables ... ")
session.run(tf.global_variables_initializer())
llprint("Done!")
if from_checkpoint is not None:
llprint("Restoring Checkpoint %s ... " % (from_checkpoint))
ncomputer.restore(session, ckpts_dir, from_checkpoint)
llprint("Done!")
# session = tf_debug.LocalCLIDebugWrapperSession(session)
# session.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
last_100_losses = []
last_avg_min_max = [0, 0, 0]
start = 0 if start_step == 0 else start_step + 1
end = 4000000
# end = start_step + iterations + 1 if start_step + iterations + 1 < len(data) else len(data)
reuse_data_param = 1
start_time_100 = time.time()
end_time_100 = None
avg_100_time = 0.
avg_counter = 0
dataset = VideoCaptionDataset('./dataset', data, lexicon_dict, dataset_size=data_size, show_log=True, batch_size=batch_size, croups=bbc_croups)
for i, batch in zip(range(start, end), dataset.get_batchs(start_iter=start, feedback=feedback, word_emb=w2v_emb, mul_onehot=mul_onehot, norm=True)):
try:
input_data = batch['input_data']
target_outputs = batch['target_outputs']
seq_len = batch['seq_len']['seq_len']
mask = batch['mask']
target_step = None
summerize = (i - last_sum >= 100)
runtime_statistics = (i % 1000 == 0) and False
# take_checkpoint = (i != 0) and (i % 200 == 0)
print('Feed features into DNC.')
                # repeat the same input 'seq_reapte' times.
# for n in range(seq_reapte):
first_loss = None
n = 0
if runtime_statistics:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
feed = {
ncomputer.input_data: input_data,
ncomputer.target_output: target_outputs,
ncomputer.target_output_id: onehot_vec2id(target_outputs),
ncomputer.sequence_length: seq_len,
loss_weights: mask.reshape([batch_size, -1]),
# target_range: target_step
}
if mul_onehot2ids is not None:
feed[target_output_mul_id] = mul_onehot2ids(target_outputs)
loss_de_value, loss_value, out_value, _, summary = session.run([
loss_decode if loss_decode is not None else tf.no_op(),
loss,
softmax_output,
apply_gradients,
summerize_op if summerize else no_summerize,
], feed_dict=feed,
options=run_options,
run_metadata=run_metadata
)
summerizer.add_run_metadata(run_metadata, 'step%d' % i)
elif DEBUG:
feed = {
ncomputer.input_data: input_data,
ncomputer.target_output: target_outputs,
ncomputer.target_output_id: onehot_vec2id(target_outputs),
ncomputer.sequence_length: seq_len,
loss_weights: mask.reshape([batch_size, -1]),
# target_range: target_step
}
if mul_onehot2ids is not None:
feed[target_output_mul_id] = mul_onehot2ids(target_outputs)
loss_de_value, loss_value, out_value, raw_output, grads = session.run([
loss_decode if loss_decode is not None else tf.no_op(),
loss,
softmax_output,
output,
gradients,
], feed_dict=feed)
debug_var = {
"loss": loss_value,
"softmax_out": out_value,
"raw_out": raw_output,
"mask": mask.reshape([batch_size, -1]),
"target": target_outputs,
"grad": {var[1].name: v for var, v in zip(gradients, grads)},
}
Target_sent = decode_output(lexicon_dict, target_outputs[0], word2v_emb=w2v_emb, hamming=Hamming, mul_onehot=mul_onehot)
print(colored('Target: ', color='cyan'), Target_sent[0])
DNC_sent = decode_output(lexicon_dict, out_value[0], target=Target_sent[0], word2v_emb=w2v_emb, mul_onehot=mul_onehot)
for out in DNC_sent:
print(colored('DCN: ', color='green'), out)
summary = None
np.save("debug_%s.npy" % from_checkpoint, debug_var)
sys.exit(0)
elif TEST:
out_value, raw_output, mem_tuple, mem_matrix = session.run([
softmax_output,
output,
memory_view,
memory_states,
], feed_dict={
ncomputer.input_data: input_data,
ncomputer.target_output: target_outputs,
ncomputer.target_output_id: onehot_vec2id(target_outputs),
ncomputer.sequence_length: seq_len,
loss_weights: mask.reshape([batch_size, -1]),
# target_range: target_step
})
Target_sent = decode_output(lexicon_dict, target_outputs[0], word2v_emb=w2v_emb, hamming=Hamming, mul_onehot=mul_onehot)
print(colored('Target: ', color='cyan'), Target_sent[0])
DNC_sent = decode_output(lexicon_dict, out_value[0], target=Target_sent[0], word2v_emb=w2v_emb, mul_onehot=mul_onehot)
for out in DNC_sent:
print(colored('DCN: ', color='green'), out)
summary = None
                        np.save(os.path.join('./Visualize', get_video_name(batch) + '_memView_%s.npy' % from_checkpoint), mem_tuple)
                        np.save(os.path.join('./Visualize', get_video_name(batch) + '_memMatrix_%s.npy' % from_checkpoint), mem_matrix)
                        np.save(os.path.join('./Visualize', get_video_name(batch) + '_outputMatrix_%s.npy' % from_checkpoint), out_value)
# if i >= start + 10:
sys.exit(0)
else:
"""
Training
"""
feed = {
ncomputer.input_data: input_data,
ncomputer.target_output: target_outputs,
ncomputer.target_output_id: onehot_vec2id(target_outputs),
ncomputer.sequence_length: seq_len,
loss_weights: mask.reshape([batch_size, -1]),
# target_range: target_step
}
if mul_onehot2ids is not None:
feed[target_output_mul_id] = mul_onehot2ids(target_outputs)
loss_value, out_value, _, summary = session.run([
# loss_decode if loss_decode is not None else tf.no_op(),
loss,
softmax_output,
apply_gradients,
summerize_op if summerize else no_summerize,
], feed_dict=feed)
print(colored('[%d]Loss: ' % n, color='green'), loss_value)
print('-' * 100)
last_100_losses.append(loss_value)
if summary is not None:
summerizer.add_summary(summary, i)
if i - last_sum >= 100:
last_sum = i
llprint(" Avg. Cross-Entropy: %.7f" % (np.mean(last_100_losses)))
llprint(" Max. %.7f Min. %.7f" % (max(last_100_losses), min(last_100_losses)))
last_avg_min_max = [np.mean(last_100_losses), min(last_100_losses), max(last_100_losses)]
end_time_100 = time.time()
elapsed_time = (end_time_100 - start_time_100) / 60
avg_counter += 1
avg_100_time += (1. / avg_counter) * (elapsed_time - avg_100_time)
estimated_time = (avg_100_time * ((end - i) / 100.)) / 60.
print("Avg. 100 iterations time: %.2f minutes" % (avg_100_time))
print("Approx. time to completion: %.2f hours" % (estimated_time))
try:
os.system("python3 PyGoogleSheet/pyGooSheet.py --step %d --value %.7f" % (i, np.mean(last_100_losses)))
except:
print(colored('Error: ', color='red'), 'fail to update google sheet!')
start_time_100 = time.time()
last_100_losses = []
if i - last_log >= 1000:
last_log = i
llprint("Saving Checkpoint ... "),
ncomputer.save(session, ckpts_dir, 'step-%d' % (i))
llprint("Done!")
except KeyboardInterrupt:
llprint("Saving Checkpoint ... "),
ncomputer.save(session, ckpts_dir, 'step-%d' % (i))
llprint("Done!")
sys.exit(0)
|
{
"content_hash": "660f1db23a9ee1900b660ded992ea0e1",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 160,
"avg_line_length": 41.82327586206897,
"alnum_prop": 0.48613830773987426,
"repo_name": "HimariO/VideoSum",
"id": "c0a65d1e7f157c046f32ada34aa134cb0ce885e8",
"size": "19432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/video/train_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "315417"
},
{
"name": "Python",
"bytes": "391627"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
}
|
import sys
#print('Number of arguments:', len(sys.argv), 'arguments.')
#print('Argument List:', str(sys.argv))
#students = sys.argv
students = {"Tom":[100,90,88,95,55], "John":[70,78,60,89], "Amy":[50,66,20,88,30], "Bob":[99,64,80,72,90], "Tony":[100,83,92] }
#print(students)
#print(type(students))
#students.pop(0)
#max
firstStudent = {}
#min
lastStudent = {}
maxcount = 0
for student,scores in students.items():
count = 0
for score in scores:
count = count + 1
if maxcount < count:
maxcount = count
print("maxcount:%d"%maxcount)
for student,scores in students.items():
    total = 0  # renamed from `sum` to avoid shadowing the built-in
    count = 0
    for score in scores:
        total += float(score)
        count = count + 1
    studentDict = {"name":student,"sum":total,"count":count,"avg":total/count}
    #studentDict = {"name":student,"sum":total,"count":count,"avg":total/maxcount}
if not firstStudent:
firstStudent = studentDict
if not lastStudent:
lastStudent = studentDict
if studentDict["avg"] > firstStudent["avg"]:
firstStudent = studentDict
if studentDict["avg"] < lastStudent["avg"]:
lastStudent = studentDict
print(studentDict)
print('The highest student:',firstStudent["name"],': avg is ',firstStudent["avg"])
print('The lowest student:',lastStudent["name"],': avg is ',lastStudent["avg"])
|
{
"content_hash": "d774d67cdc91cb98a77ff2298605ca4c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 127,
"avg_line_length": 28.363636363636363,
"alnum_prop": 0.6826923076923077,
"repo_name": "yuzheng/fbPOIMapping",
"id": "630a16b2bb4b738da76315769589938bc1b88982",
"size": "1409",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "score-ex.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
'''
Created on Nov 26, 2011
@author: Mirna Lerotic, 2nd Look Consulting
http://www.2ndlookconsulting.com/
Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the Argonne National Laboratory nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
from __future__ import division
import os
import numpy as np
import h5py
import time
from file_util import call_function_with_retry
import maps_definitions
import maps_mda
#-----------------------------------------------------------------------------
class h5:
def __init__(self, logger):
self.logger = logger
#-----------------------------------------------------------------------------
def write_mca_hdf5(self, filename, mca_arr, overwrite=True):
# set compression level where applicable:
gzip = 5
file_status = 0
entry_exists = 0
verbose = 0
# test whether a file with this filename already exists:
try:
# Open HDF5 file
f = h5py.File(filename, 'r')
if verbose:
self.logger.debug('Have HDF5 file: %s', filename)
file_exists = 1
file_is_hdf = 1
file_status = 2
#MAPS HDF5 group
if 'MAPS' in f:
if verbose:
self.logger.debug('MAPS group found in file: %s', filename)
mapsGrp = f['MAPS']
file_status = 3
if 'mca_arr' in mapsGrp:
if verbose:
self.logger.debug('MAPS/mca_arr found in file: %s', filename)
file_status = 4
# at the moment, simply overwrite the mca_arr section of
# the file; in the future, may want to test, and only
# overwrite if specific flag is set.
f.close()
except:
if verbose:
self.logger.debug('Creating new file: %s', filename)
if verbose:
self.logger.debug('file_status: %s', file_status)
if overwrite:
file_status = 0
if file_status <= 1:
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (filename, 'w'))
#f = h5py.File(filename, 'w')
else :
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (filename, 'a'))
#f = h5py.File(filename, 'a')
if file_status <= 3 :
# create a group for maps to hold the data
mapsGrp = f.create_group('MAPS')
# now set a comment
            mapsGrp.attrs['comments'] = 'This is the group that stores all relevant information created (and read) by the MAPS analysis software'
if file_status >= 4 :
mapsGrp = f['MAPS']
entry_exists = 1
if entry_exists == 0:
# create dataset and save full spectra
data = np.transpose(mca_arr)
dimensions = data.shape
if len(data.shape) == 1:
                chunk_dimensions = (dimensions[0],)
elif len(data.shape) == 2:
chunk_dimensions = (dimensions[0], 1)
elif len(data.shape) == 3:
chunk_dimensions = (dimensions[0], 1, 1)
elif len(data.shape) == 4:
chunk_dimensions = (dimensions[0], 1, 1, 1)
else:
chunk_dimensions = data.shape
comment = 'these are the full spectra at each pixel of the dataset'
ds_data = mapsGrp.create_dataset('mca_arr', data = data, chunks=chunk_dimensions, compression='gzip', compression_opts=gzip)
ds_data.attrs['comments'] = comment
else:
# save the data to existing array
# delete old dataset, create new and save full spectra
data = np.transpose(mca_arr)
dimensions = data.shape
chunk_dimensions = (dimensions[0], 1, 1)
comment = 'these are the full spectra at each pixel of the dataset'
del mapsGrp['mca_arr']
ds_data = mapsGrp.create_dataset('mca_arr', data = data, chunks=chunk_dimensions, compression='gzip', compression_opts=gzip)
ds_data.attrs['comments'] = comment
f.close()
return
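# Example (illustrative) round trip through the helper above; the shape and
# the `logger` instance are hypothetical:
#   writer = h5(logger)
#   mca = np.zeros((10, 20, 2048), dtype=np.float32)   # (rows, cols, channels)
#   writer.write_mca_hdf5('scan.h5', mca)
#   with h5py.File('scan.h5', 'r') as f:
#       full_spectra = f['MAPS/mca_arr'][:]            # stored transposed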
#-----------------------------------------------------------------------------
def write_hdf5(self, thisdata, filename, mca_arr, energy_channels, extra_pv=None, extra_pv_order=None, update=False):
#set compression level where applicable:
gzip = 7
if not update:
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (filename, 'w'))
#f = h5py.File(filename, 'w')
# create a group for maps to hold the data
mapsGrp = f.create_group('MAPS')
# now set a comment
mapsGrp.attrs['comments'] = 'This is the group that stores all relevant information created (and read) by the MAPS analysis software'
else:
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (filename, 'a'))
#f = h5py.File(filename, 'a')
if 'MAPS' not in f:
self.logger.error('Error: HDF5 file does not contain the required MAPS group; aborting')
f.close()
return
mapsGrp = f['MAPS']
if 'XRF_roi' in mapsGrp:
del mapsGrp['XRF_roi']
# this is the data we want to write into the hdf5 file
entryname = 'XRF_roi'
comment = 'these are elemental maps created from full spectra using spectral ROIs'
data = np.transpose(thisdata.dataset_orig[:, :, :, 0])
# choose an image / map as a chunk
dimensions = data.shape
chunk_dimensions = (1, dimensions[1], dimensions[2])
ds_data = mapsGrp.create_dataset(entryname, data=data, chunks=chunk_dimensions, compression='gzip', compression_opts=gzip)
ds_data.attrs['comments'] = comment
#self.logger.debug('total of data 0: %s', np.sum(data))
if 'XRF_fits' in mapsGrp:
del mapsGrp['XRF_fits']
entryname = 'XRF_fits'
comment = 'these are elemental maps created from full spectra using per pixel fitting'
data = np.transpose(thisdata.dataset_orig[:, :, :, 1])
dimensions = data.shape
chunk_dimensions = (1, dimensions[1], dimensions[2])
if np.sum(data) != 0.0:
contains_fitted_data = 1
else:
contains_fitted_data = 0
if contains_fitted_data:
ds_data = mapsGrp.create_dataset(entryname, data=data, chunks=chunk_dimensions, compression='gzip', compression_opts=gzip)
ds_data.attrs['comments'] = comment
if 'XRF_roi_plus' in mapsGrp:
del mapsGrp['XRF_roi_plus']
entryname = 'XRF_roi_plus'
comment = 'these are elemental maps created from full spectra accounting for crosstalk between channels / elements'
data = np.transpose(thisdata.dataset_orig[:, :, :, 2])
if np.sum(data) != 0.:
contains_roiplus_data = 1
else:
contains_roiplus_data = 0
dimensions = data.shape
chunk_dimensions = (1, dimensions[1], dimensions[2])
if contains_roiplus_data:
ds_data = mapsGrp.create_dataset(entryname, data=data, chunks=chunk_dimensions, compression='gzip', compression_opts=gzip)
ds_data.attrs['comments'] = comment
self.logger.debug('total of data 2: %s', np.sum(data))
entryname = 'scalers'
comment = 'these are scaler information acquired during the scan'
data = np.transpose(thisdata.dmaps_set[:, :, :])
dimensions = data.shape
chunk_dimensions = (1, dimensions[1], dimensions[2])
ds_data = mapsGrp.create_dataset(entryname, data=data, chunks=chunk_dimensions, compression='gzip', compression_opts=gzip)
ds_data.attrs['comments'] = comment
entryname = 'x_axis'
if entryname not in mapsGrp:
comment = 'stores the values of the primary fast axis positioner, typically sample x'
data = thisdata.x_coord_arr
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'y_axis'
if entryname not in mapsGrp:
comment = 'stores the values of the slow axis positioner, typically sample y'
data = thisdata.y_coord_arr
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'energy'
comment = 'stores the values of the energy axis'
data = thisdata.energy
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
# now write integrated spectrum as dataset
entryname = 'int_spec'
comment = 'spectrum integrated over the full dataset'
data = thisdata.energy_spec
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
# now write max channel spectrum as dataset
entryname = 'max_chan_spec'
comment = 'several maximum channel spectra integrated over the full dataset'
data = np.transpose(thisdata.max_chan_spec)
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
# now write quantification of roi dataset
entryname = 'XRF_roi_quant'
comment = 'quantification curve for the ROI based dataset'
data = np.transpose(thisdata.dataset_calibration[:, 0, :])
data = np.reshape(data, (data.shape[0], 1, data.shape[1]))
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
# now write quantification of fits dataset
entryname = 'XRF_fits_quant'
comment = 'quantification curve for the fits based dataset'
data = np.transpose(thisdata.dataset_calibration[:, 1, :])
data = np.reshape(data, (data.shape[0], 1, data.shape[1]))
if contains_fitted_data:
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
# now write quantification of ROI+ dataset
entryname = 'XRF_roi_plus_quant'
comment = 'quantification curve for the datasets based on ROI + definitions ( with background subtraction)'
data = np.transpose(thisdata.dataset_calibration[:, 2, :])
data = np.reshape(data, (data.shape[0], 1, data.shape[1]))
if contains_roiplus_data:
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'us_amp'
comment = 'sensitivity of the upstream amplifier'
data = thisdata.us_amp
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'ds_amp'
comment = 'sensitivity of the downstream amplifier'
data = thisdata.ds_amp
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
try:
entryname = 'energy_calib'
comment = 'energy calibration'
data = thisdata.energy_fit
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
except:
self.logger.exception('Error: HDF5: Could not write energy calibration')
entryname = 'version'
comment = 'this is the version number of the file structure'
data = thisdata.version
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
try:
entryname = 'scan_time_stamp'
comment = 'time the scan was acquired'
data = thisdata.scan_time_stamp
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
except:
self.logger.exception('HDF5: Could not write scan_time_stamp')
entryname = 'write_date'
comment = 'time this analysis was carried out'
data = str(thisdata.write_date)
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'scaler_names'
comment = 'names of the scalers saved'
data = thisdata.dmaps_names
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'scaler_units'
comment = 'units of the scalers saved'
data = thisdata.dmaps_units
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'channel_names'
comment = 'names of the channels saved'
data = thisdata.chan_names
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'channel_units'
comment = 'units of the channels saved'
data = thisdata.chan_units[:]
data = list(zip(*data))  # materialize: h5py needs a concrete sequence under Python 3
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'extra_strings'
comment = 'additional string values saved in the dataset'
data = thisdata.extra_str_arr
if len(data) > 0:
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'add_long'
comment = 'additional long values'
data = [thisdata.add_long['a'], thisdata.add_long['b'], thisdata.add_long['c'], thisdata.add_long['d'], thisdata.add_long['e']]
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'add_float'
comment = 'additional float values'
data = [thisdata.add_float['a'], thisdata.add_float['b'], thisdata.add_float['c'], thisdata.add_float['d'], thisdata.add_float['e']]
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'add_string'
comment = 'additional string values'
data = [thisdata.add_str['a'], thisdata.add_str['b'], thisdata.add_str['c'], thisdata.add_str['d'], thisdata.add_str['e'],
thisdata.add_str['f'], thisdata.add_str['g'], thisdata.add_str['h'], thisdata.add_str['i'], thisdata.add_str['j'],
thisdata.add_str['k'], thisdata.add_str['l'], thisdata.add_str['m'], thisdata.add_str['n'], thisdata.add_str['o']]
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
if not update:
self.logger.info('saving full spectra to hdf5')
# now save full spectra
entryname = 'mca_arr'
comment = 'these are the full spectra at each pixel of the dataset'
data = np.transpose(mca_arr)
dimensions = data.shape
chunk_dimensions = (dimensions[0], 1, 1)
for retry in range(3):
try:
ds_data = mapsGrp.create_dataset(entryname, data=data, chunks=chunk_dimensions, compression='gzip', compression_opts=gzip)
ds_data.attrs['comments'] = comment
break
except Exception:
self.logger.exception('Error creating dataset %s, retry %d', entryname, retry)
# create a subgroup FOR make_maps_conf
if 'make_maps_conf' not in mapsGrp:
mmcGrp = mapsGrp.create_group('make_maps_conf')
else:
mmcGrp = mapsGrp['make_maps_conf']
entryname = 'use_default_dirs'
comment = ''
data = thisdata.make_maps_conf.use_default_dirs
ds_data = mmcGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'use_beamline'
data = thisdata.make_maps_conf.use_beamline
ds_data = mmcGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'version'
data = thisdata.make_maps_conf.version
ds_data = mmcGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'use_det'
data = thisdata.make_maps_conf.use_det
ds_data = mmcGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'calibration_offset'
data = thisdata.make_maps_conf.calibration.offset
ds_data = mmcGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'calibration_slope'
data = thisdata.make_maps_conf.calibration.slope
ds_data = mmcGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
entryname = 'calibration_quad'
data = thisdata.make_maps_conf.calibration.quad
ds_data = mmcGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
'''
if 'nbs1832' not in mmcGrp:
nbs1832Grp = mmcGrp.create_group('nbs1832')
else:
nbs1832Grp = mmcGrp['nbs1832']
substructure = thisdata.make_maps_conf.nbs32
entryname = 'name'
data = substructure.name
ds_data = nbs1832Grp.create_dataset(entryname, data = data)
entryname = 'date'
data = substructure.date
ds_data = nbs1832Grp.create_dataset(entryname, data = data)
entryname = 'live_time'
data = substructure.live_time
ds_data = nbs1832Grp.create_dataset(entryname, data = data)
entryname = 'real_time'
data = substructure.real_time
ds_data = nbs1832Grp.create_dataset(entryname, data = data)
entryname = 'current'
data = substructure.current
ds_data = nbs1832Grp.create_dataset(entryname, data = data)
entryname = 'us_ic'
data = substructure.us_ic
ds_data = nbs1832Grp.create_dataset(entryname, data = data)
entryname = 'ds_ic'
data = substructure.ds_ic
ds_data = nbs1832Grp.create_dataset(entryname, data = data)
entryname = 'us_amp'
data = substructure.us_amp
ds_data = nbs1832Grp.create_dataset(entryname, data = data)
entryname = 'ds_amp'
data = substructure.ds_amp
ds_data = nbs1832Grp.create_dataset(entryname, data = data)
if 'nbs1833' not in mmcGrp:
nbs1833Grp = mmcGrp.create_group('nbs1833')
else:
nbs1833Grp = mmcGrp['nbs1833']
substructure = thisdata.make_maps_conf.nbs33
entryname = 'name'
data = substructure.name
ds_data = nbs1833Grp.create_dataset(entryname, data = data)
entryname = 'date'
data = substructure.date
ds_data = nbs1833Grp.create_dataset(entryname, data = data)
entryname = 'live_time'
data = substructure.live_time
ds_data = nbs1833Grp.create_dataset(entryname, data = data)
entryname = 'real_time'
data = substructure.real_time
ds_data = nbs1833Grp.create_dataset(entryname, data = data)
entryname = 'current'
data = substructure.current
ds_data = nbs1833Grp.create_dataset(entryname, data = data)
entryname = 'us_ic'
data = substructure.us_ic
ds_data = nbs1833Grp.create_dataset(entryname, data = data)
entryname = 'ds_ic'
data = substructure.ds_ic
ds_data = nbs1833Grp.create_dataset(entryname, data = data)
entryname = 'us_amp'
data = substructure.us_amp
ds_data = nbs1833Grp.create_dataset(entryname, data = data)
entryname = 'ds_amp'
data = substructure.ds_amp
ds_data = nbs1833Grp.create_dataset(entryname, data = data)
'''
if 'element_standard' not in mmcGrp:
elStandardGrp = mmcGrp.create_group('element_standard')
else:
elStandardGrp = mmcGrp['element_standard']
substructure = thisdata.make_maps_conf.element_standard
entryname = 'name'
data = substructure.name
ds_data = elStandardGrp.create_dataset(entryname, data=data)
entryname = 'date'
data = substructure.date
ds_data = elStandardGrp.create_dataset(entryname, data=data)
entryname = 'live_time'
data = substructure.live_time
ds_data = elStandardGrp.create_dataset(entryname, data=data)
entryname = 'real_time'
data = substructure.real_time
ds_data = elStandardGrp.create_dataset(entryname, data=data)
entryname = 'current'
data = substructure.current
ds_data = elStandardGrp.create_dataset(entryname, data=data)
entryname = 'us_ic'
data = substructure.us_ic
ds_data = elStandardGrp.create_dataset(entryname, data=data)
entryname = 'ds_ic'
data = substructure.ds_ic
ds_data = elStandardGrp.create_dataset(entryname, data=data)
entryname = 'us_amp'
data = substructure.us_amp
ds_data = elStandardGrp.create_dataset(entryname, data=data)
entryname = 'ds_amp'
data = substructure.ds_amp
ds_data = elStandardGrp.create_dataset(entryname, data=data)
entryname = 'axo_e_cal'
data = np.transpose(thisdata.make_maps_conf.e_cal)
ds_data = mmcGrp.create_dataset(entryname, data=data)
'''
if thisdata.version >= 9 :
entryname = 'axo_e_cal'
data = np.transpose(thisdata.make_maps_conf.axo_e_cal)
ds_data = mmcGrp.create_dataset(entryname, data = data)
'''
entryname = 'fit_t_be'
data = thisdata.make_maps_conf.fit_t_be
ds_data = mmcGrp.create_dataset(entryname, data = data)
entryname = 'fit_t_ge'
data = thisdata.make_maps_conf.fit_t_GE
ds_data = mmcGrp.create_dataset(entryname, data = data)
if extra_pv is not None:
if isinstance(extra_pv, dict):
entryname = 'extra_pvs'
comment = 'additional process variables saved in the original dataset'
data = []
#self.logger.debug('extra_pv: %s', extra_pv)
if extra_pv_order:
for k in extra_pv_order:
v = extra_pv[k]
data.append([k, str(v[2]), v[0], v[1]])
else:
for k in sorted(extra_pv.keys()):
v = extra_pv[k]
data.append([k, str(v[2]), v[0], v[1]])
ds_data = mapsGrp.create_dataset(entryname, data = np.transpose(data))
ds_data.attrs['comments'] = comment
entryname = 'extra_pvs_as_csv'
comment = 'additional process variables saved in the original dataset, name and value fields reported as comma separated values'
if extra_pv_order:
data = []
for k in extra_pv_order:
v = extra_pv[k]
data.append(k + ', ' + str(v[2]))
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
else:
# no explicit order given: fall back to sorted keys, mirroring the 'extra_pvs' dataset above
data = [k + ', ' + str(extra_pv[k][2]) for k in sorted(extra_pv.keys())]
ds_data = mapsGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
else:
entryname = 'extra_pvs'
comment = 'additional process variables saved in the original dataset'
ds_data = mapsGrp.create_dataset(entryname, data=extra_pv)
ds_data.attrs['comments'] = comment
if extra_pv_order is not None:
entryname = 'extra_pvs_as_csv'
comment = 'additional process variables saved in the original dataset, name and value fields reported as comma separated values'
ds_data = mapsGrp.create_dataset(entryname, data=extra_pv_order)
ds_data.attrs['comments'] = comment
f.close()
return
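# Note: `thisdata` is duck-typed; any object exposing the attributes read
# above (dataset_orig, dmaps_set, x_coord_arr, y_coord_arr, energy,
# energy_spec, max_chan_spec, dataset_calibration, make_maps_conf, ...)
# can be written out with this method.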
#-----------------------------------------------------------------------------
def maps_change_xrf_read_hdf5(self, sfile, make_maps_conf):
maps_def = maps_definitions.maps_definitions(self.logger)
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (sfile, 'r'))
if f is None:
self.logger.error('Error could not open file %s', sfile)
return None, None, None, None, 0
if 'MAPS' not in f:
self.logger.error('Error: HDF5 file does not contain the required MAPS group; aborting')
f.close()
return None, None, None, None, 0
maps_group_id = f['MAPS']
entryname = 'XRF_roi'
this_xrfdata, valid_read = self.read_hdf5_core(maps_group_id, entryname)
if valid_read == 0:
self.logger.error('error reading: %s', entryname)
f.close()
return None, None, None, None, 0
this_xrfdata = np.transpose(this_xrfdata)
dimensions = this_xrfdata.shape
# if this is a 2D (x, y) scan dimensions should be 3
n_cols = dimensions[0]
n_rows = dimensions[1]
n_used_chan = dimensions[2]
entryname = 'scalers'
this_scalers, valid_read = self.read_hdf5_core(maps_group_id, entryname)
if valid_read == 0:
self.logger.error('error reading: %s', entryname)
f.close()
return None, None, None, None, 0
this_scalers = this_scalers.transpose()
dimensions = this_scalers.shape
n_used_dmaps = dimensions[2]
entryname = 'energy'
this_energy, valid_read = self.read_hdf5_core(maps_group_id, entryname)
if valid_read == 0:
self.logger.error('error reading: %s', entryname)
f.close()
return None, None, None, None, 0
dimensions = this_energy.shape
n_channels = dimensions[0]
# default, one for roi based , one for fitted images and one for sigma.
dataset_size = 3
# current MAPS versions handle multiple detectors by creating
# different files, one for each detector, plus one averaged
# file at the end
no_detectors = 1
version = 9
XRFmaps_info = maps_def.define_xrfmaps_info(n_cols, n_rows, dataset_size,
n_channels, n_channels, no_detectors,
n_used_chan, n_used_dmaps,
make_maps_conf, version=9)
XRFmaps_info.n_ev = n_channels
XRFmaps_info.n_energy = n_channels
XRFmaps_info.energy = this_energy
XRFmaps_info.dmaps_set = this_scalers
XRFmaps_info.dataset_names = ['ROI sum', 'fitted', 'sigma']
for i in range(n_used_chan):
XRFmaps_info.dataset_orig[:, :, i, 0] = this_xrfdata[:,:,i]
this_xrfdata = 0
dset_name = 'scan_time_stamp'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.scan_time_stamp = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'write_date'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.write_date = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'x_axis'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.x_coord_arr = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'y_axis'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.y_coord_arr = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'scaler_names'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.dmaps_names = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'scaler_units'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.dmaps_units = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'channel_names'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.chan_names = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'channel_units'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.chan_units = list(zip(*this_data))
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'XRF_fits'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
this_data = np.transpose(this_data)
if valid_read:
for i in range(n_used_chan):
XRFmaps_info.dataset_orig[:, :, i, 1] = this_data[:, :, i]
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'XRF_sigma'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
this_data = np.transpose(this_data)
if valid_read:
for i in range(n_used_chan):
XRFmaps_info.dataset_orig[:, :, i, 2] = this_data[:, :, i]
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'XRF_roi_plus'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
this_data = np.transpose(this_data)
if valid_read:
for i in range(n_used_chan):
XRFmaps_info.dataset_orig[:, :, i, 2] = this_data[:, :, i]
XRFmaps_info.dataset_names[2] = 'XRF_roi+'
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'XRF_roi_quant'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
this_data = np.transpose(this_data)
if valid_read:
XRFmaps_info.dataset_calibration[:, 0, :] = this_data[:, 0, :]
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'XRF_roi_plus_quant'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
this_data = np.transpose(this_data)
if valid_read:
XRFmaps_info.dataset_calibration[:, 2, :] = this_data[:, 0, :]
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'XRF_fits_quant'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
this_data = np.transpose(this_data)
if valid_read:
XRFmaps_info.dataset_calibration[:, 1, :] = this_data[:, 0, :]
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'energy'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.energy = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'int_spec'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.energy_spec = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'energy_calib'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.energy_fit = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'max_chan_spec'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
this_data = np.transpose(this_data)
if valid_read:
XRFmaps_info.max_chan_spec = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'us_amp'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.us_amp = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'ds_amp'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.ds_amp = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'extra_strings'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.extra_str_arr = this_data
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'extra_pvs'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.extra_pv = this_data[...]
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
dset_name = 'extra_pvs_as_csv'
this_data, valid_read = self.read_hdf5_core(maps_group_id, dset_name)
if valid_read:
XRFmaps_info.extra_pv_as_csv = this_data[...]
else:
self.logger.warning('could not read '+dset_name+' in file '+sfile)
XRFmaps_info.img_type = 7
f.close()
return XRFmaps_info, n_cols, n_rows, n_channels, valid_read
#-----------------------------------------------------------------------------
def read_hdf5_core(self, gid, entryname, verbose=False):
valid_read = 0
this_data = 0
try:
if entryname not in gid:
if verbose:
self.logger.error('read error: did not find the entry: %s in %s', entryname, gid)
return this_data, valid_read
dataset_id = gid[entryname]
this_data = dataset_id[...]
valid_read = 1
except Exception:
self.logger.exception('Could not read ' + entryname)
return this_data, valid_read
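# Usage sketch for the (data, valid_read) protocol used throughout this
# module (`grp` stands for an already opened h5py group):
#
#   arr, ok = reader.read_hdf5_core(grp, 'x_axis')
#   if ok:
#       do_something_with(arr)  # placeholder for caller code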
#-----------------------------------------------------------------------------
def read_hdf5(self, filename):
import maps_elements
me = maps_elements.maps_elements(self.logger)
info_elements = me.get_element_info()
maps_def = maps_definitions.maps_definitions(self.logger)
maps_conf = maps_def.set_maps_definitions('2-ID-E', info_elements)
XRFmaps_info, n_cols, n_rows, n_channels, valid_read = self.maps_change_xrf_read_hdf5(filename, maps_conf)
return XRFmaps_info, valid_read
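# Minimal sketch, assuming a MAPS-format file on disk:
#
#   import logging
#   reader = h5(logging.getLogger('maps'))
#   info, ok = reader.read_hdf5('img.dat/5730_sample588_1.h5')
#   if ok:
#       print(info.dataset_orig.shape)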
#-----------------------------------------------------------------------------
def add_exchange(self, main, make_maps_conf):
imgdat_filenames = []
if main['dataset_files_to_proc'][0] == 'all':
files = os.listdir(main['XRFmaps_dir'])
extension = '.h5'
for f in files:
if extension in f.lower():
imgdat_filenames.append(f)
else:
imgdat_filenames = [mdafile.replace('.mda', '.h5') for mdafile in main['dataset_files_to_proc']]
gzip = 7
no_files = len(imgdat_filenames)
current_directory = main['master_dir']
for n_filenumber in range(no_files):
sFile = os.path.join(main['XRFmaps_dir'], imgdat_filenames[n_filenumber])
self.logger.info('Adding exchange to %s', sFile)
XRFmaps_info, n_cols, n_rows, n_channels, valid_read = self.maps_change_xrf_read_hdf5(sFile, make_maps_conf)
if valid_read == 0:
self.logger.error('Error calling h5p.maps_change_xrf_read_hdf5(%s, %s)', sFile, make_maps_conf)
return
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (sFile, 'a'))
#f = h5py.File(sFile, 'a')
if 'MAPS' not in f:
self.logger.error('Error: HDF5 file does not contain the required MAPS group; aborting')
f.close()
return
mapsGrp = f['MAPS']
# create a subgroup for exchange
if 'exchange' not in f:
excGrp = f.create_group('exchange')
excGrp.attrs['comments'] = 'This is the group that stores a default analysed dataset'
else:
excGrp = f['exchange']
entryname = 'images'
#comment = 'these are elemental maps'
drop_val = 1
data = XRFmaps_info.dataset_orig[:, :, :, drop_val]
comment = 'these are elemental maps based on per pixel fitting'
if np.sum(data) == 0.0:
drop_val = 2
data = XRFmaps_info.dataset_orig[:, :, :, drop_val]
comment = 'these are elemental maps based on roi plus'
if np.sum(data) == 0.0:
drop_val = 0
data = XRFmaps_info.dataset_orig[:, :, :, drop_val]
comment = 'these are elemental maps based on rois'
dataset = XRFmaps_info.dataset
dataset[:, :, 0:XRFmaps_info.n_used_dmaps] = XRFmaps_info.dmaps_set[:, :, :]
dataset[:, :, XRFmaps_info.n_used_dmaps:XRFmaps_info.n_used_dmaps+XRFmaps_info.n_used_chan] = data[:, :, :]
drop_vtwo = 0 # for now just use ds ic for normalization
if drop_vtwo == 0:
wo = []
if 'ds_ic' in XRFmaps_info.dmaps_names:
wo = np.where(XRFmaps_info.dmaps_names == 'ds_ic')
#if drop_vtwo == 1 : wo = XRFmaps_info.dmaps_names.index('us_ic')
#if drop_vtwo == 2 : wo = XRFmaps_info.dmaps_names.index('SRcurrent')
ic_correction_factor = 1.
#if (drop_vtwo == 0) and (XRFmaps_info.make_maps_conf.nbs32.ds_amp[2] > 0.) and \
# (XRFmaps_info.ds_amp[2] > 0.) :
# ic_correction_factor = XRFmaps_info.make_maps_conf.nbs32.ds_amp[2]/XRFmaps_info.ds_amp[2]
#if (drop_vtwo == 1) and (XRFmaps_info.make_maps_conf.nbs32.us_amp[2] > 0.) and \
# (XRFmaps_info.us_amp[2] > 0.) :
# ic_correction_factor = XRFmaps_info.make_maps_conf.nbs32.us_amp[2]/XRFmaps_info.us_amp[2]
if len(wo) > 0 and len(wo[0]) > 0:  # only normalize when a 'ds_ic' scaler was actually found
calib = XRFmaps_info.dmaps_set[:, :, wo].astype(float)
calib = calib[:, :, 0, 0]
for k in range(XRFmaps_info.n_used_dmaps, XRFmaps_info.n_used_dmaps + XRFmaps_info.n_used_chan):
calib_factor = float(XRFmaps_info.dataset_calibration[k - XRFmaps_info.n_used_dmaps, drop_val, 2-drop_vtwo])
if calib_factor > 0:
dataset[:, :, k] = dataset[:, :, k] / calib_factor / calib
else:
mean_calib = np.mean(np.nan_to_num(calib))
dataset[:, :, k] = dataset[:, :, k] / calib * mean_calib
dataset[:, :, k] = dataset[:, :, k] * ic_correction_factor
data = np.transpose(dataset)
dimensions = data.shape
chunk_dimensions = (1, dimensions[1], dimensions[2])
if entryname not in excGrp:
ds_data = excGrp.create_dataset(entryname, data = data, chunks=chunk_dimensions, compression='gzip', compression_opts=gzip)
ds_data.attrs['comments'] = comment
else:
dataset_id = excGrp[entryname]
dataset_id[...] = data
units = ['-' for x in range(XRFmaps_info.n_used_dmaps+XRFmaps_info.n_used_chan)]
self.logger.debug('len units = %s', len(units))
units[0:XRFmaps_info.n_used_dmaps] = XRFmaps_info.dmaps_units[:]
if type(XRFmaps_info.chan_units[0]) == tuple:
units[XRFmaps_info.n_used_dmaps:XRFmaps_info.n_used_dmaps + XRFmaps_info.n_used_chan] = XRFmaps_info.chan_units[: 2-drop_vtwo][0]
else:
units[XRFmaps_info.n_used_dmaps:XRFmaps_info.n_used_dmaps + XRFmaps_info.n_used_chan] = XRFmaps_info.chan_units[: 2-drop_vtwo]
names = ['' for x in range(XRFmaps_info.n_used_dmaps + XRFmaps_info.n_used_chan)]
names[0:XRFmaps_info.n_used_dmaps] = XRFmaps_info.dmaps_names[:]
names[XRFmaps_info.n_used_dmaps:XRFmaps_info.n_used_dmaps + XRFmaps_info.n_used_chan] = XRFmaps_info.chan_names[:]
entryname = 'x_axis'
comment = 'stores the values of the primary fast axis positioner, typically sample x'
data = XRFmaps_info.x_coord_arr
if entryname not in excGrp:
ds_data = excGrp.create_dataset(entryname, data = data)
ds_data.attrs['comments'] = comment
else:
dataset_id = excGrp[entryname]
dataset_id[...] = data
entryname = 'y_axis'
comment = 'stores the values of the slow axis positioner, typically sample y'
data = XRFmaps_info.y_coord_arr
if entryname not in excGrp:
ds_data = excGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
else:
dataset_id = excGrp[entryname]
dataset_id[...] = data
entryname = 'extra_strings'
comment = 'extra pv name and values as strings'
data = XRFmaps_info.extra_str_arr
if entryname not in excGrp:
ds_data = excGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
else:
dataset_id = excGrp[entryname]
dataset_id[...] = data
entryname = 'extra_pvs_as_csv'
comment = 'extra pvs strings as comma separated values'
data = [word.replace(';', ',') for word in XRFmaps_info.extra_str_arr]
if entryname not in excGrp:
ds_data = excGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
else:
dataset_id = excGrp[entryname]
dataset_id[...] = data
entryname = 'extra_pvs'
comment = 'extra pvs'
data = XRFmaps_info.extra_pv
if entryname not in excGrp:
ds_data = excGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
else:
dataset_id = excGrp[entryname]
dataset_id[...] = data
entryname = 'images_names'
comment = 'names of the xrf and scaler images'
data = names
if entryname not in excGrp:
ds_data = excGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
else:
dataset_id = excGrp[entryname]
dataset_id[...] = data
entryname = 'images_units'
comment = 'units of the xrf and scaler images'
data = units
if entryname not in excGrp:
ds_data = excGrp.create_dataset(entryname, data=data)
ds_data.attrs['comments'] = comment
else:
dataset_id = excGrp[entryname]
if (len(data),) == dataset_id.shape:
dataset_id[...] = data
else:
self.logger.error('Error: could not update %s dataset shapes are different! dataset(%s) : data(%s)', dataset_id.name, dataset_id.shape, len(data))
f.close()
time.sleep(1.0)
self.logger.info('---------------------')
self.logger.info('done adding exchange information')
self.logger.info('---------------------')
self.logger.info(' ')
#-----------------------------------------------------------------------------
def read_scan(self, filename):
#filename= 'D:/mirna/Phyton/Diamond/src/testMapspy/img.dat/5730_sample588_1.h5'
scan_data = maps_mda.scan()
self.logger.info('filename: %s', filename)
f = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (filename, 'r'))
#f = h5py.File(filename, 'r')
if 'MAPS' not in f:
self.logger.error('Error: HDF5 file does not contain the required MAPS group; aborting')
f.close()
return None
maps_group_id = f['MAPS']
this_data, valid_read = self.read_hdf5_core(maps_group_id, 'mca_arr')
if not valid_read:
self.logger.error('could not read mca_arr in file %s', filename)
f.close()
return None
mca_arr = this_data.T
dimensions = mca_arr.shape
self.logger.debug('mca_arr dims: %s', dimensions)
# if this is a 2D (x, y) scan dimensions should be 3
x_pixels = dimensions[0]
y_pixels = dimensions[1]
n_used_chan = dimensions[2]
this_data, valid_read = self.read_hdf5_core(maps_group_id, 'x_axis')
if valid_read:
x_coord_arr = this_data
if len(x_coord_arr.shape) == 2:
scan_data.x_coord_arr = x_coord_arr[:, 0]
else:
scan_data.x_coord_arr = np.array(x_coord_arr)
this_data, valid_read = self.read_hdf5_core(maps_group_id, 'y_axis')
if valid_read:
y_coord_arr = this_data
if len(y_coord_arr.shape) == 2:
scan_data.y_coord_arr = y_coord_arr[0, :]
else:
scan_data.y_coord_arr = np.array(y_coord_arr)
f.close()
scan_data.scan_name = ''
scan_data.scan_time_stamp = ''
scan_data.y_pixels = y_pixels
# create mca calib description array
#scan_data.mca_calib_description_arr = mca_calib_description_arr
# create mca calibration array
#scan_data.mca_calib_arr = mca_calib_arr
scan_data.x_pixels = x_pixels
#detector_arr = fltarr(x_pixels, y_pixels, info.no_detectors)
#scan_data.detector_arr = detector_arr
#scan_data.detector_description_arr = detector_description_arr
#mca_arr = fltarr(x_pixels, y_pixels, no_energy_channels, info.no_detectors)
scan_data.mca_arr = mca_arr
return scan_data
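# Sketch (hypothetical file name): round-trip the spectra written by
# write_mca_hdf5 above.
#
#   scan = reader.read_scan('scan0001.h5')
#   if scan is not None:
#       print(scan.x_pixels, scan.y_pixels, scan.mca_arr.shape)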
|
{
"content_hash": "a75f0a3434e734e1b8a6e707bc25d848",
"timestamp": "",
"source": "github",
"line_count": 1140,
"max_line_length": 151,
"avg_line_length": 37.16140350877193,
"alnum_prop": 0.6533141346426211,
"repo_name": "MapsPy/MapsPy",
"id": "681fa7977d28f45f2f8d51a9dbbe608e227e38c6",
"size": "42364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "file_io/maps_hdf5.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "638544"
}
],
"symlink_target": ""
}
|
"""
Featurizers for molecular complexes.
"""
# flake8: noqa
from deepchem.feat.complex_featurizers.rdkit_grid_featurizer import RdkitGridFeaturizer
from deepchem.feat.complex_featurizers.complex_atomic_coordinates import NeighborListAtomicCoordinates
from deepchem.feat.complex_featurizers.complex_atomic_coordinates import NeighborListComplexAtomicCoordinates
from deepchem.feat.complex_featurizers.complex_atomic_coordinates import ComplexNeighborListFragmentAtomicCoordinates
from deepchem.feat.complex_featurizers.contact_fingerprints import ContactCircularFingerprint
from deepchem.feat.complex_featurizers.contact_fingerprints import ContactCircularVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import ChargeVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import SaltBridgeVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import CationPiVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import PiStackVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import HydrogenBondVoxelizer
from deepchem.feat.complex_featurizers.grid_featurizers import HydrogenBondCounter
from deepchem.feat.complex_featurizers.splif_fingerprints import SplifFingerprint
from deepchem.feat.complex_featurizers.splif_fingerprints import SplifVoxelizer
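# These re-exports flatten the package so callers can import directly from
# deepchem.feat.complex_featurizers, e.g. (a usage sketch with default
# parameters):
#   from deepchem.feat.complex_featurizers import ContactCircularFingerprint
#   featurizer = ContactCircularFingerprint()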
|
{
"content_hash": "c25c1ef46f0f1ea3b8d1ee17558bd8c4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 117,
"avg_line_length": 72.55555555555556,
"alnum_prop": 0.8874425727411945,
"repo_name": "lilleswing/deepchem",
"id": "fb9ea61f04492878434261f9b9ddddb2b5ca4ada",
"size": "1306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepchem/feat/complex_featurizers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16453"
},
{
"name": "Dockerfile",
"bytes": "794"
},
{
"name": "HTML",
"bytes": "20618"
},
{
"name": "Jupyter Notebook",
"bytes": "59756"
},
{
"name": "Python",
"bytes": "2597968"
},
{
"name": "Shell",
"bytes": "11491"
}
],
"symlink_target": ""
}
|
import pytest
import math
from cntestcase import cnapp
from cadnano.part.nucleicacidpart import NucleicAcidPart
def create3Helix(doc, direction, length):
part = doc.createNucleicAcidPart(is_lattice=True)
assert len(part.getidNums()) == 0
radius = part.radius()
origin_pt00 = (0, 0, 0)
origin_pt90 = (0, 2*radius, 0)
theta = math.radians(30)
origin_pt60 = (2*radius*math.cos(-theta), 2*radius*math.sin(-theta), 0)
part.createVirtualHelix(*origin_pt00, id_num=0, length=length)
part.createVirtualHelix(*origin_pt60, id_num=1, length=length)
part.createVirtualHelix(*origin_pt90, id_num=2, length=length)
return part
@pytest.mark.parametrize('direction', [(0, 0, 1), (0, 1, 0)])
def testVirtualHelixCreate(cnapp, direction):
doc = cnapp.document
part = create3Helix(doc, direction, 42)
id_nums = part.getidNums()
assert len(id_nums) == 3
def testVirtualHelixResize(cnapp):
doc = cnapp.document
start_length = 42
end_length = 84
part = create3Helix(doc, (0, 0, 1), start_length)
assert part.getVirtualHelixProperties(1, 'length') == start_length
part.setVirtualHelixSize(1, end_length)
assert part.getVirtualHelixProperties(1, 'length') == end_length
part.setVirtualHelixSize(1, start_length)
assert part.getVirtualHelixProperties(1, 'length') == start_length
def testRemove(cnapp):
doc = cnapp.document
start_length = 42
part = create3Helix(doc, (0, 0, 1), start_length)
assert len(doc.children()) == 1
us = part.undoStack()
part.remove()
assert len(doc.children()) == 0
us.undo()
assert len(doc.children()) == 1
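# These tests rely on the `cnapp` fixture from cntestcase; a typical
# invocation from the repository root (a usage sketch):
#   pytest cadnano/tests/nucleicacidparttest.py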
|
{
"content_hash": "5699107f4a7dc9217d6700747bf3d825",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 75,
"avg_line_length": 31.76923076923077,
"alnum_prop": 0.6828087167070218,
"repo_name": "scholer/cadnano2.5",
"id": "c22c3a717e344b4ef2d007c7443a56572e93cef9",
"size": "1676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cadnano/tests/nucleicacidparttest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2617"
},
{
"name": "Python",
"bytes": "1624263"
},
{
"name": "QMake",
"bytes": "3719"
}
],
"symlink_target": ""
}
|
import django_filters
from tour.models import Tour
class TourFilter(django_filters.FilterSet):
class Meta:
model = Tour
fields = ('name',)
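# Usage sketch (hypothetical data): apply the filter to a queryset and read
# the filtered result from `.qs`.
#
#   qs = Tour.objects.all()
#   filtered = TourFilter({'name': 'onboarding'}, queryset=qs).qs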
|
{
"content_hash": "8d42d7bc15aff07e7b70c95556b41e00",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 16.3,
"alnum_prop": 0.6625766871165644,
"repo_name": "micahhausler/django-tour",
"id": "37662e04a7e5bc65e68fa83ec850906370221572",
"size": "163",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tour/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4023"
},
{
"name": "HTML",
"bytes": "854"
},
{
"name": "JavaScript",
"bytes": "15743"
},
{
"name": "Python",
"bytes": "77517"
}
],
"symlink_target": ""
}
|
"""Implementations of learning abstract base class managers."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class LearningProfile:
"""The ``LearningProfile`` describes the interoperability among learning services."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def supports_visible_federation(self):
"""Tests if federation is visible.
:return: ``true`` if visible federation is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_lookup(self):
"""Tests if an objective lookup service is supported.
An objective lookup service defines methods to access
objectives.
:return: true if objective lookup is supported, false otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_query(self):
"""Tests if an objective query service is supported.
:return: ``true`` if objective query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_search(self):
"""Tests if an objective search service is supported.
:return: ``true`` if objective search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_admin(self):
"""Tests if an objective administrative service is supported.
:return: ``true`` if objective admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_notification(self):
"""Tests if objective notification is supported.
Messages may be sent when objectives are created, modified, or
deleted.
:return: ``true`` if objective notification is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_hierarchy(self):
"""Tests if an objective hierarchy traversal is supported.
:return: ``true`` if an objective hierarchy traversal is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_hierarchy_design(self):
"""Tests if an objective hierarchy design is supported.
:return: ``true`` if an objective hierarchy design is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_sequencing(self):
"""Tests if an objective sequencing design is supported.
:return: ``true`` if objective sequencing is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_objective_bank(self):
"""Tests if an objective to objective bank lookup session is available.
:return: ``true`` if objective objective bank lookup session is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_objective_bank_assignment(self):
"""Tests if an objective to objective bank assignment session is available.
:return: ``true`` if objective objective bank assignment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_smart_objective_bank(self):
"""Tests if an objective smart objective bank cataloging service is supported.
:return: ``true`` if objective smart objective banks are supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_requisite(self):
"""Tests if an objective requisite service is supported.
:return: ``true`` if objective requisite service is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_requisite_assignment(self):
"""Tests if an objective requisite assignment service is supported.
:return: ``true`` if objective requisite assignment service is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_activity_lookup(self):
"""Tests if an activity lookup service is supported.
:return: ``true`` if activity lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_activity_query(self):
"""Tests if an activity query service is supported.
:return: ``true`` if activity query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_activity_search(self):
"""Tests if an activity search service is supported.
:return: ``true`` if activity search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_activity_admin(self):
"""Tests if an activity administrative service is supported.
:return: ``true`` if activity admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_activity_notification(self):
"""Tests if activity notification is supported.
Messages may be sent when activities are created, modified, or
deleted.
:return: ``true`` if activity notification is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_activity_objective_bank(self):
"""Tests if an activity to objective bank lookup session is available.
:return: ``true`` if activity objective bank lookup session is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_activity_objective_bank_assignment(self):
"""Tests if an activity to objective bank assignment session is available.
:return: ``true`` if activity objective bank assignment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_activity_smart_objective_bank(self):
"""Tests if an activity smart objective bank cataloging service is supported.
:return: ``true`` if activity smart objective banks are supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_proficiency_lookup(self):
"""Tests if looking up proficiencies is supported.
:return: ``true`` if proficiency lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_proficiency_query(self):
"""Tests if querying proficiencies is supported.
:return: ``true`` if proficiency query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_proficiency_search(self):
"""Tests if searching proficiencies is supported.
:return: ``true`` if proficiency search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_proficiency_admin(self):
"""Tests if proficiencyadministrative service is supported.
:return: ``true`` if proficiency administration is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_proficiency_notification(self):
"""Tests if a proficiencynotification service is supported.
:return: ``true`` if proficiency notification is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_proficiency_objective_bank(self):
"""Tests if a proficiency objective bank mapping lookup service is supported.
:return: ``true`` if a proficiency objective bank lookup service is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_proficiency_objective_bank_assignment(self):
"""Tests if a proficiency objective bank mapping service is supported.
:return: ``true`` if proficiency to objective bank mapping service is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_proficiency_smart_objective_bank(self):
"""Tests if a proficiency smart objective bank cataloging service is supported.
:return: ``true`` if proficiency smart objective banks are supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_my_learning_path(self):
"""Tests if a learning path service is supported for the authenticated agent.
:return: ``true`` if learning path is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_learning_path(self):
"""Tests if a learning path service is supported.
:return: ``true`` if learning path is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_bank_lookup(self):
"""Tests if an objective bank lookup service is supported.
:return: ``true`` if objective bank lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_bank_query(self):
"""Tests if an objective bank query service is supported.
:return: ``true`` if objective bank query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_bank_search(self):
"""Tests if an objective bank search service is supported.
:return: ``true`` if objective bank search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_bank_admin(self):
"""Tests if an objective bank administrative service is supported.
:return: ``true`` if objective bank admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_bank_notification(self):
"""Tests if objective bank notification is supported.
Messages may be sent when objective banks are created, modified,
or deleted.
:return: ``true`` if objective bank notification is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_bank_hierarchy(self):
"""Tests if an objective bank hierarchy traversal is supported.
:return: ``true`` if an objective bank hierarchy traversal is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_objective_bank_hierarchy_design(self):
"""Tests if objective bank hierarchy design is supported.
:return: ``true`` if an objective bank hierarchy design is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_learning_batch(self):
"""Tests if a learning batch service is supported.
:return: ``true`` if a learning batch service is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_objective_record_types(self):
"""Gets the supported ``Objective`` record types.
:return: a list containing the supported ``Objective`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
objective_record_types = property(fget=get_objective_record_types)
@abc.abstractmethod
def supports_objective_record_type(self, objective_record_type):
"""Tests if the given ``Objective`` record type is supported.
:param objective_record_type: a ``Type`` indicating an ``Objective`` record type
:type objective_record_type: ``osid.type.Type``
:return: ``true`` if the given Type is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``objective_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_objective_search_record_types(self):
"""Gets the supported ``Objective`` search record types.
:return: a list containing the supported ``Objective`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
objective_search_record_types = property(fget=get_objective_search_record_types)
@abc.abstractmethod
def supports_objective_search_record_type(self, objective_search_record_type):
"""Tests if the given ``Objective`` search record type is supported.
:param objective_search_record_type: a ``Type`` indicating an ``Objective`` search record type
:type objective_search_record_type: ``osid.type.Type``
:return: ``true`` if the given Type is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``objective_search_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_activity_record_types(self):
"""Gets the supported ``Activity`` record types.
:return: a list containing the supported ``Activity`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
activity_record_types = property(fget=get_activity_record_types)
@abc.abstractmethod
def supports_activity_record_type(self, activity_record_type):
"""Tests if the given ``Activity`` record type is supported.
:param activity_record_type: a ``Type`` indicating a ``Activity`` record type
:type activity_record_type: ``osid.type.Type``
:return: ``true`` if the given Type is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``activity_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_activity_search_record_types(self):
"""Gets the supported ``Activity`` search record types.
:return: a list containing the supported ``Activity`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
activity_search_record_types = property(fget=get_activity_search_record_types)
@abc.abstractmethod
def supports_activity_search_record_type(self, activity_search_record_type):
"""Tests if the given ``Activity`` search record type is supported.
:param activity_search_record_type: a ``Type`` indicating a ``Activity`` search record type
:type activity_search_record_type: ``osid.type.Type``
:return: ``true`` if the given Type is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``activity_search_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_proficiency_record_types(self):
"""Gets the supported ``Proficiency`` record types.
:return: a list containing the supported ``Proficiency`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
proficiency_record_types = property(fget=get_proficiency_record_types)
@abc.abstractmethod
def supports_proficiency_record_type(self, proficiency_record_type):
"""Tests if the given ``Proficiency`` record type is supported.
:param proficiency_record_type: a ``Type`` indicating a ``Proficiency`` record type
:type proficiency_record_type: ``osid.type.Type``
:return: ``true`` if the given record type is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``proficiency_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_proficiency_search_record_types(self):
"""Gets the supported ``Proficiency`` search types.
:return: a list containing the supported ``Proficiency`` search types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
proficiency_search_record_types = property(fget=get_proficiency_search_record_types)
@abc.abstractmethod
def supports_proficiency_search_record_type(self, proficiency_search_record_type):
"""Tests if the given ``Proficiency`` search type is supported.
:param proficiency_search_record_type: a ``Type`` indicating a ``Proficiency`` search type
:type proficiency_search_record_type: ``osid.type.Type``
:return: ``true`` if the given ``Type`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``proficiency_search_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_objective_bank_record_types(self):
"""Gets the supported ``ObjectiveBank`` record types.
:return: a list containing the supported ``ObjectiveBank`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
objective_bank_record_types = property(fget=get_objective_bank_record_types)
@abc.abstractmethod
def supports_objective_bank_record_type(self, objective_bank_record_type):
"""Tests if the given ``ObjectiveBank`` record type is supported.
:param objective_bank_record_type: a ``Type`` indicating an ``ObjectiveBank`` type
:type objective_bank_record_type: ``osid.type.Type``
:return: ``true`` if the given objective bank record ``Type`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``objective_bank_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_objective_bank_search_record_types(self):
"""Gets the supported objective bank search record types.
:return: a list containing the supported ``ObjectiveBank`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
objective_bank_search_record_types = property(fget=get_objective_bank_search_record_types)
@abc.abstractmethod
def supports_objective_bank_search_record_type(self, objective_bank_search_record_type):
"""Tests if the given objective bank search record type is supported.
:param objective_bank_search_record_type: a ``Type`` indicating an ``ObjectiveBank`` search record type
:type objective_bank_search_record_type: ``osid.type.Type``
:return: ``true`` if the given search record ``Type`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``objective_bank_search_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
class LearningManager:
"""The learning manager provides access to learning sessions and provides interoperability tests for various aspects of this service.
The sessions included in this manager are:
* ``ObjectiveLookupSession:`` a session to look up objectives
* ``ObjectiveQuerySession:`` a session to query objectives
* ``ObjectiveSearchSession:`` a session to search objectives
* ``ObjectiveAdminSession:`` a session to create, modify and delete objectives
* ``ObjectiveNotificationSession:`` a session to receive messages pertaining to objective changes
* ``ObjectiveHierarchySession:`` a session to traverse objective hierarchies
* ``ObjectiveHierarchyDesignSession:`` a session to design objective hierarchies
* ``ObjectiveSequencingSession:`` a session to sequence objectives
* ``ObjectiveObjectiveBankSession:`` a session for retrieving objective and objective bank mappings
* ``ObjectiveObjectiveBankAssignmentSession:`` a session for managing objective and objective bank mappings
* ``ObjectiveSmartObjectiveBankSession:`` a session for managing dynamic objective banks
* ``ObjectiveRequisiteSession:`` a session to examine objective requisites
* ``ObjectiveRequisiteAssignmentSession:`` a session to manage objective requisites
* ``ActivityLookupSession:`` a session to look up activities
* ``ActivityQuerySession:`` a session to query activities
* ``ActivitySearchSession:`` a session to search activities
* ``ActivityAdminSession:`` a session to create, modify and delete activities
* ``ActivityNotificationSession:`` a session to receive messages pertaining to activity changes
* ``ActivityObjectiveBankSession:`` a session for retrieving activity and objective bank mappings
* ``ActivityObjectiveBankAssignmentSession:`` a session for managing activity and objective bank mappings
* ``ActivitySmartObjectiveBankSession:`` a session for managing dynamic objective banks of activities
* ``ProficiencyLookupSession:`` a session to retrieve proficiencies
* ``ProficiencyQuerySession:`` a session to query proficiencies
* ``ProficiencySearchSession:`` a session to search for proficiencies
* ``ProficiencyAdminSession:`` a session to create, update, and delete proficiencies
* ``ProficiencyNotificationSession:`` a session to receive notifications pertaining to proficiency changes
* ``ProficiencyObjectiveBankSession:`` a session to look up proficiency to objective bank mappings
* ``ProficiencyObjectiveBankAssignmentSession:`` a session to manage proficiency to objective bank mappings
* ``ProficiencySmartObjectiveBankSession:`` a session to manage smart objective banks of proficiencies
* ``MyLearningPathSession:`` a session to examine learning paths of objectives
* ``LearningPathSession:`` a session to examine learning paths of objectives
* ``ObjectiveBankLookupSession:`` a session to look up objective banks
* ``ObjectiveBankQuerySession:`` a session to query objective banks
* ``ObjectiveBankSearchSession:`` a session to search objective banks
* ``ObjectiveBankAdminSession:`` a session to create, modify and delete objective banks
* ``ObjectiveBankNotificationSession:`` a session to receive messages pertaining to objective bank changes
* ``ObjectiveBankHierarchySession:`` a session to traverse the objective bank hierarchy
* ``ObjectiveBankHierarchyDesignSession:`` a session to manage the objective bank hierarchy
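A hedged usage sketch (how ``mgr`` is obtained from an OSID runtime is
deployment-specific and assumed here)::

    if mgr.supports_objective_lookup():
        session = mgr.get_objective_lookup_session()
        # the read-only property form is equivalent:
        session = mgr.objective_lookup_session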
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_objective_lookup_session(self):
"""Gets the ``OsidSession`` associated with the objective lookup service.
:return: an ``ObjectiveLookupSession``
:rtype: ``osid.learning.ObjectiveLookupSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_lookup()`` is ``true``.*
"""
return # osid.learning.ObjectiveLookupSession
objective_lookup_session = property(fget=get_objective_lookup_session)
@abc.abstractmethod
def get_objective_lookup_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the objective lookup service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveLookupSession``
:rtype: ``osid.learning.ObjectiveLookupSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_lookup()`` and
``supports_visible_federation()`` are ``true``.*
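An illustrative federation-scoped call (``mgr`` and ``bank_id`` are
hypothetical; the guards mirror the compliance note above)::

    if mgr.supports_objective_lookup() and mgr.supports_visible_federation():
        session = mgr.get_objective_lookup_session_for_objective_bank(bank_id)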
"""
return # osid.learning.ObjectiveLookupSession
@abc.abstractmethod
def get_objective_query_session(self):
"""Gets the ``OsidSession`` associated with the objective query service.
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuerySession
objective_query_session = property(fget=get_objective_query_session)
@abc.abstractmethod
def get_objective_query_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the objective query service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveQuerySession
@abc.abstractmethod
def get_objective_search_session(self):
"""Gets the ``OsidSession`` associated with the objective search service.
:return: an ``ObjectiveSearchSession``
:rtype: ``osid.learning.ObjectiveSearchSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_search()`` is ``true``.*
"""
return # osid.learning.ObjectiveSearchSession
objective_search_session = property(fget=get_objective_search_session)
@abc.abstractmethod
def get_objective_search_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the objective search service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveSearchSession``
:rtype: ``osid.learning.ObjectiveSearchSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveSearchSession
@abc.abstractmethod
def get_objective_admin_session(self):
"""Gets the ``OsidSession`` associated with the objective administration service.
:return: an ``ObjectiveAdminSession``
:rtype: ``osid.learning.ObjectiveAdminSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_admin()`` is ``true``.*
"""
return # osid.learning.ObjectiveAdminSession
objective_admin_session = property(fget=get_objective_admin_session)
@abc.abstractmethod
def get_objective_admin_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the objective admin service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveAdminSession``
:rtype: ``osid.learning.ObjectiveAdminSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveAdminSession
@abc.abstractmethod
def get_objective_notification_session(self, objective_receiver):
"""Gets the notification session for notifications pertaining to objective changes.
:param objective_receiver: the objective receiver
:type objective_receiver: ``osid.learning.ObjectiveReceiver``
:return: an ``ObjectiveNotificationSession``
:rtype: ``osid.learning.ObjectiveNotificationSession``
:raise: ``NullArgument`` -- ``objective_receiver`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_notification()`` is ``true``.*
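A sketch of wiring a receiver (the callback name and signature shown are
assumptions based on the OSID receiver pattern, not guaranteed by this
method)::

    class LoggingReceiver:  # would implement osid.learning.ObjectiveReceiver
        def changed_objectives(self, notification_id, objective_ids):
            print('objectives changed:', notification_id)  # react to updates

    session = mgr.get_objective_notification_session(LoggingReceiver())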
"""
return # osid.learning.ObjectiveNotificationSession
@abc.abstractmethod
def get_objective_notification_session_for_objective_bank(self, objective_receiver, objective_bank_id):
"""Gets the ``OsidSession`` associated with the objective notification service for the given objective bank.
:param objective_receiver: the objective receiver
:type objective_receiver: ``osid.learning.ObjectiveReceiver``
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveNotificationSession``
:rtype: ``osid.learning.ObjectiveNotificationSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_receiver`` or ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveNotificationSession
@abc.abstractmethod
def get_objective_hierarchy_session(self):
"""Gets the session for traversing objective hierarchies.
:return: an ``ObjectiveHierarchySession``
:rtype: ``osid.learning.ObjectiveHierarchySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_hierarchy()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy()`` is ``true``.*
"""
return # osid.learning.ObjectiveHierarchySession
objective_hierarchy_session = property(fget=get_objective_hierarchy_session)
@abc.abstractmethod
def get_objective_hierarchy_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the objective hierarchy traversal service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveHierarchySession``
:rtype: ``osid.learning.ObjectiveHierarchySession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_hierarchy()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveHierarchySession
@abc.abstractmethod
def get_objective_hierarchy_design_session(self):
"""Gets the session for designing objective hierarchies.
:return: an ``ObjectiveHierarchyDesignSession``
:rtype: ``osid.learning.ObjectiveHierarchyDesignSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_hierarchy_design()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy_design()`` is ``true``.*
"""
return # osid.learning.ObjectiveHierarchyDesignSession
objective_hierarchy_design_session = property(fget=get_objective_hierarchy_design_session)
@abc.abstractmethod
def get_objective_hierarchy_design_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the objective hierarchy design service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveHierarchyDesignSession``
:rtype: ``osid.learning.ObjectiveHierarchyDesignSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_hierarchy_design()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy_design()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveHierarchyDesignSession
@abc.abstractmethod
def get_objective_sequencing_session(self):
"""Gets the session for sequencing objectives.
:return: an ``ObjectiveSequencingSession``
:rtype: ``osid.learning.ObjectiveSequencingSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_sequencing()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_sequencing()`` is ``true``.*
"""
return # osid.learning.ObjectiveSequencingSession
objective_sequencing_session = property(fget=get_objective_sequencing_session)
@abc.abstractmethod
def get_objective_sequencing_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the objective sequencing service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveSequencingSession``
:rtype: ``osid.learning.ObjectiveSequencingSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_sequencing()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_sequencing()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveSequencingSession
@abc.abstractmethod
def get_objective_objective_bank_session(self):
"""Gets the session for retrieving objective to objective bank mappings.
:return: an ``ObjectiveObjectiveBankSession``
:rtype: ``osid.learning.ObjectiveObjectiveBankSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_objective_bank()`` is ``true``.*
"""
return # osid.learning.ObjectiveObjectiveBankSession
objective_objective_bank_session = property(fget=get_objective_objective_bank_session)
@abc.abstractmethod
def get_objective_objective_bank_assignment_session(self):
"""Gets the session for assigning objective to objective bank mappings.
:return: an ``ObjectiveObjectiveBankAssignmentSession``
:rtype: ``osid.learning.ObjectiveObjectiveBankAssignmentSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_objective_bank_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_objective_bank_assignment()`` is ``true``.*
"""
return # osid.learning.ObjectiveObjectiveBankAssignmentSession
objective_objective_bank_assignment_session = property(fget=get_objective_objective_bank_assignment_session)
@abc.abstractmethod
def get_objective_smart_objective_bank_session(self, objective_bank_id):
"""Gets the ``OsidSession`` to manage dynamic objective banks of objectives.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveSmartObjectiveBankSession``
:rtype: ``osid.learning.ObjectiveSmartObjectiveBankSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_smart_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_smart_objective_bank()`` is ``true``.*
"""
return # osid.learning.ObjectiveSmartObjectiveBankSession
@abc.abstractmethod
def get_objective_requisite_session(self):
"""Gets the session for examining objective requisites.
:return: an ``ObjectiveRequisiteSession``
:rtype: ``osid.learning.ObjectiveRequisiteSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_requisite()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_requisite()`` is ``true``.*
"""
return # osid.learning.ObjectiveRequisiteSession
objective_requisite_session = property(fget=get_objective_requisite_session)
@abc.abstractmethod
def get_objective_requisite_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the objective sequencing service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveRequisiteSession``
:rtype: ``osid.learning.ObjectiveRequisiteSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_requisite()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_requisite()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveRequisiteSession
@abc.abstractmethod
def get_objective_requisite_assignment_session(self):
"""Gets the session for managing objective requisites.
:return: an ``ObjectiveRequisiteAssignmentSession``
:rtype: ``osid.learning.ObjectiveRequisiteAssignmentSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_requisite_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_requisite_assignment()`` is ``true``.*
"""
return # osid.learning.ObjectiveRequisiteAssignmentSession
objective_requisite_assignment_session = property(fget=get_objective_requisite_assignment_session)
@abc.abstractmethod
def get_objective_requisite_assignment_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the objective sequencing service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ObjectiveRequisiteAssignmentSession``
:rtype: ``osid.learning.ObjectiveRequisiteAssignmentSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_requisite_assignment()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_requisite_assignment()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveRequisiteAssignmentSession
@abc.abstractmethod
def get_activity_lookup_session(self):
"""Gets the ``OsidSession`` associated with the activity lookup service.
:return: an ``ActivityLookupSession``
:rtype: ``osid.learning.ActivityLookupSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_lookup()`` is ``true``.*
"""
return # osid.learning.ActivityLookupSession
activity_lookup_session = property(fget=get_activity_lookup_session)
@abc.abstractmethod
def get_activity_lookup_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the activity lookup service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ActivityLookupSession``
:rtype: ``osid.learning.ActivityLookupSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ActivityLookupSession
@abc.abstractmethod
def get_activity_query_session(self):
"""Gets the ``OsidSession`` associated with the activity query service.
:return: an ``ActivityQuerySession``
:rtype: ``osid.learning.ActivityQuerySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_query()`` is ``true``.*
"""
return # osid.learning.ActivityQuerySession
activity_query_session = property(fget=get_activity_query_session)
@abc.abstractmethod
def get_activity_query_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the activity query service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ActivityQuerySession``
:rtype: ``osid.learning.ActivityQuerySession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ActivityQuerySession
@abc.abstractmethod
def get_activity_search_session(self):
"""Gets the ``OsidSession`` associated with the activity search service.
:return: an ``ActivitySearchSession``
:rtype: ``osid.learning.ActivitySearchSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_search()`` is ``true``.*
"""
return # osid.learning.ActivitySearchSession
activity_search_session = property(fget=get_activity_search_session)
@abc.abstractmethod
def get_activity_search_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the activity search service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ActivitySearchSession``
:rtype: ``osid.learning.ActivitySearchSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ActivitySearchSession
@abc.abstractmethod
def get_activity_admin_session(self):
"""Gets the ``OsidSession`` associated with the activity administration service.
:return: an ``ActivityAdminSession``
:rtype: ``osid.learning.ActivityAdminSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` is ``true``.*
"""
return # osid.learning.ActivityAdminSession
activity_admin_session = property(fget=get_activity_admin_session)
@abc.abstractmethod
def get_activity_admin_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the activity admin service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ActivityAdminSession``
:rtype: ``osid.learning.ActivityAdminSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ActivityAdminSession
@abc.abstractmethod
def get_activity_notification_session(self, activity_receiver):
"""Gets the notification session for notifications pertaining to activity changes.
:param activity_receiver: the activity receiver
:type activity_receiver: ``osid.learning.ActivityReceiver``
:return: an ``ActivityNotificationSession``
:rtype: ``osid.learning.ActivityNotificationSession``
:raise: ``NullArgument`` -- ``activity_receiver`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_notification()`` is ``true``.*
"""
return # osid.learning.ActivityNotificationSession
@abc.abstractmethod
def get_activity_notification_session_for_objective_bank(self, activity_receiver, objective_bank_id):
"""Gets the ``OsidSession`` associated with the activity notification service for the given objective bank.
:param activity_receiver: the activity receiver
:type activity_receiver: ``osid.learning.ActivityReceiver``
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: an ``ActivityNotificationSession``
:rtype: ``osid.learning.ActivityNotificationSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``activity_receiver`` or ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ActivityNotificationSession
@abc.abstractmethod
def get_activity_objective_bank_session(self):
"""Gets the session for retrieving activity to objective bank mappings.
:return: an ``ActivityObjectiveBankSession``
:rtype: ``osid.learning.ActivityObjectiveBankSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_objective_bank()`` is ``true``.*
"""
return # osid.learning.ActivityObjectiveBankSession
activity_objective_bank_session = property(fget=get_activity_objective_bank_session)
@abc.abstractmethod
def get_activity_objective_bank_assignment_session(self):
"""Gets the session for assigning activity to objective bank mappings.
:return: an ``ActivityObjectiveBankAssignmentSession``
:rtype: ``osid.learning.ActivityObjectiveBankAssignmentSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_objective_bank_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_objective_bank_assignment()`` is ``true``.*
"""
return # osid.learning.ActivityObjectiveBankAssignmentSession
activity_objective_bank_assignment_session = property(fget=get_activity_objective_bank_assignment_session)
@abc.abstractmethod
def get_activity_smart_objective_bank_session(self, objective_bank_id):
"""Gets the ``OsidSession`` to manage dynamic objective banks of activities.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:return: an ``ActivitySmartObjectiveBankSession``
:rtype: ``osid.learning.ActivitySmartObjectiveBankSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_smart_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_smart_objective_bank()`` is ``true``.*
"""
return # osid.learning.ActivitySmartObjectiveBankSession
@abc.abstractmethod
def get_proficiency_lookup_session(self):
"""Gets the ``OsidSession`` associated with the proficiency lookup service.
:return: a ``ProficiencyLookupSession``
:rtype: ``osid.learning.ProficiencyLookupSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_lookup()`` is ``true``.*
"""
return # osid.learning.ProficiencyLookupSession
proficiency_lookup_session = property(fget=get_proficiency_lookup_session)
@abc.abstractmethod
def get_proficiency_lookup_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the proficiency lookup service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: a ``ProficiencyLookupSession``
:rtype: ``osid.learning.ProficiencyLookupSession``
:raise: ``NotFound`` -- no ``ObjectiveBank`` found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ProficiencyLookupSession
@abc.abstractmethod
def get_proficiency_query_session(self):
"""Gets the ``OsidSession`` associated with the proficiency query service.
:return: a ``ProficiencyQuerySession``
:rtype: ``osid.learning.ProficiencyQuerySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_query()`` is ``true``.*
"""
return # osid.learning.ProficiencyQuerySession
proficiency_query_session = property(fget=get_proficiency_query_session)
@abc.abstractmethod
def get_proficiency_query_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the proficiency query service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:return: a ``ProficiencyQuerySession``
:rtype: ``osid.learning.ProficiencyQuerySession``
:raise: ``NotFound`` -- no ``ObjectiveBank`` found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ProficiencyQuerySession
@abc.abstractmethod
def get_proficiency_search_session(self):
"""Gets the ``OsidSession`` associated with the proficiency search service.
:return: a ``ProficiencySearchSession``
:rtype: ``osid.learning.ProficiencySearchSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_search()`` is ``true``.*
"""
return # osid.learning.ProficiencySearchSession
proficiency_search_session = property(fget=get_proficiency_search_session)
@abc.abstractmethod
def get_proficiency_search_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the proficiency search service for the given objective bank.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:return: a ``ProficiencySearchSession``
:rtype: ``osid.learning.ProficiencySearchSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ProficiencySearchSession
@abc.abstractmethod
def get_proficiency_admin_session(self):
"""Gets the ``OsidSession`` associated with the proficiency administration service.
:return: a ``ProficiencyAdminSession``
:rtype: ``osid.learning.ProficiencyAdminSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_admin()`` is ``true``.*
"""
return # osid.learning.ProficiencyAdminSession
proficiency_admin_session = property(fget=get_proficiency_admin_session)
@abc.abstractmethod
def get_proficiency_admin_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the proficiency administration service for the given objective bank.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:return: a ``ProficiencyAdminSession``
:rtype: ``osid.learning.ProficiencyAdminSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ProficiencyAdminSession
@abc.abstractmethod
def get_proficiency_notification_session(self, proficiency_receiver):
"""Gets the ``OsidSession`` associated with the proficiency notification service.
:param proficiency_receiver: the notification callback
:type proficiency_receiver: ``osid.learning.ProficiencyReceiver``
:return: a ``ProficiencyNotificationSession``
:rtype: ``osid.learning.ProficiencyNotificationSession``
:raise: ``NullArgument`` -- ``proficiency_receiver`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_notification()`` is ``true``.*
"""
return # osid.learning.ProficiencyNotificationSession
@abc.abstractmethod
def get_proficiency_notification_session_for_objective_bank(self, proficiency_receiver, objective_bank_id):
"""Gets the ``OsidSession`` associated with the proficiency notification service for the given objective bank.
:param proficiency_receiver: the notification callback
:type proficiency_receiver: ``osid.learning.ProficiencyReceiver``
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:return: a ``ProficiencyNotificationSession``
:rtype: ``osid.learning.ProficiencyNotificationSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``proficiency_receiver`` or ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ProficiencyNotificationSession
@abc.abstractmethod
def get_proficiency_objective_bank_session(self):
"""Gets the ``OsidSession`` to lookup proficiency/objective bank mappings.
:return: a ``ProficiencyObjectiveBankSession``
:rtype: ``osid.learning.ProficiencyObjectiveBankSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_objective_bank()`` is ``true``.*
"""
return # osid.learning.ProficiencyObjectiveBankSession
proficiency_objective_bank_session = property(fget=get_proficiency_objective_bank_session)
@abc.abstractmethod
def get_proficiency_objective_bank_assignment_session(self):
"""Gets the ``OsidSession`` associated with assigning proficiencys to objective banks.
:return: a ``ProficiencyObjectiveBankAssignmentSession``
:rtype: ``osid.learning.ProficiencyObjectiveBankAssignmentSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_objective_bank_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_objective_bank_assignment()`` is
``true``.*
"""
return # osid.learning.ProficiencyObjectiveBankAssignmentSession
proficiency_objective_bank_assignment_session = property(fget=get_proficiency_objective_bank_assignment_session)
@abc.abstractmethod
def get_proficiency_smart_objective_bank_session(self, objective_bank_id):
"""Gets the ``OsidSession`` to manage dynamic objective banks of objectives.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:return: a ``ProficiencySmartObjectiveBankSession``
:rtype: ``osid.learning.ProficiencySmartObjectiveBankSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_smart_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_smart_objective_bank()`` is ``true``.*
"""
return # osid.learning.ProficiencySmartObjectiveBankSession
@abc.abstractmethod
def get_my_learning_path_session(self):
"""Gets the ``OsidSession`` associated with the my learning path service.
:return: a ``MyLearningPathSession``
:rtype: ``osid.learning.MyLearningPathSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_my_learning_path()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_my_learning_path()`` is ``true``.*
"""
return # osid.learning.MyLearningPathSession
my_learning_path_session = property(fget=get_my_learning_path_session)
@abc.abstractmethod
def get_my_learning_path_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the my learning path service for the given objective bank.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:return: a ``MyLearningPathSession``
:rtype: ``osid.learning.MyLearningPathSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_my_learning_path()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_my_learning_path()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.MyLearningPathSession
@abc.abstractmethod
def get_learning_path_session(self):
"""Gets the ``OsidSession`` associated with the learning path service.
:return: a ``LearningPathSession``
:rtype: ``osid.learning.LearningPathSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_learning_path()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_learning_path()`` is ``true``.*
"""
return # osid.learning.LearningPathSession
learning_path_session = property(fget=get_learning_path_session)
@abc.abstractmethod
def get_learning_path_session_for_objective_bank(self, objective_bank_id):
"""Gets the ``OsidSession`` associated with the learning path service for the given objective bank.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:return: a ``LearningPathSession``
:rtype: ``osid.learning.LearningPathSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_learning_path()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_learning_path()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.LearningPathSession
@abc.abstractmethod
def get_objective_bank_lookup_session(self):
"""Gets the OsidSession associated with the objective bank lookup service.
:return: an ``ObjectiveBankLookupSession``
:rtype: ``osid.learning.ObjectiveBankLookupSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_lookup() is false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_lookup()`` is true.*
"""
return # osid.learning.ObjectiveBankLookupSession
objective_bank_lookup_session = property(fget=get_objective_bank_lookup_session)
@abc.abstractmethod
def get_objective_bank_query_session(self):
"""Gets the OsidSession associated with the objective bank query service.
:return: an ``ObjectiveBankQuerySession``
:rtype: ``osid.learning.ObjectiveBankQuerySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_query() is false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_query()`` is true.*
"""
return # osid.learning.ObjectiveBankQuerySession
objective_bank_query_session = property(fget=get_objective_bank_query_session)
@abc.abstractmethod
def get_objective_bank_search_session(self):
"""Gets the OsidSession associated with the objective bank search service.
:return: an ``ObjectiveBankSearchSession``
:rtype: ``osid.learning.ObjectiveBankSearchSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_search() is false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_search()`` is true.*
"""
return # osid.learning.ObjectiveBankSearchSession
objective_bank_search_session = property(fget=get_objective_bank_search_session)
@abc.abstractmethod
def get_objective_bank_admin_session(self):
"""Gets the OsidSession associated with the objective bank administration service.
:return: an ``ObjectiveBankAdminSession``
:rtype: ``osid.learning.ObjectiveBankAdminSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_admin() is false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_admin()`` is true.*
"""
return # osid.learning.ObjectiveBankAdminSession
objective_bank_admin_session = property(fget=get_objective_bank_admin_session)
@abc.abstractmethod
def get_objective_bank_notification_session(self, objective_bank_receiver):
"""Gets the notification session for notifications pertaining to objective bank service changes.
:param objective_bank_receiver: the objective bank receiver
:type objective_bank_receiver: ``osid.learning.ObjectiveBankReceiver``
:return: an ``ObjectiveBankNotificationSession``
:rtype: ``osid.learning.ObjectiveBankNotificationSession``
:raise: ``NullArgument`` -- ``objective_bank_receiver`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_notification()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankNotificationSession
@abc.abstractmethod
def get_objective_bank_hierarchy_session(self):
"""Gets the session traversing objective bank hierarchies.
:return: an ``ObjectiveBankHierarchySession``
:rtype: ``osid.learning.ObjectiveBankHierarchySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_hierarchy() is false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_hierarchy()`` is true.*
"""
return # osid.learning.ObjectiveBankHierarchySession
objective_bank_hierarchy_session = property(fget=get_objective_bank_hierarchy_session)
@abc.abstractmethod
def get_objective_bank_hierarchy_design_session(self):
"""Gets the session designing objective bank hierarchies.
:return: an ``ObjectiveBankHierarchyDesignSession``
:rtype: ``osid.learning.ObjectiveBankHierarchyDesignSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_hierarchy_design() is false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_hierarchy_design()`` is true.*
"""
return # osid.learning.ObjectiveBankHierarchyDesignSession
objective_bank_hierarchy_design_session = property(fget=get_objective_bank_hierarchy_design_session)
@abc.abstractmethod
def get_learning_batch_manager(self):
"""Gets a ``LearningBatchManager``.
:return: a ``LearningBatchManager``
:rtype: ``osid.learning.batch.LearningBatchManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_learning_batch()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_learning_batch()`` is ``true``.*
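An illustrative guard before requesting the batch manager (``mgr`` is a
hypothetical concrete ``LearningManager``)::

    if mgr.supports_learning_batch():
        batch_mgr = mgr.get_learning_batch_manager()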
"""
return # osid.learning.batch.LearningBatchManager
learning_batch_manager = property(fget=get_learning_batch_manager)
class LearningProxyManager:
"""The learning manager provides access to learning sessions and provides interoperability tests for various aspects of this service.
Methods in this manager support the passing of a ``Proxy``. The
sessions included in this manager are:
* ``ObjectiveLookupSession:`` a session to look up objectives
* ``ObjectiveQuerySession:`` a session to query objectives
* ``ObjectiveSearchSession:`` a session to search objectives
* ``ObjectiveAdminSession:`` a session to create, modify and delete objectives
* ``ObjectiveNotificationSession:`` a session to receive messages pertaining to objective changes
* ``ObjectiveHierarchySession:`` a session to traverse objective hierarchies
* ``ObjectiveHierarchyDesignSession:`` a session to design objective hierarchies
* ``ObjectiveSequencingSession:`` a session to sequence objectives
* ``ObjectiveObjectiveBankSession:`` a session for retrieving objective and objective bank mappings
* ``ObjectiveObjectiveBankAssignmentSession:`` a session for managing objective and objective bank mappings
* ``ObjectiveSmartObjectiveBankSession:`` a session for managing dynamic objective banks
* ``ObjectiveRequisiteSession:`` a session to examine objective requisites
* ``ObjectiveRequisiteAssignmentSession:`` a session to manage objective requisites
* ``ActivityLookupSession:`` a session to look up activities
* ``ActivityQuerySession:`` a session to query activities
* ``ActivitySearchSession:`` a session to search activities
* ``ActivityAdminSession:`` a session to create, modify and delete activities
* ``ActivityNotificationSession:`` a session to receive messages pertaining to activity changes
* ``ActivityObjectiveBankSession:`` a session for retrieving activity and objective bank mappings
* ``ActivityObjectiveBankAssignmentSession:`` a session for managing activity and objective bank mappings
* ``ActivitySmartObjectiveBankSession:`` a session for managing dynamic objective banks of activities
* ``ProficiencyLookupSession:`` a session to retrieve proficiencies
* ``ProficiencyQuerySession:`` a session to query proficiencies
* ``ProficiencySearchSession:`` a session to search for proficiencies
* ``ProficiencyAdminSession:`` a session to create, update, and delete proficiencies
* ``ProficiencyNotificationSession:`` a session to receive notifications pertaining to proficiency changes
* ``ProficiencyObjectiveBankSession:`` a session to look up proficiency to objective bank mappings
* ``ProficiencyObjectiveBankAssignmentSession:`` a session to manage proficiency to objective bank mappings
* ``ProficiencySmartObjectiveBankSession:`` a session to manage smart objective banks of proficiencies
* ``MyLearningPathSession:`` a session to examine learning paths of objectives
* ``LearningPathSession:`` a session to examine learning paths of objectives
* ``ObjectiveBankLookupSession:`` a session to look up objective banks
* ``ObjectiveBankQuerySession:`` a session to query objective banks
* ``ObjectiveBankSearchSession:`` a session to search objective banks
* ``ObjectiveBankAdminSession:`` a session to create, modify and delete objective banks
* ``ObjectiveBankNotificationSession:`` a session to receive messages pertaining to objective bank changes
* ``ObjectiveBankHierarchySession:`` a session to traverse the objective bank hierarchy
* ``ObjectiveBankHierarchyDesignSession:`` a session to manage the objective bank hierarchy
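A hedged proxy-passing sketch (``proxy_mgr`` and ``proxy`` are hypothetical;
a ``Proxy`` would normally come from ``osid.proxy``)::

    # unlike LearningManager, every session getter takes the proxy explicitly
    session = proxy_mgr.get_objective_lookup_session(proxy)
    scoped = proxy_mgr.get_objective_lookup_session_for_objective_bank(
        bank_id, proxy)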
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_objective_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective lookup service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveLookupSession``
:rtype: ``osid.learning.ObjectiveLookupSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_lookup()`` is ``true``.*
"""
return # osid.learning.ObjectiveLookupSession
@abc.abstractmethod
def get_objective_lookup_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective lookup service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveLookupSession``
:rtype: ``osid.learning.ObjectiveLookupSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveLookupSession
@abc.abstractmethod
def get_objective_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveQuerySession
@abc.abstractmethod
def get_objective_query_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective query service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_objective_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveQuerySession
@abc.abstractmethod
def get_objective_search_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective search service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveSearchSession``
:rtype: ``osid.learning.ObjectiveSearchSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_search()`` is ``true``.*
"""
return # osid.learning.ObjectiveSearchSession
@abc.abstractmethod
def get_objective_search_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective search service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveSearchSession``
:rtype: ``osid.learning.ObjectiveSearchSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_objective_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveSearchSession
@abc.abstractmethod
def get_objective_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective administration service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveAdminSession``
:rtype: ``osid.learning.ObjectiveAdminSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_admin()`` is ``true``.*
"""
return # osid.learning.ObjectiveAdminSession
@abc.abstractmethod
def get_objective_admin_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective admin service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveAdminSession``
:rtype: ``osid.learning.ObjectiveAdminSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_objective_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveAdminSession
@abc.abstractmethod
def get_objective_notification_session(self, objective_receiver, proxy):
"""Gets the notification session for notifications pertaining to objective changes.
:param objective_receiver: the objective receiver
:type objective_receiver: ``osid.learning.ObjectiveReceiver``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveNotificationSession``
:rtype: ``osid.learning.ObjectiveNotificationSession``
:raise: ``NullArgument`` -- ``objective_receiver`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_notification()`` is ``true``.*
"""
return # osid.learning.ObjectiveNotificationSession
@abc.abstractmethod
def get_objective_notification_session_for_objective_bank(self, objective_receiver, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective notification service for the given objective bank.
:param objective_receiver: the objective receiver
:type objective_receiver: ``osid.learning.ObjectiveReceiver``
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveNotificationSession``
:rtype: ``osid.learning.ObjectiveNotificationSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_receiver``, ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_objective_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveNotificationSession
@abc.abstractmethod
def get_objective_hierarchy_session(self, proxy):
"""Gets the session for traversing objective hierarchies.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveHierarchySession``
:rtype: ``osid.learning.ObjectiveHierarchySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_hierarchy()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy()`` is ``true``.*
"""
return # osid.learning.ObjectiveHierarchySession
@abc.abstractmethod
def get_objective_hierarchy_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective hierarchy traversal service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveHierarchySession``
:rtype: ``osid.learning.ObjectiveHierarchySession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_objective_hierarchy()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveHierarchySession
@abc.abstractmethod
def get_objective_hierarchy_design_session(self, proxy):
"""Gets the session for designing objective hierarchies.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveHierarchyDesignSession``
:rtype: ``osid.learning.ObjectiveHierarchyDesignSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_hierarchy_design()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy_design()`` is ``true``.*
"""
return # osid.learning.ObjectiveHierarchyDesignSession
@abc.abstractmethod
def get_objective_hierarchy_design_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective hierarchy design service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveHierarchyDesignSession``
:rtype: ``osid.learning.ObjectiveHierarchyDesignSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_objective_hierarchy_design()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy_design()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveHierarchyDesignSession
@abc.abstractmethod
def get_objective_sequencing_session(self, proxy):
"""Gets the session for sequencing objectives.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveSequencingSession``
:rtype: ``osid.learning.ObjectiveSequencingSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_sequencing()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_sequencing()`` is ``true``.*
"""
return # osid.learning.ObjectiveSequencingSession
@abc.abstractmethod
def get_objective_sequencing_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective sequencing service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveSequencingSession``
:rtype: ``osid.learning.ObjectiveSequencingSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_objective_sequencing()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_sequencing()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveSequencingSession
@abc.abstractmethod
def get_objective_objective_bank_session(self, proxy):
"""Gets the session for retrieving objective to objective bank mappings.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveObjectiveBankSession``
:rtype: ``osid.learning.ObjectiveObjectiveBankSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_objective_bank()`` is ``true``.*
"""
return # osid.learning.ObjectiveObjectiveBankSession
@abc.abstractmethod
def get_objective_objective_bank_assignment_session(self, proxy):
"""Gets the session for assigning objective to objective bank mappings.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveObjectiveBankAssignmentSession``
:rtype: ``osid.learning.ObjectiveObjectiveBankAssignmentSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_objective_bank_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_objective_bank_assignment()`` is ``true``.*
"""
return # osid.learning.ObjectiveObjectiveBankAssignmentSession
@abc.abstractmethod
def get_objective_smart_objective_bank_session(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` to manage dynamic objective banks of objectives.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveSmartObjectiveBankSession``
:rtype: ``osid.learning.ObjectiveSmartObjectiveBankSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_smart_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_smart_objective_bank()`` is ``true``.*
"""
return # osid.learning.ObjectiveSmartObjectiveBankSession
@abc.abstractmethod
def get_objective_requisite_session(self, proxy):
"""Gets the session for examining objective requisites.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveRequisiteSession``
:rtype: ``osid.learning.ObjectiveRequisiteSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_requisite()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_requisite()`` is ``true``.*
"""
return # osid.learning.ObjectiveRequisiteSession
@abc.abstractmethod
def get_objective_requisite_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective sequencing service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveRequisiteSession``
:rtype: ``osid.learning.ObjectiveRequisiteSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_objective_requisite()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_requisite()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveRequisiteSession
@abc.abstractmethod
def get_objective_requisite_assignment_session(self, proxy):
"""Gets the session for managing objective requisites.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveRequisiteAssignmentSession``
:rtype: ``osid.learning.ObjectiveRequisiteAssignmentSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_requisite_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_requisite_assignment()`` is ``true``.*
"""
return # osid.learning.ObjectiveRequisiteAssignmentSession
@abc.abstractmethod
def get_objective_requisite_assignment_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the objective sequencing service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveRequisiteAssignmentSession``
:rtype: ``osid.learning.ObjectiveRequisiteAssignmentSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_objective_requisite_assignment()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_requisite_assignment()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ObjectiveRequisiteAssignmentSession
@abc.abstractmethod
def get_activity_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the activity lookup service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityLookupSession``
:rtype: ``osid.learning.ActivityLookupSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_lookup()`` is ``true``.*
"""
return # osid.learning.ActivityLookupSession
@abc.abstractmethod
def get_activity_lookup_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the activity lookup service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityLookupSession``
:rtype: ``osid.learning.ActivityLookupSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_activity_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ActivityLookupSession
@abc.abstractmethod
def get_activity_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the activity query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityQuerySession``
:rtype: ``osid.learning.ActivityQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_query()`` is ``true``.*
"""
return # osid.learning.ActivityQuerySession
@abc.abstractmethod
def get_activity_query_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the activity query service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityQuerySession``
:rtype: ``osid.learning.ActivityQuerySession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_activity_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ActivityQuerySession
@abc.abstractmethod
def get_activity_search_session(self, proxy):
"""Gets the ``OsidSession`` associated with the activity search service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivitySearchSession``
:rtype: ``osid.learning.ActivitySearchSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_search()`` is ``true``.*
"""
return # osid.learning.ActivitySearchSession
@abc.abstractmethod
def get_activity_search_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the activity search service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivitySearchSession``
:rtype: ``osid.learning.ActivitySearchSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_activity_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ActivitySearchSession
@abc.abstractmethod
def get_activity_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the activity administration service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityAdminSession``
:rtype: ``osid.learning.ActivityAdminSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` is ``true``.*
"""
return # osid.learning.ActivityAdminSession
@abc.abstractmethod
def get_activity_admin_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the activity admin service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityAdminSession``
:rtype: ``osid.learning.ActivityAdminSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_activity_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ActivityAdminSession
@abc.abstractmethod
def get_activity_notification_session(self, activity_receiver, proxy):
"""Gets the notification session for notifications pertaining to activity changes.
:param activity_receiver: the activity receiver
:type activity_receiver: ``osid.learning.ActivityReceiver``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityNotificationSession``
:rtype: ``osid.learning.ActivityNotificationSession``
:raise: ``NullArgument`` -- ``activity_receiver`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_notification()`` is ``true``.*
"""
return # osid.learning.ActivityNotificationSession
@abc.abstractmethod
def get_activity_notification_session_for_objective_bank(self, activity_receiver, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the activity notification service for the given objective bank.
:param activity_receiver: the activity receiver
:type activity_receiver: ``osid.learning.ActivityReceiver``
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityNotificationSession``
:rtype: ``osid.learning.ActivityNotificationSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``activity_receiver``, ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_activity_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.learning.ActivityNotificationSession
@abc.abstractmethod
def get_activity_objective_bank_session(self, proxy):
"""Gets the session for retrieving activity to objective bank mappings.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityObjectiveBankSession``
:rtype: ``osid.learning.ActivityObjectiveBankSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_objective_bank()`` is ``true``.*
"""
return # osid.learning.ActivityObjectiveBankSession
@abc.abstractmethod
def get_activity_objective_bank_assignment_session(self, proxy):
"""Gets the session for assigning activity to objective bank mappings.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityObjectiveBankAssignmentSession``
:rtype: ``osid.learning.ActivityObjectiveBankAssignmentSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_objective_bank_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_objective_bank_assignment()`` is ``true``.*
"""
return # osid.learning.ActivityObjectiveBankAssignmentSession
@abc.abstractmethod
def get_activity_smart_objective_bank_session(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` to manage dynamic objective banks of activities.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivitySmartObjectiveBankSession``
:rtype: ``osid.learning.ActivitySmartObjectiveBankSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_smart_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_smart_objective_bank()`` is ``true``.*
"""
return # osid.learning.ActivitySmartObjectiveBankSession
@abc.abstractmethod
def get_proficiency_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the proficiency lookup service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyLookupSession``
:rtype: ``osid.learning.ProficiencyLookupSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_lookup()`` is ``true``.*
"""
return # osid.learning.ProficiencyLookupSession
@abc.abstractmethod
def get_proficiency_lookup_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the proficiency lookup service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyLookupSession``
:rtype: ``osid.learning.ProficiencyLookupSession``
:raise: ``NotFound`` -- no ``ObjectiveBank`` found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_lookup()`` and
``supports_visible_federation()`` are ``true``*
"""
return # osid.learning.ProficiencyLookupSession
@abc.abstractmethod
def get_proficiency_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the proficiency query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyQuerySession``
:rtype: ``osid.learning.ProficiencyQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_query()`` is ``true``.*
"""
return # osid.learning.ProficiencyQuerySession
@abc.abstractmethod
def get_proficiency_query_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the proficiency query service for the given objective bank.
:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyQuerySession``
:rtype: ``osid.learning.ProficiencyQuerySession``
:raise: ``NotFound`` -- no ``ObjectiveBank`` found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_query()`` and
``supports_visible_federation()`` are ``true``*
"""
return # osid.learning.ProficiencyQuerySession
@abc.abstractmethod
def get_proficiency_search_session(self, proxy):
"""Gets the ``OsidSession`` associated with the proficiency search service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencySearchSession``
:rtype: ``osid.learning.ProficiencySearchSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_search()`` is ``true``.*
"""
return # osid.learning.ProficiencySearchSession
@abc.abstractmethod
def get_proficiency_search_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the proficiency search service for the given objective bank.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencySearchSession``
:rtype: ``osid.learning.ProficiencySearchSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_search()`` and
``supports_visible_federation()`` are ``true``*
"""
return # osid.learning.ProficiencySearchSession
@abc.abstractmethod
def get_proficiency_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the proficiency administration service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyAdminSession``
:rtype: ``osid.learning.ProficiencyAdminSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_admin()`` is ``true``.*
"""
return # osid.learning.ProficiencyAdminSession
@abc.abstractmethod
def get_proficiency_admin_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the proficiency administration service for the given objective bank.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyAdminSession``
:rtype: ``osid.learning.ProficiencyAdminSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_admin()`` and
``supports_visible_federation()`` are ``true``*
"""
return # osid.learning.ProficiencyAdminSession
@abc.abstractmethod
def get_proficiency_notification_session(self, proficiency_receiver, proxy):
"""Gets the ``OsidSession`` associated with the proficiency notification service.
:param proficiency_receiver: the notification callback
:type proficiency_receiver: ``osid.learning.ProficiencyReceiver``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyNotificationSession``
:rtype: ``osid.learning.ProficiencyNotificationSession``
:raise: ``NullArgument`` -- ``proficiency_receiver`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_notification()`` is ``true``.*
"""
return # osid.learning.ProficiencyNotificationSession
@abc.abstractmethod
def get_proficiency_notification_session_for_objective_bank(self, proficiency_receiver, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the proficiency notification service for the given objective bank.
:param proficiency_receiver: the notification callback
:type proficiency_receiver: ``osid.learning.ProficiencyReceiver``
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyNotificationSession``
:rtype: ``osid.learning.ProficiencyNotificationSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``proficiency_receiver``, ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_notification()`` and
``supports_visible_federation()`` are ``true``*
"""
return # osid.learning.ProficiencyNotificationSession
@abc.abstractmethod
def get_proficiency_objective_bank_session(self, proxy):
"""Gets the ``OsidSession`` to lookup proficiency/objective bank mappings.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyObjectiveBankSession``
:rtype: ``osid.learning.ProficiencyObjectiveBankSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_objective_bank()`` is ``true``.*
"""
return # osid.learning.ProficiencyObjectiveBankSession
@abc.abstractmethod
def get_proficiency_objective_bank_assignment_session(self, proxy):
"""Gets the ``OsidSession`` associated with assigning proficiencies to objective banks.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyObjectiveBankAssignmentSession``
:rtype: ``osid.learning.ProficiencyObjectiveBankAssignmentSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_objective_bank_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_objective_bank_assignment()`` is
``true``.*
"""
return # osid.learning.ProficiencyObjectiveBankAssignmentSession
@abc.abstractmethod
def get_proficiency_smart_objective_bank_session(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` to manage dynamic objective banks of proficiencies.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencySmartObjectiveBankSession``
:rtype: ``osid.learning.ProficiencySmartObjectiveBankSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_smart_objective_bank()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_proficiency_smart_objective_bank()`` is ``true``.*
"""
return # osid.learning.ProficiencySmartObjectiveBankSession
@abc.abstractmethod
def get_my_learning_path_session(self, proxy):
"""Gets the ``OsidSession`` associated with the my learning path service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``MyLearningPathSession``
:rtype: ``osid.learning.MyLearningPathSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_my_learning_path()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_my_learning_path()`` is ``true``.*
"""
return # osid.learning.MyLearningPathSession
@abc.abstractmethod
def get_my_learning_path_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the my learning path service for the given objective bank.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``MyLearningPathSession``
:rtype: ``osid.learning.MyLearningPathSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_my_learning_path()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_my_learning_path()`` and
``supports_visible_federation()`` are ``true``*
"""
return # osid.learning.MyLearningPathSession
@abc.abstractmethod
def get_learning_path_session(self, proxy):
"""Gets the ``OsidSession`` associated with the learning path service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``LearningPathSession``
:rtype: ``osid.learning.LearningPathSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_learning_path()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_learning_path()`` is ``true``.*
"""
return # osid.learning.LearningPathSession
@abc.abstractmethod
def get_learning_path_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the learning path service for the given objective bank.
:param objective_bank_id: the ``Id`` of the ``ObjectiveBank``
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``LearningPathSession``
:rtype: ``osid.learning.LearningPathSession``
:raise: ``NotFound`` -- no objective bank found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_learning_path()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_learning_path()`` and
``supports_visible_federation()`` are ``true``*
"""
return # osid.learning.LearningPathSession
@abc.abstractmethod
def get_objective_bank_lookup_session(self, proxy):
"""Gets the OsidSession associated with the objective bank lookup service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveBankLookupSession``
:rtype: ``osid.learning.ObjectiveBankLookupSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_lookup()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankLookupSession
@abc.abstractmethod
def get_objective_bank_query_session(self, proxy):
"""Gets the OsidSession associated with the objective bank query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveBankQuerySession``
:rtype: ``osid.learning.ObjectiveBankQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_query()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankQuerySession
@abc.abstractmethod
def get_objective_bank_search_session(self, proxy):
"""Gets the OsidSession associated with the objective bank search service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveBankSearchSession``
:rtype: ``osid.learning.ObjectiveBankSearchSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_search()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankSearchSession
@abc.abstractmethod
def get_objective_bank_admin_session(self, proxy):
"""Gets the OsidSession associated with the objective bank administration service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveBankAdminSession``
:rtype: ``osid.learning.ObjectiveBankAdminSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_admin()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankAdminSession
@abc.abstractmethod
def get_objective_bank_notification_session(self, objective_bank_receiver, proxy):
"""Gets the notification session for notifications pertaining to objective bank service changes.
:param objective_bank_receiver: the objective bank receiver
:type objective_bank_receiver: ``osid.learning.ObjectiveBankReceiver``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveBankNotificationSession``
:rtype: ``osid.learning.ObjectiveBankNotificationSession``
:raise: ``NullArgument`` -- ``objective_bank_receiver`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_notification()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankNotificationSession
@abc.abstractmethod
def get_objective_bank_hierarchy_session(self, proxy):
"""Gets the session traversing objective bank hierarchies.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveBankHierarchySession``
:rtype: ``osid.learning.ObjectiveBankHierarchySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_hierarchy()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_hierarchy()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankHierarchySession
@abc.abstractmethod
def get_objective_bank_hierarchy_design_session(self, proxy):
"""Gets the session designing objective bank hierarchies.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveBankHierarchyDesignSession``
:rtype: ``osid.learning.ObjectiveBankHierarchyDesignSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_hierarchy_design()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_bank_hierarchy_design()`` is ``true``.*
"""
return # osid.learning.ObjectiveBankHierarchyDesignSession
@abc.abstractmethod
def get_learning_batch_proxy_manager(self):
"""Gets a ``LearningBatchProxyManager``.
:return: a ``LearningBatchProxyManager``
:rtype: ``osid.learning.batch.LearningBatchProxyManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_learning_batch()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_learning_batch()`` is ``true``.*
"""
return # osid.learning.batch.LearningBatchProxyManager
learning_batch_proxy_manager = property(fget=get_learning_batch_proxy_manager)
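# A minimal consumption sketch (illustrative only, not part of the abstract
# API above). ``my_runtime`` and ``my_proxy`` are hypothetical objects that a
# hosting application would supply, and the lookup call on the runtime is an
# assumption; only the session accessors defined above are real.
#
#     proxy_manager = my_runtime.get_proxy_manager('LEARNING')  # hypothetical lookup
#     if proxy_manager.supports_objective_lookup():
#         session = proxy_manager.get_objective_lookup_session(my_proxy)
#         # ``session`` is an osid.learning.ObjectiveLookupSession scoped to
#         # the identity and context carried by ``my_proxy``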
import Utils
import os
from collections import defaultdict
import matplotlib.pyplot as plt
import pandas as pd
from random import Random
from Simulation import Simulation
import pylab, numpy, math, csv
def run_sim(params, exp_prefix, rng, sim=True):
"""
Run multiple initialisations of a single set of experimental conditions.
If sim=False, output is estimated, rather than simulated. Currently, this
is just used for evaluating the "fixed time investment" heuristic.
"""
sum_roi_values = []
roi_values = []
total_r_values = []
mean_tg_values = []
corr_rq_held_values = []
redundancies_final = 0
# for each initialisation
for run in range(params['runs']):
params['seed'] = rng.randint(0,99999999)
params['prefix'] = exp_prefix + "%d/" % params['seed']
print " run #%d" % run
# run experiment; either simulation or estimation
exp = Simulation(params)
if sim:
exp.run()
else:
exp.test_flat(params['fixed_time'])
roi_values.append(exp.calc_roi(1)[2])
total_r_values.append(exp.calc_mean_total_output())
mean_tg_values.append(exp.calc_mean_time_grant())
corr_rq_held_values.append(exp.calc_mean_corr_rq_held())
sum_roi_values.append(exp.calc_roi_sum())
#roi_SEs.append(numpy.std(sum_roi_values))
exp.calc_redundancies()
redundancies_final = exp.redundancies_total
if params['write_output']:
Utils.create_dir(params['prefix'])
exp.write_output()
#redundancies_final = exp.redundancies_total #redundancies_final.append(exp.redundancies_total)
# write one CSV row of per-run results
fields = [
    run + 1, params['use_postdocs'], params['growing_pop'],
    params['pdr_rq_counts'], params['mentored_pdrs'],
    exp.roi_sum[params['iterations'] - 1],
    exp.roi_sum_pdr[params['iterations'] - 1],
    total_r_values[run],
    exp.mean_r[params['iterations'] - 1],
    exp.mean_r_old_academic[params['iterations'] - 1],
    exp.mean_r_postdoc[params['iterations'] - 1],
    exp.mean_r_former_pdr[params['iterations'] - 1],
    mean_tg_values[run], params['learning_type'],
    params['postdoc_chance'], exp.redundancies_total,
    params['limited_funding'], params['yearly_increase']]
dataFile.write(",".join(str(f) for f in fields) + "\n")
# calculate and write summary statistics
summ_data = [roi_values, total_r_values,
mean_tg_values, corr_rq_held_values]
summ_stats = [
pylab.mean(roi_values), pylab.std(roi_values),
pylab.mean(total_r_values), pylab.std(total_r_values),
pylab.mean(mean_tg_values), pylab.std(mean_tg_values),
pylab.mean(corr_rq_held_values), pylab.std(corr_rq_held_values)]
if params['write_output']:
Utils.write_data_2d(summ_data, exp_prefix + 'summ_data.csv')
Utils.write_data(summ_stats, exp_prefix + 'summ_stats.csv')
return pylab.mean(roi_values), pylab.std(roi_values), total_r_values[-1], redundancies_final
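# A minimal driver sketch for run_sim (illustrative only). The keys shown are
# the ones read in this module; Simulation presumably requires more, and the
# values here are placeholders rather than published experimental settings.
# Note that run_sim also writes a per-run row to a module-level ``dataFile``
# handle, which must be an open file object before run_sim is called.
#
#     seed_rng = Random(12345)
#     params = {
#         'runs': 2, 'iterations': 100, 'write_output': False,
#         'learning_type': 'memory', 'use_postdocs': 1, 'growing_pop': 0,
#         'pdr_rq_counts': 1, 'mentored_pdrs': 0, 'postdoc_chance': 0.15,
#         'limited_funding': False, 'yearly_increase': 0.0,
#         'fixed_time': 0.5,  # only read when sim=False
#     }
#     mean_roi, sd_roi, final_total_r, redundancies = run_sim(
#         params, "output/demo_", seed_rng)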
def run_WCSS_sims(params, base_prefix, seed_rng):
"""
Run basic simulations associated with the WCSS paper.
"""
params['runs'] = 10
# run THERMOSTAT model
params['prefix'] = base_prefix + "thermostat/"
params['learning_type'] = 'thermostat'
print 'thermostat'
run_sim(params, params['prefix'], seed_rng)
# run MEMORY A model ("bad" parameters)
params['prefix'] = base_prefix + "memory_A/"
params['learning_type'] = 'memory'
params['prob_reentry'] = 0.05
params['run_length'] = 5
print 'memory A'
run_sim(params, params['prefix'], seed_rng)
# run MEMORY B model ("good" parameters)
params['prefix'] = base_prefix + "memory_B/"
params['learning_type'] = 'memory'
params['prob_reentry'] = 0.02
params['run_length'] = 3
print 'memory B'
run_sim(params, params['prefix'], seed_rng)
# run FIXED model
params['prefix'] = base_prefix + "fixed/"
print 'fixed'
run_sim(params, params['prefix'], seed_rng, sim=False)
def run_Alife_sims(params, base_prefix, seed_rng):
"""
Run sims for Alife XV paper.
"""
params['runs'] = 50
#run MEMORY, growing pop model
params['prefix'] = base_prefix + "memory_growingPop"
params['learning_type'] = 'memory'
params['use_postdocs'] = 0
params['growing_pop'] = 1
params['pdr_rq_counts'] = 0
params['mentored_pdrs'] = 0
print 'Growing population, no postdocs'
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, postdocs, no RQ/mentors
params['prefix'] = base_prefix + "memory_noRQnoM"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 0
params['mentored_pdrs'] = 0
print 'Memory, postdocs, no RQ or mentors'
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, postdocs, RQ, no mentors
params['prefix'] = base_prefix + "memory_RQnoM"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
print 'Memory, postdocs, RQ no mentors'
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, postdocs, RQ and mentors
params['prefix'] = base_prefix + "memory_RQM"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 1
print 'Memory, postdocs, RQ and mentors'
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, postdocs, no RQ, with mentors
params['prefix'] = base_prefix + "memory_noRQM"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 0
params['mentored_pdrs'] = 1
print 'Memory, postdocs, no RQ, with mentors'
run_sim(params, params['prefix'], seed_rng)
def run_Alife_thermo(params, base_prefix, seed_rng):
"""
Run sims for Alife XV paper with thermostat.
"""
params['runs'] = 25
#run thermo, growing pop model
params['prefix'] = base_prefix + "thermo_growingPop"
params['learning_type'] = 'thermostat'
params['use_postdocs'] = 0
params['growing_pop'] = 1
params['pdr_rq_counts'] = 0
params['mentored_pdrs'] = 0
print 'Growing population, no postdocs'
run_sim(params, params['prefix'], seed_rng)
#run thermo version, postdocs, no RQ/mentors
params['prefix'] = base_prefix + "thermo_noRQnoM"
params['learning_type'] = 'thermostat'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 0
params['mentored_pdrs'] = 0
print 'Thermo, postdocs, no RQ or mentors'
run_sim(params, params['prefix'], seed_rng)
#run thermo version, postdocs, RQ, no mentors
params['prefix'] = base_prefix + "thermo_RQnoM"
params['learning_type'] = 'thermostat'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
print 'Thermo, postdocs, RQ no mentors'
run_sim(params, params['prefix'], seed_rng)
#run thermo version, postdocs, RQ and mentors
params['prefix'] = base_prefix + "thermo_RQM"
params['learning_type'] = 'thermostat'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 1
print 'Thermo, postdocs, RQ and mentors'
run_sim(params, params['prefix'], seed_rng)
#run thermo version, postdocs, no RQ, with mentors
params['prefix'] = base_prefix + "thermo_noRQM"
params['learning_type'] = 'thermostat'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 0
params['mentored_pdrs'] = 1
print 'Thermo, postdocs, no RQ, with mentors'
run_sim(params, params['prefix'], seed_rng)
def run_Alife_sweep1(params, base_prefix, seed_rng):
"""
Run sweep of promotion chance values.
"""
params['runs'] = 50
#run MEMORY version, promotion chance 15 percent
params['prefix'] = base_prefix + "memory_chance15_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['postdoc_chance'] = 0.15
print 'Memory, postdocs promo 15 percent'
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, promotion chance 25 percent
params['prefix'] = base_prefix + "memory_chance25_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['postdoc_chance'] = 0.25
print 'Memory, postdocs promo 25 percent'
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, promotion chance 50 percent
params['prefix'] = base_prefix + "memory_chance50_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['postdoc_chance'] = 0.50
print 'Memory, postdocs promo 50 percent'
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, promotion chance 75 percent
params['prefix'] = base_prefix + "memory_chance75_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['postdoc_chance'] = 0.75
print 'Memory, postdocs promo 75 percent'
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, promotion chance 100 percent
params['prefix'] = base_prefix + "memory_chance100_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['postdoc_chance'] = 1.0
print 'Memory, postdocs promo 100 percent'
run_sim(params, params['prefix'], seed_rng)
def run_Alife_sweep2(params, base_prefix, seed_rng):
"""
Run sweep of funding limits.
"""
params['runs'] = 100
#run funding limited sim
params['prefix'] = base_prefix + "funding_limited_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 1
params['postdoc_chance'] = 0.15
params['limited_funding'] = True
print 'Funding Limited'
run_sim(params, params['prefix'], seed_rng)
#run funding unlimited sim
params['prefix'] = base_prefix + "funding_unlimited_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 1
params['postdoc_chance'] = 0.15
params['limited_funding'] = False
print 'Funding Unlimited'
run_sim(params, params['prefix'], seed_rng)
def run_Alife_sweep3(params, base_prefix, seed_rng):
"""
Run sweep of funding increase rate (limited funding condition).
"""
params['runs'] = 20
#run MEMORY version, yearly funding increase 2 percent
params['prefix'] = base_prefix + "inc02_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['starting_grant_fund'] = 30 #starting funds available, 1 unit equals 1 funded project
params['yearly_increase'] = 0.02
params['limited_funding'] = True
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, yearly funding increase 2.5 percent
params['prefix'] = base_prefix + "inc025_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['starting_grant_fund'] = 30 #starting funds available, 1 unit equals 1 funded project
params['yearly_increase'] = 0.025
params['limited_funding'] = True
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, yearly funding increase 3 percent
params['prefix'] = base_prefix + "inc03_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['starting_grant_fund'] = 30 #starting funds available, 1 unit equals 1 funded project
params['yearly_increase'] = 0.03
params['limited_funding'] = True
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, yearly funding increase 3.5 percent
params['prefix'] = base_prefix + "inc035_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['starting_grant_fund'] = 30 #starting funds available, 1 unit equals 1 funded project
params['yearly_increase'] = 0.035
params['limited_funding'] = True
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, yearly funding increase 4 percent
params['prefix'] = base_prefix + "inc04_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['starting_grant_fund'] = 30 #starting funds available, 1 unit equals 1 funded project
params['yearly_increase'] = 0.04
params['limited_funding'] = True
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, yearly funding increase 4.5 percent
params['prefix'] = base_prefix + "inc045_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['starting_grant_fund'] = 30 #starting funds available, 1 unit equals 1 funded project
params['yearly_increase'] = 0.045
params['limited_funding'] = True
run_sim(params, params['prefix'], seed_rng)
#run MEMORY version, yearly funding increase 5 percent
params['prefix'] = base_prefix + "inc05_"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 0
params['starting_grant_fund'] = 30 #starting funds available, 1 unit equals 1 funded project
params['yearly_increase'] = 0.05
params['limited_funding'] = True
run_sim(params, params['prefix'], seed_rng)
def run_Alife_sweep4(params, base_prefix, seed_rng):
"""
Compare the baseline (growing population) and postdoc scenarios under limited funding.
"""
params['runs'] = 10
#run funding limited sim - baseline
params['prefix'] = base_prefix + "funding_limited_base"
params['learning_type'] = 'memory'
params['use_postdocs'] = 0
params['growing_pop'] = 1
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 1
params['postdoc_chance'] = 0.15
params['limited_funding'] = True
print 'Funding Limited -- Baseline'
run_sim(params, params['prefix'], seed_rng)
#run funding limited sim - postdocs
params['prefix'] = base_prefix + "funding_limited_pdr"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 1
params['postdoc_chance'] = 0.15
params['limited_funding'] = True
print 'Funding Limited -- Postdocs'
run_sim(params, params['prefix'], seed_rng)
def run_Alife_sweep5(params, base_prefix, seed_rng):
"""
Compare the baseline (growing population) and postdoc scenarios under both limited and unlimited funding.
"""
params['runs'] = 50
#run funding limited sim - baseline
params['prefix'] = base_prefix + "funding_limited_base"
params['learning_type'] = 'memory'
params['use_postdocs'] = 0
params['growing_pop'] = 1
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 1
params['postdoc_chance'] = 0.15
params['limited_funding'] = True
print 'Funding Limited -- Baseline'
run_sim(params, params['prefix'], seed_rng)
#run funding limited sim - postdocs
params['prefix'] = base_prefix + "funding_limited_pdr"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 1
params['postdoc_chance'] = 0.15
params['limited_funding'] = True
print 'Funding Limited -- Postdocs'
run_sim(params, params['prefix'], seed_rng)
#run funding unlimited sim - baseline
params['prefix'] = base_prefix + "funding_unlimited_base"
params['learning_type'] = 'memory'
params['use_postdocs'] = 0
params['growing_pop'] = 1
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 1
params['postdoc_chance'] = 0.15
params['limited_funding'] = False
print 'Funding Unlimited -- Baseline'
run_sim(params, params['prefix'], seed_rng)
#run funding unlimited sim - postdocs
params['prefix'] = base_prefix + "funding_unlimited_pdr"
params['learning_type'] = 'memory'
params['use_postdocs'] = 1
params['growing_pop'] = 0
params['pdr_rq_counts'] = 1
params['mentored_pdrs'] = 1
params['postdoc_chance'] = 0.15
params['limited_funding'] = False
print 'Funding Unlimited -- Postdocs'
run_sim(params, params['prefix'], seed_rng)
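#NB: each sweep above repeats the same parameter-block boilerplate. A minimal
#refactoring sketch (hypothetical helper, not called anywhere in this file):
# def run_scenario(params, prefix, seed_rng, label, **overrides):
#     params.update(overrides)
#     params['prefix'] = prefix
#     print label
#     run_sim(params, prefix, seed_rng)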
def init_params():
params = {}
# simulation parameters
params['write_output'] = True # whether or not to write output on individual runs
params['prefix'] = '/Users/u0030612/Documents/results/test/Alife/limited funding/baseComp8/' # where to write output
params['runs'] = 1 # number of runs per parameter combination
params['random_seed'] = True # whether to use random seed (or fixed)
params['use_postdocs'] = 1 # whether to include postdocs in sim
params['growing_pop'] = 0 # use growing population, no postdocs scenario
params['pdr_rq_counts'] = 1 # does postdoc RQ count in promotions
params['mentored_pdrs'] = 1 # do postdocs gain RQ due to mentoring
params['seed'] = 1234 # seed to use (if random_seed==False)
params['pop_size'] = 100 # initial number of academic agents
params['iterations'] = 100 # number of iterations to simulate
params['init_time'] = 0.5 # upper bound on initial academic time_grant values
params['fixed_time'] = 0.1 # i.e., for legislated time allocation
params['postdoc_chance'] = 0.15 # chance for PDR promotion
params['mentoring_bonus'] = 0.20 # bonus to RQ for promoted postdocs (from being mentored/maturing)
params['newb_time'] = 0.4 # time spent on being a new postdoc/academic
params['jobhunt_time'] = 0.3 # time spent on job-hunting in final 2 semesters of contract
params['career_end'] = 60 #semesters before agent has to retire
params['use_retirement'] = True
# grant parameters
params['weight_grant'] = 1.0 # weighting on grant quality, rather than track record
params['grant_slope'] = 2.0 # slope constant in tanh function
params['research_slope'] = 2.0 # slope constant in tanh function
params['grant_noise'] = 0.1 # std dev of gaussian noise on grant quality
params['rq_counts'] = True # is research_quality involved in grant_quality?
params['grant_bonus'] = 1.5 # G: bonus to research output arising from grants
params['grant_proportion'] = 0.3 # P: proportion of population who can obtain grants
params['grant_pools'] = 1 # number of pools for grant evaluation
params['manager_penalty'] = 0.00 # management time deducted from successful grant applicants
params['limited_funding'] = True #are grants limited?
params['starting_grant_fund'] = 30 #starting funds available, 1 unit equals 1 funded project
params['yearly_increase'] = 0.02 #percentage increase per timestep in limited funding case
# self learning parameters
params['learning_type'] = 'memory' # options 'thermostat', 'memory'
params['self_update_width'] = 0.1 # "learning rate"
params['self_update_width_fixed'] = True # Are learning steps of fixed width? (or random)
params['memory_size'] = 12 # number of memory steps to store (NB: this is NOT window length!)
params['run_length'] = 12 # W: number of memory steps to consider (THIS is window length!)
params['prob_reentry'] = 0.02 # E: probability of re-entering population after dropping out
params['reentry_range'] = 0.2 # upper bound on re-entry time_grant values
return params
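# Example (illustrative only): overriding the defaults above for a quick
# smoke test, assuming run_sim and Random are available as in __main__ below:
# p = init_params()
# p['runs'] = 1; p['iterations'] = 10; p['write_output'] = False
# run_sim(p, p['prefix'], Random(p['seed']))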
if __name__ == '__main__':
params = init_params()
Utils.create_dir(params['prefix'])
seed_rng = Random(params['seed'])
# WCSS SIMULATIONS
#run_WCSS_sims(params, params['prefix'], seed_rng)
# SINGLE TEST RUN
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run Number" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI no PDRs" + "," + "Total RO" + "," + "Mean RO Old Farts" + "," + "Mean RO PDR" + "," + "Mean RO FPDR" + "," +
# "Mean TG" + "," + "Learning Type" + "," + "Promo Chance" + "\n")
# run_sim(params, params['prefix'], seed_rng, sim=True)
# dataFile.close()
#Alife XV simulations, thermo
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "\n")
# root_dir = params['prefix']
# run_Alife_thermo(params, params['prefix'], seed_rng)
# dataFile.close()
#Alife XV simulations, memory
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "\n")
# root_dir = params['prefix']
# run_Alife_sims(params, params['prefix'], seed_rng)
# dataFile.close()
# plt.clf()
# fields = ['Use_PDRs', 'Growing_Pop', 'RQ_counts', 'Mentored_PDRs', 'Total_RO', 'Mean_RO_all', 'ROI_no_PDRs', 'Total_Sacked']
# df = pd.read_csv(root_dir + 'roi_test.csv', usecols=fields)
# means = df.groupby(['Use_PDRs', 'Growing_Pop', 'RQ_counts', 'Mentored_PDRs']).mean()
# deviation = df.groupby(['Use_PDRs', 'Growing_Pop', 'RQ_counts', 'Mentored_PDRs']).std()
# print(means)
# print(deviation)
# scenarioList = ['Growing Pop', 'NoRQnoM', 'noRQM', 'RQnoM', 'RQM']
# fig = means['Total_RO'].plot(kind='bar', title = 'Output in Postdoc Scenarios', yerr=deviation.Total_RO, color=['Orange', 'Salmon', 'Violet', 'Chocolate', 'LightSteelBlue'], alpha=0.5, rot=0)
# fig.set_xlabel("Scenarios",fontsize=12)
# fig.set_ylabel("Research Output",fontsize=12)
# fig.set_xticklabels(scenarioList)
# fig2 = fig.get_figure()
# fig2.savefig(root_dir + 'memRunSet_totalRO.pdf')
# fig2.savefig(root_dir + 'memRunSet_totalRO.png')
#
# plt.clf()
# fig_r = means['Mean_RO_all'].plot(kind='bar', title = 'Mean Output in Postdoc Scenarios', yerr=deviation.Mean_RO_all, color=['Orange', 'Salmon', 'Violet', 'Chocolate', 'LightSteelBlue'], alpha=0.5, rot=0)
# fig_r.set_xlabel("Scenarios",fontsize=12)
# fig_r.set_ylabel("Mean Research Output",fontsize=12)
# fig_r.set_xticklabels(scenarioList)
# fig2_r = fig.get_figure()
# fig2_r.savefig(root_dir + 'memRunSet_meanRO.pdf')
# fig2_r.savefig(root_dir + 'memRunSet_meanRO.png')
#
# plt.clf()
# fig_roi = means['ROI_no_PDRs'].plot(kind='bar', title='ROI in Postdoc Scenarios', yerr=deviation.ROI_no_PDRs, facecolor='g', alpha=0.5, rot=0)
# fig_roi.set_xlabel("Scenarios", fontsize=12)
# fig_roi.set_ylabel("Return on Investment", fontsize=12)
# fig_roi.set_xticklabels(scenarioList)
# fig2_roi = fig.get_figure()
# fig2_roi.savefig(root_dir + 'memRunSet_ROI.pdf')
# fig2_roi.savefig(root_dir + 'memRunSet_ROI.png')
#
# plt.clf()
# scenarioListR = ['NoRQnoM', 'noRQM', 'RQnoM', 'RQM']
# df_R = df[df.Growing_Pop != 1]
# means_R = df_R.groupby(['Use_PDRs', 'RQ_counts', 'Mentored_PDRs']).mean()
# deviation_R = df_R.groupby(['Use_PDRs','RQ_counts', 'Mentored_PDRs']).std()
# fig_sacked = means_R['Total_Sacked'].plot(kind='bar', title='Redundancies in Postdoc Scenarios', yerr=deviation_R.Total_Sacked, facecolor='r', alpha=0.5, rot=0)
# fig_sacked.set_xlabel("Scenarios", fontsize=12)
# fig_sacked.set_ylabel("Redundancies", fontsize=12)
# fig_sacked.set_xticklabels(scenarioListR)
# fig2_sacked = fig.get_figure()
# fig2_sacked.savefig(root_dir + 'memRunSet_sacked.pdf')
# fig2_sacked.savefig(root_dir + 'memRunSet_sacked.png')
#Alife XV promo chance sweep
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "\n")
# root_dir = params['prefix']
# run_Alife_sweep1(params, params['prefix'], seed_rng)
# dataFile.close()
# plt.clf()
# promoChanceList = [15, 25, 50, 75, 100]
# fields = ['Promo_Chance', 'Total_RO', 'Mean_RO_all', 'ROI_no_PDRs', 'Total_Sacked']
# df = pd.read_csv(root_dir + 'roi_test.csv', usecols=fields)
# means = df.groupby('Promo_Chance').mean()
# deviation = df.groupby('Promo_Chance').std()
# print(means)
# print(deviation)
# fig = means['Total_RO'].plot(kind='bar', title = 'Promotion Chance vs Output', yerr=deviation.Total_RO, facecolor='c', alpha=0.5, rot=0)
# fig.set_xlabel("Promotion Chance",fontsize=12)
# fig.set_ylabel("Research Output",fontsize=12)
# fig.set_xticklabels(promoChanceList)
# fig2 = fig.get_figure()
# fig2.savefig(root_dir + 'promoRunSet_totalRO.pdf')
# fig2.savefig(root_dir + 'promoRunSet_totalRO.png')
# plt.clf()
# fig_r = means['Mean_RO_all'].plot(kind='bar', title = 'Promotion Chance vs Mean Output', yerr=deviation.Mean_RO_all, facecolor='m', alpha=0.5, rot=0)
# fig_r.set_xlabel("Promotion Chance",fontsize=12)
# fig_r.set_ylabel("Mean Research Output",fontsize=12)
# fig_r.set_xticklabels(promoChanceList)
# fig2_r = fig.get_figure()
# fig2_r.savefig(root_dir + 'promoRunSet_meanRO.pdf')
# fig2_r.savefig(root_dir + 'promoRunSet_meanRO.png')
# plt.clf()
# fig_roi = means['ROI_no_PDRs'].plot(kind='bar', title='Promotion Chance vs ROI', yerr=deviation.ROI_no_PDRs, facecolor='g', alpha=0.5, rot=0)
# fig_roi.set_xlabel("Promotion Chance", fontsize=12)
# fig_roi.set_ylabel("Return on Investment", fontsize=12)
# fig_roi.set_xticklabels(promoChanceList)
# fig2_roi = fig.get_figure()
# fig2_roi.savefig(root_dir + 'promoRunSet_ROI.pdf')
# fig2_roi.savefig(root_dir + 'promoRunSet_ROI.png')
# plt.clf()
# df_R = df[df.Promo_Chance != 1]
# means_R = df_R.groupby(['Promo_Chance']).mean()
# deviation_R = df_R.groupby(['Promo_Chance']).std()
# fig_sacked = means_R['Total_Sacked'].plot(kind='bar', title='Redundancies vs Promotion Chance', yerr=deviation_R.Total_Sacked, facecolor='r', alpha=0.5, rot=0)
# fig_sacked.set_xlabel("Promotion Chance", fontsize=12)
# fig_sacked.set_ylabel("Redundancies", fontsize=12)
# fig_sacked.set_xticklabels(promoChanceList)
# fig2_sacked = fig.get_figure()
# fig2_sacked.savefig(root_dir + 'promoRunSet_sacked.pdf')
# fig2_sacked.savefig(root_dir + 'promoRunSet_sacked.png')
# #runs for sensitivity analysis using GEM-SA
# researchMeans = []
# researchSEs = []
# params['write_output'] = False
# sim_runs = 20
# gemFile = open(params['prefix'] + "GEMSA data new.txt",'w')
# meansFile = open(params['prefix'] + "GEMSA means new.txt", 'w')
# outFile = open(params['prefix'] + "GEMSA outputs new.txt", 'w')
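# (File format note, inferred from the writes below: GEM-SA consumes plain
# tab-separated text -- one row of input settings plus the observed output per
# run in the 'data' file, the input combinations alone in 'means', and one
# mean output per combination in 'outputs'.)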
# postdocChanceList = [ 0.15, 0.25, 0.50, 0.75, 1.0 ]
# mentoringBonusList = [ 0.30, 0.40, 0.50, 0.60, 0.70 ]
# newbTimeList = [ 0.1, 0.3, 0.5, 0.7 ]
# jobhuntTimeList = [ 0.1, 0.3, 0.5, 0.7 ]
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Job_Stress" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "\n")
# root_dir = params['prefix']
# for variableChance in postdocChanceList:
# for variableMentoring in mentoringBonusList:
# for variableNewb in newbTimeList:
# for variableJobHunting in jobhuntTimeList:
# params['postdoc_chance'] = variableChance
# params['mentoring_bonus'] = variableMentoring
# params['newb_time'] = variableNewb
# params['jobhunt_time'] = variableJobHunting
# print "Trying postdoc chance: ", variableChance
# print "Trying mentoring bonus: ", variableMentoring
# print "Trying newb time: ", variableNewb
# print "Trying jobhunting time: ", variableJobHunting
# researchList = []
# researchSum = 0.0
# meansFile.write(str(variableChance) + "\t" + str(variableMentoring) + "\t" + str(variableNewb) + "\t" + str(variableJobHunting) + "\n")
# for i in range ( 0, sim_runs ):
# print i,
# researchOut = run_sim(params, root_dir, seed_rng, sim=True)[2]
# researchList.append(researchOut)
# researchSum += researchOut
# print researchOut
# gemFile.write(str(variableChance) + "\t" + str(variableMentoring) + "\t" + str(variableNewb) + "\t" + str(variableJobHunting) + "\t" + str(researchOut) + "\n")
# researchMeans.append(pylab.mean(researchList))
# outFile.write(str(researchSum/sim_runs) + "\n")
# researchSEs.append(pylab.std(researchList) / math.sqrt(sim_runs))
# dataFile.close()
# gemFile.close()
# meansFile.close()
# outFile.close()
# #runs for sensitivity analysis using GEM-SA -- NO MENTORING
# researchMeans = []
# researchSEs = []
# params['write_output'] = False
# params['mentored_pdrs'] = 0
# sim_runs = 60
# gemFile = open(params['prefix'] + "GEMSA data new.txt",'w')
# meansFile = open(params['prefix'] + "GEMSA means new.txt", 'w')
# outFile = open(params['prefix'] + "GEMSA outputs new.txt", 'w')
# postdocChanceList = [ 0.15, 0.25, 0.50, 0.75, 1.0 ]
# newbTimeList = [ 0.1, 0.3, 0.5, 0.7, 0.9 ]
# jobhuntTimeList = [ 0.1, 0.3, 0.5, 0.7, 0.9 ]
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Job_Stress" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "\n")
# root_dir = params['prefix']
# for variableChance in postdocChanceList:
# for variableNewb in newbTimeList:
# for variableJobHunting in jobhuntTimeList:
# params['postdoc_chance'] = variableChance
# params['newb_time'] = variableNewb
# params['jobhunt_time'] = variableJobHunting
# print "Trying postdoc chance: ", variableChance
# print "Trying newb time: ", variableNewb
# print "Trying jobhunting time: ", variableJobHunting
# researchList = []
# researchSum = 0.0
# meansFile.write(str(variableChance) + "\t" + str(variableNewb) + "\t" + str(variableJobHunting) + "\n")
# for i in range ( 0, sim_runs ):
# print i,
# researchOut = run_sim(params, root_dir, seed_rng, sim=True)[2]
# researchList.append(researchOut)
# researchSum += researchOut
# print researchOut
# gemFile.write(str(variableChance) + "\t" + str(variableNewb) + "\t" + str(variableJobHunting) + "\t" + str(researchOut) + "\n")
# researchMeans.append(pylab.mean(researchList))
# outFile.write(str(researchSum/sim_runs) + "\n")
# researchSEs.append(pylab.std(researchList) / math.sqrt(sim_runs))
# dataFile.close()
# gemFile.close()
# meansFile.close()
# outFile.close()
#runs for sensitivity analysis using GEM-SA -- INCLUDING MENTORING SWITCH
# researchMeans = []
# researchSEs = []
#
# params['write_output'] = False
#
# sim_runs = 2 #20
# gemFile = open(params['prefix'] + "GEMSA data new.txt",'w')
# meansFile = open(params['prefix'] + "GEMSA means new.txt", 'w')
# outFile = open(params['prefix'] + "GEMSA outputs new.txt", 'w')
#
# postdocChanceList = [ 0.15, 0.25, 0.50, 0.75, 1.0 ]
# mentoringBonusList = [ 0.30, 0.40, 0.50, 0.60, 0.70 ]
# newbTimeList = [ 0.1, 0.3, 0.5, 0.7 ]
# jobhuntTimeList = [ 0.1, 0.3, 0.5, 0.7 ]
#dataFile = open(params['prefix'] + "roi_test.csv",'w')
#dataFile.write("Run_Number" + "," + "Job_Stress" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "\n")
# root_dir = params['prefix']
#
# for variableChance in postdocChanceList:
# for variableMentoring in mentoringBonusList:
# for variableNewb in newbTimeList:
# for variableJobHunting in jobhuntTimeList:
# params['postdoc_chance'] = variableChance
# params['mentoring_bonus'] = variableMentoring
# params['newb_time'] = variableNewb
# params['jobhunt_time'] = variableJobHunting
# print "Trying postdoc chance: ", variableChance
# print "Trying mentoring bonus: ", variableMentoring
# print "Trying newb time: ", variableNewb
# print "Trying jobhunting time: ", variableJobHunting
# researchList = []
# researchSum = 0.0
# meansFile.write(str(variableChance) + "\t" + str(variableMentoring) + "\t" + str(variableNewb) + "\t" + str(variableJobHunting) + "\n")
# for i in range ( 0, sim_runs ):
# print i,
# researchOut = run_sim(params, root_dir, seed_rng, sim=True)[2]
# researchList.append(researchOut)
# researchSum += researchOut
# print researchOut
# gemFile.write(str(variableChance) + "\t" + str(variableMentoring) + "\t" + str(variableNewb) + "\t" + str(variableJobHunting) + "\t" + str(researchOut) + "\n")
# researchMeans.append(pylab.mean(researchList))
# outFile.write(str(researchSum/sim_runs) + "\n")
# researchSEs.append(pylab.std(researchList) / math.sqrt(sim_runs))
#
# #dataFile.close()
# gemFile.close()
# meansFile.close()
# outFile.close()
#runs for sensitivity analysis using GEM-SA -- INCLUDING MENTORING SWITCH
# researchMeans = []
# researchSEs = []
#
# params['write_output'] = False
#
# sim_runs = 2 #20
# gemFile = open(params['prefix'] + "GEMSA data new.txt",'w')
# meansFile = open(params['prefix'] + "GEMSA means new.txt", 'w')
# outFile = open(params['prefix'] + "GEMSA outputs new.txt", 'w')
#
# postdocChanceList = [ 0.15, 0.25, 0.50, 0.75, 1.0 ]
# mentoringBonusList = [ 0.30, 0.40, 0.50, 0.60, 0.70 ]
# newbTimeList = [ 0.1, 0.3, 0.5, 0.7 ]
# jobhuntTimeList = [ 0.1, 0.3, 0.5, 0.7 ]
#
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Job_Stress" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "\n")
# root_dir = params['prefix']
#
# for variableChance in postdocChanceList:
# for variableMentoring in mentoringBonusList:
# for variableNewb in newbTimeList:
# for variableJobHunting in jobhuntTimeList:
# params['postdoc_chance'] = variableChance
# params['mentoring_bonus'] = variableMentoring
# params['newb_time'] = variableNewb
# params['jobhunt_time'] = variableJobHunting
# print "Trying postdoc chance: ", variableChance
# print "Trying mentoring bonus: ", variableMentoring
# print "Trying newb time: ", variableNewb
# print "Trying jobhunting time: ", variableJobHunting
# redundantList = []
# redundantSum = 0.0
# meansFile.write(str(variableChance) + "\t" + str(variableMentoring) + "\t" + str(variableNewb) + "\t" + str(variableJobHunting) + "\n")
# for i in range ( 0, sim_runs ):
# print i,
# #sim = run_sim(params, root_dir, seed_rng, sim=True)
# redundantOut = run_sim(params, root_dir, seed_rng, sim=True)[3]
# redundantList.append(redundantOut)
# redundantSum += redundantOut
# print redundantOut
# gemFile.write(str(variableChance) + "\t" + str(variableMentoring) + "\t" + str(variableNewb) + "\t" + str(variableJobHunting) + "\t" + str(redundantOut) + "\n")
# researchMeans.append(pylab.mean(redundantList))
# outFile.write(str(redundantSum/sim_runs) + "\n")
# researchSEs.append(pylab.std(redundantList) / math.sqrt(sim_runs))
#
# #dataFile.close()
# gemFile.close()
# meansFile.close()
# outFile.close()
#runs for sensitivity analysis using GEM-SA -- INCLUDING MENTORING SWITCH
# researchMeans = []
# researchSEs = []
#
# params['write_output'] = False
#
# sim_runs = 1 #20
# gemFile = open(params['prefix'] + "GEMSA data new.txt",'w')
# meansFile = open(params['prefix'] + "GEMSA means new.txt", 'w')
# outFile = open(params['prefix'] + "GEMSA outputs new.txt", 'w')
#
# postdocChanceList = [ 0.15, 0.25, 0.50, 0.75, 1.0 ]
# mentoringBonusList = [ 0.30, 0.40, 0.50, 0.60 ]
# startingGrantList = [ 10, 20, 30, 40, 50 ]
# yearlyIncreaseList = [ 0.01, 0.02, 0.03, 0.04 ]
# #mentoringBonusList = [ 0.30, 0.40, 0.50, 0.60, 0.70 ]
# #newbTimeList = [ 0.1, 0.3, 0.5, 0.7 ]
# #jobhuntTimeList = [ 0.1, 0.3, 0.5, 0.7 ]
#
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Job_Stress" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "\n")
# root_dir = params['prefix']
#
# for variableChance in postdocChanceList:
# for variableMentoring in mentoringBonusList:
# for variableStartingGrant in startingGrantList:
# for variableIncrease in yearlyIncreaseList:
# params['postdoc_chance'] = variableChance
# params['mentoring_bonus'] = variableMentoring
# params['starting_grant_fund'] = variableStartingGrant
# params['yearly_increase'] = variableIncrease
# redundantList = []
# redundantSum = 0.0
# meansFile.write(str(variableChance) + "\t" + str(variableMentoring) + "\t" + str(variableStartingGrant) + "\t" + str(variableIncrease) + "\n")
# for i in range ( 0, sim_runs ):
# print i,
# #sim = run_sim(params, root_dir, seed_rng, sim=True)
# redundantOut = run_sim(params, root_dir, seed_rng, sim=True)[3]
# redundantList.append(redundantOut)
# redundantSum += redundantOut
# print redundantOut
# gemFile.write(str(variableChance) + "\t" + str(variableMentoring) + "\t" + str(variableStartingGrant) + "\t" + str(variableIncrease) + "\t" + str(redundantOut) + "\n")
# researchMeans.append(pylab.mean(redundantList))
# outFile.write(str(redundantSum/sim_runs) + "\n")
# researchSEs.append(pylab.std(redundantList) / math.sqrt(sim_runs))
#
# dataFile.close()
# gemFile.close()
# meansFile.close()
# outFile.close()
#runs for sensitivity analysis using GEM-SA -- INCLUDING limited funding switch
# researchMeans = []
# researchSEs = []
#
# params['write_output'] = False
#
# sim_runs = 1 #20
# gemFile = open(params['prefix'] + "GEMSA data new.txt",'w')
# meansFile = open(params['prefix'] + "GEMSA means new.txt", 'w')
# outFile = open(params['prefix'] + "GEMSA outputs new.txt", 'w')
#
# postdocChanceList = [ 0.15, 0.25, 0.50, 0.75, 1.0 ]
# mentoringBonusList = [ 0.30, 0.40, 0.50, 0.60 ]
# #startingGrantList = [ 10, 20, 30, 40, 50 ]
# #yearlyIncreaseList = [ 0.01, 0.02, 0.03, 0.04 ]
# #mentoringBonusList = [ 0.30, 0.40, 0.50, 0.60, 0.70 ]
# #newbTimeList = [ 0.1, 0.3, 0.5, 0.7 ]
# jobhuntTimeList = [ 0.1, 0.3, 0.5, 0.7, 0.9 ]
# limitFunding = [True, False]
#
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Job_Stress" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "\n")
# root_dir = params['prefix']
#
# for variableChance in postdocChanceList:
# for variableMentoring in mentoringBonusList:
# for variableJobHunt in jobhuntTimeList:
# for variableFunding in limitFunding:
# params['postdoc_chance'] = variableChance
# params['mentoring_bonus'] = variableMentoring
# params['jobhunt_time'] = variableJobHunt
# params['limited_funding'] = variableFunding
# redundantList = []
# redundantSum = 0.0
# meansFile.write(str(variableChance) + "\t" + str(variableMentoring) + "\t" + str(variableJobHunt) + "\t" + str(variableFunding) + "\n")
# for i in range ( 0, sim_runs ):
# print i,
# #sim = run_sim(params, root_dir, seed_rng, sim=True)
# redundantOut = run_sim(params, root_dir, seed_rng, sim=True)[3]
# redundantList.append(redundantOut)
# redundantSum += redundantOut
# print redundantOut
# gemFile.write(str(variableChance) + "\t" + str(variableMentoring) + "\t" + str(variableJobHunt) + "\t" + str(variableFunding) + "\t" + str(redundantOut) + "\n")
# researchMeans.append(pylab.mean(redundantList))
# outFile.write(str(redundantSum/sim_runs) + "\n")
# researchSEs.append(pylab.std(redundantList) / math.sqrt(sim_runs))
#
# dataFile.close()
# gemFile.close()
# meansFile.close()
# outFile.close()
###############################
#Alife XV promo chance sweep - unlimited funding
###############################
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "," + "Funding_Limit" + "Funding Increase" + "\n")
# root_dir = params['prefix']
# run_Alife_sweep2(params, params['prefix'], seed_rng)
# dataFile.close()
# plt.clf()
# fundingList = [True, False]
# fields = ['Funding_Limit', 'Total_RO', 'Mean_RO_all', 'ROI_no_PDRs', 'Total_Sacked']
# df = pd.read_csv(root_dir + 'roi_test.csv', usecols=fields)
# means = df.groupby('Funding_Limit').mean()
# deviation = df.groupby('Funding_Limit').std()
# print(means)
# print(deviation)
# fig = means['Total_RO'].plot(kind='bar', title = 'Limited Funding vs Output', yerr=deviation.Total_RO, facecolor='c', alpha=0.5, rot=0)
# fig.set_xlabel("Funding Limit",fontsize=12)
# fig.set_ylabel("Research Output",fontsize=12)
# fig.set_xticklabels(fundingList)
# fig2 = fig.get_figure()
# fig2.savefig(root_dir + 'promoRunSet_totalRO.pdf')
# fig2.savefig(root_dir + 'promoRunSet_totalRO.png')
#
# plt.clf()
# fig_r = means['Mean_RO_all'].plot(kind='bar', title = 'Limited Funding vs Mean Output', yerr=deviation.Mean_RO_all, facecolor='m', alpha=0.5, rot=0)
# fig_r.set_xlabel("Funding Limit",fontsize=12)
# fig_r.set_ylabel("Mean Research Output",fontsize=12)
# fig_r.set_xticklabels(fundingList)
# fig2_r = fig.get_figure()
# fig2_r.savefig(root_dir + 'promoRunSet_meanRO.pdf')
# fig2_r.savefig(root_dir + 'promoRunSet_meanRO.png')
#
# plt.clf()
# fig_roi = means['ROI_no_PDRs'].plot(kind='bar', title='Limited Funding vs ROI', yerr=deviation.ROI_no_PDRs, facecolor='g', alpha=0.5, rot=0)
# fig_roi.set_xlabel("Funding Limit", fontsize=12)
# fig_roi.set_ylabel("Return on Investment", fontsize=12)
# fig_roi.set_xticklabels(fundingList)
# fig2_roi = fig.get_figure()
# fig2_roi.savefig(root_dir + 'promoRunSet_ROI.pdf')
# fig2_roi.savefig(root_dir + 'promoRunSet_ROI.png')
#
# plt.clf()
# #df_R = df[df.Promo_Chance != 1]
# #means_R = df_R.groupby(['Promo_Chance']).mean()
# #deviation_R = df_R.groupby(['Promo_Chance']).std()
# fig_sacked = means['Total_Sacked'].plot(kind='bar', title='Limited Funding vs Redundancies', yerr=deviation.Total_Sacked, facecolor='r', alpha=0.5, rot=0)
# fig_sacked.set_xlabel("Funding Limit", fontsize=12)
# fig_sacked.set_ylabel("Redundancies", fontsize=12)
# fig_sacked.set_xticklabels(fundingList)
# fig2_sacked = fig.get_figure()
# fig2_sacked.savefig(root_dir + 'promoRunSet_sacked.pdf')
# fig2_sacked.savefig(root_dir + 'promoRunSet_sacked.png')
# ########################################
# #Parameter sweep of funding increase rate (limited funding condition)
# ########################################
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "," + "Funding_Limit" + "," + "Funding_Increase" + "\n")
# root_dir = params['prefix']
# run_Alife_sweep3(params, params['prefix'], seed_rng)
# dataFile.close()
# plt.clf()
# fundingList = [2, 2.5, 3, 3.5, 4, 4.5, 5]
# fields = ['Funding_Increase', 'Total_RO', 'Mean_RO_all', 'ROI_no_PDRs', 'Total_Sacked']
# df = pd.read_csv(root_dir + 'roi_test.csv', usecols=fields)
# means = df.groupby('Funding_Increase').mean()
# deviation = df.groupby('Funding_Increase').std()
# print(means)
# print(deviation)
# fig = means['Total_RO'].plot(kind='bar', title = 'Funding Increases vs Output', yerr=deviation.Total_RO, facecolor='c', alpha=0.5, rot=0)
# fig.set_xlabel("Funding Increase",fontsize=12)
# fig.set_ylabel("Research Output",fontsize=12)
# fig.set_xticklabels(fundingList)
# fig2 = fig.get_figure()
# fig2.savefig(root_dir + 'incRunSet_totalRO.pdf')
# fig2.savefig(root_dir + 'incRunSet_totalRO.png')
#
# plt.clf()
# fig_r = means['Mean_RO_all'].plot(kind='bar', title = 'Funding Increases vs Mean Output', yerr=deviation.Mean_RO_all, facecolor='m', alpha=0.5, rot=0)
# fig_r.set_xlabel("Funding Increase",fontsize=12)
# fig_r.set_ylabel("Mean Research Output",fontsize=12)
# fig_r.set_xticklabels(fundingList)
# fig2_r = fig.get_figure()
# fig2_r.savefig(root_dir + 'incRunSet_meanRO.pdf')
# fig2_r.savefig(root_dir + 'incRunSet_meanRO.png')
#
# plt.clf()
# fig_roi = means['ROI_no_PDRs'].plot(kind='bar', title='Funding Increases vs ROI', yerr=deviation.ROI_no_PDRs, facecolor='g', alpha=0.5, rot=0)
# fig_roi.set_xlabel("Funding Increase", fontsize=12)
# fig_roi.set_ylabel("Return on Investment", fontsize=12)
# fig_roi.set_xticklabels(fundingList)
# fig2_roi = fig.get_figure()
# fig2_roi.savefig(root_dir + 'incRunSet_ROI.pdf')
# fig2_roi.savefig(root_dir + 'incRunSet_ROI.png')
#
# plt.clf()
# #df_R = df[df.Promo_Chance != 1]
# #means_R = df_R.groupby(['Promo_Chance']).mean()
# #deviation_R = df_R.groupby(['Promo_Chance']).std()
# fig_sacked = means['Total_Sacked'].plot(kind='bar', title='Funding Increases vs Redundancies', yerr=deviation.Total_Sacked, facecolor='r', alpha=0.5, rot=0)
# fig_sacked.set_xlabel("Funding Increase", fontsize=12)
# fig_sacked.set_ylabel("Redundancies", fontsize=12)
# fig_sacked.set_xticklabels(fundingList)
# fig2_sacked = fig.get_figure()
# fig2_sacked.savefig(root_dir + 'incRunSet_sacked.pdf')
# fig2_sacked.savefig(root_dir + 'incRunSet_sacked.png')
#Parameter sweep, baseline vs limited vs unlimited funding
# dataFile = open(params['prefix'] + "roi_test.csv",'w')
# dataFile.write("Run_Number" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
# "ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
# "Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "," + "Funding_Limit" + "," + "Funding_Increase" + "\n")
# root_dir = params['prefix']
# run_Alife_sweep4(params, params['prefix'], seed_rng)
# dataFile.close()
# plt.clf()
# fields = ['Use_PDRs', 'Growing_Pop', 'Total_RO', 'Mean_RO_all', 'ROI_no_PDRs', 'Total_Sacked']
# df = pd.read_csv(root_dir + 'roi_test.csv', usecols=fields)
# means = df.groupby(['Use_PDRs', 'Growing_Pop']).mean()
# deviation = df.groupby(['Use_PDRs', 'Growing_Pop']).std()
# print(means)
# print(deviation)
# scenarioList = ['Growing Pop', 'Limited funding']
# fig = means['Total_RO'].plot(kind='bar', title = 'Output Comparison: Baseline vs Limited Funding', yerr=deviation.Total_RO, color=['Orange', 'Salmon'], alpha=0.5, rot=0)
# fig.set_xlabel("Scenarios",fontsize=12)
# fig.set_ylabel("Research Output",fontsize=12)
# fig.set_xticklabels(scenarioList)
# fig2 = fig.get_figure()
# fig2.savefig(root_dir + 'memRunSet_totalRO.pdf')
# fig2.savefig(root_dir + 'memRunSet_totalRO.png')
#
# plt.clf()
# fig_r = means['Mean_RO_all'].plot(kind='bar', title = 'Mean Output Comparison: Baseline vs Limited Funding', yerr=deviation.Mean_RO_all, color=['Orange', 'Salmon', 'Violet'], alpha=0.5, rot=0)
# fig_r.set_xlabel("Scenarios",fontsize=12)
# fig_r.set_ylabel("Mean Research Output",fontsize=12)
# fig_r.set_xticklabels(scenarioList)
# fig2_r = fig.get_figure()
# fig2_r.savefig(root_dir + 'memRunSet_meanRO.pdf')
# fig2_r.savefig(root_dir + 'memRunSet_meanRO.png')
#
# plt.clf()
# fig_roi = means['ROI_no_PDRs'].plot(kind='bar', title='ROI in Limited-Funding Postdoc Scenarios', yerr=deviation.ROI_no_PDRs, facecolor='g', alpha=0.5, rot=0)
# fig_roi.set_xlabel("Scenarios", fontsize=12)
# fig_roi.set_ylabel("Return on Investment", fontsize=12)
# fig_roi.set_xticklabels(scenarioList)
# fig2_roi = fig.get_figure()
# fig2_roi.savefig(root_dir + 'memRunSet_ROI.pdf')
# fig2_roi.savefig(root_dir + 'memRunSet_ROI.png')
#
# plt.clf()
# fig_sacked = means['Total_Sacked'].plot(kind='bar', title='Redundancies: Baseline vs Limited Funding', yerr=deviation.Total_Sacked, facecolor='r', alpha=0.5, rot=0)
# fig_sacked.set_xlabel("Scenario", fontsize=12)
# fig_sacked.set_ylabel("Redundancies", fontsize=12)
# fig_sacked.set_xticklabels(scenarioList)
# fig2_sacked = fig.get_figure()
# fig2_sacked.savefig(root_dir + 'incRunSet_sacked.pdf')
# fig2_sacked.savefig(root_dir + 'incRunSet_sacked.png')
#Parameter sweep, baseline vs limited vs unlimited funding
dataFile = open(params['prefix'] + "roi_test.csv",'w')
dataFile.write("Run_Number" + "," + "Use_PDRs" + "," + "Growing_Pop" + "," + "RQ_counts" + "," + "Mentored_PDRs" + "," +
"ROI" + "," + "ROI_no_PDRs" + "," + "Total_RO" + "," + "Mean_RO_all" + "," + "Mean_RO_Old_Farts" + "," + "Mean_RO_PDR" + "," + "Mean_RO_FPDR" + "," +
"Mean_TG" + "," + "Learning_Type" + "," + "Promo_Chance" + "," + "Total_Sacked" + "," + "Funding_Limit" + "," + "Funding_Increase" + "\n")
root_dir = params['prefix']
run_Alife_sweep5(params, params['prefix'], seed_rng)
dataFile.close()
plt.clf()
fields = ['Use_PDRs', 'Growing_Pop', 'Total_RO', 'Mean_RO_all', 'ROI_no_PDRs', 'Total_Sacked', 'Funding_Limit']
df = pd.read_csv(root_dir + 'roi_test.csv', usecols=fields)
means = df.groupby(['Use_PDRs', 'Growing_Pop', 'Funding_Limit']).mean()
deviation = df.groupby(['Use_PDRs', 'Growing_Pop', 'Funding_Limit']).std()
print(means)
print(deviation)
scenarioList = ['GP-UF', 'GP-LF', 'PDR-UF', 'PDR-LF'] #groupby sorts Funding_Limit False (unlimited) before True (limited) within each scenario
fig = means['Total_RO'].plot(kind='bar', title = 'Output Comparison: Baseline and Funding Scenarios', yerr=deviation.Total_RO, color=['Orange', 'Salmon', 'Violet', 'Green'], alpha=0.5, rot=0)
fig.set_xlabel("Scenarios",fontsize=12)
fig.set_ylabel("Research Output",fontsize=12)
fig.set_xticklabels(scenarioList)
fig2 = fig.get_figure()
fig2.savefig(root_dir + 'memRunSet_totalRO.pdf')
fig2.savefig(root_dir + 'memRunSet_totalRO.png')
plt.clf()
fig_r = means['Mean_RO_all'].plot(kind='bar', title = 'Mean Output Comparison: Baseline and Funding Scenarios', yerr=deviation.Mean_RO_all, color=['Orange', 'Salmon', 'Violet', 'Green'], alpha=0.5, rot=0)
fig_r.set_xlabel("Scenarios",fontsize=12)
fig_r.set_ylabel("Mean Research Output",fontsize=12)
fig_r.set_xticklabels(scenarioList)
fig2_r = fig_r.get_figure()
fig2_r.savefig(root_dir + 'memRunSet_meanRO.pdf')
fig2_r.savefig(root_dir + 'memRunSet_meanRO.png')
plt.clf()
fig_roi = means['ROI_no_PDRs'].plot(kind='bar', title='ROI by Scenario', yerr=deviation.ROI_no_PDRs, facecolor='g', alpha=0.5, rot=0)
fig_roi.set_xlabel("Scenarios", fontsize=12)
fig_roi.set_ylabel("Return on Investment", fontsize=12)
fig_roi.set_xticklabels(scenarioList)
fig2_roi = fig_roi.get_figure()
fig2_roi.savefig(root_dir + 'memRunSet_ROI.pdf')
fig2_roi.savefig(root_dir + 'memRunSet_ROI.png')
plt.clf()
fig_sacked = means['Total_Sacked'].plot(kind='bar', title='Redundancies: Baseline and Funding Scenarios', yerr=deviation.Total_Sacked, facecolor='r', alpha=0.5, rot=0)
fig_sacked.set_xlabel("Scenario", fontsize=12)
fig_sacked.set_ylabel("Redundancies", fontsize=12)
fig_sacked.set_xticklabels(scenarioList)
fig2_sacked = fig_sacked.get_figure()
fig2_sacked.savefig(root_dir + 'memRunSet_sacked.pdf')
fig2_sacked.savefig(root_dir + 'memRunSet_sacked.png')
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
|
{
"content_hash": "36edcdca6bc6cf8b3ca171261bed6efa",
"timestamp": "",
"source": "github",
"line_count": 1219,
"max_line_length": 301,
"avg_line_length": 47.67842493847416,
"alnum_prop": 0.5924810736407433,
"repo_name": "thorsilver/Modelling-academic-job-security",
"id": "390f39a35a7f3eaf44db1441af29e984f0dd5cd1",
"size": "58120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118834"
}
],
"symlink_target": ""
}
|
from tempest.api.identity import base
from tempest import test
class TestApiDiscovery(base.BaseIdentityV2Test):
"""Tests for API discovery features."""
@test.attr(type='smoke')
@test.idempotent_id('ea889a68-a15f-4166-bfb1-c12456eae853')
def test_api_version_resources(self):
descr = self.non_admin_client.get_api_description()['version']
expected_resources = ('id', 'links', 'media-types', 'status',
'updated')
keys = descr.keys()
for res in expected_resources:
self.assertIn(res, keys)
@test.attr(type='smoke')
@test.idempotent_id('007a0be0-78fe-4fdb-bbee-e9216cc17bb2')
def test_api_media_types(self):
descr = self.non_admin_client.get_api_description()['version']
# Get MIME type bases and descriptions
media_types = [(media_type['base'], media_type['type']) for
media_type in descr['media-types']]
# These are supported for API version 2
supported_types = [('application/json',
'application/vnd.openstack.identity-v2.0+json')]
# Check if supported types exist in response body
for s_type in supported_types:
self.assertIn(s_type, media_types)
@test.attr(type='smoke')
@test.idempotent_id('77fd6be0-8801-48e6-b9bf-38cdd2f253ec')
def test_api_version_statuses(self):
descr = self.non_admin_client.get_api_description()['version']
status = descr['status'].lower()
supported_statuses = ['current', 'stable', 'experimental',
'supported', 'deprecated']
self.assertIn(status, supported_statuses)
|
{
"content_hash": "4c4b2b1022c5a564fd229a9e2182dcb7",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 40.38095238095238,
"alnum_prop": 0.6191037735849056,
"repo_name": "rakeshmi/tempest",
"id": "57c78ef76666a688e7c0e37d4c060f4f52e15de4",
"size": "2342",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tempest/api/identity/v2/test_api_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2867452"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
import json
import urllib2
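# Fetch the Google News RSS feed via the (legacy, since-retired) Google Feed
# API, which wraps the feed as JSON, and print each entry's date, title and
# snippet.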
url="https://ajax.googleapis.com/ajax/services/feed/load?v=1.0&num=10&q=https://news.google.com/news/feeds?output=rss"
response = urllib2.urlopen(url)
html = response.read()
obj=json.loads(html)
n = len(obj["responseData"]["feed"]["entries"])
for i in range(0, n):
pub=obj["responseData"]["feed"]["entries"][i]["publishedDate"]
title=obj["responseData"]["feed"]["entries"][i]["title"]
content=obj["responseData"]["feed"]["entries"][i]["contentSnippet"]
print pub.encode("utf-8")
print title.encode("utf-8")
print content.encode("utf-8")
print ""
|
{
"content_hash": "2f0a410b08a611cb46ee5361c7d33c7a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 118,
"avg_line_length": 26.681818181818183,
"alnum_prop": 0.6916524701873935,
"repo_name": "dr-slump/bajawa",
"id": "47e45d3b6ddddf368ed1fa8439b8eac6306fe964",
"size": "974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/geeklets/google news/google_news.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4010"
},
{
"name": "CSS",
"bytes": "6275"
},
{
"name": "HTML",
"bytes": "225369"
},
{
"name": "Lua",
"bytes": "544939"
},
{
"name": "Makefile",
"bytes": "944"
},
{
"name": "Perl",
"bytes": "37312"
},
{
"name": "Python",
"bytes": "41406"
},
{
"name": "Shell",
"bytes": "67914"
},
{
"name": "Vim script",
"bytes": "3678"
}
],
"symlink_target": ""
}
|
"""
Passes that massage expression graphs into execution kernels.
"""
from __future__ import absolute_import, division, print_function
from .frontend import (translate, coercions, jit, ckernel_impls,
ckernel_lift, allocation, assemblage)
#------------------------------------------------------------------------
# Passes
#------------------------------------------------------------------------
passes = [
translate,
# erasure, # TODO: erase shape from ops
# cache, # TODO:
coercions,
jit,
# TODO: Make the below compile-time passes !
#ckernel_impls,
#allocation,
#ckernel_lift,
assemblage.assemble_py_kernels,
]
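# The passes run in list order, progressively lowering the expression graph
# toward an executable kernel; commented entries are disabled or pending
# compile-time rework (see TODOs above).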
|
{
"content_hash": "4f30ecac23d22c871c635b0943def088",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 27.16,
"alnum_prop": 0.5139911634756995,
"repo_name": "XinSong/blaze",
"id": "a348043419e0e911bdd3b24d68591ef5ebe59a87",
"size": "704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blaze/compute/air/passes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Application base, containing global templates."""
default_app_config = 'pontoon.base.apps.BaseConfig'
MOZILLA_REPOS = (
'ssh://hg.mozilla.org/users/m_owca.info/firefox-beta/',
'ssh://hg.mozilla.org/users/m_owca.info/firefox-for-android-beta/',
'ssh://hg.mozilla.org/users/m_owca.info/thunderbird-beta/',
'ssh://hg.mozilla.org/users/m_owca.info/lightning-beta/',
'ssh://hg.mozilla.org/users/m_owca.info/seamonkey-beta/',
'ssh://hg.mozilla.org/users/m_owca.info/firefox-aurora/',
'ssh://hg.mozilla.org/users/m_owca.info/firefox-for-android-aurora/',
'ssh://hg.mozilla.org/users/m_owca.info/thunderbird-aurora/',
'ssh://hg.mozilla.org/users/m_owca.info/lightning-aurora/',
'ssh://hg.mozilla.org/users/m_owca.info/seamonkey-aurora/',
)
class SyncError(RuntimeError):
"""Error class for errors relating to the project sync process."""
|
{
"content_hash": "0d5f75ff433025f83562f3fcb9f84e2d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 44.05,
"alnum_prop": 0.6980703745743473,
"repo_name": "m8ttyB/pontoon",
"id": "7fda5d5c2e4dbb85acd962f9a497ea564585888d",
"size": "881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pontoon/base/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133828"
},
{
"name": "HTML",
"bytes": "58483"
},
{
"name": "JavaScript",
"bytes": "801345"
},
{
"name": "Python",
"bytes": "438798"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
}
|
r"""
clang-format git integration
============================
This file provides a clang-format integration for git. Put it somewhere in your
path and ensure that it is executable. Then, "git clang-format" will invoke
clang-format on the changes in current files or a specific commit.
For further details, run:
git clang-format -h
Requires Python 2.7
"""
import argparse
import collections
import contextlib
import errno
import os
import re
import subprocess
import sys
usage = 'git clang-format [OPTIONS] [<commit>] [--] [<file>...]'
desc = '''
Run clang-format on all lines that differ between the working directory
and <commit>, which defaults to HEAD. Changes are only applied to the working
directory.
The following git-config settings set the default of the corresponding option:
clangFormat.binary
clangFormat.commit
clangFormat.extension
clangFormat.style
'''
# Name of the temporary index file in which save the output of clang-format.
# This file is created within the .git directory.
temp_index_basename = 'clang-format-index'
Range = collections.namedtuple('Range', 'start, count')
def main():
config = load_git_config()
# In order to keep '--' yet allow options after positionals, we need to
# check for '--' ourselves. (Setting nargs='*' throws away the '--', while
# nargs=argparse.REMAINDER disallows options after positionals.)
argv = sys.argv[1:]
try:
idx = argv.index('--')
except ValueError:
dash_dash = []
else:
dash_dash = argv[idx:]
argv = argv[:idx]
default_extensions = ','.join([
# From clang/lib/Frontend/FrontendOptions.cpp, all lower case
'c', 'h', # C
'm', # ObjC
'mm', # ObjC++
'cc', 'cp', 'cpp', 'c++', 'cxx', 'hpp', # C++
# Other languages that clang-format supports
'proto', 'protodevel', # Protocol Buffers
'js', # JavaScript
])
p = argparse.ArgumentParser(
usage=usage, formatter_class=argparse.RawDescriptionHelpFormatter,
description=desc)
p.add_argument('--binary',
default=config.get('clangformat.binary', 'clang-format'),
help='path to clang-format'),
p.add_argument('--commit',
default=config.get('clangformat.commit', 'HEAD'),
help='default commit to use if none is specified'),
p.add_argument('--diff', action='store_true',
help='print a diff instead of applying the changes')
p.add_argument('--extensions',
default=config.get('clangformat.extensions',
default_extensions),
help=('comma-separated list of file extensions to format, '
'excluding the period and case-insensitive')),
p.add_argument('-f', '--force', action='store_true',
help='allow changes to unstaged files')
p.add_argument('-p', '--patch', action='store_true',
help='select hunks interactively')
p.add_argument('-q', '--quiet', action='count', default=0,
help='print less information')
p.add_argument('--style',
default=config.get('clangformat.style', None),
help='passed to clang-format'),
p.add_argument('-v', '--verbose', action='count', default=0,
help='print extra information')
# We gather all the remaining positional arguments into 'args' since we need
# to use some heuristics to determine whether or not <commit> was present.
# However, to print pretty messages, we make use of metavar and help.
p.add_argument('args', nargs='*', metavar='<commit>',
help='revision from which to compute the diff')
p.add_argument('ignored', nargs='*', metavar='<file>...',
help='if specified, only consider differences in these files')
opts = p.parse_args(argv)
opts.verbose -= opts.quiet
del opts.quiet
commit, files = interpret_args(opts.args, dash_dash, opts.commit)
changed_lines = compute_diff_and_extract_lines(commit, files)
if opts.verbose >= 1:
ignored_files = set(changed_lines)
filter_by_extension(changed_lines, opts.extensions.lower().split(','))
if opts.verbose >= 1:
ignored_files.difference_update(changed_lines)
if ignored_files:
print 'Ignoring changes in the following files (wrong extension):'
for filename in ignored_files:
print ' ', filename
if changed_lines:
print 'Running clang-format on the following files:'
for filename in changed_lines:
print ' ', filename
if not changed_lines:
print 'no modified files to format'
return
# The computed diff outputs absolute paths, so we must cd before accessing
# those files.
cd_to_toplevel()
old_tree = create_tree_from_workdir(changed_lines)
new_tree = run_clang_format_and_save_to_tree(changed_lines,
binary=opts.binary,
style=opts.style)
if opts.verbose >= 1:
print 'old tree:', old_tree
print 'new tree:', new_tree
if old_tree == new_tree:
if opts.verbose >= 0:
print 'clang-format did not modify any files'
elif opts.diff:
print_diff(old_tree, new_tree)
else:
changed_files = apply_changes(old_tree, new_tree, force=opts.force,
patch_mode=opts.patch)
if (opts.verbose >= 0 and not opts.patch) or opts.verbose >= 1:
print 'changed files:'
for filename in changed_files:
print ' ', filename
def load_git_config(non_string_options=None):
"""Return the git configuration as a dictionary.
All options are assumed to be strings unless listed in `non_string_options`,
which is a dictionary mapping option name (in lower case) to either "--bool"
or "--int"."""
if non_string_options is None:
non_string_options = {}
out = {}
for entry in run('git', 'config', '--list', '--null').split('\0'):
if entry:
name, value = entry.split('\n', 1)
if name in non_string_options:
value = run('git', 'config', non_string_options[name], name)
out[name] = value
return out
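# Example (illustrative; 'clangformat.someflag' is hypothetical):
# load_git_config({'clangformat.someflag': '--bool'}) re-reads that option
# via `git config --bool`, normalizing values like 'yes'/'1' to 'true'.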
def interpret_args(args, dash_dash, default_commit):
"""Interpret `args` as "[commit] [--] [files...]" and return (commit, files).
It is assumed that "--" and everything that follows has been removed from
args and placed in `dash_dash`.
If "--" is present (i.e., `dash_dash` is non-empty), the argument to its
left (if present) is taken as commit. Otherwise, the first argument is
checked if it is a commit or a file. If commit is not given,
`default_commit` is used."""
if dash_dash:
if len(args) == 0:
commit = default_commit
elif len(args) > 1:
die('at most one commit allowed; {0:d} given'.format(len(args)))
else:
commit = args[0]
object_type = get_object_type(commit)
if object_type not in ('commit', 'tag'):
if object_type is None:
die("'{0!s}' is not a commit".format(commit))
else:
die("'{0!s}' is a {1!s}, but a commit was expected".format(commit, object_type))
files = dash_dash[1:]
elif args:
if disambiguate_revision(args[0]):
commit = args[0]
files = args[1:]
else:
commit = default_commit
files = args
else:
commit = default_commit
files = []
return commit, files
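# Illustrative examples of the rules above (with default commit HEAD):
#   git clang-format HEAD~1 -- foo.cc  ->  commit='HEAD~1', files=['foo.cc']
#   git clang-format foo.cc            ->  commit='HEAD',   files=['foo.cc']
#   git clang-format HEAD~1 foo.cc     ->  commit='HEAD~1', files=['foo.cc']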
def disambiguate_revision(value):
"""Returns True if `value` is a revision, False if it is a file, or dies."""
# If `value` is ambiguous (neither a commit nor a file), the following
# command will die with an appropriate error message.
run('git', 'rev-parse', value, verbose=False)
object_type = get_object_type(value)
if object_type is None:
return False
if object_type in ('commit', 'tag'):
return True
die('`{0!s}` is a {1!s}, but a commit or filename was expected'.format(value, object_type))
def get_object_type(value):
"""Returns a string description of an object's type, or None if it is not
a valid git object."""
cmd = ['git', 'cat-file', '-t', value]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
return None
return stdout.strip()
def compute_diff_and_extract_lines(commit, files):
"""Calls compute_diff() followed by extract_lines()."""
diff_process = compute_diff(commit, files)
changed_lines = extract_lines(diff_process.stdout)
diff_process.stdout.close()
diff_process.wait()
if diff_process.returncode != 0:
# Assume error was already printed to stderr.
sys.exit(2)
return changed_lines
def compute_diff(commit, files):
"""Return a subprocess object producing the diff from `commit`.
The return value's `stdout` file object will produce a patch with the
differences between the working directory and `commit`, filtered on `files`
(if non-empty). Zero context lines are used in the patch."""
cmd = ['git', 'diff-index', '-p', '-U0', commit, '--']
cmd.extend(files)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.close()
return p
def extract_lines(patch_file):
"""Extract the changed lines in `patch_file`.
The input must have been produced with ``-U0``, meaning unidiff format with
zero lines of context. The return value is a dict mapping filename to a
list of line `Range`s, i.e. (start_line, line_count) pairs."""
matches = {}
for line in patch_file:
match = re.search(r'^\+\+\+\ [^/]+/(.*)', line)
if match:
filename = match.group(1).rstrip('\r\n')
match = re.search(r'^@@ -[0-9,]+ \+(\d+)(,(\d+))?', line)
if match:
start_line = int(match.group(1))
line_count = 1
if match.group(3):
line_count = int(match.group(3))
if line_count > 0:
matches.setdefault(filename, []).append(Range(start_line, line_count))
return matches
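# A hedged example of extract_lines() in action: for a -U0 patch containing
# (file and line numbers made up)
#   +++ b/foo.cc
#   @@ -10,0 +11,2 @@
# it returns {'foo.cc': [Range(11, 2)]}, i.e. two added lines starting at 11.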
def filter_by_extension(dictionary, allowed_extensions):
"""Delete every key in `dictionary` that doesn't have an allowed extension.
`allowed_extensions` must be a collection of lowercase file extensions,
excluding the period."""
allowed_extensions = frozenset(allowed_extensions)
for filename in dictionary.keys():
base_ext = filename.rsplit('.', 1)
if len(base_ext) == 1 or base_ext[1].lower() not in allowed_extensions:
del dictionary[filename]
def cd_to_toplevel():
"""Change to the top level of the git repository."""
toplevel = run('git', 'rev-parse', '--show-toplevel')
os.chdir(toplevel)
def create_tree_from_workdir(filenames):
"""Create a new git tree with the given files from the working directory.
Returns the object ID (SHA-1) of the created tree."""
return create_tree(filenames, '--stdin')
def run_clang_format_and_save_to_tree(changed_lines, binary='clang-format',
style=None):
"""Run clang-format on each file and save the result to a git tree.
Returns the object ID (SHA-1) of the created tree."""
def index_info_generator():
for filename, line_ranges in changed_lines.iteritems():
mode = oct(os.stat(filename).st_mode)
blob_id = clang_format_to_blob(filename, line_ranges, binary=binary,
style=style)
yield '{0!s} {1!s}\t{2!s}'.format(mode, blob_id, filename)
return create_tree(index_info_generator(), '--index-info')
def create_tree(input_lines, mode):
"""Create a tree object from the given input.
If mode is '--stdin', it must be a list of filenames. If mode is
'--index-info' is must be a list of values suitable for "git update-index
--index-info", such as "<mode> <SP> <sha1> <TAB> <filename>". Any other mode
is invalid."""
assert mode in ('--stdin', '--index-info')
cmd = ['git', 'update-index', '--add', '-z', mode]
with temporary_index_file():
p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
for line in input_lines:
p.stdin.write('{0!s}\0'.format(line))
p.stdin.close()
if p.wait() != 0:
die('`{0!s}` failed'.format(' '.join(cmd)))
tree_id = run('git', 'write-tree')
return tree_id
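# With mode='--index-info', each input line has the shape
# "<mode> <SP> <sha1> <TAB> <filename>"; a hypothetical example:
#   100644 9daeafb9864cf43055ae93beb0afd6c7d144bfa4\tfoo.cc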
def clang_format_to_blob(filename, line_ranges, binary='clang-format',
style=None):
"""Run clang-format on the given file and save the result to a git blob.
Returns the object ID (SHA-1) of the created blob."""
clang_format_cmd = [binary, filename]
if style:
clang_format_cmd.extend(['-style='+style])
clang_format_cmd.extend([
'-lines={0!s}:{1!s}'.format(start_line, start_line+line_count-1)
for start_line, line_count in line_ranges])
try:
clang_format = subprocess.Popen(clang_format_cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
die('cannot find executable "{0!s}"'.format(binary))
else:
raise
clang_format.stdin.close()
hash_object_cmd = ['git', 'hash-object', '-w', '--path='+filename, '--stdin']
hash_object = subprocess.Popen(hash_object_cmd, stdin=clang_format.stdout,
stdout=subprocess.PIPE)
clang_format.stdout.close()
stdout = hash_object.communicate()[0]
if hash_object.returncode != 0:
die('`{0!s}` failed'.format(' '.join(hash_object_cmd)))
if clang_format.wait() != 0:
die('`{0!s}` failed'.format(' '.join(clang_format_cmd)))
return stdout.rstrip('\r\n')
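# Conceptually clang_format_to_blob() is the shell pipeline below (filename and
# line range hypothetical); the SHA-1 printed by `git hash-object` is returned:
#   clang-format foo.cc -lines=3:7 | git hash-object -w --path=foo.cc --stdin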
@contextlib.contextmanager
def temporary_index_file(tree=None):
"""Context manager for setting GIT_INDEX_FILE to a temporary file and deleting
the file afterward."""
index_path = create_temporary_index(tree)
old_index_path = os.environ.get('GIT_INDEX_FILE')
os.environ['GIT_INDEX_FILE'] = index_path
try:
yield
finally:
if old_index_path is None:
del os.environ['GIT_INDEX_FILE']
else:
os.environ['GIT_INDEX_FILE'] = old_index_path
os.remove(index_path)
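# A minimal usage sketch: any git command run inside the block sees only the
# throwaway index, mirroring how create_tree() uses this context manager:
#   with temporary_index_file():
#       run('git', 'update-index', '--add', 'foo.cc')  # 'foo.cc' hypothetical
#       tree_id = run('git', 'write-tree')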
def create_temporary_index(tree=None):
"""Create a temporary index file and return the created file's path.
If `tree` is not None, use that as the tree to read in. Otherwise, an
empty index is created."""
gitdir = run('git', 'rev-parse', '--git-dir')
path = os.path.join(gitdir, temp_index_basename)
if tree is None:
tree = '--empty'
run('git', 'read-tree', '--index-output='+path, tree)
return path
def print_diff(old_tree, new_tree):
"""Print the diff between the two trees to stdout."""
# We use the porcelain 'diff' and not plumbing 'diff-tree' because the output
# is expected to be viewed by the user, and only the former does nice things
# like color and pagination.
subprocess.check_call(['git', 'diff', old_tree, new_tree, '--'])
def apply_changes(old_tree, new_tree, force=False, patch_mode=False):
"""Apply the changes in `new_tree` to the working directory.
Bails if there are local changes in those files unless `force` is set. If
`patch_mode`, runs `git checkout --patch` to select hunks interactively."""
changed_files = run('git', 'diff-tree', '-r', '-z', '--name-only', old_tree,
new_tree).rstrip('\0').split('\0')
if not force:
unstaged_files = run('git', 'diff-files', '--name-status', *changed_files)
if unstaged_files:
print >>sys.stderr, ('The following files would be modified but '
'have unstaged changes:')
print >>sys.stderr, unstaged_files
print >>sys.stderr, 'Please commit, stage, or stash them first.'
sys.exit(2)
if patch_mode:
# In patch mode, we could just as well create an index from the new tree
# and checkout from that, but then the user will be presented with a
# message saying "Discard ... from worktree". Instead, we use the old
# tree as the index and checkout from new_tree, which gives the slightly
# better message, "Apply ... to index and worktree". This is not quite
# right, since it won't be applied to the user's index, but oh well.
with temporary_index_file(old_tree):
subprocess.check_call(['git', 'checkout', '--patch', new_tree])
index_tree = old_tree
else:
with temporary_index_file(new_tree):
run('git', 'checkout-index', '-a', '-f')
return changed_files
def run(*args, **kwargs):
stdin = kwargs.pop('stdin', '')
verbose = kwargs.pop('verbose', True)
strip = kwargs.pop('strip', True)
for name in kwargs:
raise TypeError("run() got an unexpected keyword argument '{0!s}'".format(name))
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
stdout, stderr = p.communicate(input=stdin)
if p.returncode == 0:
if stderr:
if verbose:
print >>sys.stderr, '`{0!s}` printed to stderr:'.format(' '.join(args))
print >>sys.stderr, stderr.rstrip()
if strip:
stdout = stdout.rstrip('\r\n')
return stdout
if verbose:
print >>sys.stderr, '`{0!s}` returned {1!s}'.format(' '.join(args), p.returncode)
if stderr:
print >>sys.stderr, stderr.rstrip()
sys.exit(2)
def die(message):
print >>sys.stderr, 'error:', message
sys.exit(2)
if __name__ == '__main__':
main()
|
{
"content_hash": "897c2d7ef563ee2b5db513a61145f379",
"timestamp": "",
"source": "github",
"line_count": 473,
"max_line_length": 93,
"avg_line_length": 36.35940803382664,
"alnum_prop": 0.6414699383649262,
"repo_name": "runt18/osquery",
"id": "dfc84962edac04caecd508ebb0af8cc831669680",
"size": "17563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/formatting/git-clang-format.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "35991"
},
{
"name": "C++",
"bytes": "1189092"
},
{
"name": "CMake",
"bytes": "64276"
},
{
"name": "Makefile",
"bytes": "3286"
},
{
"name": "Objective-C++",
"bytes": "15578"
},
{
"name": "Shell",
"bytes": "2030"
},
{
"name": "Thrift",
"bytes": "2879"
}
],
"symlink_target": ""
}
|
'''Runner for debugging with xt-gdb.'''
from os import path
from runners.core import ZephyrBinaryRunner, RunnerCaps
class XtensaBinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for xt-gdb.'''
def __init__(self, cfg):
super(XtensaBinaryRunner, self).__init__(cfg)
@classmethod
def name(cls):
return 'xtensa'
@classmethod
def capabilities(cls):
return RunnerCaps(commands={'debug'})
@classmethod
def do_add_parser(cls, parser):
parser.add_argument('--xcc-tools', required=True,
help='path to XTensa tools')
@classmethod
def create(cls, cfg, args):
# Override any GDB with the one provided by the XTensa tools.
cfg.gdb = path.join(args.xcc_tools, 'bin', 'xt-gdb')
return XtensaBinaryRunner(cfg)
def do_run(self, command, **kwargs):
gdb_cmd = [self.cfg.gdb, self.cfg.kernel_elf]
self.check_call(gdb_cmd)
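# A hedged invocation sketch (tool path hypothetical): west looks this runner
# up by the name() above, so a debug session would start roughly as
#   west debug --runner xtensa --xcc-tools /opt/xtensa
# which rewrites cfg.gdb to /opt/xtensa/bin/xt-gdb before do_run() executes.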
|
{
"content_hash": "4bda1ccc6281684304d19b3a97a971cb",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 69,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.6260416666666667,
"repo_name": "punitvara/zephyr",
"id": "2aeb42d8f69cae7e399b4957bc66e2f78dd25f79",
"size": "1038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/meta/west/runners/xtensa.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1273941"
},
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "C",
"bytes": "327159567"
},
{
"name": "C++",
"bytes": "3098126"
},
{
"name": "CMake",
"bytes": "502687"
},
{
"name": "EmberScript",
"bytes": "793"
},
{
"name": "Makefile",
"bytes": "3113"
},
{
"name": "Objective-C",
"bytes": "33555"
},
{
"name": "Perl",
"bytes": "202119"
},
{
"name": "Python",
"bytes": "944189"
},
{
"name": "Shell",
"bytes": "40960"
},
{
"name": "Verilog",
"bytes": "6394"
}
],
"symlink_target": ""
}
|
import ujson
|
{
"content_hash": "4dcec4fec048a72de29dc3426ddae51d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 12,
"avg_line_length": 12,
"alnum_prop": 0.9166666666666666,
"repo_name": "Fluorescing/PyPSFGen",
"id": "5138a7547bdd4296e6f14b485783d1dbefa95204",
"size": "12",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoleculeScenarioCSV/MoleculeScenarioCSV.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2833"
},
{
"name": "Python",
"bytes": "9816"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.template.defaultfilters import slugify
from django.utils import timezone
from acquisition.models import Acquisition
import calendar
import datetime
import json
def to_timestamp(dt):
stamp = calendar.timegm(dt.timetuple()) * 1000
return stamp
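# A hedged sanity check, not part of the model API: since calendar.timegm()
# treats the naive datetime as UTC and the result is scaled to milliseconds,
#   to_timestamp(datetime.datetime(1970, 1, 1)) == 0
#   to_timestamp(datetime.datetime(1970, 1, 2)) == 86400000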
class Series(models.Model):
name = models.CharField(
max_length=128,
verbose_name="name"
)
color = models.CharField(
max_length=32,
verbose_name="color",
blank=True
)
title = models.CharField(
max_length=128,
verbose_name="title",
blank=True
)
def create_next_value(self):
raise NotImplementedError
def __str__(self):
return self.name
def get_data(self, hours):
start = timezone.now() - datetime.timedelta(hours=hours)
data = [[to_timestamp(probe.date), probe.value]
for probe in self.probes.all().filter(date__gt=start)]
return data
class Probe(models.Model):
date = models.DateTimeField(
verbose_name="datetime",
auto_now_add=True
)
value = models.IntegerField(
verbose_name="value"
)
series = models.ForeignKey(
Series,
verbose_name="series",
related_name="probes"
)
class Meta:
ordering = ['date']
def __str__(self):
return ", ".join([self.series.name, str(self.date), str(self.value)])
class Chart(models.Model):
title = models.CharField(
max_length=128,
verbose_name="title"
)
series = models.ManyToManyField(
Series,
verbose_name="series",
related_name="charts"
)
type = models.CharField(
max_length=128,
verbose_name="type"
)
x_axis_name = models.CharField(
max_length=32,
verbose_name="x axis name"
)
y_axis_name = models.CharField(
max_length=32,
verbose_name="y axis name"
)
slug = models.SlugField(
max_length=24,
verbose_name="slug",
default=""
)
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(Chart, self).save(*args, **kwargs)
def get_chart_json(self, hours):
series_list = []
for series in self.series.all():
single_series = {}
single_series.update({'name': series.title,
'data': series.get_data(hours),
'color': series.color})
series_list.append(single_series)
return json.dumps(series_list)
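# get_chart_json() emits a JSON array in the shape many JS charting libraries
# accept; a hypothetical one-series, two-probe result looks like:
#   [{"name": "CPU load", "color": "#ff0000",
#     "data": [[1514764800000, 42], [1514768400000, 43]]}]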
class HttpSeries(Acquisition, Series):
def create_next_value(self):
probe = Probe.objects.create(value=self.get_data(), series=self)
return probe
class NestedSeries(HttpSeries):
def extra_process(self, value):
return value.next
class ThirdNextSibling(NestedSeries):
def extra_process(self, value):
return value.find_next_sibling().find_next_sibling().next
|
{
"content_hash": "e7606a4c46d886b5dec5ae0f4554b3c8",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 77,
"avg_line_length": 24.479674796747968,
"alnum_prop": 0.5914978412487546,
"repo_name": "tomaszpiotro/Tracker",
"id": "e685febd7e8fb07777406aa97ac2e394b91356f8",
"size": "3011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracker/charts/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1664"
},
{
"name": "JavaScript",
"bytes": "1443"
},
{
"name": "Python",
"bytes": "17617"
}
],
"symlink_target": ""
}
|
import logging
l = logging.getLogger(__name__)
from mox3 import mox
from datetime import date, datetime, time, timedelta
from django_lean.experiments.models import (Experiment, DailyEngagementReport,
DailyConversionReport,
DailyConversionReportGoalData,
Participant, AnonymousVisitor,
GoalType, GoalRecord)
from django_lean.experiments.reports import (EngagementReportGenerator,
ConversionReportGenerator,
calculate_participant_conversion,
get_conversion_data,
calculate_goal_type_conversion,
find_experiment_group_participants)
from django_lean.experiments.tests.utils import create_user_in_group, TestCase
class TestDailyReports(TestCase):
def setUp(self):
self.experiment = Experiment(name="test_experiment")
self.experiment.save()
self.experiment.state = Experiment.ENABLED_STATE
self.experiment.save()
self.experiment.start_date = (self.experiment.start_date -
timedelta(days=5))
self.experiment.save()
anonymous_visitor = AnonymousVisitor()
anonymous_visitor.save()
anonymous_participant = Participant(anonymous_visitor=anonymous_visitor,
experiment=self.experiment,
group=Participant.TEST_GROUP)
anonymous_participant.save()
anonymous_participant.enrollment_date = self.experiment.start_date
anonymous_participant.save()
self.other_experiment = Experiment(name="test_experiment2")
self.other_experiment.save()
self.other_experiment.state = Experiment.DISABLED_STATE
self.other_experiment.save()
self.other_experiment.start_date = (date.today() -
timedelta(days=7))
self.other_experiment.end_date = (date.today() -
timedelta(days=3))
self.other_experiment.save()
def testDailyEngagementReport(self):
users_test = []
users_control = []
num_control1 = 0
num_test1 = 0
num_control2 = 0
num_test2 = 0
# create users
for i in range(5):
users_control.append(create_user_in_group(self.experiment, i,
Participant.CONTROL_GROUP,
date.today() - timedelta(days=i)))
users_test.append(create_user_in_group(self.experiment, i,
Participant.TEST_GROUP,
date.today() - timedelta(days=i)))
# users_<test|control>[0] were enrolled today, [1] 1 day ago, etc.
report_date = date.today() - timedelta(days=1)
expected_engagement_score_calls = {
(users_test[1], date.today() - timedelta(days=1), report_date): 3.2,
(users_test[2], date.today() - timedelta(days=2), report_date): 2.5,
(users_test[3], date.today() - timedelta(days=3), report_date): 4.1,
(users_test[4], date.today() - timedelta(days=4), report_date): 0,
(users_control[1], date.today() - timedelta(days=1), report_date): 0,
(users_control[2], date.today() - timedelta(days=2), report_date): 0,
(users_control[3], date.today() - timedelta(days=3), report_date): 0,
(users_control[4], date.today() - timedelta(days=4), report_date): 0}
test_case = self
class EngagementScoreCalculatorStub(object):
def calculate_user_engagement_score(self, user,
start_date, end_date):
test_case.assertNotEqual(user, None)
test_case.assertTrue((user, start_date, end_date) in expected_engagement_score_calls)
return expected_engagement_score_calls[(user,
start_date, end_date)]
(EngagementReportGenerator(EngagementScoreCalculatorStub()).
generate_daily_report_for_experiment(self.experiment, report_date))
experiment_report = DailyEngagementReport.objects.get(
experiment=self.experiment, date=report_date)
self.assertAlmostEqual((3.2 + 2.5 + 4.1 + 0)/4.0,
experiment_report.test_score)
self.assertAlmostEqual(0.0, experiment_report.control_score)
self.assertEqual(4, experiment_report.test_group_size)
self.assertEqual(4, experiment_report.control_group_size)
self.assertAlmostEqual(96.819293337188498, experiment_report.confidence)
def testZeroParticipantExperiment(self):
mocker = mox.Mox()
engagement_calculator = mocker.CreateMockAnything()
mocker.ReplayAll()
report_date = date.today()
EngagementReportGenerator(engagement_score_calculator=engagement_calculator).generate_daily_report_for_experiment(
self.other_experiment, report_date)
experiment_report = DailyEngagementReport.objects.get(
experiment=self.other_experiment, date=report_date)
mocker.VerifyAll()
self.assertEqual(None, experiment_report.test_score)
self.assertEqual(None, experiment_report.control_score)
self.assertEqual(0, experiment_report.test_group_size)
self.assertEqual(0, experiment_report.control_group_size)
def testGenerateAllDailyEngagementReports(self):
class DummyEngagementCalculator(object):
def calculate_user_engagement_score(self, user, start_date, end_date):
return 7
engagement_report_generator = EngagementReportGenerator(engagement_score_calculator=DummyEngagementCalculator())
engagement_report_generator.generate_daily_report_for_experiment(
self.experiment, date.today() - timedelta(days=2))
engagement_report_generator.generate_daily_report_for_experiment(
self.experiment, date.today() - timedelta(days=3))
engagement_report_generator.generate_all_daily_reports()
DailyEngagementReport.objects.get(experiment=self.experiment,
date=date.today() - timedelta(days=1))
DailyEngagementReport.objects.get(experiment=self.experiment,
date=date.today() - timedelta(days=2))
DailyEngagementReport.objects.get(experiment=self.experiment,
date=date.today() - timedelta(days=3))
DailyEngagementReport.objects.get(experiment=self.experiment,
date=date.today() - timedelta(days=4))
DailyEngagementReport.objects.get(experiment=self.experiment,
date=date.today() - timedelta(days=5))
self.assertEqual(5, DailyEngagementReport.objects.filter(
experiment=self.experiment).count())
DailyEngagementReport.objects.get(experiment=self.other_experiment,
date=date.today() - timedelta(days=3))
DailyEngagementReport.objects.get(experiment=self.other_experiment,
date=date.today() - timedelta(days=4))
DailyEngagementReport.objects.get(experiment=self.other_experiment,
date=date.today() - timedelta(days=5))
DailyEngagementReport.objects.get(experiment=self.other_experiment,
date=date.today() - timedelta(days=6))
DailyEngagementReport.objects.get(experiment=self.other_experiment,
date=date.today() - timedelta(days=7))
self.assertEqual(5, DailyEngagementReport.objects.filter(
experiment=self.other_experiment).count())
def create_goal_record(self, record_datetime, anonymous_visitor, goal_type):
record = GoalRecord.objects.create(anonymous_visitor=anonymous_visitor,
goal_type=goal_type)
record.created = record_datetime
record.save()
def create_participant(self, anonymous_visitor, experiment, enrollment_date, group):
participant = Participant.objects.create(anonymous_visitor=anonymous_visitor,
experiment=experiment,
group=group)
participant.enrollment_date=enrollment_date
participant.save()
return participant
def testParticipantConversionCalculator(self):
goal_types = [GoalType.objects.create(name=str(i)) for i in range(3)]
anonymous_visitor = AnonymousVisitor.objects.create()
participant = self.create_participant(
anonymous_visitor=anonymous_visitor,
experiment=self.experiment,
enrollment_date=self.experiment.start_date + timedelta(days=2),
group=Participant.TEST_GROUP)
days = [datetime.combine(self.experiment.start_date + timedelta(days=i), time(hour=12))
for i in range(5)]
nb_goal_records = GoalRecord.objects.all().count()
self.create_goal_record(days[0], anonymous_visitor, goal_types[0])
self.create_goal_record(days[0], anonymous_visitor, goal_types[1])
self.create_goal_record(days[1], anonymous_visitor, goal_types[0])
self.create_goal_record(days[1], anonymous_visitor, goal_types[0])
self.create_goal_record(days[2], anonymous_visitor, goal_types[1])
self.create_goal_record(days[3], anonymous_visitor, goal_types[0])
self.create_goal_record(days[4], anonymous_visitor, goal_types[0])
self.create_goal_record(days[4], anonymous_visitor, goal_types[0])
self.assertEqual(nb_goal_records + 8, GoalRecord.objects.all().count())
# wasn't enrolled yet!
self.assertEqual(calculate_participant_conversion(participant, goal_types[0], days[0]), 0)
self.assertEqual(calculate_participant_conversion(participant, goal_types[1], days[0]), 0)
self.assertEqual(calculate_participant_conversion(participant, goal_types[2], days[0]), 0)
self.assertEqual(calculate_participant_conversion(participant, None, days[0]), 0)
self.assertEqual(calculate_participant_conversion(participant, goal_types[0], days[1]), 0)
self.assertEqual(calculate_participant_conversion(participant, goal_types[1], days[1]), 0)
self.assertEqual(calculate_participant_conversion(participant, goal_types[2], days[1]), 0)
self.assertEqual(calculate_participant_conversion(participant, None, days[1]), 0)
# now enrolled
self.assertEqual(calculate_participant_conversion(participant, goal_types[0], days[2]), 0)
self.assertEqual(calculate_participant_conversion(participant, goal_types[1], days[2]), 1)
self.assertEqual(calculate_participant_conversion(participant, goal_types[2], days[2]), 0)
# "any" is one
self.assertEqual(calculate_participant_conversion(participant, None, days[2]), 1)
self.assertEqual(calculate_participant_conversion(participant, goal_types[0], days[3]), 1)
self.assertEqual(calculate_participant_conversion(participant, goal_types[1], days[3]), 1)
self.assertEqual(calculate_participant_conversion(participant, goal_types[2], days[3]), 0)
# "any" is one, even though two different goals were achieved
self.assertEqual(calculate_participant_conversion(participant, None, days[3]), 1)
# there were three conversions on this day for goal 0, but we only count the first!
self.assertEqual(calculate_participant_conversion(participant, goal_types[0], days[4]), 1)
self.assertEqual(calculate_participant_conversion(participant, goal_types[1], days[4]), 1)
self.assertEqual(calculate_participant_conversion(participant, goal_types[2], days[4]), 0)
self.assertEqual(calculate_participant_conversion(participant, None, days[4]), 1)
def testGoalTypeConversionCalculator(self):
mocker = mox.Mox()
participants = [mocker.CreateMockAnything(),
mocker.CreateMockAnything(),
mocker.CreateMockAnything()]
goal_type = mocker.CreateMockAnything()
report_date = mocker.CreateMockAnything()
participant_conversion_calculator = mocker.CreateMockAnything()
participant_conversion_calculator(
participants[0], goal_type, report_date).InAnyOrder().AndReturn(1)
participant_conversion_calculator(
participants[1], goal_type, report_date).InAnyOrder().AndReturn(0)
participant_conversion_calculator(
participants[2], goal_type, report_date).InAnyOrder().AndReturn(1)
mocker.ReplayAll()
self.assertEqual(2, calculate_goal_type_conversion(
goal_type, participants, report_date, participant_conversion_calculator))
mocker.VerifyAll()
def testExperimentGroupParticipantFinder(self):
days = [datetime.combine(date.today() + timedelta(days=i), time(hour=12))
for i in range(-7, 0)]
experiment = Experiment(name="experiment1")
experiment.save()
experiment.state = Experiment.ENABLED_STATE
experiment.save()
experiment.start_date = days[2].date()
experiment.save()
other_experiment = Experiment(name="experiment2")
other_experiment.save()
other_experiment.state = Experiment.DISABLED_STATE
other_experiment.save()
other_experiment.start_date = days[0].date()
other_experiment.end_date = days[4].date()
other_experiment.save()
anonymous_visitors = [AnonymousVisitor.objects.create() for i in range(10)]
experiment_participant_groups = [
[
self.create_participant(anonymous_visitor=anonymous_visitors[0],
experiment=experiment,
enrollment_date=days[2],
group=Participant.TEST_GROUP),
self.create_participant(anonymous_visitor=anonymous_visitors[1],
experiment=experiment,
enrollment_date=days[2],
group=Participant.CONTROL_GROUP),
self.create_participant(anonymous_visitor=anonymous_visitors[3],
experiment=experiment,
enrollment_date=days[3],
group=Participant.TEST_GROUP),
self.create_participant(anonymous_visitor=anonymous_visitors[4],
experiment=experiment,
enrollment_date=days[4],
group=Participant.CONTROL_GROUP),
self.create_participant(anonymous_visitor=anonymous_visitors[6],
experiment=experiment,
enrollment_date=days[6],
group=Participant.TEST_GROUP)
],
[
self.create_participant(anonymous_visitor=anonymous_visitors[0],
experiment=other_experiment,
enrollment_date=days[0],
group=Participant.CONTROL_GROUP),
self.create_participant(anonymous_visitor=anonymous_visitors[2],
experiment=other_experiment,
enrollment_date=days[0],
group=Participant.TEST_GROUP),
self.create_participant(anonymous_visitor=anonymous_visitors[5],
experiment=other_experiment,
enrollment_date=days[4],
group=Participant.TEST_GROUP)
]
]
ex1day2 = find_experiment_group_participants(Participant.TEST_GROUP, experiment, days[2])
ex1day2visitors = [p.anonymous_visitor for p in ex1day2]
self.assertTrue(anonymous_visitors[0] in ex1day2visitors)
self.assertEqual(1, len(ex1day2visitors))
ex1day4test = find_experiment_group_participants(Participant.TEST_GROUP, experiment, days[4])
ex1day4testvisitors = [p.anonymous_visitor for p in ex1day4test]
self.assertTrue(anonymous_visitors[0] in ex1day4testvisitors)
self.assertTrue(anonymous_visitors[3] in ex1day4testvisitors)
self.assertEqual(2, len(ex1day4testvisitors))
ex1day4control = find_experiment_group_participants(Participant.CONTROL_GROUP, experiment, days[4])
ex1day4controlvisitors = [p.anonymous_visitor for p in ex1day4control]
self.assertTrue(anonymous_visitors[1] in ex1day4controlvisitors)
self.assertTrue(anonymous_visitors[4] in ex1day4controlvisitors)
self.assertEqual(2, len(ex1day4controlvisitors))
ex2day5test = find_experiment_group_participants(Participant.TEST_GROUP, other_experiment, days[5])
ex2day5testvisitors = [p.anonymous_visitor for p in ex2day5test]
self.assertTrue(anonymous_visitors[2] in ex2day5testvisitors)
self.assertTrue(anonymous_visitors[5] in ex2day5testvisitors)
self.assertEqual(2, len(ex2day5testvisitors))
def testGetConversionData(self):
days = [datetime.combine(date.today() + timedelta(days=i), time(hour=12))
for i in range(-7, 0)]
yesterday = date.today() - timedelta(days=1)
experiment = Experiment(name="experiment1")
experiment.save()
experiment.state = Experiment.ENABLED_STATE
experiment.save()
experiment.start_date = yesterday
experiment.save()
goal_types = [GoalType.objects.create(name="%s" % i) for i in range(4)]
report = DailyConversionReport.objects.create(experiment=experiment,
date=yesterday,
overall_test_conversion=17,
overall_control_conversion=12,
test_group_size=139,
control_group_size=142,
confidence=87.3)
DailyConversionReportGoalData.objects.create(report=report,
goal_type=goal_types[0],
test_conversion=11,
control_conversion=0,
confidence=65.3)
DailyConversionReportGoalData.objects.create(report=report,
goal_type=goal_types[1],
test_conversion=0,
control_conversion=21,
confidence=None)
DailyConversionReportGoalData.objects.create(report=report,
goal_type=goal_types[2],
test_conversion=23,
control_conversion=21,
confidence=100)
data = get_conversion_data(experiment, yesterday)
self.assertEqual(data['date'], yesterday)
self.assertTrue("totals" in data)
self.assertTrue("goal_types" in data)
self.assertEqual(4, len(data["goal_types"].keys()))
for goal_type in goal_types[0:3]:
self.assertTrue(goal_type.name in data["goal_types"])
goal_type_data = data["goal_types"][goal_type.name]
self.assertTrue("test_count" in goal_type_data)
self.assertTrue("control_count" in goal_type_data)
self.assertTrue("test_rate" in goal_type_data)
self.assertTrue("control_rate" in goal_type_data)
self.assertTrue("improvement" in goal_type_data)
self.assertTrue("confidence" in goal_type_data)
self.assertEqual(None, data["goal_types"][goal_types[3].name])
self.assertEqual(139, data["test_group_size"])
self.assertEqual(142, data["control_group_size"])
totals = data["totals"]
expected_test_rate = 17. / 139. * 100.
expected_control_rate = 12. / 142. * 100.
expected_improvement = (expected_test_rate - expected_control_rate) / expected_control_rate * 100.
self.assertAlmostEqual(expected_test_rate, totals["test_rate"])
self.assertAlmostEqual(expected_control_rate, totals["control_rate"])
self.assertAlmostEqual(expected_improvement, totals["improvement"])
self.assertAlmostEqual(87.3, totals["confidence"])
self.assertEqual(17, totals["test_count"])
self.assertEqual(12, totals["control_count"])
self.assertEqual(0, data["goal_types"][goal_types[0].name]["control_rate"])
self.assertAlmostEqual(11./139*100., data["goal_types"][goal_types[0].name]["test_rate"])
self.assertEqual(None, data["goal_types"][goal_types[0].name]["improvement"])
self.assertAlmostEqual(65.3, data["goal_types"][goal_types[0].name]["confidence"])
self.assertEqual(11, data["goal_types"][goal_types[0].name]["test_count"])
self.assertEqual(0, data["goal_types"][goal_types[0].name]["control_count"])
self.assertAlmostEqual(21./142*100., data["goal_types"][goal_types[1].name]["control_rate"])
self.assertEqual(None, data["goal_types"][goal_types[1].name]["confidence"])
self.assertEqual(None, data["goal_types"][goal_types[1].name]["improvement"])
self.assertAlmostEqual((23./139-21./142)/(21./142)*100.,
data["goal_types"][goal_types[2].name]["improvement"])
#TODO test with zero participants and check rate == None
#TODO sometimes confidence cannot be calculated and must return None. Add a test to verify this.
def testConversionReportGenerator(self):
days = [datetime.combine(date.today() + timedelta(days=i), time(hour=12))
for i in range(-7, 0)]
experiment = Experiment(name="experiment1")
experiment.save()
experiment.state = Experiment.ENABLED_STATE
experiment.save()
experiment.start_date = days[2].date()
experiment.save()
other_experiment = Experiment(name="experiment2")
other_experiment.save()
other_experiment.state = Experiment.DISABLED_STATE
other_experiment.save()
other_experiment.start_date = days[0].date()
other_experiment.end_date = days[4].date()
other_experiment.save()
goal_types = [GoalType.objects.create(name="%s" % i) for i in range(3)]
# experiment starts on days[2]
# other experiment starts on days[0]
mocker = mox.Mox()
finder = mocker.CreateMockAnything()
calculator = mocker.CreateMockAnything()
default_data = {
Participant.TEST_GROUP: {
"count": 110,
"conversions": [23, 12, 9]
},
Participant.CONTROL_GROUP: {
"count": 130,
"conversions": [12, 47, 5]
}
}
day_2_data = {
Participant.TEST_GROUP: {
"count": 12,
"conversions": [0, 2, 3]
},
Participant.CONTROL_GROUP: {
"count": 7,
"conversions": [1, 0, 3]
}
}
day_3_data = {
Participant.TEST_GROUP: {
"count": 5,
"conversions": [1, 0, 3]
},
Participant.CONTROL_GROUP: {
"count": 12,
"conversions": [0, 0, 0]
}
}
day_4_data = {
Participant.TEST_GROUP: {
"count": 0,
"conversions": [0, 0, 0]
},
Participant.CONTROL_GROUP: {
"count": 25,
"conversions": [2, 3, 7]
}
}
for day in days[2:7]:
data = default_data
if day == days[2]:
data = day_2_data
if day == days[3]:
data = day_3_data
if day == days[4]:
data = day_4_data
for group in (Participant.TEST_GROUP, Participant.CONTROL_GROUP):
mock_participants = mocker.CreateMockAnything()
finder(group, experiment, day.date()).InAnyOrder().AndReturn(mock_participants)
mock_participants.count().MultipleTimes().AndReturn(data[group]["count"])
for goal_type in goal_types:
calculator(goal_type, mock_participants, day.date()).InAnyOrder().AndReturn(data[group]["conversions"][int(goal_type.name)])
calculator(None, mock_participants, day.date()).InAnyOrder().AndReturn(sum(data[group]["conversions"]))
mocker.ReplayAll()
for d in days[2:7]:
ConversionReportGenerator(calculator, finder).generate_daily_report_for_experiment(
experiment, d.date())
results = DailyConversionReport.objects.filter(
experiment=experiment).order_by('-date')
mocker.VerifyAll()
self.assertEqual(results.count(), 5)
report_days = [ d.date() for d in days[2:7]]
for i in range(5):
self.assertEqual(results[i].date, report_days[4-i])
# Day 2
self.assertEqual(12, results[4].test_group_size)
self.assertEqual(7, results[4].control_group_size)
self.assertEqual(5, results[4].overall_test_conversion)
self.assertEqual(4, results[4].overall_control_conversion)
day_2_goal_4_test_conversion = DailyConversionReportGoalData.objects.filter(
report=results[4],
goal_type=goal_types[0])[0].test_conversion
self.assertEqual(0, day_2_goal_4_test_conversion)
day_2_goal_2_control_conversion = DailyConversionReportGoalData.objects.filter(
report=results[4],
goal_type=goal_types[2])[0].control_conversion
self.assertEqual(3, day_2_goal_2_control_conversion)
# Day 3
self.assertEqual(5, results[3].test_group_size)
# Day 4
self.assertEqual(0, results[2].test_group_size)
self.assertEqual(None, results[2].confidence)
day_4_goal_1_confidence = DailyConversionReportGoalData.objects.filter(
report=results[2],
goal_type=goal_types[0])[0].confidence
self.assertEqual(None, day_4_goal_1_confidence)
# Day 5
day_5_goal_0_confidence = DailyConversionReportGoalData.objects.filter(
report=results[1],
goal_type=goal_types[0])[0].confidence
self.assertAlmostEqual(98.935467172597029, day_5_goal_0_confidence, places=6)
|
{
"content_hash": "d931fbde2b427af57393cd5a8fb4ffbf",
"timestamp": "",
"source": "github",
"line_count": 559,
"max_line_length": 144,
"avg_line_length": 50.78354203935599,
"alnum_prop": 0.5702057207270678,
"repo_name": "MontmereLimited/django-lean",
"id": "79ad32aa2a40d7ca4cb6c43f5d16e08c87846796",
"size": "28412",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_lean/experiments/tests/test_daily_report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "14189"
},
{
"name": "JavaScript",
"bytes": "2774"
},
{
"name": "Python",
"bytes": "301048"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetLegislator(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetLegislator Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetLegislator, self).__init__(temboo_session, '/Library/SunlightLabs/Congress/Legislator/GetLegislator')
def new_input_set(self):
return GetLegislatorInputSet()
def _make_result_set(self, result, path):
return GetLegislatorResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetLegislatorChoreographyExecution(session, exec_id, path)
class GetLegislatorInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetLegislator
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Sunlight Labs.)
"""
super(GetLegislatorInputSet, self)._set_input('APIKey', value)
def set_AllLegislators(self, value):
"""
Set the value of the AllLegislators input for this Choreo. ((optional, boolean) A boolean flag indicating to search for all legislators even when they are no longer in office.)
"""
super(GetLegislatorInputSet, self)._set_input('AllLegislators', value)
def set_BioguideID(self, value):
"""
Set the value of the BioguideID input for this Choreo. ((conditional, string) The bioguide_id of the legislator to return.)
"""
super(GetLegislatorInputSet, self)._set_input('BioguideID', value)
def set_CRPID(self, value):
"""
Set the value of the CRPID input for this Choreo. ((optional, string) The crp_id associated with a legislator to return.)
"""
super(GetLegislatorInputSet, self)._set_input('CRPID', value)
def set_FECID(self, value):
"""
Set the value of the FECID input for this Choreo. ((optional, string) The fec_id associated with the legislator to return.)
"""
super(GetLegislatorInputSet, self)._set_input('FECID', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma-separated list of fields to include in the response.)
"""
super(GetLegislatorInputSet, self)._set_input('Fields', value)
def set_GovTrackID(self, value):
"""
Set the value of the GovTrackID input for this Choreo. ((optional, string) The govtrack_id associated with a legislator to return.)
"""
super(GetLegislatorInputSet, self)._set_input('GovTrackID', value)
def set_ICPSRID(self, value):
"""
Set the value of the ICPSRID input for this Choreo. ((optional, string) Identifier for this member as it is maintained by the Inter-university Consortium for Political and Social Research.)
"""
super(GetLegislatorInputSet, self)._set_input('ICPSRID', value)
def set_LISID(self, value):
"""
Set the value of the LISID input for this Choreo. ((optional, string) Identifier for this member as it appears on some of Congress' data systems (namely Senate votes).)
"""
super(GetLegislatorInputSet, self)._set_input('LISID', value)
def set_OCDID(self, value):
"""
Set the value of the OCDID input for this Choreo. ((optional, string) Identifier for this member across all countries and levels of government, as defined by the Open Civic Data project.)
"""
super(GetLegislatorInputSet, self)._set_input('OCDID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(GetLegislatorInputSet, self)._set_input('ResponseFormat', value)
def set_ThomasID(self, value):
"""
Set the value of the ThomasID input for this Choreo. ((optional, string) Identifier for this member as it appears on THOMAS.gov and Congress.gov.)
"""
super(GetLegislatorInputSet, self)._set_input('ThomasID', value)
def set_VoteSmartID(self, value):
"""
Set the value of the VoteSmartID input for this Choreo. ((optional, integer) The votesmart_id of a legislator to return.)
"""
super(GetLegislatorInputSet, self)._set_input('VoteSmartID', value)
class GetLegislatorResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetLegislator Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from the Sunlight Congress API.)
"""
return self._output.get('Response', None)
class GetLegislatorChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetLegislatorResultSet(response, path)
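# A hedged usage sketch; the credentials, API key, and bioguide id below are
# placeholders, not working values:
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = GetLegislator(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('SUNLIGHT_API_KEY')
#   inputs.set_BioguideID('L000551')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())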
|
{
"content_hash": "b5f27ca71d62e157b2a706f5775db622",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 197,
"avg_line_length": 47.939655172413794,
"alnum_prop": 0.6809926272253192,
"repo_name": "jordanemedlock/psychtruths",
"id": "580cd911ed0a26b3bc14205320d3f94e107489a9",
"size": "6448",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "temboo/core/Library/SunlightLabs/Congress/Legislator/GetLegislator.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.models.imagemodels import Image
from fractions import Fraction
import exifread
class ExifData(models.Model):
class Meta:
verbose_name = _('EXIF Data')
verbose_name_plural = _('EXIF data')
image = models.OneToOneField(
Image,
verbose_name=_('Image'),
)
focal_length = models.CharField(
max_length=100,
verbose_name=_('Focal length'),
blank=True,
)
iso = models.CharField(
max_length=100,
verbose_name=_('ISO'),
blank=True,
)
fraction = models.CharField(
max_length=100,
verbose_name=_('Fraction'),
blank=True,
)
exposure_time = models.CharField(
max_length=100,
verbose_name=_('Exposure time'),
blank=True,
)
def save(self, *args, **kwargs):
if self.pk is None:
self._read_exif(self.image.file)
super(ExifData, self).save(*args, **kwargs)
@property
def as_list(self):
tmp = (self.focal_length, self.fraction, self.exposure_time, self.iso)
return filter(lambda x: x, tmp)
def _read_exif(self, file):
result = {}
try:
file.open('rb')
# read tags
tags = exifread.process_file(file)
# get necessary tags
self.focal_length = self._get_and_format(tags,
"EXIF FocalLength",
"%gmm", lambda s: Fraction(s))
self.iso = self._get_and_format(tags,
"EXIF ISOSpeedRatings", "ISO %d",
lambda s: int(s))
self.fraction = self._get_and_format(tags, "EXIF FNumber", "f/%g",
lambda s: float(Fraction(s)))
# format exposure time (fraction or float)
exposure_time = self._get_and_format(tags, "EXIF ExposureTime",
None, lambda s: Fraction(s))
exposure_time_str = ""
if exposure_time:
if exposure_time >= 1:
exposure_time_str = "%gs" % exposure_time
else:
exposure_time_str = "%ss" % str(exposure_time)
self.exposure_time = exposure_time_str
except IOError:
pass
def _get_and_format(self, tags, key, format, convertfunc):
"""
Gets the element with "key" from the dict "tags", converts it with
"convertfunc", and inserts the result into the format string "format".
If "format" is None, the converted data is returned without formatting.
If the key is not in the dict, the empty string is returned.
"""
data = tags.get(key, None)
if data:
data = convertfunc(str(data))
if format:
return format % data
return data
return ""
|
{
"content_hash": "664c8c3197be6c7b737be86a8e3da2e8",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 83,
"avg_line_length": 30.647058823529413,
"alnum_prop": 0.5108765195137556,
"repo_name": "svenhertle/django_image_exif",
"id": "791a2571752233e30c50ea57ca75c3745c428991",
"size": "3126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "image_exif/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5903"
}
],
"symlink_target": ""
}
|
import os
import re
from base64 import b64decode
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Mapping, Optional
import pkg_resources
from flask import Response as FlaskResponse
from saml2 import BINDING_HTTP_POST, BINDING_HTTP_REDIRECT
from saml2.client import Saml2Client
from saml2.response import AuthnResponse
from eduid_common.api.testing import EduidAPITestCase
from eduid_common.authn.cache import IdentityCache, OutstandingQueriesCache, StateCache
from eduid_common.authn.utils import get_saml2_config
from eduid_webapp.idp.app import IdPApp, init_idp_app
from eduid_webapp.idp.sso_session import SSOSession
__author__ = 'ft'
class LoginState(Enum):
S0_REDIRECT = 'redirect'
S1_LOGIN_FORM = 'login-form'
S2_VERIFY = 'verify'
S3_REDIRECT_LOGGED_IN = 'redirect-logged-in'
S4_REDIRECT_TO_ACS = 'redirect-to-acs'
S5_LOGGED_IN = 'logged-in'
@dataclass
class LoginResult:
url: str
reached_state: LoginState
response: FlaskResponse
sso_cookie_val: Optional[str] = None
class IdPTests(EduidAPITestCase):
"""Base TestCase for those tests that need a full environment setup"""
def setUp(
self, users: Optional[List[str]] = None, copy_user_to_private: bool = False,
):
super().setUp(users=users, copy_user_to_private=copy_user_to_private)
self.idp_entity_id = 'https://unittest-idp.example.edu/idp.xml'
self.relay_state = 'test-fest'
self.sp_config = get_saml2_config(self.app.conf.pysaml2_config, name='SP_CONFIG')
# pysaml2 likes to keep state about ongoing logins, data from login to when you logout etc.
self._pysaml2_caches: Dict[str, Any] = dict()
self.pysaml2_state = StateCache(self._pysaml2_caches) # _saml2_state in _pysaml2_caches
self.pysaml2_identity = IdentityCache(self._pysaml2_caches) # _saml2_identities in _pysaml2_caches
self.pysaml2_oq = OutstandingQueriesCache(self._pysaml2_caches) # _saml2_outstanding_queries in _pysaml2_caches
self.saml2_client = Saml2Client(config=self.sp_config, identity_cache=self.pysaml2_identity)
def load_app(self, config: Optional[Mapping[str, Any]]) -> IdPApp:
"""
Called from the parent class, so we can provide the appropriate flask
app for this test case.
"""
return init_idp_app(test_config=config)
def update_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
config = super().update_config(config)
datadir = pkg_resources.resource_filename(__name__, 'data')
fn = os.path.join(datadir, 'test_SSO_conf.py')
config.update(
{
'pysaml2_config': fn,
'fticks_secret_key': 'test test',
'eduperson_targeted_id_secret_key': 'eptid_secret',
'sso_cookie': {'key': 'test_sso_cookie'},
'eduid_site_url': 'https://eduid.docker_dev',
}
)
return config
def tearDown(self):
super(IdPTests, self).tearDown()
with self.app.app_context():
self.app.central_userdb._drop_whole_collection()
def test_app_starts(self):
assert self.app.conf.app_name == 'idp'
def _try_login(
self, saml2_client: Optional[Saml2Client] = None, authn_context=None, force_authn: bool = False,
) -> LoginResult:
"""
Try logging in to the IdP.
:return: Information about how far we got (reached LoginState) and the last response instance.
"""
_saml2_client = saml2_client if saml2_client is not None else self.saml2_client
(session_id, info) = _saml2_client.prepare_for_authenticate(
entityid=self.idp_entity_id,
relay_state=self.relay_state,
binding=BINDING_HTTP_REDIRECT,
requested_authn_context=authn_context,
force_authn=force_authn,
)
self.pysaml2_oq.set(session_id, self.relay_state)
path = self._extract_path_from_info(info)
with self.session_cookie_anon(self.browser) as browser:
resp = browser.get(path)
if resp.status_code != 200:
return LoginResult(url=path, reached_state=LoginState.S0_REDIRECT, response=resp)
form_data = self._extract_form_inputs(resp.data.decode('utf-8'))
del form_data['key'] # test if key is really necessary
form_data['username'] = self.test_user.mail_addresses.primary.email
form_data['password'] = 'Jenka'
if 'redirect_uri' not in form_data:
return LoginResult(url=path, reached_state=LoginState.S1_LOGIN_FORM, response=resp)
cookies = resp.headers.get('Set-Cookie')
if not cookies:
return LoginResult(url=path, reached_state=LoginState.S1_LOGIN_FORM, response=resp)
with self.session_cookie_anon(self.browser) as browser:
resp = browser.post('/verify', data=form_data, headers={'Cookie': cookies})
if resp.status_code != 302:
return LoginResult(url='/verify', reached_state=LoginState.S2_VERIFY, response=resp)
redirect_loc = self._extract_path_from_response(resp)
# check that we were sent onward to the SSO redirect endpoint
# TODO: verify that we really were logged in
if not redirect_loc.startswith('/sso/redirect?key='):
return LoginResult(url='/verify', reached_state=LoginState.S2_VERIFY, response=resp)
cookies = resp.headers.get('Set-Cookie')
if not cookies:
return LoginResult(url='/verify', reached_state=LoginState.S2_VERIFY, response=resp)
# Save the SSO cookie value
sso_cookie_val = None
_re = f'.*{self.app.conf.sso_cookie.key}=(.+?);.*'
_sso_cookie_re = re.match(_re, cookies)
if _sso_cookie_re:
sso_cookie_val = _sso_cookie_re.groups()[0]
resp = self.browser.get(redirect_loc, headers={'Cookie': cookies})
if resp.status_code != 200:
return LoginResult(
url=redirect_loc,
sso_cookie_val=sso_cookie_val,
reached_state=LoginState.S3_REDIRECT_LOGGED_IN,
response=resp,
)
return LoginResult(
url=redirect_loc, sso_cookie_val=sso_cookie_val, reached_state=LoginState.S5_LOGGED_IN, response=resp
)
def _extract_form_inputs(self, res: str) -> Dict[str, Any]:
inputs = {}
for line in res.split('\n'):
if 'input' in line:
# YOLO
m = re.match('.*<input .* name=[\'"](.+?)[\'"].*value=[\'"](.+?)[\'"]', line)
if m:
name, value = m.groups()
inputs[name] = value.strip('\'"')
return inputs
def _extract_path_from_response(self, response: FlaskResponse) -> str:
return self._extract_path_from_info({'headers': response.headers})
def _extract_path_from_info(self, info: Mapping[str, Any]) -> str:
_location_headers = [_hdr for _hdr in info['headers'] if _hdr[0] == 'Location']
# get first Location URL
loc = _location_headers[0][1]
return self._extract_path_from_url(loc)
def _extract_path_from_url(self, url):
# It is a complete URL, extract the path from it (8 is to skip over slashes in https://)
_idx = url[8:].index('/')
path = url[8 + _idx :]
return path
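# e.g. (URL hypothetical): 'https://idp.example.edu/sso/redirect?key=x' maps
# to '/sso/redirect?key=x'; the [8:] skip jumps past the scheme's 'https://'.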
def parse_saml_authn_response(self, response: FlaskResponse) -> AuthnResponse:
form = self._extract_form_inputs(response.data.decode('utf-8'))
xmlstr = bytes(form['SAMLResponse'], 'ascii')
outstanding_queries = self.pysaml2_oq.outstanding_queries()
return self.saml2_client.parse_authn_request_response(xmlstr, BINDING_HTTP_POST, outstanding_queries)
def get_sso_session(self, sso_cookie_val: str) -> Optional[SSOSession]:
if sso_cookie_val is None:
return None
return self.app.sso_sessions.get_session(b64decode(sso_cookie_val), self.app.userdb)
|
{
"content_hash": "9a620ef1214a59440572c6ba769918a0",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 120,
"avg_line_length": 41.634020618556704,
"alnum_prop": 0.6305558994676241,
"repo_name": "SUNET/eduid-webapp",
"id": "d37869c13e61a883a693027902da3c00e769b306",
"size": "9685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/eduid_webapp/idp/tests/test_app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "433"
},
{
"name": "HTML",
"bytes": "46956"
},
{
"name": "Python",
"bytes": "1041956"
},
{
"name": "Shell",
"bytes": "577"
}
],
"symlink_target": ""
}
|
from oslo.serialization import jsonutils
from webob import exc
from nova.api.openstack.compute.plugins.v3 import pci
from nova.api.openstack import wsgi
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.pci import device
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_pci_device
fake_compute_node = {
'pci_stats': [{"count": 3,
"vendor_id": "8086",
"product_id": "1520",
"extra_info": {"phys_function": '[["0x0000", "0x04", '
'"0x00", "0x1"]]'}}]}
class FakeResponse(wsgi.ResponseObject):
pass
class PciServerControllerTest(test.NoDBTestCase):
def setUp(self):
super(PciServerControllerTest, self).setUp()
self.controller = pci.PciServerController()
self.fake_obj = {'server': {'addresses': {},
'id': 'fb08',
'name': 'a3',
'status': 'ACTIVE',
'tenant_id': '9a3af784c',
'user_id': 'e992080ac0',
}}
self.fake_list = {'servers': [{'addresses': {},
'id': 'fb08',
'name': 'a3',
'status': 'ACTIVE',
'tenant_id': '9a3af784c',
'user_id': 'e992080ac',
}]}
self._create_fake_instance()
self._create_fake_pci_device()
device.claim(self.pci_device, self.inst)
device.allocate(self.pci_device, self.inst)
def _create_fake_instance(self):
self.inst = objects.Instance()
self.inst.uuid = 'fake-inst-uuid'
self.inst.pci_devices = objects.PciDeviceList()
def _create_fake_pci_device(self):
def fake_pci_device_get_by_addr(ctxt, id, addr):
return test_pci_device.fake_db_dev
ctxt = context.get_admin_context()
self.stubs.Set(db, 'pci_device_get_by_addr',
fake_pci_device_get_by_addr)
self.pci_device = objects.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
def test_show(self):
def fake_get_db_instance(id):
return self.inst
resp = FakeResponse(self.fake_obj, '')
req = fakes.HTTPRequestV3.blank('/os-pci/1', use_admin_context=True)
self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
self.controller.show(req, resp, '1')
self.assertEqual([{'id': 1}],
resp.obj['server']['os-pci:pci_devices'])
def test_detail(self):
def fake_get_db_instance(id):
return self.inst
resp = FakeResponse(self.fake_list, '')
req = fakes.HTTPRequestV3.blank('/os-pci/detail',
use_admin_context=True)
self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
self.controller.detail(req, resp)
self.assertEqual([{'id': 1}],
resp.obj['servers'][0]['os-pci:pci_devices'])
class PciHypervisorControllerTest(test.NoDBTestCase):
def setUp(self):
super(PciHypervisorControllerTest, self).setUp()
self.controller = pci.PciHypervisorController()
self.fake_objs = dict(hypervisors=[
dict(id=1,
service=dict(id=1, host="compute1"),
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1")])
self.fake_obj = dict(hypervisor=dict(
id=1,
service=dict(id=1, host="compute1"),
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1"))
def test_show(self):
def fake_get_db_compute_node(id):
fake_compute_node['pci_stats'] = jsonutils.dumps(
fake_compute_node['pci_stats'])
return fake_compute_node
req = fakes.HTTPRequestV3.blank('/os-hypervisors/1',
use_admin_context=True)
resp = FakeResponse(self.fake_obj, '')
self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
self.controller.show(req, resp, '1')
self.assertIn('os-pci:pci_stats', resp.obj['hypervisor'])
fake_compute_node['pci_stats'] = jsonutils.loads(
fake_compute_node['pci_stats'])
self.assertEqual(fake_compute_node['pci_stats'][0],
resp.obj['hypervisor']['os-pci:pci_stats'][0])
def test_detail(self):
def fake_get_db_compute_node(id):
fake_compute_node['pci_stats'] = jsonutils.dumps(
fake_compute_node['pci_stats'])
return fake_compute_node
req = fakes.HTTPRequestV3.blank('/os-hypervisors/detail',
use_admin_context=True)
resp = FakeResponse(self.fake_objs, '')
self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
self.controller.detail(req, resp)
fake_compute_node['pci_stats'] = jsonutils.loads(
fake_compute_node['pci_stats'])
self.assertIn('os-pci:pci_stats', resp.obj['hypervisors'][0])
self.assertEqual(fake_compute_node['pci_stats'][0],
resp.obj['hypervisors'][0]['os-pci:pci_stats'][0])
class PciControllerTest(test.NoDBTestCase):
def setUp(self):
super(PciControllerTest, self).setUp()
self.controller = pci.PciController()
def test_show(self):
def fake_pci_device_get_by_id(context, id):
return test_pci_device.fake_db_dev
self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
req = fakes.HTTPRequestV3.blank('/os-pci/1', use_admin_context=True)
result = self.controller.show(req, '1')
dist = {'pci_device': {'address': 'a',
'compute_node_id': 1,
'dev_id': 'i',
'extra_info': {},
'dev_type': 't',
'id': 1,
'server_uuid': None,
'label': 'l',
'product_id': 'p',
'status': 'available',
'vendor_id': 'v'}}
self.assertEqual(dist, result)
def test_show_error_id(self):
def fake_pci_device_get_by_id(context, id):
raise exception.PciDeviceNotFoundById(id=id)
self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
req = fakes.HTTPRequestV3.blank('/os-pci/0', use_admin_context=True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '0')
def _fake_compute_node_get_all(self, context):
return [dict(id=1,
service_id=1,
cpu_info='cpu_info',
disk_available_least=100)]
def _fake_pci_device_get_all_by_node(self, context, node):
return [test_pci_device.fake_db_dev, test_pci_device.fake_db_dev_1]
def test_index(self):
self.stubs.Set(db, 'compute_node_get_all',
self._fake_compute_node_get_all)
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
req = fakes.HTTPRequestV3.blank('/os-pci', use_admin_context=True)
result = self.controller.index(req)
dist = {'pci_devices': [test_pci_device.fake_db_dev,
test_pci_device.fake_db_dev_1]}
for i in range(len(result['pci_devices'])):
self.assertEqual(dist['pci_devices'][i]['vendor_id'],
result['pci_devices'][i]['vendor_id'])
self.assertEqual(dist['pci_devices'][i]['id'],
result['pci_devices'][i]['id'])
self.assertEqual(dist['pci_devices'][i]['status'],
result['pci_devices'][i]['status'])
self.assertEqual(dist['pci_devices'][i]['address'],
result['pci_devices'][i]['address'])
def test_detail(self):
self.stubs.Set(db, 'compute_node_get_all',
self._fake_compute_node_get_all)
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
req = fakes.HTTPRequestV3.blank('/os-pci/detail',
use_admin_context=True)
result = self.controller.detail(req)
        expected = {'pci_devices': [test_pci_device.fake_db_dev,
                                    test_pci_device.fake_db_dev_1]}
        for i in range(len(result['pci_devices'])):
            self.assertEqual(expected['pci_devices'][i]['vendor_id'],
                             result['pci_devices'][i]['vendor_id'])
            self.assertEqual(expected['pci_devices'][i]['id'],
                             result['pci_devices'][i]['id'])
            self.assertEqual(expected['pci_devices'][i]['label'],
                             result['pci_devices'][i]['label'])
            self.assertEqual(expected['pci_devices'][i]['dev_id'],
                             result['pci_devices'][i]['dev_id'])
|
{
"content_hash": "6f743eeb2376802d9c0731bba7d99361",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 77,
"avg_line_length": 43.276018099547514,
"alnum_prop": 0.5193433709744877,
"repo_name": "luzheqi1987/nova-annotation",
"id": "6ac6269195d738667597ef292b656f7e39c753fa",
"size": "10169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/plugins/v3/test_pci.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15206909"
},
{
"name": "Shell",
"bytes": "18273"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from contextlib import contextmanager
from django.test.utils import modify_settings
from corsheaders.signals import check_request_enabled
def add_middleware(action, path):
return modify_settings(**{
'MIDDLEWARE': {
action: path,
}
})
def append_middleware(path):
return add_middleware('append', path)
def prepend_middleware(path):
return add_middleware('prepend', path)
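# Usage sketch (assumed middleware path; modify_settings objects act as both
# decorators and context managers):
#
#   with append_middleware('corsheaders.middleware.CorsMiddleware'):
#       response = self.client.get('/')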
@contextmanager
def temporary_check_request_handler(handler):
check_request_enabled.connect(handler)
try:
yield
finally:
check_request_enabled.disconnect(handler)
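# Usage sketch: temporarily wire a handler to the check_request_enabled signal
# for the duration of a test. A handler is assumed to accept the signal's
# keyword arguments and return a bool, e.g.:
#
#   def allow_all(sender, request, **kwargs):
#       return True
#
#   with temporary_check_request_handler(allow_all):
#       response = self.client.get('/')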
|
{
"content_hash": "8eda90c639e632504de9a87a640d8be1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 53,
"avg_line_length": 20.4375,
"alnum_prop": 0.6987767584097859,
"repo_name": "kawamon/hue",
"id": "1347cabee362c3ca6af81c706a315f6b9c063dd3",
"size": "654",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/django-cors-headers-2.5.3/tests/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
from django.urls import reverse
from django.test import TestCase
from model_mommy import mommy
from orcamentos.crm.models import Occupation
class OccupationTest(TestCase):
def setUp(self):
self.obj = Occupation.objects.create(occupation='Gerente')
self.obj2 = Occupation.objects.create(occupation='Diretor')
def test_create(self):
self.assertTrue(Occupation.objects.exists())
def test_str(self):
self.assertEqual('Gerente', str(self.obj))
def test_exists(self):
occupations_list = ['Diretor', 'Gerente']
occupations = Occupation.objects.filter(
occupation__in=occupations_list).values_list('occupation', flat=True).order_by('occupation')
self.assertSequenceEqual(occupations, occupations_list)
class OccupationTestCase(TestCase):
def setUp(self):
self.occupations = mommy.make('crm.Occupation', _quantity=10)
def test_context(self):
occupations = Occupation.objects.all()
        self.assertEqual(occupations.count(), 10)
|
{
"content_hash": "637e722df965f4eddc61df767cd8226a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 104,
"avg_line_length": 31.545454545454547,
"alnum_prop": 0.6983669548511047,
"repo_name": "rg3915/orcamentos",
"id": "fd6fbbc26bf58e1c9b2c1fc58b149a8fd6915e46",
"size": "1041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcamentos/crm/tests/test_model_occupation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50664"
},
{
"name": "HTML",
"bytes": "542962"
},
{
"name": "JavaScript",
"bytes": "133637"
},
{
"name": "Jupyter Notebook",
"bytes": "134102"
},
{
"name": "Makefile",
"bytes": "1730"
},
{
"name": "Python",
"bytes": "197204"
},
{
"name": "Shell",
"bytes": "10278"
}
],
"symlink_target": ""
}
|
"""
Slightly modified variant of the original script.
Author of the original: Aishwarya Agrawal
"""
import sys
dataDir = '/BS/databases/vqa_1.0/VQA'
sys.path.insert(0, '../vqaTools')
from vqa import VQA
from vqaClassNormalizedEval import VQAClassNormalizedEval as VQAEval
import matplotlib.pyplot as plt
import skimage.io as io
import json
import random
import os
if len(sys.argv) != 4:
print 'Usage: python vqaEvaluateModel datasetFold resultType isVisualisation'
print 'E.g.: python vqaEvaluateModel val image_bow False'
sys.exit(1)
datasetFold = sys.argv[1]
resultType = sys.argv[2]
if sys.argv[3] == 'True':
isVisualisation = True
elif sys.argv[3] == 'False':
isVisualisation = False
else:
raise NotImplementedError()
# set up file names and paths
taskType = 'OpenEnded'
dataType = 'mscoco'  # 'mscoco' for real and 'abstract_v002' for abstract
if datasetFold == 'train':
dataSubType ='train2014'
elif datasetFold == 'val':
dataSubType = 'val2014'
else:
raise NotImplementedError()
annFile = '%s/Annotations/%s_%s_annotations.json' % (dataDir, dataType, dataSubType)
quesFile = '%s/Questions/%s_%s_%s_questions.json' % (dataDir, taskType, dataType, dataSubType)
imgDir = '%s/Images/%s/%s/' % (dataDir, dataType, dataSubType)
fileTypes = ['results', 'accuracy', 'evalQA', 'evalQuesType', 'evalAnsType']
# An example result json file has been provided in './Results' folder.
[resFile, accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = \
['../../../local/results/%s.%s.%s.%s.%s.json'%(taskType, dataType, dataSubType, resultType, fileType) for fileType in fileTypes]
# create vqa object and vqaRes object
vqa = VQA(annFile, quesFile)
vqaRes = vqa.loadRes(resFile, quesFile)
# create vqaEval object by taking vqa and vqaRes
vqaEval = VQAEval(vqa, vqaRes, n=2)  # n is precision of accuracy (number of decimal places), default is 2
# evaluate results
"""
If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
By default it uses all the question ids in annotation file
"""
vqaEval.evaluate()
# print accuracies
print "\n"
print "Per Question Type Accuracy is the following:"
for quesType in vqaEval.accuracy['perQuestionType']:
print "%s : %.02f" %(quesType, vqaEval.accuracy['perQuestionType'][quesType])
print "Overall Accuracy is: %.02f\n" %(vqaEval.accuracy['overall'])
print "\n"
print "Overall per class accuracy is %.02f\n" %(vqaEval.accuracy['classNormalizedOverall'])
print "\n"
print "Per Answer Type Accuracy is the following:"
for ansType in vqaEval.accuracy['perAnswerType']:
print "%s : %.02f" %(ansType, vqaEval.accuracy['perAnswerType'][ansType])
print "\n"
# demo how to use evalQA to retrieve low score result
if isVisualisation:
    evals = [quesId for quesId in vqaEval.evalQA if vqaEval.evalQA[quesId] < 35]  # 35 is the per-question percentage-accuracy threshold
if len(evals) > 0:
print 'ground truth answers'
randomEval = random.choice(evals)
randomAnn = vqa.loadQA(randomEval)
vqa.showQA(randomAnn)
print '\n'
print 'generated answer (accuracy %.02f)'%(vqaEval.evalQA[randomEval])
ann = vqaRes.loadQA(randomEval)[0]
print "Answer: %s\n" %(ann['answer'])
imgId = randomAnn[0]['image_id']
imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg'
if os.path.isfile(imgDir + imgFilename):
I = io.imread(imgDir + imgFilename)
plt.imshow(I)
plt.axis('off')
plt.show()
# plot accuracy for various question types
plt.bar(range(len(vqaEval.accuracy['perQuestionType'])), vqaEval.accuracy['perQuestionType'].values(), align='center')
plt.xticks(range(len(vqaEval.accuracy['perQuestionType'])), vqaEval.accuracy['perQuestionType'].keys(), rotation='0',fontsize=10)
plt.title('Per Question Type Accuracy', fontsize=10)
plt.xlabel('Question Types', fontsize=10)
plt.ylabel('Accuracy', fontsize=10)
plt.show()
# save evaluation results to ./Results folder
json.dump(vqaEval.accuracy, open(accuracyFile, 'w'))
json.dump(vqaEval.evalQA, open(evalQAFile, 'w'))
json.dump(vqaEval.evalQuesType, open(evalQuesTypeFile, 'w'))
json.dump(vqaEval.evalAnsType, open(evalAnsTypeFile, 'w'))
|
{
"content_hash": "98ead905aa44cfdc744673a4db4e83eb",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 134,
"avg_line_length": 39.630630630630634,
"alnum_prop": 0.6912934757899523,
"repo_name": "mateuszmalinowski/visual_turing_test-tutorial",
"id": "fa33f135e09db918f2ff1265fd8154eb6a0b8b1d",
"size": "4438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kraino/utils/vqaEvaluation/vqaEvaluateModel.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "176636"
},
{
"name": "Python",
"bytes": "201094"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('osbs', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='osbsrecord',
name='component',
field=models.OneToOneField(related_name='osbs', to='component.ReleaseComponent'),
),
]
|
{
"content_hash": "a069fc2dda44310b3f15f091c2c5aba0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 93,
"avg_line_length": 23,
"alnum_prop": 0.6111111111111112,
"repo_name": "xychu/product-definition-center",
"id": "b490c73f7c4207fd0ce5b39e04818d0afc15858d",
"size": "438",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pdc/apps/osbs/migrations/0002_auto_20151001_1115.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1767"
},
{
"name": "HTML",
"bytes": "49433"
},
{
"name": "JavaScript",
"bytes": "6629"
},
{
"name": "Makefile",
"bytes": "2828"
},
{
"name": "Python",
"bytes": "1189218"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
}
|
from subprocess import Popen, PIPE, STDOUT
import collections
import os
import sys
import xmlrpclib
import cPickle
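# _Method proxies a single remote call: positional arguments are pickled into
# one string before being sent over XML-RPC, and the pickled return value is
# unpickled on the way back (the IDA-side wrapper script is assumed to mirror
# this encoding).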
class _Method(object):
def __init__(self, proxy, name):
self.proxy = proxy
self.name = name
def __call__(self, *args):
#print "CALL", self.name, args
z = getattr( self.proxy, self.name, None )
#print "SEND", repr(cPickle.dumps( args ) )
try:
if len(args) == 1:
ret = z( cPickle.dumps( args[0] ) )
else:
ret = z( cPickle.dumps( args ) )
#print "RECEIVE", repr(ret)
return cPickle.loads( ret )
except xmlrpclib.ProtocolError:
return collections.deque()
class MyXMLRPC(object):
def __init__(self, proxy):
self.proxy = proxy
def __getattr__(self, name):
return _Method(self.proxy, name)
class BasicBlock(object):
def __init__(self, ins):
self.ins = ins
def show(self):
for i in self.ins:
print i
class Function(object):
def __init__(self, name, start_ea, instructions, information):
#print name, start_ea
self.name = name
self.start_ea = start_ea
self.information = information
self.basic_blocks = collections.deque()
self.instructions = instructions
r = {}
idx = 0
for i in instructions:
r[ i[0] ] = idx
idx += 1
for i in information[0]:
try:
start = r[i[0]]
end = r[i[1]] + 1
self.basic_blocks.append( BasicBlock( instructions[start:end] ) )
except KeyError:
pass
def get_instructions(self):
return [ i for i in self.instructions ]
def run_ida(idapath, wrapper_init_path, binpath):
os.environ["TVHEADLESS"] = "1"
pid = os.fork()
if pid == 0:
wrapper_path = "-S" + wrapper_init_path
l = [ idapath, "-A", wrapper_path, binpath ]
print l
compile = Popen(l, stdout=open('/dev/null', 'w'), stderr=STDOUT)
stdout, stderr = compile.communicate()
# print stdout, stderr
sys.exit(0)
class IDAPipe(object):
def __init__(self, idapath, binpath, wrapper_init_path):
self.idapath = idapath
self.binpath = binpath
self.proxy = None
        run_ida(self.idapath, wrapper_init_path, self.binpath)
while 1:
try:
self.proxy = xmlrpclib.ServerProxy("http://localhost:9000/")
self.proxy.is_connected()
break
except:
pass
#print self.proxy
self.proxy = MyXMLRPC( self.proxy )
def quit(self):
try:
self.proxy.quit()
except:
pass
def _build_functions(self, functions):
F = {}
for i in functions:
F[ i ] = Function( functions[i][0], i, functions[i][1:-1], functions[i][-1] )
return F
def get_quick_functions(self):
functions = self.get_raw()
return self._build_functions( functions )
def get_raw(self):
return self.proxy.get_raw()
def get_nb_functions(self):
return len(self.proxy.Functions())
def get_functions(self):
for function_ea in self.proxy.Functions():
self.get_function_addr( function_ea )
def get_function_name(self, name):
function_ea = self.proxy.get_function( name )
self.get_function_addr( function_ea )
def get_function_addr(self, function_ea):
if function_ea == -1:
return
f_start = function_ea
        f_end = self.proxy.GetFunctionAttr(function_ea, 4)  # FUNCATTR_END
edges = set()
boundaries = set((f_start,))
for head in self.proxy.Heads(f_start, f_end):
if self.proxy.isCode( self.proxy.GetFlags( head ) ):
refs = self.proxy.CodeRefsFrom(head, 0)
                refs = set(x for x in refs if f_start <= x <= f_end)
#print head, f_end, refs, self.proxy.GetMnem(head), self.proxy.GetOpnd(head, 0), self.proxy.GetOpnd(head, 1)
if refs:
next_head = self.proxy.NextHead(head, f_end)
if self.proxy.isFlow(self.proxy.GetFlags(next_head)):
refs.add(next_head)
# Update the boundaries found so far.
boundaries.update(refs)
                    # For each of the references found, an edge is
                    # created.
for r in refs:
# If the flow could also come from the address
# previous to the destination of the branching
# an edge is created.
if self.proxy.isFlow(self.proxy.GetFlags(r)):
edges.add((self.proxy.PrevHead(r, f_start), r))
edges.add((head, r))
#print edges, boundaries
# Let's build the list of (startEA, startEA) couples
# for each basic block
sorted_boundaries = sorted(boundaries, reverse = True)
end_addr = self.proxy.PrevHead(f_end, f_start)
bb_addr = collections.deque()
for begin_addr in sorted_boundaries:
bb_addr.append((begin_addr, end_addr))
# search the next end_addr which could be
# farther than just the previous head
# if data are interlaced in the code
            # WARNING: it assumes it won't epically fail ;)
end_addr = self.proxy.PrevHead(begin_addr, f_start)
while not self.proxy.isCode(self.proxy.GetFlags(end_addr)):
end_addr = self.proxy.PrevHead(end_addr, f_start)
# And finally return the result
bb_addr.reverse()
#print bb_addr, sorted(edges)
def display_function(f):
print f, f.name, f.information
for i in f.basic_blocks:
print i
i.show()
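# Usage sketch (paths are hypothetical; the wrapper script is expected to start
# the XML-RPC server that IDAPipe polls on localhost:9000):
#
#   pipe = IDAPipe("/opt/ida/idal", "/bin/ls", "wrapper_init.py")
#   functions = pipe.get_quick_functions()
#   for addr, func in functions.items():
#       display_function(func)
#   pipe.quit()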
|
{
"content_hash": "b1ec27023015919e3ecb269a8e648b6e",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 124,
"avg_line_length": 30.8659793814433,
"alnum_prop": 0.541750167000668,
"repo_name": "yang-guangliang/android_guard",
"id": "6ad2c24fc47a6a462e28541326586b55cc67f3fb",
"size": "6653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "androguard/core/binaries/idapipe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4315536"
}
],
"symlink_target": ""
}
|
from __future__ import division
from weight.weight_vector import WeightVector
from learner.perceptron_base import PerceptronLearnerBase
from learner import logger
from feature.feature_vector import FeatureVector
__version__ = '1.0.0'
class Learner(PerceptronLearnerBase):
name = "AveragePerceptronLearner"
def __init__(self, w_vector=None):
"""
:param w_vector: A global weight vector instance that stores
the weight value (float)
:return: None
"""
PerceptronLearnerBase.__init__(self, w_vector)
self.total_sent = 0
self.weight_sum_dict = {}
return
def _iteration_learn(self,
data_pool,
init_w_vector,
f_argmax,
log=False,
info=""):
w_vector = WeightVector()
weight_sum_dict = WeightVector()
last_change_dict = WeightVector()
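        # Lazy averaging trick: rather than adding the full weight vector into
        # weight_sum_dict after every sentence, each feature remembers the last
        # sentence at which it changed (last_change_dict); its accumulated
        # contribution is settled with a single multiplication the next time it
        # changes (or at the end of the pass). The averaged weight exported
        # later is weight_sum_dict[feature] / total_sent.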
for key in init_w_vector.keys():
w_vector[key] = init_w_vector[key]
sentence_count = 1
while data_pool.has_next_data():
data_instance = data_pool.get_next_data()
if log:
logger.info(info + "Sentence %d of %d, Length %d" % (
sentence_count,
data_pool.get_sent_num(),
len(data_instance.get_word_list()) - 1))
sentence_count += 1
gold_global_vector = FeatureVector(data_instance.gold_global_vector)
current_global_vector = f_argmax(w_vector, data_instance)
delta_global_vector = gold_global_vector - current_global_vector
# update every iteration (more convenient for dump)
if data_pool.has_next_data():
if not current_global_vector == gold_global_vector:
for s in delta_global_vector.keys():
weight_sum_dict[s] += w_vector[s] * (sentence_count - last_change_dict[s])
last_change_dict[s] = sentence_count
w_vector.iadd(delta_global_vector.feature_dict)
weight_sum_dict.iadd(delta_global_vector.feature_dict)
else:
for s in last_change_dict.keys() + w_vector.keys():
weight_sum_dict[s] += w_vector[s] * (sentence_count - last_change_dict[s])
last_change_dict[s] = sentence_count
if not current_global_vector == gold_global_vector:
w_vector.iadd(delta_global_vector.feature_dict)
weight_sum_dict.iadd(delta_global_vector.feature_dict)
data_pool.reset_index()
vector_list = {}
for key in weight_sum_dict.keys():
vector_list[str(key)] = (w_vector[key], weight_sum_dict[key], 1)
vector_list['sent_num'] = (0, 0, data_pool.get_sent_num())
return vector_list.items()
def _iteration_proc(self, vector_list):
w_vector = {}
for (feat, (weight, weight_sum, count)) in vector_list:
if feat == 'sent_num':
self.total_sent += count
else:
w_vector[feat] = float(weight) / float(count)
if feat not in self.weight_sum_dict:
self.weight_sum_dict[feat] = 0
self.weight_sum_dict[feat] += float(weight_sum)
return w_vector
def export(self):
w_vector = WeightVector()
for feat in self.weight_sum_dict:
w_vector[feat] = self.weight_sum_dict[feat] / self.total_sent
return w_vector
|
{
"content_hash": "60777437c2163f0ff0716b00de4fdf6f",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 98,
"avg_line_length": 37.927083333333336,
"alnum_prop": 0.5432573468827245,
"repo_name": "sfu-natlang/glm-parser",
"id": "39b9963290127a2b42fec441a2b008fddefc1c0d",
"size": "3641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/learner/average_perceptron.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32989"
},
{
"name": "Perl",
"bytes": "12897"
},
{
"name": "Python",
"bytes": "2217716"
},
{
"name": "Shell",
"bytes": "26389"
}
],
"symlink_target": ""
}
|
from helpers import get_allowed
import web
class IndexR:
"""
    This endpoint is a quick way to verify that the vcontrol API is up and
    running properly.
"""
allow_origin, rest_url = get_allowed.get_allowed()
def GET(self):
""" GET HTTP Request """
web.header("Content-Type","text/plain")
web.header('Access-Control-Allow-Origin', self.allow_origin)
return "vcontrol"
|
{
"content_hash": "58a24f51cfebc20d8b9306d6a8d419e4",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 28.4,
"alnum_prop": 0.6455399061032864,
"repo_name": "CyberReboot/vcontrol",
"id": "de3d32b0928eb71577148827bc9af05a0c75332b",
"size": "426",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vcontrol/rest/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4280"
},
{
"name": "Makefile",
"bytes": "4080"
},
{
"name": "Python",
"bytes": "125465"
},
{
"name": "Shell",
"bytes": "127"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound, HttpRequest, build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils.html import escape
from django.utils.importlib import import_module
from django.utils.encoding import force_bytes, smart_text
from django.utils import six
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
    yield 0
    p = template_source.find('\n')
    while p >= 0:
        yield p + 1
        p = template_source.find('\n', p + 1)
    yield len(template_source) + 1
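# e.g. list(linebreak_iter("a\nb")) yields [0, 2, 4]: the offset of each line
# start, plus a final sentinel one past the end of the string.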
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
                cleansed = dict((k, cleanse_setting(k, v)) for k, v in value.items())
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
return cleansed
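# e.g. cleanse_setting('API_KEY', 'abc123') returns CLEANSED_SUBSTITUTE, while
# cleanse_setting('DEBUG', True) returns True unchanged.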
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponseServerError(text, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponseServerError(html, content_type='text/html')
# Cache for the default exception reporter filter instance.
default_exception_reporter_filter = None
def get_exception_reporter_filter(request):
global default_exception_reporter_filter
if default_exception_reporter_filter is None:
# Load the default filter for the first time and cache it.
modpath = settings.DEFAULT_EXCEPTION_REPORTER_FILTER
modname, classname = modpath.rsplit('.', 1)
try:
mod = import_module(modname)
except ImportError as e:
raise ImproperlyConfigured(
'Error importing default exception reporter filter %s: "%s"' % (modpath, e))
try:
default_exception_reporter_filter = getattr(mod, classname)()
except AttributeError:
raise ImproperlyConfigured('Default exception reporter filter module "%s" does not define a "%s" class' % (modname, classname))
if request:
return getattr(request, 'exception_reporter_filter', default_exception_reporter_filter)
else:
return default_exception_reporter_filter
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_request_repr(self, request):
if request is None:
return repr(None)
else:
return build_request_repr(request, POST_override=self.get_post_parameters(request))
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = []
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed.append((name, CLEANSED_SUBSTITUTE))
return cleansed
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
elif isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
else:
# Potentially cleanse only the request if it's one of the frame variables.
for name, value in tb_frame.f_locals.items():
if isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = None
self.template_does_not_exist = False
self.loader_debug_info = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_data(self):
"Return a Context instance containing traceback information."
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
from django.template.loader import template_source_loaders
self.template_does_not_exist = True
self.loader_debug_info = []
for loader in template_source_loaders:
try:
source_list_func = loader.get_template_sources
# NOTE: This assumes exc_value is the name of the template that
# the loader attempted to load.
template_list = [{'name': t, 'exists': os.path.exists(t)} \
for t in source_list_func(str(self.exc_value))]
except AttributeError:
template_list = []
loader_name = loader.__module__ + '.' + loader.__class__.__name__
self.loader_debug_info.append({
'loader': loader_name,
'templates': template_list,
})
if (settings.TEMPLATE_DEBUG and
hasattr(self.exc_value, 'django_template_source')):
self.get_template_exception_info()
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': datetime.datetime.now(),
'django_version_info': get_version(),
'sys_path' : sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'loader_debug_info': self.loader_debug_info,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data())
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data(), autoescape=False)
return t.render(c)
def get_template_exception_info(self):
origin, (start, end) = self.exc_value.django_template_source
template_source = origin.reload()
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
        for num, next_pos in enumerate(linebreak_iter(template_source)):
            if start >= upto and end <= next_pos:
                line = num
                before = escape(template_source[upto:start])
                during = escape(template_source[start:end])
                after = escape(template_source[end:next_pos])
            source_lines.append((num, escape(template_source[upto:next_pos])))
            upto = next_pos
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases, exc_value.args might be empty.
try:
message = self.exc_value.args[0]
except IndexError:
message = '(Could not get exception message)'
self.template_info = {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': origin.name,
}
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.readlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
frames = []
tb = self.tb
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is not None:
frames.append({
'tb': tb,
'type': module_name.startswith('django.') and 'django' or 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
        tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
        trace = ['Traceback (most recent call last):\n']
        trace += traceback.format_list(tb)
        trace += traceback.format_exception_only(self.exc_type, self.exc_value)
        return trace
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if not tried:
# tried exists but is an empty list. The URLconf must've been empty.
return empty_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path_info[1:], # Trim leading slash
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def empty_urlconf(request):
"Create an empty URLconf 404 error response."
t = Template(EMPTY_URLCONF_TEMPLATE, name='Empty URLConf template')
c = Context({
'project_name': settings.SETTINGS_MODULE.split('.')[0]
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2 : s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>{% for t in loader.templates %}<li><code>{{ t.name }}</code> (File {% if t.exists %}exists{% else %}does not exist{% endif %})</li>{% endfor %}</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard 500 page.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
EMPTY_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
ul { margin-left: 2em; margin-top: 1em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>Of course, you haven't actually done any work yet. Here's what to do next:</p>
<ul>
<li>If you plan to use a database, edit the <code>DATABASES</code> setting in <code>{{ project_name }}/settings.py</code>.</li>
<li>Start your first app by running <code>python manage.py startapp [appname]</code>.</li>
</ul>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
|
{
"content_hash": "3381131d5741be017a2c347caf5fba6f",
"timestamp": "",
"source": "github",
"line_count": 1112,
"max_line_length": 251,
"avg_line_length": 38.37769784172662,
"alnum_prop": 0.5799746930358984,
"repo_name": "Proggie02/TestRepo",
"id": "aaa7e40efe58e1d453a2e4c137c3436fb3327c8d",
"size": "42676",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/views/debug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50602"
},
{
"name": "JavaScript",
"bytes": "94313"
},
{
"name": "Python",
"bytes": "8238602"
},
{
"name": "SQL",
"bytes": "603"
},
{
"name": "Shell",
"bytes": "12148"
}
],
"symlink_target": ""
}
|
""" DICT server """
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import os
import sys
import logging
try: # Python 2
import SocketServer as socketserver
except ImportError: # Python 3
import socketserver
log = logging.getLogger(__name__)
HOST = "localhost"
# The strings that indicate the test framework is checking our aliveness
VERIFIED_REQ = b"verifiedserver"
VERIFIED_RSP = "WE ROOLZ: {pid}"
def dictserver(options):
"""
Starts up a TCP server with a DICT handler and serves DICT requests
forever.
"""
if options.pidfile:
pid = os.getpid()
with open(options.pidfile, "w") as f:
f.write("{0}".format(pid))
local_bind = (HOST, options.port)
log.info("[DICT] Listening on %s", local_bind)
# Need to set the allow_reuse on the class, not on the instance.
socketserver.TCPServer.allow_reuse_address = True
server = socketserver.TCPServer(local_bind, DictHandler)
server.serve_forever()
return ScriptRC.SUCCESS
class DictHandler(socketserver.BaseRequestHandler):
"""Handler class for DICT connections.
"""
def handle(self):
"""
Simple function which responds to all queries with a 552.
"""
try:
# First, send a greeting response so the client can continue.
rsp = "220 dictserver <xnooptions> <msgid@msgid>\n"
self.request.sendall(rsp.encode("utf-8"))
# Receive the request.
data = self.request.recv(1024).strip()
log.debug("[DICT] Incoming data: %r", data)
if VERIFIED_REQ in data:
log.debug("[DICT] Received verification request from test "
"framework")
response_data = VERIFIED_RSP.format(pid=os.getpid())
else:
log.debug("[DICT] Received normal request")
response_data = "No matches"
# Send back a failure to find.
response = "552 {0}\n".format(response_data)
log.debug("[DICT] Responding with %r", response)
self.request.sendall(response.encode("utf-8"))
except IOError:
log.exception("[DICT] IOError hit during request")
def get_options():
parser = argparse.ArgumentParser()
parser.add_argument("--port", action="store", default=9016,
type=int, help="port to listen on")
parser.add_argument("--verbose", action="store", type=int, default=0,
help="verbose output")
parser.add_argument("--pidfile", action="store",
help="file name for the PID")
parser.add_argument("--logfile", action="store",
help="file name for the log")
parser.add_argument("--srcdir", action="store", help="test directory")
parser.add_argument("--id", action="store", help="server ID")
parser.add_argument("--ipv4", action="store_true", default=0,
help="IPv4 flag")
return parser.parse_args()
def setup_logging(options):
"""
Set up logging from the command line options
"""
root_logger = logging.getLogger()
add_stdout = False
formatter = logging.Formatter("%(asctime)s %(levelname)-5.5s %(message)s")
# Write out to a logfile
if options.logfile:
handler = logging.FileHandler(options.logfile, mode="w")
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
root_logger.addHandler(handler)
else:
# The logfile wasn't specified. Add a stdout logger.
add_stdout = True
if options.verbose:
# Add a stdout logger as well in verbose mode
root_logger.setLevel(logging.DEBUG)
add_stdout = True
else:
root_logger.setLevel(logging.INFO)
if add_stdout:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(formatter)
stdout_handler.setLevel(logging.DEBUG)
root_logger.addHandler(stdout_handler)
class ScriptRC(object):
"""Enum for script return codes"""
SUCCESS = 0
FAILURE = 1
EXCEPTION = 2
class ScriptException(Exception):
pass
if __name__ == '__main__':
# Get the options from the user.
options = get_options()
# Set up logging using the user options
setup_logging(options)
# Run main script.
try:
rc = dictserver(options)
except Exception as e:
log.exception(e)
rc = ScriptRC.EXCEPTION
log.info("[DICT] Returning %d", rc)
sys.exit(rc)
|
{
"content_hash": "9a079e7825ec4b5bd6a5f752004ef73c",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 78,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.6117113223854796,
"repo_name": "kimjinyong/i2nsf-framework",
"id": "a41a8a0d67d1cf8375971ad61fb49ba5bf3f86cb",
"size": "4676",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "Hackathon-111/SecurityController/curl-7.63.0/tests/dictserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4396520"
},
{
"name": "C++",
"bytes": "9389"
},
{
"name": "CSS",
"bytes": "51736"
},
{
"name": "Dockerfile",
"bytes": "3839"
},
{
"name": "Emacs Lisp",
"bytes": "24812"
},
{
"name": "Erlang",
"bytes": "1364078"
},
{
"name": "HTML",
"bytes": "42486541"
},
{
"name": "Hack",
"bytes": "6349"
},
{
"name": "Java",
"bytes": "7976"
},
{
"name": "JavaScript",
"bytes": "533000"
},
{
"name": "Makefile",
"bytes": "401170"
},
{
"name": "PHP",
"bytes": "164007"
},
{
"name": "Perl",
"bytes": "2188"
},
{
"name": "Python",
"bytes": "3004949"
},
{
"name": "QMake",
"bytes": "360"
},
{
"name": "Roff",
"bytes": "3906372"
},
{
"name": "Shell",
"bytes": "83872"
},
{
"name": "XSLT",
"bytes": "167018"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of the spaces removed by tools so we can handle comments
# inside statements better.
# The current rule is: we only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side;
# if that doesn't work, we try the left side, but only if there's a
# non-word character on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
_RE_PATTERN_C_COMMENTS + r'\s+|' +
r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
_RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for beginning of a raw string, and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
#
# Once we have matched a raw string, we check the prefix of the
# line to make sure that the line is not part of a single line
# comment. It's done this way because we remove raw strings
# before removing comments as opposed to removing comments
# before removing raw strings. This is because there are some
# cpplint checks that requires the comments to be preserved, but
# we don't want to check comments that are inside raw strings.
matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if (matched and
not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
matched.group(1))):
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
else:
break
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '/**/'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
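# Illustrative examples (added for exposition, not in the original file):
#   CleanseComments('x = 1;  // set x')   -> 'x = 1;'
#   CleanseComments('f(/* arg */ 0);')    -> 'f(0);'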
class CleansedLines(object):
"""Holds 4 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments.
2) lines member contains lines without comments.
3) raw_lines member contains all the lines without processing.
4) lines_without_raw_strings member is the same as raw_lines, but with C++11 raw
strings removed.
All these members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
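# Illustrative examples (added for exposition, not in the original file):
#   input:  tag = '"http://"'     ->  output:  tag = ''
#   input:  x = 1'000'000;        ->  output:  x = 1000000;  (digit separators)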
def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are
# currently expecting a '>', the matching '<' must have been an operator,
# since a template argument list should not contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack)
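# Illustrative examples (added for exposition, not in the original file):
#   FindEndOfExpressionInLine('f(a, (b + c))', 1, [])  -> (13, None)
#   FindEndOfExpressionInLine('f(a,', 1, [])           -> (-1, ['('])
# The returned stack lets CloseExpression() resume the search on the
# following line.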
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while stack and linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find end of expression before end of file, give up
return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are
# currently expecting a '<', the matching '>' must have been an operator,
# since a template argument list should not contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if line[pos] not in ')}]>':
return (line, 0, -1)
# Check last line
(start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while stack and linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find start of expression before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
"""Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
"""
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
def PathSplitToList(path):
"""Returns the path split into a list by the separator.
Args:
path: An absolute or relative path (e.g. '/a/b/c/' or '../a')
Returns:
A list of path components (e.g. ['a', 'b', 'c']).
"""
lst = []
while True:
(head, tail) = os.path.split(path)
if head == path: # absolute paths end
lst.append(head)
break
if tail == path: # relative paths end
lst.append(tail)
break
path = head
lst.append(tail)
lst.reverse()
return lst
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores the original filename in case cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
# Replace 'c++' with 'cpp'.
filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
def FixupPathFromRoot():
if _root_debug:
sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
%(_root, fileinfo.RepositoryName()))
# Process the file path with the --root flag if it was set.
if not _root:
if _root_debug:
sys.stderr.write("_root unspecified\n")
return file_path_from_root
def StripListPrefix(lst, prefix):
# f(['x', 'y'], ['w', 'z']) -> None (not a valid prefix)
if lst[:len(prefix)] != prefix:
return None
# f(['a', 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd']
return lst[(len(prefix)):]
# root behavior:
# --root=subdir lstrips subdir from the header guard
maybe_path = StripListPrefix(PathSplitToList(file_path_from_root),
PathSplitToList(_root))
if _root_debug:
sys.stderr.write("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
" _root=%s)\n" %(maybe_path, file_path_from_root, _root))
if maybe_path:
return os.path.join(*maybe_path)
# --root=.. prepends the outer directory to the header guard
full_path = fileinfo.FullName()
root_abspath = os.path.abspath(_root)
maybe_path = StripListPrefix(PathSplitToList(full_path),
PathSplitToList(root_abspath))
if _root_debug:
sys.stderr.write("_root prepend (maybe_path=%s, full_path=%s, " +
"root_abspath=%s)\n" %(maybe_path, full_path, root_abspath))
if maybe_path:
return os.path.join(*maybe_path)
if _root_debug:
sys.stderr.write("_root ignore, returning %s\n" %(file_path_from_root))
# --root=FAKE_DIR is ignored
return file_path_from_root
file_path_from_root = FixupPathFromRoot()
return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
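# Illustrative example (added for exposition, not in the original file),
# assuming the default --root so RepositoryName() yields the path relative
# to the repository root:
#   GetHeaderGuardCPPVariable('chrome/browser/ui/browser.h')
#     -> 'CHROME_BROWSER_UI_BROWSER_H_'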
def CheckForHeaderGuard(filename, clean_lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For headers that
do have one, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
clean_lines: A CleansedLines instance containing the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
raw_lines = clean_lines.lines_without_raw_strings
for i in raw_lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = ''
ifndef_linenum = 0
define = ''
endif = ''
endif_linenum = 0
for linenum, line in enumerate(raw_lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
# Check for "//" comments on endif line.
ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
if match.group(1) == '_':
# Issue low severity warning for deprecated double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif // %s"' % cppvar)
return
# Didn't find the corresponding "//" comment. If this file does not
# contain any "//" comments at all, it could be that the compiler
# only wants "/**/" comments, look for those instead.
no_single_line_comments = True
for i in xrange(1, len(raw_lines) - 1):
line = raw_lines[i]
if Match(r'^(?:(?:\'(?:\\.|[^\'])*\')|(?:"(?:\\.|[^"])*")|[^\'"])*//', line):
no_single_line_comments = False
break
if no_single_line_comments:
match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
if match:
if match.group(1) == '_':
# Low severity warning for double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif /* %s */"' % cppvar)
return
# Didn't find anything
error(filename, endif_linenum, 'build/header_guard', 5,
'#endif line should be "#endif // %s"' % cppvar)
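# For reference, a guard that satisfies all of the checks above looks
# like this (illustrative, not part of the original file):
#   #ifndef CHROME_BROWSER_UI_BROWSER_H_
#   #define CHROME_BROWSER_UI_BROWSER_H_
#   ...
#   #endif  // CHROME_BROWSER_UI_BROWSER_H_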
def CheckHeaderFileIncluded(filename, include_state, error):
"""Logs an error if a .cc file does not include its header."""
# Do not check test files
fileinfo = FileInfo(filename)
if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
return
headerfile = filename[0:len(filename) - len(fileinfo.Extension())] + '.h'
if not os.path.exists(headerfile):
return
headername = FileInfo(headerfile).RepositoryName()
first_include = 0
for section_list in include_state.include_list:
for f in section_list:
if headername in f[0] or f[0] in headername:
return
if not first_include:
first_include = f[1]
error(filename, first_include, 'build/include', 5,
'%s should include its header file %s' % (fileinfo.RepositoryName(),
headername))
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) backslash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
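# Illustrative examples of lines flagged above (added for exposition,
# not in the original file):
#   /* start of a comment that continues...   -> readability/multiline_comment
#   const char* s = "string that continues    -> readability/multiline_string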
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code was originally written without consideration of
multi-threading. Also, engineers often rely on old experience: they
learned POSIX before the threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
if Search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
'...) for improved thread safety.')
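# Illustrative examples (added for exposition, not in the original file):
#   flagged:      now = *localtime(&t);   ('*' matches _UNSAFE_FUNC_PREFIX)
#   not flagged:  ACMRandom rand(seed);   (no operator before the name)
# The suggested rewrite for the first is localtime_r(&t, &result).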
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving the pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
if Search(r'^#define', clean_lines[linenum]):
return True
if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
return True
return False
def IsForwardClassDeclaration(clean_lines, linenum):
return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, linenum, seen_open_brace):
self.starting_linenum = linenum
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
self.check_namespace_indentation = False
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
"""Stores information about an 'extern "C"' block."""
def __init__(self, linenum):
_BlockInfo.__init__(self, linenum, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name
self.is_derived = False
self.check_namespace_indentation = True
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# If there is a DISALLOW macro, it should appear near the end of
# the class.
seen_last_thing_in_class = False
for i in xrange(linenum - 1, self.starting_linenum, -1):
match = Search(
r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
self.name + r'\)',
clean_lines.elided[i])
if match:
if seen_last_thing_in_class:
error(filename, i, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
break
if not Match(r'^\s*$', clean_lines.elided[i]):
seen_last_thing_in_class = True
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name or ''
self.check_namespace_indentation = True
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
# Check how many lines are enclosed in this namespace. Don't issue a
# warning for missing namespace comments if there aren't enough
# lines. However, do apply the checks if there is already an
# end-of-namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
# namespaces, so that code that terminate namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when the existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
re.escape(self.name) + r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
else:
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"')
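# Illustrative examples of namespace-ending comments (added for
# exposition, not in the original file):
#   }  // namespace mynamespace       accepted for 'namespace mynamespace'
#   }  /* namespace mynamespace */    accepted (C-style, e.g. inside macros)
#   }  // namespace                   accepted for an anonymous namespace
#   }                                 flagged once the namespace spans 10+
#                                     lines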
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Top of the previous stack before each Update().
#
# Because the nesting_stack is updated at the end of each line, we
# had to do some convoluted checks to find out what the current
# scope is at the beginning of the line. This check is simplified by
# saving the previous top of nesting stack.
#
# We could save the full stack, but we only need the top. Copying
# the full nesting stack would slow down cpplint by ~10%.
self.previous_stack_top = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# We can't be sure if we just find a single '<', and need to
# find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
# declarations even if it weren't followed by a whitespace, this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
# If the namespace or class hasn't seen an opening brace yet, mark
# the namespace/class head as complete. Otherwise, push a new block
# onto the stack.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo(linenum))
else:
self.stack.append(_BlockInfo(linenum, True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
# Pop the stack for these as well.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage-class specifier (static, extern, typedef, etc) should be '
'at the beginning of the declaration.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
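# For illustration, GNU min/max operator uses such as the following would
# be flagged by the check above:
#   m = a <? b;    // GNU "minimum" operator
#   m >?= limit;   // compound form, also matched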
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
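# For illustration, a member declaration like the following triggers the
# warning above (the regexp is intentionally narrow, matching only
# 'const string&' members):
#   const string& name_;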
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
explicit_constructor_match = Match(
r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
r'(?:(?:inline|constexpr)\s+)*%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
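# For illustration, a rough sketch of the constructor checks above,
# assuming a class named Foo:
#   Foo(int x);                         // flagged: should be marked explicit
#   Foo(int x, int y = 0);              // flagged: callable with one argument
#   explicit Foo(int x);                // OK
#   Foo(const Foo& other);              // copy constructor: exempt
#   Foo(std::initializer_list<int> v);  // initializer_list: exempt
#   explicit Foo();                     // flagged: zero-parameter explicit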
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
not Search(r'\bcase\s+\(', fncall)):
# TODO(unknown): Space after an operator function seem to be a common
# error, silence those for now by restricting them to highest verbosity.
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
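# For illustration, the spacing checks above report roughly:
#   f( 3, 4 );   // 'Extra space after ( in function call'
#   f (3, 4);    // 'Extra space before ( in function call'
#   f(3, 4 );    // 'Extra space before )'
# while control structures such as 'if (x) {' are deliberately ignored.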
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if it is empty or consists only of
whitespace.
Args:
line: A line of text.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
is_namespace_indent_item = (
len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
nesting_state.previous_stack_top == nesting_state.stack[-2])
if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
clean_lines.elided, line):
CheckItemIndentationInNamespace(filename, clean_lines.elided,
line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
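# For illustration, _RE_PATTERN_TODO matches comments such as:
#   '// TODO(alice): refactor'  -> groups: (' ', '(alice)', ' ')
#   '//TODO: fix'               -> groups: ('', None, ' ')  (missing username)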
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the // unless
# it's a /// or //! Doxygen comment.
if (Match(r'//[^ ]*\w', comment) and
not Match(r'(///|//\!)(\s+|$)', comment)):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
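# For illustration, the comment checks above behave roughly as:
#   'int x; // note'   -> 'At least two spaces is best between code and comments'
#   'int x;  // note'  -> OK
#   '//comment'        -> 'Should have a space between // and comment'
#   '/// Doxygen'      -> OK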
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in an 80-column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into an 80-column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'return []() {};'
if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we want spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
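# For illustration, the range-based for check above behaves roughly as:
#   for (auto x : vec) {}   // OK
#   for (auto x: vec) {}    // flagged: missing space before ':'
#   for (auto x :vec) {}    // flagged: missing space after ':'
# Qualified names like ns::vec inside the loop header are not flagged.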
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if ((Search(r'[\w.]=', line) or
Search(r'=[\w.]', line))
and not Search(r'\b(if|while|for) ', line)
# Operators taken from [lex.operators] in C++11 standard.
and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
and not Search(r'operator=', line)):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
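# For illustration:
#   a=b;           // flagged: 'Missing spaces around ='
#   a += b;        // OK: compound assignment operators are excluded
#   if (a=b) ...   // tolerated: '=' inside if/while/for is not flagged here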
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
#
# If the operator is followed by a comma, assume it's being used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. This is because there are too
# many false positives due to RValue references.
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically we should flag it if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
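# For illustration:
#   cout<<"hi";       // flagged: 'Missing spaces around <<'
#   1<<20             // OK: shift between numeric literals
#   value >>alpha     // flagged: 'Missing spaces around >>'
#   Foo<Bar<int>> x;  // OK: '>>' closing nested templates
#   x ++;             // flagged: extra space around a unary operator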
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
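# For illustration (the trailing '{' matters for the second regexp, and
# the first regexp assumes the keyword is preceded by a space, such as
# indentation):
#   if(x) {         // flagged: 'Missing space before ( in if('
#   if ( x) {       // flagged: 'Mismatching spaces inside () in if'
#   if (  x  ) {    // flagged: should have zero or one spaces inside ( and )
#   for ( ; x; ) {  // OK: empty for-clauses are allowed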
def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that the line is missing whitespace, second pass on raw
# lines to confirm that the missing whitespace is not due to
# elided comments.
if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
Search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
# TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
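# For illustration:
#   f(a,b);        // flagged: 'Missing space after ,'
#   MACRO(a,,b);   // OK: empty macro arguments (',,') are excluded
#   for (;;) {}    // OK: ';;' and ';)' are excluded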
def _IsType(clean_lines, nesting_state, expr):
"""Check if expression looks like a type name, returns true if so.
Args:
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
expr: The expression to check.
Returns:
True, if token looks like a type.
"""
# Keep only the last token in the expression
last_word = Match(r'^.*(\b\S+)$', expr)
if last_word:
token = last_word.group(1)
else:
token = expr
# Match native types and stdint types
if _TYPES.match(token):
return True
# Try a bit harder to match templated types. Walk up the nesting
# stack until we find something that resembles a typename
# declaration for what we are looking for.
typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
r'\b')
block_index = len(nesting_state.stack) - 1
while block_index >= 0:
if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
return False
# Found where the opening brace is. We want to scan from this
# line up to the beginning of the function, minus a few lines.
# template <typename Type1, // stop scanning here
# ...>
# class C
# : public ... { // start scanning here
last_line = nesting_state.stack[block_index].starting_linenum
next_block_start = 0
if block_index > 0:
next_block_start = nesting_state.stack[block_index - 1].starting_linenum
first_line = last_line
while first_line >= next_block_start:
if clean_lines.elided[first_line].find('template') >= 0:
break
first_line -= 1
if first_line < next_block_start:
# Didn't find any "template" keyword before reaching the next block,
# there are probably no template things to check for this block
block_index -= 1
continue
# Look for typename in the specified range
for i in xrange(first_line, last_line + 1, 1):
if Search(typename_pattern, clean_lines.elided[i]):
return True
block_index -= 1
return False
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces when they are delimiting blocks, classes, namespaces etc.
# And since you should never have braces at the beginning of a line,
# this is an easy test. Except that braces used for initialization don't
# follow the same rule; we often don't want spaces before those.
match = Match(r'^(.*[^ ({>]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer lists, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
leading_text = match.group(1)
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
# We also suppress warnings for `uint64_t{expression}` etc., as the style
# guide recommends brace initialization for integral types to avoid
# overflow/truncation.
if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
and not _IsType(clean_lines, nesting_state, leading_text)):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
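# For illustration, the checks above behave roughly as:
#   if (cond){        // typically flagged: 'Missing space before {'
#   int x{};          // OK: brace initialization is recognized
#   }else {           // flagged: 'Missing space before else'
#   while (cond) ;    // flagged: extra space before the last semicolon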
def IsDecltype(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise.
"""
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
if Search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block. We also allow a brace on the
# following line if it is part of an array initialization and would not fit
# within the 80 character limit of the preceding line.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline) and
not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'else if\s*\(', line): # could be multi-line if
brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
brace_on_right = endline[endpos:].find('{') != -1
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
if_else_match = Search(r'\b(if\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
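# For illustration, the brace checks above behave roughly as:
#   'else {' on the line after a bare '}'  -> else should share the line with }
#   } else return;            -> else clause must not share the line with else
#   do x++; while (x < 10);   -> do/while should not be on a single line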
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on
# - Compound literals
# - Lambdas
# - alignas specifier with anonymous structs
# - decltype
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
Search(r'\bdecltype$', line_prefix) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
# We need to check the line forward for NOLINT
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
error)
ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
error)
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression.
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
# Check for if statements that have completely empty bodies (no comments)
# and no else clauses.
if end_pos >= 0 and matched.group(1) == 'if':
# Find the position of the opening { for the if statement.
# Return without logging an error if it has no brackets.
opening_linenum = end_linenum
opening_line_fragment = end_line[end_pos:]
# Loop until EOF or find anything that's not whitespace or opening {.
while not Search(r'^\s*\{', opening_line_fragment):
if Search(r'^(?!\s*$)', opening_line_fragment):
# Conditional has no brackets.
return
opening_linenum += 1
if opening_linenum == len(clean_lines.elided):
# Couldn't find conditional's opening { or any code before EOF.
return
opening_line_fragment = clean_lines.elided[opening_linenum]
# Set opening_line (opening_line_fragment may not be entire opening line).
opening_line = clean_lines.elided[opening_linenum]
# Find the position of the closing }.
opening_pos = opening_line_fragment.find('{')
if opening_linenum == end_linenum:
# We need to make opening_pos relative to the start of the entire line.
opening_pos += end_pos
(closing_line, closing_linenum, closing_pos) = CloseExpression(
clean_lines, opening_linenum, opening_pos)
if closing_pos < 0:
return
# Now construct the body of the conditional. This consists of the portion
# of the opening line after the {, all lines until the closing line,
# and the portion of the closing line before the }.
if (clean_lines.raw_lines[opening_linenum] !=
CleanseComments(clean_lines.raw_lines[opening_linenum])):
# Opening line ends with a comment, so conditional isn't empty.
return
if closing_linenum > opening_linenum:
# Opening line after the {. Ignore comments here since we checked above.
body = list(opening_line[opening_pos+1:])
# All lines until closing line, excluding closing line, with comments.
body.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
# Closing line before the }. Won't (and can't) have comments.
body.append(clean_lines.elided[closing_linenum][:closing_pos-1])
body = '\n'.join(body)
else:
# If statement has brackets and fits on a single line.
body = opening_line[opening_pos+1:closing_pos-1]
# Check if the body is empty
if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
return
# The body is empty. Now make sure there's not an else clause.
current_linenum = closing_linenum
current_line_fragment = closing_line[closing_pos:]
# Loop until EOF or find anything that's not whitespace or else clause.
while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
if Search(r'^(?=\s*else)', current_line_fragment):
# Found an else clause, so don't log an error.
return
current_linenum += 1
if current_linenum == len(clean_lines.elided):
break
current_line_fragment = clean_lines.elided[current_linenum]
# The body is empty and there's no else clause until EOF or other code.
error(filename, end_linenum, 'whitespace/empty_if_body', 4,
('If statement had no body and no else clause'))
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
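# For illustration, assuming _CHECK_MACROS contains 'CHECK':
#   FindCheckMacro('  CHECK(x > 0);')   -> ('CHECK', 7)
#   FindCheckMacro('  MY_CHECKER(x);')  -> (None, -1)  (no word-boundary match)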
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
if not Match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs does not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
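# For illustration:
#   CHECK(x == 42);       // 'Consider using CHECK_EQ instead of CHECK(a == b)'
#   CHECK(x < y);         // no warning: neither operand is a constant literal
#   CHECK((a < 4) == b);  // no warning: not replaceable by CHECK_LE
#   CHECK(a && b);        // no warning: multiple terms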
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
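# For illustration, assuming the usual _ALT_TOKEN_REPLACEMENT table:
#   if (a and b)    // flagged: 'Use operator && instead of and'
#   if (not x)      // flagged: 'Use operator ! instead of not'
#   x = a bitor b;  // flagged: 'Use operator | instead of bitor'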
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
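# For example (Python 2 semantics; byte strings fall through to len()):
#   GetLineWidth('foobar') == 6
#   GetLineWidth(u'\u4e00\u4e8c') == 4   # two wide CJK characters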
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
prev = raw_lines[linenum - 1] if linenum > 0 else ''
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
# There are certain situations we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
# We also don't check for lines that look like continuation lines
# (of lines ending in double quotes, commas, equals, or angle brackets)
# because the rules for how to indent those are non-trivial.
if (not Search(r'[",=><] *$', prev) and
(initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# Check if the line is a header guard.
is_header_guard = False
if IsHeaderExtension(file_extension):
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
  # developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^\s*//\s*[^\s]*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
if line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
  # lives in the same directory, then it's likely to be owned by the
  # target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
# Only do this check if the included header follows google naming
# conventions. If not, assume that it's a 3rd party API that
# requires special include conventions.
#
# We also make an exception for Lua headers, which follow google
# naming convention but not the include convention.
match = Match(r'#include\s*"([^/]+\.h)"', line)
if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
  # We shouldn't include a file more than once. Actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, duplicate_line))
elif (include.endswith('.cc') and
os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
error(filename, linenum, 'build/include', 4,
'Do not include .cc files from other packages')
elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly handles
  nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
  # TODO(unknown): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside, replacing the inferior regexp matching
  # they use today.
  # Maps each opening punctuation symbol to the matching close-punctuation.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
  assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
  # All punctuation matched.
return text[start_position:position - 1]
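# For example:
#   _GetTextInside('int a[] = {1, {2, 3}};', r'=\s*\{') == '1, {2, 3}'
#   _GetTextInside('f(a, b(c);', r'f\(') is None   # unbalanced parentheses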
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
# Stream types.
_RE_PATTERN_REF_STREAM_PARAM = (
r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
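# For example, in the declaration "void F(string &s, const string &t);",
# _RE_PATTERN_REF_PARAM matches both parameters, but only "string &s" fails
# _RE_PATTERN_CONST_REF_PARAM and would be flagged by
# CheckForNonConstReference below.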
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if IsHeaderExtension(file_extension):
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple lines to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (IsHeaderExtension(file_extension)
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
"""Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Match two lines at a time to support multiline declarations
if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access, and
# also because globals can be destroyed when some threads are still running.
# TODO(unknown): Generalize this to also find static unique_ptr instances.
# TODO(unknown): File bugs for clang-tidy to find these.
match = Match(
r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
r'([a-zA-Z0-9_:]+)\b(.*)',
line)
# Remove false positives:
# - String pointers (as opposed to values).
# string *pointer
# const string *pointer
# string const *pointer
# string *const pointer
#
# - Functions and template specializations.
# string Function<Type>(...
# string Class<Type>::Method(...
#
# - Operators. These are matched separately because operator names
# cross non-word boundaries, and trying to match both operators
# and functions at the same time would decrease accuracy of
# matching identifiers.
# string Class::operator*()
if (match and
not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
if Search(r'\bconst\b', line):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string '
'instead: "%schar%s %s[]".' %
(match.group(1), match.group(2) or '', match.group(3)))
else:
error(filename, linenum, 'runtime/string', 4,
'Static/global string variables are not permitted.')
if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
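# For example, at file scope:
#   const string kFoo = "foo";  // suggests: const char kFoo[] = "foo";
#   static string g_name;       // "Static/global string variables are not
#                               //  permitted."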
def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
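# For example:
#   snprintf(buf, 10, "%s", s);  // suggests sizeof(buf) as the 2nd arg
#   sprintf(buf, "%s", s);       // "Never use sprintf. Use snprintf instead."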
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False
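# For example, IsDerivedFunction returns True inside a declaration like
#   void Foo(int x) override;
# because "override" appears after the parameter list's closing parenthesis.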
def IsOutOfLineMethodDefinition(clean_lines, linenum):
"""Check if current line contains an out-of-line method definition.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains an out-of-line method definition.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
return False
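# For example, "void MyClass::Foo() {" is an out-of-line definition, while a
# declaration "void Foo();" inside the class body is not.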
def IsInitializerList(clean_lines, linenum):
"""Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
for i in xrange(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
remove_function_body = Match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tends to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tends to appear in constructor initializer lists as
      # opposed to parameter lists.
return True
if Search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
if Search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
# - An opening brace, probably the start of current class or namespace.
#
# Current line is probably not inside an initializer list since
# we saw one of those things without seeing the starting colon.
return False
# Got to the beginning of the file without seeing the start of
# constructor initializer list.
return False
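# For example, when scanning backwards from the body of:
#   MyClass::MyClass()
#       : member_(CreateDefault()) {
# the ':' line matches r'\s:\s*\w+[({]', so IsInitializerList returns True.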
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# If a function is inherited, current function doesn't have much of
# a choice, so any non-const references should not be blamed on
# derived function.
if IsDerivedFunction(clean_lines, linenum):
return
# Don't warn on out-of-line method definitions, as we would warn on the
# in-line declaration, if it isn't marked with 'override'.
if IsOutOfLineMethodDefinition(clean_lines, linenum):
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
  # Check for non-const references in function parameters. A single '&' may
  # be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
if (nesting_state.previous_stack_top and
not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
# Not at toplevel, not within a class, and not within a namespace
return
# Avoid initializer lists. We only need to scan back from the
# current line for something that starts with ':'.
#
# We don't need to check the current line, since the '&' would
# appear inside the second set of parentheses on the current line as
# opposed to the first set.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 10), -1):
previous_line = clean_lines.elided[i]
if not Search(r'[),]\s*$', previous_line):
break
if Match(r'^\s*:\s+\S', previous_line):
return
# Avoid preprocessors
if Search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
if IsInitializerList(clean_lines, linenum):
return
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
  whitelisted_functions = (r'(?:[sS]wap(?:<[\w:]+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
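# For example, "void Process(string &output);" is flagged with
#   "Is this a non-const reference? If so, make const or use a pointer:
#    string &output"
# while "void Process(const string &input);" is not.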
def CheckCasts(filename, clean_lines, linenum, error):
"""Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
# matched_new_or_template is used to silence two false positives:
# - New operators
# - Template arguments with function types
#
# For template arguments, we match on types immediately following
# an opening bracket without any spaces. This is a fast way to
# silence the common case where the function type is the first
# template argument. False negative with less-than comparison is
# avoided because those operators are usually followed by a space.
#
# function<double(double)> // bracket + no space = false positive
# value < double(42) // bracket + space = true positive
matched_new_or_template = match.group(1)
# Avoid arrays by looking for brackets that come after the closing
# parenthesis.
if Match(r'\([^()]+\)\s*\[', match.group(3)):
return
# Other things to ignore:
# - Function pointers
# - Casts to pointer types
# - Placement new
# - Alias declarations
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
#
# Some non-identifier character is required before the '&' for the
# expression to be recognized as a cast. These are casts:
# expression = &static_cast<int*>(temporary());
# function(&(int*)(temporary()));
#
# This is not a cast:
# reference_type&(int* function_param);
match = Search(
r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match:
# Try a better error message when the & is bound to something
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
  # Try expanding the current context to see if we are one level of
  # parentheses inside a macro.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
if context.endswith(' operator++') or context.endswith(' operator--'):
return False
  # A single unnamed argument for a function tends to look like an old-style
  # cast. If we see those, don't issue warnings for deprecated casts.
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
remainder):
return False
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
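# For example, when called with cast_type 'static_cast' and the numeric-type
# pattern used in CheckCasts, a line like "int x = (int)y;" produces:
#   "Using C-style cast. Use static_cast<int>(...) instead"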
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
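# For example, inside "MOCK_METHOD1(GetValue, int(int));" the "int(int)" is a
# function type argument, not a C-style cast, so cast warnings are suppressed.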
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
'unique_ptr', 'weak_ptr')),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<tuple>', ('tuple',)),
('<unordered_map>', ('unordered_map', 'unordered_multimap')),
('<unordered_set>', ('unordered_set', 'unordered_multiset')),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_HEADERS_MAYBE_TEMPLATES = (
('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
'transform',
)),
('<utility>', ('forward', 'make_pair', 'move', 'swap')),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_headers_maybe_templates = []
for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
for _template in _templates:
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_headers_maybe_templates.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
_header))
# Other scripts may reach in and modify this pattern.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
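# For example, a line containing "std::vector<int> v;" matches the 'vector<>'
# pattern above, so CheckForIncludeWhatYouUse below would require <vector> to
# be included somewhere in the module.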
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
fileinfo = FileInfo(filename_cc)
if not fileinfo.IsSource():
return (False, '')
filename_cc = filename_cc[:-len(fileinfo.Extension())]
matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo.BaseName())
if matched_test_suffix:
filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
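# For example:
#   FilesBelongToSameModule('/abs/path/to/base/sysinfo.cc', 'base/sysinfo.h')
# returns (True, '/abs/path/to/'); the prefix lets the caller open the header
# even though the include path is relative.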
def UpdateIncludeState(filename, include_dict, io=codecs):
"""Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_headers_maybe_templates:
if pattern.search(line):
required[header] = (linenum, template)
    # The following check is just a speed up; no semantics are changed.
    if '<' not in line:  # Reduces the CPU time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
matched = pattern.search(line)
if matched:
# Don't warn about IWYU in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's flatten the include_state include_list and copy it into a dictionary.
include_dict = dict([item for sublist in include_state.include_list
for item in sublist])
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_dict.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_dict, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
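# For example, a .cc file that calls std::sort() without including <algorithm>
# (directly or via its own header) gets:
#   "Add #include <algorithm> for sort"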
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
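# For example, "make_pair<int, int>(1, 2)" is flagged, while "make_pair(1, 2)"
# relies on template argument deduction and is fine.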
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
if not virtual: return
# Ignore "virtual" keywords that are near access-specifiers. These
# are only used in class base-specifier and do not apply to member
# functions.
if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
return
# Ignore the "virtual" keyword from virtual base classes. Usually
# there is a column on the same line in these cases (virtual base
# classes are rare in google3 because multiple inheritance is rare).
if Match(r'^.*[^:]:[^:].*$', line): return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(2))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break
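# For example, "virtual void Foo() override;" is flagged because "override"
# already implies that the function is virtual.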
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for closing parenthesis nearby. We need one to confirm where
# the declarator ends and where the virt-specifier starts to avoid
# false positives.
line = clean_lines.elided[linenum]
declarator_end = line.rfind(')')
if declarator_end >= 0:
fragment = line[declarator_end:]
else:
if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
fragment = line
else:
return
# Check that at most one of "override" or "final" is present, not both
if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
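# For example, "void Foo() override final;" is flagged because "final" already
# implies "override".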
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
"""Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is a forward declared class.
Returns:
Whether or not the new block is directly in a namespace.
"""
if is_forward_declaration:
if len(nesting_state.stack) >= 1 and (
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
return True
else:
return False
return (len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum):
"""This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace.
"""
is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
linenum)
if not (is_namespace_indent_item or is_forward_declaration):
return False
# If we are in a macro, we do not want to check the namespace indentation.
if IsMacroDefinition(raw_lines_no_comments, linenum):
return False
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
error):
line = raw_lines_no_comments[linenum]
if Match(r'^\s+', line):
error(filename, linenum, 'runtime/indentation_namespace', 4,
'Do not indent within a namespace')
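# For example:
#   namespace foo {
#     int i;  // flagged: "Do not indent within a namespace"
#   int j;    // not flagged
#   }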
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
                extra_check_functions=None):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  for check_fn in extra_check_functions or []:
check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++ TR1 headers.
if include and include.group(1).startswith('tr1/'):
error(filename, linenum, 'build/c++tr1', 5,
('C++ TR1 headers such as <%s> are unapproved.') % include.group(1))
# Flag unapproved C++11 headers.
if include and include.group(1) in ('cfenv',
'condition_variable',
'fenv.h',
'future',
'mutex',
'thread',
'ratio',
'regex',
'system_error',
):
error(filename, linenum, 'build/c++11', 5,
('<%s> is an unapproved C++11 header.') % include.group(1))
# The only place where we need to worry about C++11 keywords and library
# features in preprocessor directives is in macro definitions.
if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
# These are classes and free functions. The classes are always
# mentioned as std::*, but we only catch the free functions if
# they're not found by ADL. They're alphabetical by header.
for top_name in (
# type_traits
'alignment_of',
'aligned_union',
):
if Search(r'\bstd::%s\b' % top_name, line):
error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function.  Send an '
             'example of where it would make your code more readable, and '
             'you may be allowed to use it.') % top_name)
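# Illustrative inputs (not part of cpplint) that FlagCxx11Features would
# flag, assuming default filters:
#   #include <tr1/unordered_map>            -> build/c++tr1 (unapproved TR1 header)
#   #include <mutex>                        -> build/c++11  (unapproved C++11 header)
#   bool b = std::alignment_of<int>::value; -> build/c++11  (unapproved name)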
def FlagCxx14Features(filename, clean_lines, linenum, error):
"""Flag those C++14 features that we restrict.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++14 headers.
if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
error(filename, linenum, 'build/c++14', 5,
('<%s> is an unapproved C++14 header.') % include.group(1))
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
ProcessGlobalSuppresions(lines)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
if IsHeaderExtension(file_extension):
CheckForHeaderGuard(filename, clean_lines, error)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# Check that the .cc file has included its header if it exists.
if _IsSourceExtension(file_extension):
CheckHeaderFileIncluded(filename, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
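# A minimal sketch (not part of cpplint) of a custom check that could be
# passed to ProcessFileData via extra_check_functions; any callable taking
# (filename, clean_lines, linenum, error) works. The name is illustrative.
def _CheckNoTabsExample(filename, clean_lines, linenum, error):
  """Example extra check: flag literal tab characters in source lines."""
  if '\t' in clean_lines.raw_lines[linenum]:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')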
def ProcessConfigOverrides(filename):
""" Loads the configuration files and processes the config overrides.
Args:
filename: The name of the file being processed by the linter.
Returns:
False if the current |filename| should not be processed further.
"""
abs_filename = os.path.abspath(filename)
cfg_filters = []
keep_looking = True
while keep_looking:
abs_path, base_name = os.path.split(abs_filename)
if not base_name:
break # Reached the root directory.
cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
abs_filename = abs_path
if not os.path.isfile(cfg_file):
continue
try:
with open(cfg_file) as file_handle:
for line in file_handle:
line, _, _ = line.partition('#') # Remove comments.
if not line.strip():
continue
name, _, val = line.partition('=')
name = name.strip()
val = val.strip()
if name == 'set noparent':
keep_looking = False
elif name == 'filter':
cfg_filters.append(val)
elif name == 'exclude_files':
# When matching exclude_files pattern, use the base_name of
# the current file name or the directory name we are processing.
# For example, if we are checking for lint errors in /foo/bar/baz.cc
# and we found the .cfg file at /foo/CPPLINT.cfg, then the config
# file's "exclude_files" filter is meant to be checked against "bar"
# and not "baz" nor "bar/baz.cc".
if base_name:
pattern = re.compile(val)
if pattern.match(base_name):
if _cpplint_state.quiet:
# Suppress "Ignoring file" warning when using --quiet.
return False
sys.stderr.write('Ignoring "%s": file excluded by "%s". '
'File path component "%s" matches '
'pattern "%s"\n' %
(filename, cfg_file, base_name, val))
return False
elif name == 'linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
            sys.stderr.write('Line length must be numeric.\n')
elif name == 'root':
global _root
# root directories are specified relative to CPPLINT.cfg dir.
_root = os.path.join(os.path.dirname(cfg_file), val)
elif name == 'headers':
ProcessHppHeadersOption(val)
else:
sys.stderr.write(
'Invalid configuration option (%s) in file %s\n' %
(name, cfg_file))
except IOError:
sys.stderr.write(
"Skipping config file '%s': Can't open for reading\n" % cfg_file)
keep_looking = False
# Apply all the accumulated filters in reverse order (top-level directory
# config options having the least priority).
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)
return True
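# An example CPPLINT.cfg (hypothetical contents) that the parser above
# understands; "set noparent" stops the upward directory search:
#   set noparent
#   linelength=100
#   filter=-build/include_order,+build/include_alpha
#   exclude_files=.*_generated\.cc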
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
_BackupFilters()
old_errors = _cpplint_state.error_count
if not ProcessConfigOverrides(filename):
_RestoreFilters()
return
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
# Warn on every line with CR. An alternative approach might be to
# check whether the file is mostly CRLF or just LF, and warn on the
    # minority; we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
# Suppress printing anything if --quiet was passed unless the error
# count has increased after processing this file.
if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:
sys.stdout.write('Done processing %s\n' % filename)
_RestoreFilters()
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
    args: The command line arguments.
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions=',
'headers=',
'quiet'])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
quiet = _Quiet()
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--quiet':
quiet = True
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
        PrintUsage('Line length must be numeric.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
        PrintUsage('Extensions must be a comma-separated list.')
elif opt == '--headers':
ProcessHppHeadersOption(val)
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetQuiet(quiet)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
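# Example command line (hypothetical files) handled by ParseArguments:
#   cpplint.py --output=vs7 --linelength=100 \
#       --filter=-whitespace/tab,+build/include foo.cc bar.h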
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
# If --quiet is passed, suppress printing error count unless there are errors.
if not _cpplint_state.quiet or _cpplint_state.error_count > 0:
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
|
{
"content_hash": "b21c13a712e6f8e9e86a27f1d79ce979",
"timestamp": "",
"source": "github",
"line_count": 5017,
"max_line_length": 97,
"avg_line_length": 39.90711580625872,
"alnum_prop": 0.6366138232091662,
"repo_name": "zuyu/incubator-quickstep",
"id": "431c112cccceb4dfbfedc24435c9aea029ff956b",
"size": "239726",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "third_party/src/cpplint/cpplint.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9171115"
},
{
"name": "CMake",
"bytes": "659012"
},
{
"name": "Dockerfile",
"bytes": "1803"
},
{
"name": "Python",
"bytes": "83442"
},
{
"name": "Ruby",
"bytes": "5352"
},
{
"name": "Shell",
"bytes": "9617"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
import warnings
from django.utils.translation import ugettext_lazy as _
import xstatic.main
import xstatic.pkg.angular
import xstatic.pkg.angular_cookies
import xstatic.pkg.angular_mock
import xstatic.pkg.bootstrap_datepicker
import xstatic.pkg.bootstrap_scss
import xstatic.pkg.d3
import xstatic.pkg.font_awesome
import xstatic.pkg.hogan
import xstatic.pkg.jasmine
import xstatic.pkg.jquery
import xstatic.pkg.jquery_migrate
import xstatic.pkg.jquery_quicksearch
import xstatic.pkg.jquery_tablesorter
import xstatic.pkg.jquery_ui
import xstatic.pkg.jsencrypt
import xstatic.pkg.qunit
import xstatic.pkg.rickshaw
import xstatic.pkg.spin
from openstack_horizon import exceptions
warnings.formatwarning = lambda message, category, *args, **kwargs: \
'%s: %s' % (category.__name__, message)
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
BIN_DIR = os.path.abspath(os.path.join(ROOT_PATH, '..', 'bin'))
if ROOT_PATH not in sys.path:
sys.path.append(ROOT_PATH)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_BRANDING = 'OpenStack Dashboard'
LOGIN_URL = '/auth/login/'
LOGOUT_URL = '/auth/logout/'
# LOGIN_REDIRECT_URL can be used as an alternative for
# HORIZON_CONFIG.user_home, if user_home is not set.
# Do not set it to '/home/', as this will cause a circular redirect loop.
LOGIN_REDIRECT_URL = '/'
MEDIA_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'static'))
STATIC_URL = '/static/'
ROOT_URLCONF = 'openstack_horizon.urls'
HORIZON_CONFIG = {
'user_home': 'openstack_horizon.views.get_user_home',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
'angular_modules': [],
'js_files': [],
}
# Set to True to allow users to upload images to glance via Horizon server.
# When enabled, a file form field will appear on the create image form.
# See documentation for deployment considerations.
HORIZON_IMAGES_ALLOW_UPLOAD = True
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
OPENSTACK_IMAGE_BACKEND = {
'image_formats': [
('', _('Select format')),
('aki', _('AKI - Amazon Kernel Image')),
('ami', _('AMI - Amazon Machine Image')),
('ari', _('ARI - Amazon Ramdisk Image')),
('iso', _('ISO - Optical Disk Image')),
('qcow2', _('QCOW2 - QEMU Emulator')),
('raw', _('Raw')),
('vdi', _('VDI')),
('vhd', _('VHD')),
('vmdk', _('VMDK'))
]
}
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'horizon_lib.middleware.HorizonMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'horizon.context_processors.horizon',
'openstack_horizon.context_processors.openstack',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'horizon_lib.loaders.TemplateLoader'
)
TEMPLATE_DIRS = (
os.path.join(ROOT_PATH, 'templates'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
STATICFILES_DIRS = [
('horizon_lib/lib/angular',
xstatic.main.XStatic(xstatic.pkg.angular).base_dir),
('horizon_lib/lib/angular',
xstatic.main.XStatic(xstatic.pkg.angular_cookies).base_dir),
('horizon_lib/lib/angular',
xstatic.main.XStatic(xstatic.pkg.angular_mock).base_dir),
('horizon_lib/lib/bootstrap_datepicker',
xstatic.main.XStatic(xstatic.pkg.bootstrap_datepicker).base_dir),
('bootstrap',
xstatic.main.XStatic(xstatic.pkg.bootstrap_scss).base_dir),
('horizon_lib/lib',
xstatic.main.XStatic(xstatic.pkg.d3).base_dir),
('horizon_lib/lib',
xstatic.main.XStatic(xstatic.pkg.hogan).base_dir),
('horizon_lib/lib/font-awesome',
xstatic.main.XStatic(xstatic.pkg.font_awesome).base_dir),
('horizon_lib/lib/jasmine-1.3.1',
xstatic.main.XStatic(xstatic.pkg.jasmine).base_dir),
('horizon_lib/lib/jquery',
xstatic.main.XStatic(xstatic.pkg.jquery).base_dir),
('horizon_lib/lib/jquery',
xstatic.main.XStatic(xstatic.pkg.jquery_migrate).base_dir),
('horizon_lib/lib/jquery',
xstatic.main.XStatic(xstatic.pkg.jquery_quicksearch).base_dir),
('horizon_lib/lib/jquery',
xstatic.main.XStatic(xstatic.pkg.jquery_tablesorter).base_dir),
('horizon_lib/lib/jsencrypt',
xstatic.main.XStatic(xstatic.pkg.jsencrypt).base_dir),
('horizon_lib/lib/qunit',
xstatic.main.XStatic(xstatic.pkg.qunit).base_dir),
('horizon_lib/lib',
xstatic.main.XStatic(xstatic.pkg.rickshaw).base_dir),
('horizon_lib/lib',
xstatic.main.XStatic(xstatic.pkg.spin).base_dir),
]
if xstatic.main.XStatic(xstatic.pkg.jquery_ui).version.startswith('1.10.'):
# The 1.10.x versions already contain the 'ui' directory.
STATICFILES_DIRS.append(
('horizon_lib/lib/jquery-ui',
xstatic.main.XStatic(xstatic.pkg.jquery_ui).base_dir))
else:
# Newer versions dropped the directory, add it to keep the path the same.
STATICFILES_DIRS.append(
('horizon_lib/lib/jquery-ui/ui',
xstatic.main.XStatic(xstatic.pkg.jquery_ui).base_dir))
COMPRESS_PRECOMPILERS = (
('text/scss', 'django_pyscss.compressor.DjangoScssFilter'),
)
COMPRESS_CSS_FILTERS = (
'compressor.filters.css_default.CssAbsoluteFilter',
)
COMPRESS_ENABLED = True
COMPRESS_OUTPUT_DIR = 'dashboard'
COMPRESS_CSS_HASHING_METHOD = 'hash'
COMPRESS_PARSER = 'compressor.parser.HtmlParser'
INSTALLED_APPS = [
'openstack_horizon',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django_pyscss',
'openstack_horizon.django_pyscss_fix',
'compressor',
'horizon_lib',
'openstack_auth',
]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
AUTHENTICATION_BACKENDS = ('openstack_auth.backend.KeystoneBackend',)
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_HTTPONLY = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_SECURE = False
SESSION_TIMEOUT = 1800
# A token can be near the end of its validity when a page starts loading,
# and become invalid during rendering, which can cause errors while the
# page loads. TOKEN_TIMEOUT_MARGIN defines a margin, in seconds, subtracted
# from the token validity to avoid this issue. You can adjust this time
# depending on the performance of the infrastructure.
TOKEN_TIMEOUT_MARGIN = 10
# When using cookie-based sessions, log an error when the session cookie
# exceeds the following size (common browsers drop cookies above a certain
# size):
SESSION_COOKIE_MAX_SIZE = 4093
# When doing upgrades, it may be wise to stick to PickleSerializer.
# NOTE(berendt): Check during the K-cycle if this variable can be removed.
# https://bugs.launchpad.net/horizon/+bug/1349463
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
LANGUAGES = (
('de', 'German'),
('en', 'English'),
('en-au', 'Australian English'),
('en-gb', 'British English'),
('es', 'Spanish'),
('fr', 'French'),
('hi', 'Hindi'),
('ja', 'Japanese'),
('ko', 'Korean (Korea)'),
('nl', 'Dutch (Netherlands)'),
('pl', 'Polish'),
('pt-br', 'Portuguese (Brazil)'),
('sr', 'Serbian'),
('zh-cn', 'Simplified Chinese'),
('zh-tw', 'Chinese (Taiwan)'),
)
LANGUAGE_CODE = 'en'
LANGUAGE_COOKIE_NAME = 'horizon_language'
USE_I18N = True
USE_L10N = True
USE_TZ = True
OPENSTACK_KEYSTONE_DEFAULT_ROLE = '_member_'
DEFAULT_EXCEPTION_REPORTER_FILTER = ('horizon_lib.exceptions.'
                                     'HorizonReporterFilter')
POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
# Map of local copy of service policy files
POLICY_FILES = {
'identity': 'keystone_policy.json',
'compute': 'nova_policy.json',
'volume': 'cinder_policy.json',
'image': 'glance_policy.json',
'orchestration': 'heat_policy.json',
'network': 'neutron_policy.json',
}
SECRET_KEY = None
LOCAL_PATH = None
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': _('All TCP'),
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'all_udp': {
'name': _('All UDP'),
'ip_protocol': 'udp',
'from_port': '1',
'to_port': '65535',
},
'all_icmp': {
'name': _('All ICMP'),
'ip_protocol': 'icmp',
'from_port': '-1',
'to_port': '-1',
},
}
try:
from local.local_settings import * # noqa
except ImportError:
logging.warning("No local_settings file found.")
# Load the pluggable dashboard settings
import openstack_horizon.enabled
import openstack_horizon.local.enabled
from openstack_horizon.utils import settings
INSTALLED_APPS = list(INSTALLED_APPS) # Make sure it's mutable
settings.update_dashboards([
openstack_horizon.enabled,
openstack_horizon.local.enabled,
], HORIZON_CONFIG, INSTALLED_APPS)
# Ensure that we always have a SECRET_KEY set, even when no local_settings.py
# file is present. See local_settings.py.example for full documentation on the
# horizon_lib.utils.secret_key module and its use.
if not SECRET_KEY:
if not LOCAL_PATH:
LOCAL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'local')
from horizon_lib.utils import secret_key
SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH,
'.secret_key_store'))
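# A local_settings.py can instead pin the key explicitly; illustrative
# value only, use a long random string in real deployments:
#   SECRET_KEY = 'replace-me-with-a-long-random-string'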
from openstack_horizon import policy
POLICY_CHECK_FUNCTION = policy.check
# Add HORIZON_CONFIG to the context information for offline compression
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': STATIC_URL,
'HORIZON_CONFIG': HORIZON_CONFIG
}
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
# During Django reloads with an active user logged in, the monkey patch
# below would not otherwise be applied in time, resulting in developers
# appearing to be logged out. In typical production deployments this
# section may be omitted, though it should not be harmful.
from openstack_auth import utils as auth_utils
auth_utils.patch_middleware_get_user()
|
{
"content_hash": "b2801c301d72da9e50b1e11a10c25699",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 79,
"avg_line_length": 33.571014492753626,
"alnum_prop": 0.6831289932654119,
"repo_name": "mrunge/openstack_horizon",
"id": "fb273b52e09936b1d532512008dec5da19f0a8f0",
"size": "12346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_horizon/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63809"
},
{
"name": "JavaScript",
"bytes": "40"
},
{
"name": "Python",
"bytes": "3460539"
},
{
"name": "Shell",
"bytes": "16000"
}
],
"symlink_target": ""
}
|
__author__ = 'dengzhihong'
from numpy import *
import numpy as np
from sklearn.decomposition import *
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
import pylab
import matplotlib.cm as cm
import matplotlib.pyplot as plt
def randomTrail(vectors, labels):
train = open("./Output/random_train.txt","w")
test = open("./Output/random_test.txt","w")
TrainIndice = []
TestIndice = []
for i in range(2000):
        # numpy's randint excludes its upper bound, so sample from
        # [0, 4000) to make all 4000 indices reachable.
        rand = random.randint(0, 4000)
        while(TrainIndice.count(rand) != 0):
            rand = random.randint(0, 4000)
TrainIndice.append(rand)
TrainIndice.sort()
for i in range(4000):
if(TrainIndice.count(i) == 0):
TestIndice.append(i)
for i in range(2000):
train.write(str(TrainIndice[i]) + " " + str(TrainIndice[i]) + "\n")
test.write(str(TestIndice[i]) + " " + str(TestIndice[i]) + "\n")
train.close()
test.close()
return 0
def toFloatList(stringlist):
floatlist = []
for i in range(0, len(stringlist)):
floatlist.append(float(stringlist[i]))
return floatlist
def toStrList(floatlist):
strlist = []
for i in range(len(floatlist)):
strlist.append(str(floatlist[i]))
return strlist
def prepareData(Data, Label, index):
#process the data
Num = index.shape[0]
k = Data.shape[1]
OutData = zeros((Num, k))
OutLabel = zeros(Num)
for i in range(Num):
DataIndex = index[i][0]
LabelIndex = index[i][1]
OutData[i] = Data[DataIndex]
OutLabel[i] = Label[LabelIndex]
return OutData, OutLabel
def prepareSvmClassifier(TrainData, TrainLabel, N, Kernel, c=1.0, Gamma=0.0):
ClfSet = []
Num = TrainData.shape[0]
for i in range(N):
#print i , "++++++++++++"
TempLabel = TrainLabel.copy()
        # Generate one-vs-rest labels: the target digit's label is +1, all others -1
for j in range(Num):
if(TrainLabel[j] == i):
TempLabel[j] = 1
else:
TempLabel[j] = -1
#outputLabelList(TempLabel, "TrainLabel" + str(i), "Train Label For Classifier" + str(i))
if(Kernel == 'linear'):
clf = SVC(kernel='linear', C=c)
elif(Kernel == 'poly'):
clf = SVC(kernel='poly', C=c, gamma=Gamma)
elif(Kernel == 'rbf'):
clf = SVC(kernel='rbf', C=c, gamma=Gamma)
#print '----------------------------------'
#print "Fit classifier " , i
clf.fit(TrainData, TempLabel)
#print clf.support_vectors_.shape
#print '----------------------------------'
ClfSet.append(clf)
return ClfSet
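# Usage sketch (shapes are illustrative): train ten one-vs-rest linear SVMs
# and classify one sample by taking the most confident decision function.
#   clfs = prepareSvmClassifier(TrainData, TrainLabel, 10, 'linear', c=2.0)
#   scores = [clf.decision_function(TestData[0]) for clf in clfs]
#   predicted_digit = scores.index(max(scores))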
def prepareLrClassifier(TrainData, TrainLabel, N):
ClfSet = []
Num = TrainData.shape[0]
for i in range(N):
#print i , "++++++++++++"
TempLabel = TrainLabel.copy()
        # Generate one-vs-rest labels: the target digit's label is +1, all others -1
for j in range(Num):
if(TrainLabel[j] == i):
TempLabel[j] = 1
else:
TempLabel[j] = -1
#outputLabelList(TempLabel, "TrainLabel" + str(i), "Train Label For Classifier" + str(i))
clf = LogisticRegression()
#print '----------------------------------'
#print "Fit classifier " , i
clf.fit(TrainData, TempLabel)
#print clf.support_vectors_.shape
#print '----------------------------------'
ClfSet.append(clf)
return ClfSet
def processLabel(Label, target):
ResultLabel =Label.copy()
Num = Label.shape[0]
    # Generate one-vs-rest labels: the target digit's label is +1, all others -1
for j in range(Num):
if(ResultLabel[j] == target):
ResultLabel[j] = 1
else:
ResultLabel[j] = -1
return ResultLabel
def showDigit(digit, title = "Digit"):
fig = pylab.figure()
pylab.title(title)
fig.add_subplot(1,1,1)
pylab.imshow(digit.reshape(28, 28).T, cmap = cm.Greys_r)
pylab.show()
def outputTrainingData(TrainData, TrainLabel, PCA_K):
output = open("./Output/Trial2_" + str(PCA_K) + ".txt", "w")
N = TrainData.shape[0]
D = TrainData.shape[1]
for i in range(N):
output.write(str(int(TrainLabel[i])) + " ")
for j in range(D):
output.write(str(j) + ":" + str(TrainData[i][j]) + " ")
output.write("\n")
output.close()
def normalization(data):
return data/255.0 * 2 - 1
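# Maps raw pixel values in [0, 255] onto [-1, 1], e.g.:
#   normalization(array([0.0, 127.5, 255.0])) -> array([-1., 0., 1.])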
def testWithSVM(labels, vectors, testset, trainset, Kernel='linear', C=1.0, gamma=0.0, PCA_K=0):
Label = array(toFloatList(labels))
OriginData = array(toFloatList(vectors)).reshape(4000, 784)
TestNum = len(testset)
test = array(toFloatList(testset)).reshape(TestNum/2,2) - 1
TrainNum = len(trainset)
train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
OriginData = normalization(OriginData)
Data = OriginData
RetainRate = 100
if(PCA_K != 0):
k = PCA_K
pca = PCA(n_components=k)
Data = pca.fit_transform(OriginData)
sum = 0
for i in range(k):
sum += pca.explained_variance_ratio_[i]
RetainRate = sum * 100
        print "retain " + str(RetainRate) + "% of the variance"
TrainData, TrainLabel = prepareData(Data, Label, train)
TestData, TestLabel = prepareData(Data, Label, test)
#outputTrainingData(TrainData, TrainLabel, PCA_K)
print 'SVM with ', Kernel, ' Kernel'
if(Kernel == 'linear'):
ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, Kernel, C)
elif(Kernel == 'poly'):
ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, Kernel, C, Gamma=gamma)
elif(Kernel == 'rbf'):
ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, Kernel, C, Gamma=gamma)
else:
ClfSet = []
print "Please Choose a kernel"
exit()
N = test.shape[0]
correct = 0.0
CorrectNum = []
for i in range(10):
CorrectNum.append(0)
for i in range(N):
confidence = -999
classification = -1
for j in range(10):
temp = ClfSet[j].decision_function(TestData[i])
if(confidence < temp):
confidence = temp
classification = j
if(classification == TestLabel[i]):
CorrectNum[classification] += 1
correct += 1
Correctness = correct/N * 100
print "Accuracy: ", Correctness, "%"
for i in range(10):
print "digit ", i , ": ", CorrectNum[i], "/200"
return RetainRate, Correctness, CorrectNum
def testWithLR(labels, vectors, testset, trainset, PCA_K=0):
Label = array(toFloatList(labels))
OriginData = array(toFloatList(vectors)).reshape(4000,784)
TestNum = len(testset)
test = array(toFloatList(testset)).reshape(TestNum/2,2) - 1
TrainNum = len(trainset)
train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
OriginData = normalization(OriginData)
Data = OriginData
RetainRate = 100
if(PCA_K != 0):
k = PCA_K
pca = PCA(n_components=k)
Data = pca.fit_transform(OriginData)
sum = 0
for i in range(k):
sum += pca.explained_variance_ratio_[i]
RetainRate = sum * 100
        print "retain " + str(RetainRate) + "% of the variance"
TrainData, TrainLabel = prepareData(Data, Label, train)
TestData, TestLabel = prepareData(Data, Label, test)
print 'Logistic Regression With Dimension Reduced to ', PCA_K
clf = LogisticRegression()
clf.fit(TrainData, TrainLabel)
result = clf.predict(TestData)
N = TestLabel.shape[0]
correct = 0.0
CorrectNum = []
for i in range(10):
CorrectNum.append(0)
for i in range(N):
#print result[i], " - ", TestLabel[i]
if(result[i] == TestLabel[i]):
CorrectNum[int(result[i])] += 1
correct += 1
Correctness = correct/N * 100
print "Accuracy: ", Correctness
for i in range(10):
print "digit ", i , ": ", CorrectNum[i], "/200"
return RetainRate, Correctness, CorrectNum
def testWithKNN(labels, vectors, testset, trainset, PCA_K=0):
Label = array(toFloatList(labels))
OriginData = array(toFloatList(vectors)).reshape(4000,784)
TestNum = len(testset)
test = array(toFloatList(testset)).reshape(TestNum/2,2) - 1
TrainNum = len(trainset)
train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
OriginData = normalization(OriginData)
Data = OriginData
RetainRate = 100
if(PCA_K != 0):
k = PCA_K
pca = PCA(n_components=k)
Data = pca.fit_transform(OriginData)
sum = 0
for i in range(k):
sum += pca.explained_variance_ratio_[i]
RetainRate = sum * 100
        print "retain " + str(RetainRate) + "% of the variance"
print '1NN With Dimension Reduced to ', PCA_K
TrainData, TrainLabel = prepareData(Data, Label, train)
TestData, TestLabel = prepareData(Data, Label, test)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(TrainData, TrainLabel)
result = knn.predict(TestData)
N = TestLabel.shape[0]
correct = 0.0
CorrectNum = []
for i in range(10):
CorrectNum.append(0)
for i in range(N):
#print result[i], " - ", TestLabel[i]
if(result[i] == TestLabel[i]):
CorrectNum[int(result[i])] += 1
correct += 1
Correctness = correct/N * 100
print "Accuracy: ", Correctness
for i in range(10):
print "digit ", i , ": ", CorrectNum[i], "/200"
return RetainRate, Correctness, CorrectNum
from test import *
from numpy import *
def outputDataForCrossValidation(labels, vectors, testset, trainset, PCA_K=0):
Label = array(toFloatList(labels))
OriginData = array(toFloatList(vectors)).reshape(4000,784)
TestNum = len(testset)
test = array(toFloatList(testset)).reshape(TestNum/2,2) - 1
TrainNum = len(trainset)
train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
OriginData = normalization(OriginData)
Data = OriginData
RetainRate = 100
if(PCA_K != 0):
k = PCA_K
pca = PCA(n_components=k)
Data = pca.fit_transform(OriginData)
sum = 0
for i in range(k):
sum += pca.explained_variance_ratio_[i]
RetainRate = sum * 100
        print "retain " + str(RetainRate) + "% of the variance"
TrainData, TrainLabel = prepareData(Data, Label, train)
TestData, TestLabel = prepareData(Data, Label, test)
outputTrainingData(TrainData, TrainLabel, PCA_K)
def accuracyAndDimension(labels, vectors, testset, trainset):
TrialResult = zeros((5,1,6))
# ------------------------------------------------------------------------------------------------
    # Trial 1
# ------------------------------------------------------------------------------------------------
print "Trial 1"
TestC_1 = [2, 8, 2, 2, 2, 2]
TestGamma_1 = [0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125]
TestK = [20, 50, 84, 150, 300, 400]
#drawCombinedDiagram(labels, vectors, testset, trainset, 'Trial 1', TestK, TestC, TestGamma)
Result_1 = getTrialAccuracyResult(labels, vectors, testset, trainset, TestK, TestC_1, TestGamma_1)
#print TrialResult
#print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
TrialResult += Result_1
print TrialResult
# ------------------------------------------------------------------------------------------------
    # Trial 2
print "Trial 2"
TestC_2 = [2, 8, 8, 2, 2, 2]
TestGamma_2 = [0.03125, 0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125]
#drawCombinedDiagram(labels, vectors, trainset, testset, 'Trial 2', TestK, TestC, TestGamma)
Result_2 = getTrialAccuracyResult(labels, vectors, trainset, testset, TestK, TestC_2, TestGamma_2)
#print TrialResult
#print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
TrialResult += Result_2
print TrialResult
# ------------------------------------------------------------------------------------------------
#Random Trial
'''
print "Trial 3"
#testWithSVM(labels, vectors, random_testset, random_trainset, Kernel='linear', PCA_K=20)
TestC_3 = [2, 8, 8, 2, 2, 2]
TestGamma_3 = [0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125]
#drawCombinedDiagram(labels, vectors, random_testset, random_trainset, 'Random Trial', TestK, TestC, TestGamma)
Result_3 = getTrialResult(labels, vectors, random_testset, random_trainset, TestK, TestC_3, TestGamma_3)
TrialResult += Result_3
'''
#print TrialResult
#print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
# ------------------------------------------------------------------------------------------------
TrialResult = TrialResult/2.0
print TrialResult
print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
C_SVM_LINEAR = TrialResult[0][0].tolist()
C_SVM_POLY = TrialResult[1][0].tolist()
C_SVM_RBF = TrialResult[2][0].tolist()
C_LR = TrialResult[3][0].tolist()
C_1NN = TrialResult[4][0].tolist()
plt.figure(1)
plt.title("Average Accuracy for all trials")
plt.xlabel("dimension")
plt.ylabel("accuracy(%)")
plt.axis([0,450,83,97])
plt.grid(True)
plt.plot(TestK, C_SVM_LINEAR, 'ro--')
plt.plot(TestK, C_SVM_POLY, 'b<--')
plt.plot(TestK, C_SVM_RBF, 'gs--')
plt.plot(TestK, C_LR, 'yd--')
plt.plot(TestK, C_1NN, 'co--')
P = 4
plt.annotate("svm linear", xy=(TestK[P], C_SVM_LINEAR[P]),xytext=(TestK[P]-18, C_SVM_LINEAR[P]+0.2))
plt.annotate("svm poly", xy=(TestK[P], C_SVM_POLY[P]),xytext=(TestK[P]-18, C_SVM_POLY[P]+0.5))
plt.annotate("svm rbf", xy=(TestK[P], C_SVM_RBF[P]),xytext=(TestK[P]-18, C_SVM_RBF[P]+0.2))
plt.annotate("lr", xy=(TestK[P], C_LR[P]),xytext=(TestK[P]-18, C_LR[P]+0.2))
plt.annotate("1nn", xy=(TestK[P], C_1NN[P]),xytext=(TestK[P]-18, C_1NN[P]+0.2))
plt.show()
def recognitionNumberAndDigit(labels, vectors, testset, trainset):
TrialResult = zeros((5,1,10))
Digit = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# ------------------------------------------------------------------------------------------------
    # Trial 1
# ------------------------------------------------------------------------------------------------
print "Trial 1"
TestC_1 = [2, 8, 2, 2, 2, 2]
TestGamma_1 = [0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125]
TestK = [20, 50, 84, 150, 300, 400]
#drawCombinedDiagram(labels, vectors, testset, trainset, 'Trial 1', TestK, TestC, TestGamma)
Result_1 = getTrialDigitResult(labels, vectors, testset, trainset, TestK, TestC_1, TestGamma_1)
#print TrialResult
#print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
TrialResult += Result_1
print TrialResult
# ------------------------------------------------------------------------------------------------
    # Trial 2
print "Trial 2"
TestC_2 = [2, 8, 8, 2, 2, 2]
TestGamma_2 = [0.03125, 0.0078125, 0.0078125, 0.0078125, 0.0078125, 0.0078125]
#drawCombinedDiagram(labels, vectors, trainset, testset, 'Trial 2', TestK, TestC, TestGamma)
Result_2 = getTrialDigitResult(labels, vectors, trainset, testset, TestK, TestC_2, TestGamma_2)
#print TrialResult
#print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
TrialResult += Result_2
print TrialResult
# ------------------------------------------------------------------------------------------------
TrialResult = TrialResult/2.0
TrialResult = TrialResult.round(2)
print TrialResult
print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
C_SVM_LINEAR = TrialResult[0][0].tolist()
C_SVM_POLY = TrialResult[1][0].tolist()
C_SVM_RBF = TrialResult[2][0].tolist()
C_LR = TrialResult[3][0].tolist()
C_1NN = TrialResult[4][0].tolist()
plt.figure(1)
plt.title("Average Recognition Number for all digits")
plt.xlabel("digit")
    plt.ylabel("number of correct recognitions")
plt.axis([-1,10,140,205])
plt.grid(True)
plt.plot(Digit, C_SVM_LINEAR, 'ro--')
plt.plot(Digit, C_SVM_POLY, 'b<--')
plt.plot(Digit, C_SVM_RBF, 'gs--')
plt.plot(Digit, C_LR, 'yd--')
plt.plot(Digit, C_1NN, 'co--')
P = 9
plt.annotate("svm linear", xy=(Digit[P], C_SVM_LINEAR[P]),xytext=(Digit[P]+0.1, C_SVM_LINEAR[P]))
plt.annotate("svm poly", xy=(Digit[P], C_SVM_POLY[P]),xytext=(Digit[P]+0.1, C_SVM_POLY[P]))
plt.annotate("svm rbf", xy=(Digit[P], C_SVM_RBF[P]),xytext=(Digit[P]+0.1, C_SVM_RBF[P]))
plt.annotate("lr", xy=(Digit[P], C_LR[P]),xytext=(Digit[P]+0.1, C_LR[P]))
plt.annotate("1nn", xy=(Digit[P], C_1NN[P]),xytext=(Digit[P]+0.1, C_1NN[P]))
plt.show()
def getTestResult(vectors, labels, testset, trainset, PCA_K=0, C=1, GAMMA=0.0, method=''):
Label = array(toFloatList(labels))
OriginData = array(toFloatList(vectors)).reshape(4000,784)
TestNum = len(testset)
test = array(toFloatList(testset)).reshape(TestNum/2,2) - 1
TrainNum = len(trainset)
train = array(toFloatList(trainset)).reshape(TrainNum/2,2) - 1
OriginData = normalization(OriginData)
Data = OriginData
RetainRate = 100
if(PCA_K != 0):
k = PCA_K
pca = PCA(n_components=k)
Data = pca.fit_transform(OriginData)
sum = 0
for i in range(k):
sum += pca.explained_variance_ratio_[i]
RetainRate = sum * 100
#print "retain " + str(RetainRate) +"% of the variance"
TrainData, TrainLabel = prepareData(Data, Label, train)
TestData, TestLabel = prepareData(Data, Label, test)
#outputTrainingData(TrainData, TrainLabel, PCA_K)
clf = 0
ClfSet = []
#print 'Method: ', method
if(method == 'svm_linear'):
ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, 'linear')
elif(method == 'svm_poly'):
ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, 'poly')
elif(method == 'svm_rbf'):
ClfSet = prepareSvmClassifier(TrainData, TrainLabel, 10, 'rbf', c=C, Gamma=GAMMA)
elif(method == 'lr'):
clf = LogisticRegression()
clf.fit(TrainData, TrainLabel)
elif(method == '1nn'):
clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(TrainData, TrainLabel)
else:
print "Please Choose a kernel"
exit()
N = TestData.shape[0]
    #print "Total ", N, " test data"
correct = 0.0
CorrectNum = []
for i in range(10):
CorrectNum.append(0)
if(method[0:3] == 'svm'):
for i in range(N):
confidence = -999
classification = -1
for j in range(10):
temp = ClfSet[j].decision_function(TestData[i])
if(confidence < temp):
confidence = temp
classification = j
if(classification == TestLabel[i]):
CorrectNum[classification] += 1
correct += 1
else:
result = clf.predict(TestData)
for i in range(N):
if(result[i] == TestLabel[i]):
CorrectNum[int(result[i])] += 1
correct += 1
Correctness = correct/N * 100
'''
print "Accuracy: ", Correctness, "%"
for i in range(10):
print "digit ", i , ": ", CorrectNum[i], "/10"
'''
return Correctness
def getTrialAccuracyResult(labels, vectors, testset, trainset, TestK, TestC, TestGamma):
N = len(TestK)
C_SVM_LINEAR = []
C_SVM_POLY = []
C_SVM_RBF = []
C_LR = []
C_1NN = []
AccuracyResult = zeros((5,1,N))
for i in range(N):
print "Test ", i
print "--------------------------------------------------------------------------------------------------"
print "K = ", TestK[i]
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='linear', PCA_K=TestK[i])
C_SVM_LINEAR.append(Correct)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='poly', PCA_K=TestK[i])
C_SVM_POLY.append(Correct)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='rbf', C=TestC[i], gamma=TestGamma[i], PCA_K=TestK[i])
C_SVM_RBF.append(Correct)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithLR(labels, vectors, testset, trainset, PCA_K=TestK[i])
C_LR.append(Correct)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithKNN(labels, vectors, testset, trainset, PCA_K=TestK[i])
C_1NN.append(Correct)
print "--------------------------------------------------------------------------------------------------"
AccuracyResult[0][0] = array(C_SVM_LINEAR)
AccuracyResult[1][0] = array(C_SVM_POLY)
AccuracyResult[2][0] = array(C_SVM_RBF)
AccuracyResult[3][0] = array(C_LR)
AccuracyResult[4][0] = array(C_1NN)
#print "Return:"
#print AccuracyResult
#print "\n\n"
return AccuracyResult
def getTrialDigitResult(labels, vectors, testset, trainset, TestK, TestC, TestGamma):
N = len(TestK)
Num_linear = zeros((1, 10))
Num_poly = zeros((1, 10))
Num_rbf = zeros((1, 10))
Num_lr = zeros((1, 10))
Num_1nn = zeros((1, 10))
RecognitionNum = zeros((5,1,10))
for i in range(len(TestK)):
print "Test ", i
print "--------------------------------------------------------------------------------------------------"
print "K = ", TestK[i]
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='linear', PCA_K=TestK[i])
Num_linear += array(C_NUM)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='poly', PCA_K=TestK[i])
Num_poly += array(C_NUM)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithSVM(labels, vectors, testset, trainset, Kernel='rbf', C=TestC[i], gamma=TestGamma[i], PCA_K=TestK[i])
Num_rbf += array(C_NUM)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithLR(labels, vectors, testset, trainset, PCA_K=TestK[i])
Num_lr += array(C_NUM)
print "--------------------------------------------------------------------------------------------------"
Rate, Correct, C_NUM = testWithKNN(labels, vectors, testset, trainset, PCA_K=TestK[i])
Num_1nn += array(C_NUM)
RecognitionNum[0][0] = array(Num_linear)
RecognitionNum[1][0] = array(Num_poly)
RecognitionNum[2][0] = array(Num_rbf)
RecognitionNum[3][0] = array(Num_lr)
RecognitionNum[4][0] = array(Num_1nn)
return RecognitionNum/N
|
{
"content_hash": "505cffe852ae687bd865c4dd3fbc0d5a",
"timestamp": "",
"source": "github",
"line_count": 602,
"max_line_length": 140,
"avg_line_length": 39.308970099667775,
"alnum_prop": 0.5346940500338067,
"repo_name": "dzh123xt/DigitRecognition",
"id": "5b269f437faaba3f3caf9b5db8b5cd41bbbb01f2",
"size": "23664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Methods/TestMethods.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37736"
}
],
"symlink_target": ""
}
|
from ee.utils import test
from ee.cli.main import get_test_app
class CliTestCaseSite(test.EETestCase):
def test_ee_cli(self):
self.app.setup()
self.app.run()
self.app.close()
def test_ee_cli_site_enable(self):
self.app = get_test_app(argv=['site', 'enable', 'example2.com'])
self.app.setup()
self.app.run()
self.app.close()
|
{
"content_hash": "0acad812d09285a1693e0a45e72c5aef",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 24.5,
"alnum_prop": 0.6020408163265306,
"repo_name": "Jurisdesk/freedoms",
"id": "87cc4f856d119103579dbc157cc7184187f0dc49",
"size": "392",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tests/cli/9_test_site_enable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "62065"
},
{
"name": "Python",
"bytes": "450536"
},
{
"name": "Shell",
"bytes": "24107"
}
],
"symlink_target": ""
}
|
import base64
from django.http import HttpResponse
from django.middleware.common import CommonMiddleware
from django.conf import settings
import os
class AuthMiddleware(CommonMiddleware):
"""
Add this to middleware:
'utils.basic_auth_middleware.AuthMiddleware',
Add these settings:
USE_BASIC_AUTH = True # This setting is optionally settable as an env var, env var will override whatever is set in settings
BASIC_AUTH_USER = 'user'
BASIC_AUTH_PASS = 'password'
"""
def process_request(self, request):
        use_basic_auth = (getattr(settings, 'USE_BASIC_AUTH', False) or
                          os.environ.get('USE_BASIC_AUTH', 'False') == 'True')
        # An explicit USE_BASIC_AUTH=False in the environment overrides settings.
        if use_basic_auth and os.environ.get('USE_BASIC_AUTH', None) != 'False':
if request.META.get('HTTP_AUTHORIZATION', False):
authtype, auth = request.META['HTTP_AUTHORIZATION'].split(' ')
auth = base64.b64decode(auth)
username, password = auth.split(':')
if (username == getattr(settings, 'BASIC_AUTH_USER', None)
and password == getattr(settings, 'BASIC_AUTH_PASS', None)):
return
        r = HttpResponse("Auth Required", status=401)
r['WWW-Authenticate'] = 'Basic realm="bat"'
return r
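# Example request against a protected deployment (hypothetical credentials);
# the client sends "Authorization: Basic " + base64("user:password"):
#   curl -u user:password https://example.com/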
|
{
"content_hash": "977ccd364fe0def4b2b6793237481c21",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 165,
"avg_line_length": 38.8125,
"alnum_prop": 0.644122383252818,
"repo_name": "blitzagency/django-chatterbox",
"id": "21f98cf67197924a9dab7426af42ec2f6bd2ad1e",
"size": "1242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/devbot/project/apps/utils/basic_auth_middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1745"
},
{
"name": "HTML",
"bytes": "10759"
},
{
"name": "JavaScript",
"bytes": "38264"
},
{
"name": "Makefile",
"bytes": "1540"
},
{
"name": "Python",
"bytes": "209440"
},
{
"name": "Ruby",
"bytes": "3342"
},
{
"name": "SaltStack",
"bytes": "2743"
},
{
"name": "Scheme",
"bytes": "615"
}
],
"symlink_target": ""
}
|
"""
Techies' landmines
:copyright: (c) 2014 Runzhou Li (Leo)
:license: The MIT License (MIT), see LICENSE for details.
"""
from __future__ import unicode_literals
from techies.compat import (
unicode, nativestr, unicode_data
)
import time
import redis
try:
import simplejson as json
except ImportError:
import json
class RedisBase(object):
def __init__(self, key, host='localhost', port=6379, db=0, **kwargs):
pool = redis.ConnectionPool(host=host, port=port, db=db)
self.conn = redis.StrictRedis(connection_pool=pool)
self.key = key
self.initialize(**kwargs)
def initialize(self, **kwargs):
pass
def clear(self, **kwargs):
self.conn.delete(self.key)
self.initialize(**kwargs)
class RedisHashBase(RedisBase):
def __str__(self):
return json.dumps(
unicode_data(self.json()), ensure_ascii=False
)
def __unicode__(self):
return self.__str__()
def json(self):
return unicode_data(self.conn.hgetall(self.key))
class MultiCounter(RedisHashBase):
'''
A stateless multi-event counter, based on Redis Hash
Hash fields:
event_1: positive int value
event_2: positive int value
...
event_N: positive int value
'''
def get_count(self, field):
return int(self.conn.hget(self.key, field) or 0)
def incr(self, field):
self.conn.hincrby(self.key, field, 1)
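# Usage sketch (assumes a reachable Redis server):
#   counter = MultiCounter('events')
#   counter.incr('signup')
#   counter.incr('signup')
#   counter.get_count('signup')  # -> 2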
class TsCounter(RedisHashBase):
'''
A stateless multi-key, single-event timestamp counter, based on Redis
Hash
Similar to MultiCounter, but instead of using only one key, it
bundles timestamps of the same chunk. Therefore conceptually the
user passes in a namespace instead of a key in constructor. In
initialize(), user can define chunk size in order to group
timestamps under different redis keys with a format of
<namespace>:<chunk>; user can also pass in a TTL for these keys to
make the mechanism overall memory efficient.
The chunk is calculated as <timestamp> - <timestamp> % <chunk_size>
Hash fields:
timestamp_1: positive int value
timestamp_2: positive int value
...
timestamp_N: positive int value
'''
def initialize(self, **kwargs):
# default chunk_size is 86400 seconds (1 day)
self.chunk_size = kwargs.get('chunk_size', 86400)
# default ttl is chunk_size * 2
self.ttl = kwargs.get('ttl', self.chunk_size * 2)
def get_count(self, timestamp=None):
if not timestamp:
timestamp = time.time()
timestamp = int(timestamp)
key = '{0}:{1}'.format(
self.key, timestamp - timestamp % self.chunk_size
)
return int(self.conn.hget(key, timestamp) or 0)
def incr(self, timestamp=None):
if not timestamp:
timestamp = time.time()
timestamp = int(timestamp)
chunk = timestamp - timestamp % self.chunk_size
key = '{0}:{1}'.format(self.key, chunk)
self.conn.hincrby(key, timestamp, 1)
self.conn.expireat(key, chunk + self.ttl)
def _chunks(self):
return self.conn.keys(self.key + ':*')
def clear(self):
chunks = self._chunks()
if len(chunks) > 0:
self.conn.delete(*chunks)
def json(self):
r = {}
for chunk in self._chunks():
r[chunk] = self.conn.hgetall(chunk)
return unicode_data(r)
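# Usage sketch (assumes a reachable Redis server): with the default
# chunk_size of 86400, timestamp 1400000123 falls in chunk
# 1400000123 - 1400000123 % 86400 = 1399939200, so the count is stored in
# hash key '<namespace>:1399939200' under field '1400000123'.
#   tc = TsCounter('hits')
#   tc.incr(1400000123)
#   tc.get_count(1400000123)  # -> 1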
class StateCounter(RedisHashBase):
'''
A single event state counter, based on Redis Hash
Hash fields:
state: 1 or 0 (on or off, respectively)
count: positive int value (current count)
total: positive int value (total count since init)
'''
def initialize(self, **kwargs):
if not self.conn.exists(self.key):
self.start()
self.conn.hset(self.key, 'total', kwargs.get('total', 0))
def clear(self):
self.conn.delete(self.key)
def get_state(self):
return int(self.conn.hget(self.key, 'state') or 0)
def get_count(self):
return int(self.conn.hget(self.key, 'count') or 0)
def get_total(self):
return int(self.conn.hget(self.key, 'total') or 0)
def start(self):
if self.stopped:
self._count2total()
self.conn.hset(self.key, 'state', 1)
def stop(self):
self.conn.hset(self.key, 'state', 0)
self._count2total()
def _count2total(self):
self.conn.hincrby(self.key, 'total', self.get_count())
self.conn.hset(self.key, 'count', 0)
def incr(self):
if self.stopped:
self.start()
self.conn.hincrby(self.key, 'count', 1)
@property
def started(self):
return bool(self.get_state())
@property
def stopped(self):
return not self.started
def get_all(self):
''' deprecated, use json() '''
return self.json()
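# Usage sketch (assumes a reachable Redis server): incr() flips the counter
# to the started state; stop() folds the current count into the total.
#   sc = StateCounter('worker')
#   sc.incr(); sc.incr()
#   sc.get_count()  # -> 2
#   sc.stop()
#   sc.get_total()  # -> 2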
class Queue(RedisBase):
'''
Queue, based on Redis List
Interfaces are almost standard queue compatible
'''
def qsize(self):
return self.conn.llen(self.key)
def empty(self):
return self.qsize() == 0
def full(self):
return False # need a better mechanism since it's controlled by Redis
def task_done(self):
pass
def join(self):
pass
def __len__(self):
return self.qsize()
def put(self, var, block=True, timeout=None):
self.conn.rpush(self.key, var)
def put_nowait(self, var):
self.put(var, block=False)
def get(self, block=True, timeout=None):
return unicode(nativestr(self.conn.lpop(self.key) or ''))
def get_nowait(self):
return self.get(block=False)
class UniQueue(Queue):
'''
Unique Queue, based on Redis Sorted Set
Inherits Queue but ignores repetitive items, keeps items unique. Score of
the sorted set member is epoch timestamp from time.time()
'''
def qsize(self):
return int(self.conn.zcard(self.key))
def put(self, var, block=True, timeout=None):
if not self.conn.zscore(self.key, var):
self.conn.zadd(self.key, time.time(), var)
def get(self, block=True, timeout=None):
if self.empty():
return unicode()
ret = self.conn.zrange(self.key, 0, 0)[0]
# Pop it out
self.conn.zrem(self.key, ret)
return unicode(nativestr(ret))
class CountQueue(UniQueue):
'''
Count Queue, based on Redis Sorted Set
Inherits UniQueue but score is used as a count of item appearance, that
the item has the highest count gets placed in front to be get() first
'''
def put(self, var, block=True, timeout=None):
self.conn.zincrby(self.key, var, 1)
def get(self, block=True, timeout=None):
if self.empty():
return ()
ret = self.conn.zrevrange(
self.key, 0, 0, withscores=True, score_cast_func=int
)[0]
# Pop it out
self.conn.zrem(self.key, ret[0])
return unicode(nativestr(ret[0])), ret[1]
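# Usage sketch (assumes a reachable Redis server): the most frequently
# enqueued item is returned first, together with its count.
#   q = CountQueue('urls')
#   q.put('a'); q.put('b'); q.put('a')
#   q.get()  # -> (u'a', 2)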
|
{
"content_hash": "cf1210a3eb641447112c44d365689e58",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 78,
"avg_line_length": 24.351535836177476,
"alnum_prop": 0.6023826208829712,
"repo_name": "woozyking/techies",
"id": "f0a8abed6b86d4ffaef26adf0de59afb63f31102",
"size": "7182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "techies/landmines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26196"
},
{
"name": "Shell",
"bytes": "732"
}
],
"symlink_target": ""
}
|
import base64
import datetime
import hashlib
import json
import logging
import urllib
import urllib2
import urlparse
import webapp2
from google.appengine.api import memcache
from webapp2_extras import jinja2
import credentials
class SinkHandler(webapp2.RequestHandler):
@webapp2.cached_property
def _jinja2(self):
return jinja2.get_jinja2(app=self.app)
def _render_template(self, filename, **template_args):
self.response.write(self._jinja2.render_template(
filename, **template_args))
def _set_cookie(self, key, value, days_expires=None):
kwargs = {
"domain": self.request.host,
"httponly": True,
"secure": self.request.scheme == "https",
}
if "localhost" in kwargs["domain"]:
del kwargs["domain"]
if days_expires:
kwargs["expires"] = datetime.datetime.now() + datetime.timedelta(
days_expires)
self.response.set_cookie(key, value, **kwargs)
def _all_tokens(self):
token = self.request.cookies.get("token")
if not token:
raise NeedAuthException(self.request.url)
return json.loads(token)
def _token(self):
if not hasattr(self, "_access_token"):
self._access_token = self._all_tokens()["access_token"]
return self._access_token
def _fetch(self, url, data=None, refresh=True):
try:
headers = {"Authorization": "Bearer %s" % self._token()}
if data:
data = json.dumps(data)
req = urllib2.Request(url, data, headers)
return urllib2.urlopen(req).read()
except urllib2.HTTPError, e:
if e.code == 401 and refresh:
self._refresh(self._all_tokens()["refresh_token"])
return self._fetch(url, data, refresh=False)
else:
logging.error(e.read())
raise
def _refresh(self, token):
data = urllib.urlencode({
"grant_type": "refresh_token",
"refresh_token": token,
"client_id": credentials.CLIENT_ID,
"client_secret": credentials.SECRET,
})
req = urllib2.Request("https://api.amazon.com/auth/o2/token", data)
response = urllib2.urlopen(req).read()
self._set_cookie("token", response, 365)
self._access_token = json.loads(response)["access_token"]
return response
def _endpoints(self):
endpoints = self.request.cookies.get("endpoints")
        if not endpoints or True:
            # The "or True" deliberately forces a refetch on every request;
            # the cookie cache is unused until a fetched-at time is recorded.
            # TODO: record fetched time
endpoints = self._fetch(
"https://drive.amazonaws.com/drive/v1/account/endpoint")
self._set_cookie("endpoints", endpoints, 5)
return json.loads(endpoints)
def _metadata(self):
return self._endpoints()["metadataUrl"]
def _content(self):
return self._endpoints()["contentUrl"]
def handle_exception(self, exception, debug):
if isinstance(exception, NeedAuthException):
# TODO: redirect back to exception.url afterwards
self.redirect("/auth?next=%s" % exception.url)
else:
super(SinkHandler, self).handle_exception(exception, debug)
class NeedAuthException(Exception):
def __init__(self, url):
self.url = url
class MainHandler(SinkHandler):
def get(self):
self._render_template("index.html")
class AuthHandler(SinkHandler):
def get(self):
code = self.request.get("code")
if not code:
# Need spaces to be encoded as %20 so can't use urllib.urlencode
url = "https://www.amazon.com/ap/oa?%s" % "&".join(
["=".join((urllib.quote(k), urllib.quote(v))) for k,v in{
"client_id": credentials.CLIENT_ID,
"scope": "clouddrive:read clouddrive:write",
"response_type": "code",
"redirect_uri": self.request.host_url + "/auth",
}.iteritems()])
self.redirect(url)
return
data = urllib.urlencode({
"grant_type": "authorization_code",
"code": code,
"client_id": credentials.CLIENT_ID,
"client_secret": credentials.SECRET,
"redirect_uri": self.request.host_url + "/auth",
})
req = urllib2.Request("https://api.amazon.com/auth/o2/token", data)
response = urllib2.urlopen(req).read()
self._set_cookie("token", response, 365)
self.redirect(self.request.get("next", "/config"))
class RefreshHandler(SinkHandler):
def post(self):
refresh_token = self.request.get("refresh_token")
self.response.write(self._refresh(refresh_token))
class NodesHandler(SinkHandler):
def get(self):
nodes = self._fetch(
"%snodes?%s" % (self._metadata(), self.request.query_string))
self.response.write("<pre>%s</pre>" % json.dumps(
json.loads(nodes), sort_keys=True, indent=4))
class ConfigHandler(SinkHandler):
def get(self):
if self.request.get("c"):
config = memcache.get("code:%s" % self.request.get("c"))
if config:
self.response.write(config)
else:
self.abort(404)
return
token = self._all_tokens()
token.update(self._endpoints())
config = json.dumps(token, sort_keys=True, indent=4)
        # b64encode with "c"/"m" altchars keeps the short code URL-safe
        code = base64.b64encode(hashlib.sha256(config).digest(), "cm")[:30]
memcache.set("code:%s" % code, config, time=900)
self._render_template("config.html", config=config,
code="%s?c=%s" % (self.request.url, code))
class UsageHandler(SinkHandler):
def get(self):
usage = self._fetch("%saccount/usage" % self._metadata())
self.response.write("<pre>%s</pre>" % json.dumps(
json.loads(usage), sort_keys=True, indent=4))
app = webapp2.WSGIApplication([
("/", MainHandler),
("/config", ConfigHandler),
("/auth", AuthHandler),
("/refresh", RefreshHandler),
("/nodes", NodesHandler),
("/usage", UsageHandler),
], debug=True)
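# Note (hedged): the endpoint document fetched by _endpoints() is a JSON
# object; only two of its keys are consumed here, roughly of this shape:
#
#   {"metadataUrl": "https://.../drive/v1/", "contentUrl": "https://.../"}
#
# _metadata() and _content() return those URLs, and NodesHandler/UsageHandler
# append REST paths such as "nodes" and "account/usage" to them.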
|
{
"content_hash": "b3c3f80446525092a339bafe1a932aef",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 77,
"avg_line_length": 34.049180327868854,
"alnum_prop": 0.5793612582250041,
"repo_name": "caseymrm/drivesink",
"id": "6da07df5c30dd4dfe2d59331529a365402673277",
"size": "6254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drivesink/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "81"
},
{
"name": "HTML",
"bytes": "2483"
},
{
"name": "Python",
"bytes": "18429"
}
],
"symlink_target": ""
}
|
import collections
import datetime
from merc import errors
from merc import feature
from merc import message
from merc import util
class KLineFeature(feature.Feature):
NAME = __name__
install = KLineFeature.install
KLineDetail = collections.namedtuple("KLineDetail", ["reason", "expiry"])
@KLineFeature.register_user_command
class KLine(message.Command):
NAME = "KLINE"
MIN_ARITY = 1
def __init__(self, hostmask, duration=None, reason=None, *args):
self.hostmask = hostmask
self.duration = duration
self.reason = reason
@message.Command.requires_registration
def handle_for(self, app, user, prefix):
user.check_is_irc_operator()
locals = app.server.get_feature_locals(KLineFeature)
klines = locals.setdefault("klines", {})
if self.hostmask[0] == "-":
hostmask = self.hostmask[1:]
try:
del klines[hostmask]
except KeyError:
pass
return
if self.duration is not None:
duration = util.parse_duration(self.duration)
      # A zero duration makes the K-line permanent (no expiry).
      if duration == datetime.timedelta(seconds=0):
expiry = None
else:
expiry = datetime.datetime.now() + duration
else:
expiry = None
klines[self.hostmask] = KLineDetail(self.reason, expiry)
kline_message = "K-Lined"
if self.reason is not None:
kline_message += ": " + self.reason
for target in app.users.all():
if target.hostmask_matches(self.hostmask):
target.send(None, errors.LinkError(kline_message))
target.close(kline_message)
@KLineFeature.hook("user.register.check")
def check_klines(app, user):
locals = app.server.get_feature_locals(KLineFeature)
klines = locals.get("klines", {})
now = datetime.datetime.now()
for hostmask, detail in list(klines.items()):
if user.hostmask_matches(hostmask):
if detail.expiry is not None and detail.expiry < now:
del klines[hostmask]
continue
kline_message = "K-Lined"
if detail.reason is not None:
kline_message += ": " + detail.reason
user.send(None, errors.LinkError(kline_message))
user.close(kline_message)
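# Example (hedged sketch of operator usage; the hostmasks are illustrative
# and the duration syntax is whatever util.parse_duration accepts):
#
#   /KLINE *!*@spam.example.com 2h Flooding   -- ban with an expiry
#   /KLINE *!*@spam.example.com 0 Flooding    -- zero duration = permanent
#   /KLINE -*!*@spam.example.com              -- leading "-" lifts the ban
#
# Matching users are disconnected immediately by handle_for and again at
# registration time via the "user.register.check" hook above.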
|
{
"content_hash": "ea4f4e5d7eebeab6ba72f07dc500e552",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 73,
"avg_line_length": 25.297619047619047,
"alnum_prop": 0.6672941176470588,
"repo_name": "merc-devel/merc",
"id": "538f41ce7e389535d0fa49d4f139a956287139fc",
"size": "2125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "merc/features/rfc1459/kline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "158852"
}
],
"symlink_target": ""
}
|
"""
Functions for exiting scripts in a standard way.
"""
import json
import os
import sys
def _fail_message(args):
    """
    Concatenate args and write them to stderr if there are any.
    """
    message = ''.join(args)
    if message != '':
        sys.stderr.write(message.strip() + '\n')
def fail(*args):
    """
    Exit with failure with optional commentary to stderr.
    """
    fail_other(1, *args)
def fail_other(status, *args):
    """
    Exit with a specific failure code with optional commentary to stderr.
    """
    _fail_message(args)
    exit(status)
def succeed(text=None):
"""
    Exit with success and an optional message to stdout.
"""
if text is not None:
print(text.strip())
exit(0)
def succeed_json(result=None):
"""
Exit with success, dumping JSON to stdout.
"""
json.dump(result, sys.stdout)
print()
exit(0)
def beta_feature(example=None):
"""
Require that the environment be set for a beta feature and fail if
it isn't.
"""
if example is None:
example = "pscheduler %s ..." % (os.path.basename(sys.argv[0]))
if "BETA" not in os.environ:
fail(
"This program is a beta feature and must be run with the\n"
"BETA environment variable set.\n"
"\n"
"For example:\n"
"$ BETA=1 %s\n"
"\n"
"This requirement will be removed in a future release."
% (example)
)
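# Example usage (hedged sketch; the messages and payload are illustrative):
#
#   from pscheduler import fail, succeed_json
#   if errors:
#       fail("Invalid task: ", "; ".join(errors))   # stderr, exit 1
#   succeed_json({"succeeded": True})               # JSON to stdout, exit 0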
|
{
"content_hash": "172a075355bf3bdc93d55e98de75266d",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 21.80821917808219,
"alnum_prop": 0.5646984924623115,
"repo_name": "perfsonar/pscheduler",
"id": "10356b2d3d0590141d4d2275683aef1b3e14861c",
"size": "1592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-pscheduler/pscheduler/pscheduler/exitstatus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2076"
},
{
"name": "Dockerfile",
"bytes": "2027"
},
{
"name": "Jinja",
"bytes": "586"
},
{
"name": "M4",
"bytes": "4638"
},
{
"name": "Makefile",
"bytes": "177025"
},
{
"name": "PLpgSQL",
"bytes": "184547"
},
{
"name": "Perl",
"bytes": "4575"
},
{
"name": "Python",
"bytes": "1866695"
},
{
"name": "Roff",
"bytes": "2379"
},
{
"name": "Shell",
"bytes": "138364"
},
{
"name": "jq",
"bytes": "2680"
},
{
"name": "sed",
"bytes": "27049"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from mainapp.functions.search import (
search_string_to_params,
params_to_search_string,
MainappSearch,
MULTI_MATCH_FIELDS,
)
expected_params = {
"query": {
"bool": {
"should": [
{
"multi_match": {
"query": "word radius anotherword",
"operator": "and",
"fields": MULTI_MATCH_FIELDS,
}
},
{
"multi_match": {
"query": "word radius anotherword",
"operator": "and",
"fields": MULTI_MATCH_FIELDS,
"fuzziness": "1",
"prefix_length": 1,
}
},
]
}
},
"post_filter": {"terms": {"_index": ["mst-test-file", "mst-test-committee"]}},
"indices_boost": [
{"mst-test-person": 4},
{"mst-test-organization": 4},
{"mst-test-paper": 2},
],
"_source": ["id", "name", "legal_date", "reference_number", "display_date"],
"aggs": {
"_filter_document_type": {
"filter": {"match_all": {}},
"aggs": {"document_type": {"terms": {"field": "_index"}}},
},
"_filter_person": {
"filter": {"terms": {"_index": ["mst-test-file", "mst-test-committee"]}},
"aggs": {"person": {"terms": {"field": "person_ids"}}},
},
"_filter_organization": {
"filter": {"terms": {"_index": ["mst-test-file", "mst-test-committee"]}},
"aggs": {"organization": {"terms": {"field": "organization_ids"}}},
},
},
"sort": [{"sort_date": {"order": "desc"}}],
"highlight": {
"fields": {
"*": {"fragment_size": 150, "pre_tags": "<mark>", "post_tags": "</mark>"}
}
},
}
class TestSearchtools(TestCase):
maxDiff = None
params = {
"document-type": "file,committee",
"radius": "50",
"searchterm": "word radius anotherword",
"sort": "date_newest",
}
def test_search_string_to_params(self):
instring = search_string_to_params(
"document-type:file,committee word radius radius:50 sort:date_newest anotherword"
)
self.assertEqual(instring, self.params)
def test_params_to_query(self):
main_search = MainappSearch(self.params)
self.assertEqual(main_search.errors, [])
self.assertEqual(main_search.build_search().to_dict(), expected_params)
def test_params_to_search_string(self):
expected = "document-type:file,committee radius:50 sort:date_newest word radius anotherword"
search_string = params_to_search_string(self.params)
self.assertEqual(search_string, expected)
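# Note (drawn from the two assertions above): parse + serialise is not an
# exact round trip. params_to_search_string emits the key:value filters first
# and the free-text words last, so
#   "document-type:file,committee word radius radius:50 sort:date_newest anotherword"
# canonicalises to
#   "document-type:file,committee radius:50 sort:date_newest word radius anotherword"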
|
{
"content_hash": "eccb8b46aaf7e4e548943384e55dd377",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 100,
"avg_line_length": 33.52325581395349,
"alnum_prop": 0.4890738813735692,
"repo_name": "meine-stadt-transparent/meine-stadt-transparent",
"id": "9592062f37878a42c1d81364832ab03eee2b6222",
"size": "2883",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mainapp/tests/main/test_searchtools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2397"
},
{
"name": "HTML",
"bytes": "158632"
},
{
"name": "JavaScript",
"bytes": "62206"
},
{
"name": "Python",
"bytes": "601144"
},
{
"name": "SCSS",
"bytes": "40214"
},
{
"name": "Shell",
"bytes": "1363"
}
],
"symlink_target": ""
}
|
__author__ = 'luckydonald'
from . import encoding
from .utils import escape # validate_input
from .exceptions import ArgumentParseError
from os import path # file checking.
import logging
logger = logging.getLogger(__name__)
class Argument(object):
def __init__(self, name, optional=False, multible=False):
self.name = name
self.optional = optional
self.multible = multible
def __str__(self):
string = self.name
if self.optional:
string = "["+string+"]"
else:
string = "<"+string+">"
if self.multible:
string = string + "+"
return string
def parse(self, value):
return value
class Nothing(Argument):
def parse(self, value):
value = super(Nothing, self).parse(value)
        if value is not None:
raise ArgumentParseError("Is not null.")
return value
class UnescapedUnicodeString(Argument):
"""
Used for unicodes stings which will not be escaped.
"""
pass
class UnicodeString(UnescapedUnicodeString):
"""
Used for unicodes stings which will be escaped, and wrapped in 'simple quotes'
"""
def parse(self, value):
value = super(UnicodeString, self).parse(value)
value = escape(value)
if not isinstance(value, encoding.text_type):
raise ArgumentParseError("Not a string.")
return value
class Peer(UnescapedUnicodeString):
def parse(self, value):
value = super(Peer, self).parse(value)
if " " in value:
raise ArgumentParseError("Space in peer.")
return value
class Chat(Peer):
def parse(self, value):
return super(Chat, self).parse(value)
class User(Peer):
def parse(self, value):
return super(User, self).parse(value)
class SecretChat(Peer):
def parse(self, value):
return super(SecretChat, self).parse(value)
class Number(Argument):
    def parse(self, value):
        value = super(Number, self).parse(value)
        # Numeric strings are converted; everything else must be an integer.
        if isinstance(value, encoding.text_type):
            return int(value)
        if not isinstance(value, (int, encoding.long_int)):
            raise ArgumentParseError("Not an int/long.")
        return value
class Double(Argument):
def parse(self, value):
value = super(Double, self).parse(value)
if not isinstance(value, float):
raise ArgumentParseError("Not a float.")
return value
class NonNegativeNumber(Number):
def parse(self, value):
value = super(NonNegativeNumber, self).parse(value)
if value < 0:
raise ArgumentParseError("Number smaller than 0.")
return value
class PositiveNumber(NonNegativeNumber):
def parse(self, value):
value = super(PositiveNumber, self).parse(value)
if value <= 0:
raise ArgumentParseError("Number must be bigger than 0.")
return value
class File(UnicodeString):
def parse(self, value):
if not path.isfile(encoding.native_type(value)):
raise ArgumentParseError("File path \"{path}\" not valid.".format(path=value))
value = super(File, self).parse(value)
return value
class MsgId(PositiveNumber):
def parse(self, value):
return super(MsgId, self).parse(value)
def validate_input(function_name, arguments, arguments_types):
    logger.warning("validate_input() is deprecated!")
raise NotImplementedError()
    # Unreachable legacy implementation, kept for reference:
    if len(arguments) != len(arguments_types):
        raise ValueError("Error in function {function_name}: {expected_number} parameters expected, but {given_number} were given.".format(function_name=function_name, expected_number=len(arguments_types), given_number=len(arguments)))
i = 0
new_args = []
for arg in arguments:
func_type = arguments_types[i]
# arg is the given one, which should be func_type.
if not func_type(arg):
raise ValueError("Error in function {function_name}: parameter {number} is not type {type}.".format(function_name=function_name, number=i, type=func_type.__name__))
if func_type == UnicodeString:
new_args.append(encoding.to_unicode(escape(arg)))
else:
new_args.append(encoding.to_unicode(str(arg)))
i += 1
# end for
return new_args
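# Example (hedged sketch): composing and rendering a command signature from
# these argument types; the names are illustrative.
#
#   sig = [Peer("peer"), UnicodeString("text", multible=True)]
#   " ".join(str(a) for a in sig)     # -> "<peer> <text>+"
#   Peer("peer").parse("a b")         # raises ArgumentParseError (space)
#   NonNegativeNumber("n").parse(-1)  # raises ArgumentParseError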
|
{
"content_hash": "57085d37b59ba90cebf97486be17228a",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 223,
"avg_line_length": 25.410596026490065,
"alnum_prop": 0.7159238988793328,
"repo_name": "luckydonald/pytg2",
"id": "115811718c0bc553ea8e90c193a94d36c1ee2e1c",
"size": "3861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytg2/argument_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49058"
}
],
"symlink_target": ""
}
|
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class GlanceImage(resource.Resource):
    '''
    A resource for managing an image in Glance.
    '''
support_status = support.SupportStatus(version='2014.2')
PROPERTIES = (
NAME, IMAGE_ID, IS_PUBLIC, MIN_DISK, MIN_RAM, PROTECTED,
DISK_FORMAT, CONTAINER_FORMAT, LOCATION
) = (
'name', 'id', 'is_public', 'min_disk', 'min_ram', 'protected',
'disk_format', 'container_format', 'location'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the image. The name of an image is not '
'unique to a Image Service node.')
),
IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('The image ID. Glance will generate a UUID if not specified.')
),
IS_PUBLIC: properties.Schema(
properties.Schema.BOOLEAN,
_('Scope of image accessibility. Public or private. '
'Default value is False means private.'),
default=False,
),
MIN_DISK: properties.Schema(
properties.Schema.INTEGER,
_('Amount of disk space (in GB) required to boot image. '
'Default value is 0 if not specified '
'and means no limit on the disk size.'),
constraints=[
constraints.Range(min=0),
]
),
MIN_RAM: properties.Schema(
properties.Schema.INTEGER,
_('Amount of ram (in MB) required to boot image. Default value '
'is 0 if not specified and means no limit on the ram size.'),
constraints=[
constraints.Range(min=0),
]
),
PROTECTED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether the image can be deleted. If the value is True, '
'the image is protected and cannot be deleted.')
),
DISK_FORMAT: properties.Schema(
properties.Schema.STRING,
_('Disk format of image.'),
required=True,
constraints=[
constraints.AllowedValues(['ami', 'ari', 'aki',
'vhd', 'vmdk', 'raw',
'qcow2', 'vdi', 'iso'])
]
),
CONTAINER_FORMAT: properties.Schema(
properties.Schema.STRING,
_('Container format of image.'),
required=True,
constraints=[
constraints.AllowedValues(['ami', 'ari', 'aki',
'bare', 'ova', 'ovf'])
]
),
LOCATION: properties.Schema(
properties.Schema.STRING,
_('URL where the data for this image already resides. For '
'example, if the image data is stored in swift, you could '
'specify "swift://example.com/container/obj".'),
required=True,
),
}
default_client_name = 'glance'
def handle_create(self):
args = dict((k, v) for k, v in self.properties.items()
if v is not None)
image_id = self.glance().images.create(**args).id
self.resource_id_set(image_id)
return image_id
def check_create_complete(self, image_id):
image = self.glance().images.get(image_id)
return image.status == 'active'
def handle_delete(self):
if self.resource_id is None:
return
try:
self.glance().images.delete(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
def resource_mapping():
return {
'OS::Glance::Image': GlanceImage
}
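# Example (hedged): a minimal HOT template snippet exercising this resource
# type; the image name and download URL are illustrative, not part of this
# module. All property keys match the schema above.
#
#   resources:
#     my_image:
#       type: OS::Glance::Image
#       properties:
#         name: cirros-0.3.5
#         disk_format: qcow2
#         container_format: bare
#         is_public: true
#         location: http://example.com/images/cirros-0.3.5-x86_64-disk.img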
|
{
"content_hash": "fb996bce2d4c1b2f06024c834c5eb79c",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 76,
"avg_line_length": 34.19130434782609,
"alnum_prop": 0.5363682604272635,
"repo_name": "rdo-management/heat",
"id": "66c5d4653389862c3555fb38450a851a24ce997c",
"size": "4507",
"binary": false,
"copies": "3",
"ref": "refs/heads/mgt-master",
"path": "heat/engine/resources/openstack/glance/glance_image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5970886"
},
{
"name": "Shell",
"bytes": "25070"
}
],
"symlink_target": ""
}
|