repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
django-nonrel/django-nonrel | django/utils/termcolors.py | 417 | 6885 | """
termcolors.py
"""
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
# ANSI SGR color codes: foregrounds are '30'-'37', backgrounds '40'-'47'.
foreground = dict([(color_names[x], '3%s' % x) for x in range(8)])
background = dict([(color_names[x], '4%s' % x) for x in range(8)])

RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}

def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
    """
    code_list = []
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # Use items() rather than the Python-2-only iteritems() so this helper
    # also works under Python 3 (behavior is otherwise identical).
    for k, v in kwargs.items():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        # Auto-terminate so later output reverts to the terminal default.
        text = text + '\x1b[%sm' % RESET
    return ('\x1b[%sm' % ';'.join(code_list)) + text
def make_style(opts=(), **kwargs):
    """
    Return a function that wraps colorize() with preset default parameters.

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style_func(text):
        return colorize(text, opts, **kwargs)
    return style_func
# Named palette identifiers accepted by parse_color_setting().
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'

# Role -> colorize() kwargs, per palette. The 'nocolor' palette defines every
# valid role with an empty style and doubles as the canonical role list.
PALETTES = {
    NOCOLOR_PALETTE: {
        'ERROR': {},
        'NOTICE': {},
        'SQL_FIELD': {},
        'SQL_COLTYPE': {},
        'SQL_KEYWORD': {},
        'SQL_TABLE': {},
        'HTTP_INFO': {},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {},
        'HTTP_NOT_MODIFIED': {},
        'HTTP_BAD_REQUEST': {},
        'HTTP_NOT_FOUND': {},
        'HTTP_SERVER_ERROR': {},
    },
    # Palette tuned for dark terminal backgrounds.
    DARK_PALETTE: {
        'ERROR': { 'fg': 'red', 'opts': ('bold',) },
        'NOTICE': { 'fg': 'red' },
        'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) },
        'SQL_COLTYPE': { 'fg': 'green' },
        'SQL_KEYWORD': { 'fg': 'yellow' },
        'SQL_TABLE': { 'opts': ('bold',) },
        'HTTP_INFO': { 'opts': ('bold',) },
        'HTTP_SUCCESS': { },
        'HTTP_REDIRECT': { 'fg': 'green' },
        'HTTP_NOT_MODIFIED': { 'fg': 'cyan' },
        'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
        'HTTP_NOT_FOUND': { 'fg': 'yellow' },
        'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
    },
    # Palette tuned for light terminal backgrounds.
    LIGHT_PALETTE: {
        'ERROR': { 'fg': 'red', 'opts': ('bold',) },
        'NOTICE': { 'fg': 'red' },
        'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) },
        'SQL_COLTYPE': { 'fg': 'green' },
        'SQL_KEYWORD': { 'fg': 'blue' },
        'SQL_TABLE': { 'opts': ('bold',) },
        'HTTP_INFO': { 'opts': ('bold',) },
        'HTTP_SUCCESS': { },
        'HTTP_REDIRECT': { 'fg': 'green', 'opts': ('bold',) },
        'HTTP_NOT_MODIFIED': { 'fg': 'green' },
        'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
        'HTTP_NOT_FOUND': { 'fg': 'red' },
        'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
    }
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.

    Valid roles:
        'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
        'http_info', 'http_success', 'http_redirect', 'http_bad_request',
        'http_not_found', 'http_server_error'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal'
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]

    # Split the color configuration into parts
    parts = config_string.lower().split(';')
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}

            # Break the definition into the role, plus the list of specific
            # instructions. Split on the first '=' only so a malformed value
            # containing extra '=' characters cannot raise a ValueError.
            # The role must be in upper case.
            role, instructions = part.split('=', 1)
            role = role.upper()

            styles = instructions.split(',')
            styles.reverse()

            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]

            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict)
            if opts:
                definition['opts'] = opts

            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition

    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
| bsd-3-clause | 18d9e8732a5514ec9791d5bffa91b5af | 33.772727 | 89 | 0.525054 | 3.68576 | false | false | false | false |
django-nonrel/django-nonrel | django/contrib/databrowse/sites.py | 329 | 5628 | from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
class AlreadyRegistered(Exception):
    """Raised when registering a model that is already registered."""
class NotRegistered(Exception):
    """Raised when unregistering a model that was never registered."""
class DatabrowsePlugin(object):
    """Base class for databrowse plugins; subclasses override these hooks."""

    def urls(self, plugin_name, easy_instance_field):
        """
        Given an EasyInstanceField object, returns a list of URLs for this
        plugin's views of this object. These URLs should be absolute.

        Returns None if the EasyInstanceField object doesn't get a
        list of plugin-specific URLs.
        """
        return None

    def model_index_html(self, request, model, site):
        """Returns a snippet of HTML to include on the model index page."""
        return ''

    def model_view(self, request, model_databrowse, url):
        """Handles main URL routing for a plugin's model-specific pages."""
        raise NotImplementedError
class ModelDatabrowse(object):
    """Databrowse views for one model; dispatches plugin-specific sub-URLs."""
    plugins = {}

    def __init__(self, model, site):
        self.model = model
        self.site = site

    def root(self, request, url):
        """
        Handles main URL routing for the databrowse app.

        `url` is the remainder of the URL -- e.g. 'objects/3'.
        """
        if url is None:
            return self.main_view(request)
        # First path segment selects the plugin; the rest is passed through.
        if '/' in url:
            plugin_name, rest_of_url = url.split('/', 1)
        else:
            plugin_name, rest_of_url = url, None
        plugin = self.plugins.get(plugin_name)
        if plugin is None:
            raise http.Http404('A plugin with the requested name does not exist.')
        return plugin.model_view(request, self, rest_of_url)

    def main_view(self, request):
        easy_model = EasyModel(self.site, self.model)
        snippets = [plugin.model_index_html(request, self.model, self.site)
                    for plugin in self.plugins.values()]
        context = {
            'model': easy_model,
            'root_url': self.site.root_url,
            'plugin_html': mark_safe(u'\n'.join(snippets)),
        }
        return render_to_response('databrowse/model_detail.html', context)
class DatabrowseSite(object):
    """
    Registry mapping model classes to ModelDatabrowse classes, plus the URL
    dispatch entry points for the databrowse application.
    """
    def __init__(self):
        self.registry = {} # model_class -> databrowse_class
        self.root_url = None  # set lazily by root() from the request path

    def register(self, model_or_iterable, databrowse_class=None, **options):
        """
        Registers the given model(s) with the given databrowse site.

        The model(s) should be Model classes, not instances.

        If a databrowse class isn't given, it will use DefaultModelDatabrowse
        (the default databrowse options).

        If a model is already registered, this will raise AlreadyRegistered.
        """
        databrowse_class = databrowse_class or DefaultModelDatabrowse
        # issubclass() raises TypeError when handed a non-class (e.g. the
        # documented iterable of models), so check isinstance(..., type)
        # first to let iterables fall through to the loop below.
        if isinstance(model_or_iterable, type) and issubclass(model_or_iterable, models.Model):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model in self.registry:
                raise AlreadyRegistered('The model %s is already registered' % model.__name__)
            self.registry[model] = databrowse_class

    def unregister(self, model_or_iterable):
        """
        Unregisters the given model(s).

        If a model isn't already registered, this will raise NotRegistered.
        """
        # Same guard as register(): accept either one model class or an
        # iterable of model classes without raising TypeError.
        if isinstance(model_or_iterable, type) and issubclass(model_or_iterable, models.Model):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model not in self.registry:
                raise NotRegistered('The model %s is not registered' % model.__name__)
            del self.registry[model]

    def root(self, request, url):
        """
        Handles main URL routing for the databrowse app.

        `url` is the remainder of the URL -- e.g. 'comments/comment/'.
        """
        # Everything before `url` in the request path is the site's root URL.
        self.root_url = request.path[:len(request.path) - len(url)]
        url = url.rstrip('/') # Trim trailing slash, if it exists.
        if url == '':
            return self.index(request)
        elif '/' in url:
            return self.model_page(request, *url.split('/', 2))
        raise http.Http404('The requested databrowse page does not exist.')

    def index(self, request):
        # Homepage: one EasyModel wrapper per registered model.
        m_list = [EasyModel(self, m) for m in self.registry.keys()]
        return render_to_response('databrowse/homepage.html', {'model_list': m_list, 'root_url': self.root_url})

    def model_page(self, request, app_label, model_name, rest_of_url=None):
        """
        Handles the model-specific functionality of the databrowse site, delegating
        to the appropriate ModelDatabrowse class.
        """
        model = models.get_model(app_label, model_name)
        if model is None:
            raise http.Http404("App %r, model %r, not found." % (app_label, model_name))
        try:
            databrowse_class = self.registry[model]
        except KeyError:
            raise http.Http404("This model exists but has not been registered with databrowse.")
        return databrowse_class(model, self).root(request, rest_of_url)
# The single, module-level databrowse site instance models register against.
site = DatabrowseSite()

# Plugins are imported after `site` is created; presumably this ordering
# avoids a circular import with the plugin modules -- TODO confirm.
from django.contrib.databrowse.plugins.calendars import CalendarPlugin
from django.contrib.databrowse.plugins.objects import ObjectDetailPlugin
from django.contrib.databrowse.plugins.fieldchoices import FieldChoicePlugin
class DefaultModelDatabrowse(ModelDatabrowse):
    """Databrowse configuration used when register() is given no class."""
    # Default plugin set: object detail pages, calendar views, and
    # per-field value listings.
    plugins = {'objects': ObjectDetailPlugin(), 'calendars': CalendarPlugin(), 'fields': FieldChoicePlugin()}
| bsd-3-clause | df8bcd140149f360b41a48efe25be08e | 36.771812 | 130 | 0.636816 | 4.066474 | false | false | false | false |
django-nonrel/django-nonrel | django/contrib/gis/tests/geo3d/models.py | 404 | 1835 | from django.contrib.gis.db import models
class City3D(models.Model):
    """City with a 3-D point geometry (dim=3)."""
    name = models.CharField(max_length=30)
    point = models.PointField(dim=3)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Interstate2D(models.Model):
    """Interstate with a 2-D line string, SRID 4269."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=4269)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name

class Interstate3D(models.Model):
    """3-D (dim=3) variant of Interstate2D, same SRID 4269."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=4269)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name

class InterstateProj2D(models.Model):
    """2-D line string in the projected SRID 32140."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name

class InterstateProj3D(models.Model):
    """3-D (dim=3) line string in the projected SRID 32140."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Polygon2D(models.Model):
    """2-D polygon in SRID 32140."""
    name = models.CharField(max_length=30)
    poly = models.PolygonField(srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name

class Polygon3D(models.Model):
    """3-D (dim=3) polygon in SRID 32140."""
    name = models.CharField(max_length=30)
    poly = models.PolygonField(dim=3, srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Point2D(models.Model):
    """Plain 2-D point field."""
    point = models.PointField()
    objects = models.GeoManager()

class Point3D(models.Model):
    """3-D (dim=3) point field."""
    point = models.PointField(dim=3)
    objects = models.GeoManager()

class MultiPoint3D(models.Model):
    """3-D (dim=3) multi-point field."""
    mpoint = models.MultiPointField(dim=3)
    objects = models.GeoManager()
| bsd-3-clause | 13737ee5a16105f034085870bc2a9ddb | 25.594203 | 52 | 0.679019 | 3.528846 | false | false | false | false |
django-nonrel/django-nonrel | django/template/debug.py | 232 | 3797 | from django.conf import settings
from django.template.base import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
class DebugLexer(Lexer):
    """Lexer that records each token's source span for debug error pages."""

    def __init__(self, template_string, origin):
        super(DebugLexer, self).__init__(template_string, origin)

    def tokenize(self):
        "Return a list of tokens from a given template_string"
        tokens = []
        cursor = 0
        for match in tag_re.finditer(self.template_string):
            start, end = match.span()
            # Any literal text between the previous tag and this one
            # becomes a non-tag token.
            if cursor < start:
                tokens.append(self.create_token(
                    self.template_string[cursor:start], (cursor, start), False))
            tokens.append(self.create_token(
                self.template_string[start:end], (start, end), True))
            cursor = end
        trailing = self.template_string[cursor:]
        if trailing:
            tokens.append(self.create_token(
                trailing, (cursor, cursor + len(trailing)), False))
        return tokens

    def create_token(self, token_string, source, in_tag):
        # Attach (origin, (start, end)) so errors can point at the template.
        token = super(DebugLexer, self).create_token(token_string, in_tag)
        token.source = self.origin, source
        return token
class DebugParser(Parser):
    """Parser that tracks source positions so errors can cite the template."""

    def __init__(self, lexer):
        super(DebugParser, self).__init__(lexer)
        # Stack of (command, source) pairs for currently-open block tags.
        self.command_stack = []

    def enter_command(self, command, token):
        self.command_stack.append((command, token.source))

    def exit_command(self):
        self.command_stack.pop()

    def error(self, token, msg):
        return self.source_error(token.source, msg)

    def source_error(self, source, msg):
        exception = TemplateSyntaxError(msg)
        exception.source = source
        return exception

    def create_nodelist(self):
        return DebugNodeList()

    def create_variable_node(self, contents):
        return DebugVariableNode(contents)

    def extend_nodelist(self, nodelist, node, token):
        node.source = token.source
        super(DebugParser, self).extend_nodelist(nodelist, node, token)

    def unclosed_block_tag(self, parse_until):
        # The most recently opened block tag is the unclosed one.
        command, source = self.command_stack.pop()
        msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
        raise self.source_error(source, msg)

    def compile_function_error(self, token, e):
        if not hasattr(e, 'source'):
            e.source = token.source
class DebugNodeList(NodeList):
    """NodeList that annotates rendering errors with template source info."""
    def render_node(self, node, context):
        # Render a single node; any exception that escapes is annotated (or
        # wrapped) so the debug error page can highlight the offending
        # template source. NOTE: this uses Python 2 except/raise syntax.
        try:
            result = node.render(context)
        except TemplateSyntaxError, e:
            # Preserve an existing source annotation if one was already set
            # deeper in the render tree.
            if not hasattr(e, 'source'):
                e.source = node.source
            raise
        except Exception, e:
            from sys import exc_info
            # Wrap any other error in a TemplateSyntaxError that carries the
            # original type, message, source position and traceback.
            wrapped = TemplateSyntaxError(u'Caught %s while rendering: %s' %
                (e.__class__.__name__, force_unicode(e, errors='replace')))
            wrapped.source = node.source
            wrapped.exc_info = exc_info()
            # Python 2 three-argument raise: re-raise with the original traceback.
            raise wrapped, None, wrapped.exc_info[2]
        return result
class DebugVariableNode(VariableNode):
    """VariableNode that annotates resolution errors with source position."""
    def render(self, context):
        # Resolve, localize and coerce the variable to unicode.
        # NOTE: Python 2 except syntax.
        try:
            output = self.filter_expression.resolve(context)
            output = localize(output, use_l10n=context.use_l10n)
            output = force_unicode(output)
        except TemplateSyntaxError, e:
            if not hasattr(e, 'source'):
                e.source = self.source
            raise
        except UnicodeDecodeError:
            # Degrade gracefully: undecodable values render as empty output.
            return ''
        # Escape unless the value is already marked safe, honoring explicit
        # EscapeData markers even outside autoescape mode.
        if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
            return escape(output)
        else:
            return output
| bsd-3-clause | a0ec82e6a08f968e75cf82e76f413732 | 36.22549 | 104 | 0.62049 | 4.07841 | false | false | false | false |
django-nonrel/django-nonrel | tests/regressiontests/forms/localflavor/ro.py | 50 | 5203 | # -*- coding: utf-8 -*-
from django.contrib.localflavor.ro.forms import (ROCIFField, ROCNPField,
ROCountyField, ROCountySelect, ROIBANField, ROPhoneNumberField,
ROPostalCodeField)
from utils import LocalFlavorTestCase
class ROLocalFlavorTests(LocalFlavorTestCase):
    """Tests for the Romanian (RO) localflavor form fields and widgets."""

    def test_ROCountySelect(self):
        # The widget must render every Romanian county with the given value
        # pre-selected (here: Cluj / 'CJ').
        f = ROCountySelect()
        out = u'''<select name="county">
<option value="AB">Alba</option>
<option value="AR">Arad</option>
<option value="AG">Arge\u015f</option>
<option value="BC">Bac\u0103u</option>
<option value="BH">Bihor</option>
<option value="BN">Bistri\u0163a-N\u0103s\u0103ud</option>
<option value="BT">Boto\u015fani</option>
<option value="BV">Bra\u015fov</option>
<option value="BR">Br\u0103ila</option>
<option value="B">Bucure\u015fti</option>
<option value="BZ">Buz\u0103u</option>
<option value="CS">Cara\u015f-Severin</option>
<option value="CL">C\u0103l\u0103ra\u015fi</option>
<option value="CJ" selected="selected">Cluj</option>
<option value="CT">Constan\u0163a</option>
<option value="CV">Covasna</option>
<option value="DB">D\xe2mbovi\u0163a</option>
<option value="DJ">Dolj</option>
<option value="GL">Gala\u0163i</option>
<option value="GR">Giurgiu</option>
<option value="GJ">Gorj</option>
<option value="HR">Harghita</option>
<option value="HD">Hunedoara</option>
<option value="IL">Ialomi\u0163a</option>
<option value="IS">Ia\u015fi</option>
<option value="IF">Ilfov</option>
<option value="MM">Maramure\u015f</option>
<option value="MH">Mehedin\u0163i</option>
<option value="MS">Mure\u015f</option>
<option value="NT">Neam\u0163</option>
<option value="OT">Olt</option>
<option value="PH">Prahova</option>
<option value="SM">Satu Mare</option>
<option value="SJ">S\u0103laj</option>
<option value="SB">Sibiu</option>
<option value="SV">Suceava</option>
<option value="TR">Teleorman</option>
<option value="TM">Timi\u015f</option>
<option value="TL">Tulcea</option>
<option value="VS">Vaslui</option>
<option value="VL">V\xe2lcea</option>
<option value="VN">Vrancea</option>
</select>'''
        self.assertEqual(f.render('county', 'CJ'), out)

    def test_ROCIFField(self):
        # CIF: fiscal identification code; optional 'RO' prefix is stripped.
        error_invalid = [u'Enter a valid CIF.']
        error_atmost = [u'Ensure this value has at most 10 characters (it has 11).']
        error_atleast = [u'Ensure this value has at least 2 characters (it has 1).']
        valid = {
            '21694681': u'21694681',
            'RO21694681': u'21694681',
        }
        invalid = {
            '21694680': error_invalid,
            '21694680000': error_atmost,
            '0': error_atleast,
        }
        self.assertFieldOutput(ROCIFField, valid, invalid)

    def test_ROCNPField(self):
        # CNP: 13-digit personal numeric code with a checksum digit.
        error_invalid = [u'Enter a valid CNP.']
        error_atleast = [u'Ensure this value has at least 13 characters (it has 10).']
        error_atmost = [u'Ensure this value has at most 13 characters (it has 14).']
        valid = {
            '1981211204489': '1981211204489',
        }
        invalid = {
            '1981211204487': error_invalid,
            '1981232204489': error_invalid,
            '9981211204489': error_invalid,
            '9981211209': error_atleast,
            '19812112044891': error_atmost,
        }
        self.assertFieldOutput(ROCNPField, valid, invalid)

    def test_ROCountyField(self):
        # Accepts either the two-letter code or the (diacritic-exact) name.
        error_format = [u'Enter a Romanian county code or name.']
        valid = {
            'CJ': 'CJ',
            'cj': 'CJ',
            u'Argeş': 'AG',
            u'argeş': 'AG',
        }
        invalid = {
            'Arges': error_format,
        }
        self.assertFieldOutput(ROCountyField, valid, invalid)

    def test_ROIBANField(self):
        # IBAN: 24 characters; hyphen-grouped input is normalized.
        error_invalid = [u'Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format']
        error_atleast = [u'Ensure this value has at least 24 characters (it has 23).']
        valid = {
            'RO56RZBR0000060003291177': 'RO56RZBR0000060003291177',
            'RO56-RZBR-0000-0600-0329-1177': 'RO56RZBR0000060003291177',
        }
        invalid = {
            'RO56RZBR0000060003291176': error_invalid,
            'AT61 1904 3002 3457 3201': error_invalid,
            'RO56RZBR000006000329117': error_atleast,
        }
        self.assertFieldOutput(ROIBANField, valid, invalid)

    def test_ROPhoneNumberField(self):
        # Ten digits; punctuation in the accepted format is stripped.
        error_format = [u'Phone numbers must be in XXXX-XXXXXX format.']
        error_atleast = [u'Ensure this value has at least 10 characters (it has 9).']
        valid = {
            '0264485936': '0264485936',
            '(0264)-485936': '0264485936',
        }
        invalid = {
            '02644859368': error_format,
            '026448593': error_atleast,
        }
        self.assertFieldOutput(ROPhoneNumberField, valid, invalid)

    def test_ROPostalCodeField(self):
        # Postal codes are exactly six characters.
        error_atleast = [u'Ensure this value has at least 6 characters (it has 5).']
        error_atmost = [u'Ensure this value has at most 6 characters (it has 7).']
        valid = {
            '400473': '400473',
        }
        invalid = {
            '40047': error_atleast,
            '4004731': error_atmost,
        }
        self.assertFieldOutput(ROPostalCodeField, valid, invalid)
| bsd-3-clause | d8a01e8ca8d49fe041bb60331ff24a83 | 36.15 | 87 | 0.627187 | 3.090315 | false | true | false | false |
django-nonrel/django-nonrel | django/core/management/commands/shell.py | 5 | 3164 | import os
from django.core.management.base import NoArgsCommand
from optparse import make_option
class Command(NoArgsCommand):
    """The `manage.py shell` command: run an interactive Python interpreter,
    preferring IPython/bpython when installed."""
    option_list = NoArgsCommand.option_list + (
        make_option('--plain', action='store_true', dest='plain',
            help='Tells Django to use plain Python, not IPython.'),
    )
    help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available."

    # Enhanced shells to try, in order of preference; each name must match a
    # method on this class.
    shells = ['ipython', 'bpython']
    requires_model_validation = False

    def ipython(self):
        # Start IPython, supporting both the >= 0.11 API (embed) and the
        # older IPShell API. Raises ImportError if IPython is absent.
        try:
            from IPython import embed
            embed()
        except ImportError:
            # IPython < 0.11
            # Explicitly pass an empty list as arguments, because otherwise
            # IPython would use sys.argv from this script.
            try:
                from IPython.Shell import IPShell
                shell = IPShell(argv=[])
                shell.mainloop()
            except ImportError:
                # IPython not found at all, raise ImportError
                raise

    def bpython(self):
        # Start bpython; raises ImportError if it is not installed.
        import bpython
        bpython.embed()

    def run_shell(self):
        # Try each enhanced shell in order; raise ImportError when none of
        # them is available so the caller falls back to plain Python.
        for shell in self.shells:
            try:
                return getattr(self, shell)()
            except ImportError:
                pass
        raise ImportError

    def handle_noargs(self, **options):
        # XXX: (Temporary) workaround for ticket #1796: force early loading of all
        # models from installed apps.
        from django.db.models.loading import get_models
        loaded_models = get_models()

        use_plain = options.get('plain', False)

        try:
            if use_plain:
                # Don't bother loading IPython, because the user wants plain Python.
                raise ImportError
            self.run_shell()
        except ImportError:
            import code
            # Set up a dictionary to serve as the environment for the shell, so
            # that tab completion works on objects that are imported at runtime.
            # See ticket 5082.
            imported_objects = {}
            try: # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try', because
                # we already know 'readline' was imported successfully.
                import rlcompleter
                readline.set_completer(rlcompleter.Completer(imported_objects).complete)
                readline.parse_and_bind("tab:complete")

            # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
            # conventions and get $PYTHONSTARTUP first then import user.
            if not use_plain:
                pythonrc = os.environ.get("PYTHONSTARTUP")
                if pythonrc and os.path.isfile(pythonrc):
                    try:
                        # execfile is a Python 2 builtin.
                        execfile(pythonrc)
                    except NameError:
                        pass
                # This will import .pythonrc.py as a side-effect
                import user
            code.interact(local=imported_objects)
| bsd-3-clause | 3bd324495db29420e5cc58df17d59357 | 37.120482 | 92 | 0.565107 | 4.99842 | false | false | false | false |
django-nonrel/django-nonrel | django/contrib/comments/views/moderation.py | 5 | 5002 | from django import template
from django.conf import settings
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required, permission_required
from utils import next_redirect, confirmation_view
from django.contrib import comments
from django.contrib.comments import signals
from django.views.decorators.csrf import csrf_protect
@csrf_protect
@login_required
def flag(request, comment_id, next=None):
    """
    Flags a comment. Confirmation on GET, action on POST.

    Templates: `comments/flag.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    # Only comments belonging to the current site can be flagged; 404 otherwise.
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)

    # Flag on POST
    if request.method == 'POST':
        perform_flag(request, comment)
        return next_redirect(request, next, flag_done, c=comment.pk)

    # Render a form on GET
    else:
        return render_to_response('comments/flag.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )
@csrf_protect
@permission_required("comments.can_moderate")
def delete(request, comment_id, next=None):
    """
    Deletes a comment. Confirmation on GET, action on POST. Requires the "can
    moderate comments" permission.

    Templates: `comments/delete.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)

    # Delete on POST
    if request.method == 'POST':
        # Flag the comment as deleted instead of actually deleting it.
        perform_delete(request, comment)
        return next_redirect(request, next, delete_done, c=comment.pk)

    # Render a form on GET
    else:
        return render_to_response('comments/delete.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )
@csrf_protect
@permission_required("comments.can_moderate")
def approve(request, comment_id, next=None):
    """
    Approve a comment (that is, mark it as public and non-removed). Confirmation
    on GET, action on POST. Requires the "can moderate comments" permission.

    Templates: `comments/approve.html`,
    Context:
        comment
            the `comments.comment` object for approval
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)

    # Approve on POST
    if request.method == 'POST':
        # Flag the comment as approved.
        perform_approve(request, comment)
        return next_redirect(request, next, approve_done, c=comment.pk)

    # Render a form on GET
    else:
        return render_to_response('comments/approve.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )
# The following functions actually perform the various flag/aprove/delete
# actions. They've been broken out into seperate functions to that they
# may be called from admin actions.
def perform_flag(request, comment):
    """
    Actually perform the flagging of a comment from a request.
    """
    # Record a "suggest removal" flag for this user/comment pair; idempotent
    # thanks to get_or_create.
    removal_flag, created = comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=comments.models.CommentFlag.SUGGEST_REMOVAL,
    )
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=removal_flag,
        created=created,
        request=request,
    )
def perform_delete(request, comment):
    """Mark `comment` as removed (moderator deletion) and emit the signal."""
    deletion_flag, created = comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=comments.models.CommentFlag.MODERATOR_DELETION,
    )
    comment.is_removed = True
    comment.save()
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=deletion_flag,
        created=created,
        request=request,
    )
def perform_approve(request, comment):
    """Mark `comment` as public and not removed, and emit the signal."""
    approval_flag, created = comments.models.CommentFlag.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=comments.models.CommentFlag.MODERATOR_APPROVAL,
    )
    comment.is_removed = False
    comment.is_public = True
    comment.save()
    signals.comment_was_flagged.send(
        sender=comment.__class__,
        comment=comment,
        flag=approval_flag,
        created=created,
        request=request,
    )
# Confirmation views.
# Each is a simple view rendering the named template after the corresponding
# moderation action succeeds.
flag_done = confirmation_view(
    template = "comments/flagged.html",
    doc = 'Displays a "comment was flagged" success page.'
)
delete_done = confirmation_view(
    template = "comments/deleted.html",
    doc = 'Displays a "comment was deleted" success page.'
)
approve_done = confirmation_view(
    template = "comments/approved.html",
    doc = 'Displays a "comment was approved" success page.'
)
| bsd-3-clause | ef47a729565fab0db3b07550e28f79dd | 30.2625 | 95 | 0.654538 | 3.969841 | false | false | false | false |
django-nonrel/django-nonrel | django/test/testcases.py | 157 | 24882 | import re
import sys
from urlparse import urlsplit, urlunsplit
from xml.dom.minidom import parseString, Node
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.core.signals import request_started
from django.core.urlresolvers import clear_url_caches
from django.db import (transaction, connection, connections, DEFAULT_DB_ALIAS,
reset_queries)
from django.http import QueryDict
from django.test import _doctest as doctest
from django.test.client import Client
from django.test.utils import get_warnings_state, restore_warnings_state
from django.utils import simplejson, unittest as ut2
from django.utils.encoding import smart_str
from django.utils.functional import wraps
__all__ = ('DocTestRunner', 'OutputChecker', 'TestCase', 'TransactionTestCase',
'skipIfDBFeature', 'skipUnlessDBFeature')
# The all() builtin only exists in Python >= 2.5; fall back to Django's
# compatibility implementation on older interpreters.
try:
    all
except NameError:
    from django.utils.itercompat import all
def normalize_long_ints(s):
    """Strip the 'L' suffix from long-integer literals, e.g. '22L' -> '22'.

    Defined as a proper function rather than a lambda assignment (PEP 8).
    """
    return re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)

def normalize_decimals(s):
    """Rewrite Decimal('x') reprs to the Decimal("x") form so doctest output
    compares equal regardless of the repr change made in Python 2.6."""
    return re.sub(r"Decimal\('(\d+(\.\d*)?)'\)",
                  lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
def to_list(value):
    """
    Puts value into a list if it's not already one.
    Returns an empty list if value is None.
    """
    if value is None:
        return []
    if isinstance(value, list):
        return value
    return [value]
# Save the genuine transaction-management functions so they can be restored
# after tests temporarily replace them with no-ops.
real_commit = transaction.commit
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
real_managed = transaction.managed
def nop(*args, **kwargs):
    """Accept any arguments, do nothing, return None (transaction stand-in)."""
    return None
def disable_transaction_methods():
    """Replace the transaction-management functions with no-ops."""
    for name in ('commit', 'rollback', 'enter_transaction_management',
                 'leave_transaction_management', 'managed'):
        setattr(transaction, name, nop)
def restore_transaction_methods():
    """Reinstate the genuine transaction functions saved at import time."""
    transaction.commit = real_commit
    transaction.rollback = real_rollback
    transaction.enter_transaction_management = real_enter_transaction_management
    transaction.leave_transaction_management = real_leave_transaction_management
    transaction.managed = real_managed
class OutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
    "The entry method for doctest output checking. Defers to a sequence of child checkers"
    checkers = (
        self.check_output_default,
        self.check_output_numeric,
        self.check_output_xml,
        self.check_output_json,
    )
    # Accept the output as soon as any comparison strategy matches.
    return any(checker(want, got, optionflags) for checker in checkers)
def check_output_default(self, want, got, optionflags):
    "The default comparator provided by doctest - not perfect, but good for most purposes"
    # Plain delegation to the stock doctest string comparison.
    return doctest.OutputChecker.check_output(self, want, got, optionflags)
def check_output_numeric(self, want, got, optionflags):
    """Doctest does an exact string comparison of output, which means that
    some numerically equivalent values aren't equal. This check normalizes
     * long integers (22L) so that they equal normal integers. (22)
     * Decimals so that they are comparable, regardless of the change
       made to __repr__ in Python 2.6.
    """
    # Normalize both sides identically, then fall back to the default check.
    return doctest.OutputChecker.check_output(self,
        normalize_decimals(normalize_long_ints(want)),
        normalize_decimals(normalize_long_ints(got)),
        optionflags)
def check_output_xml(self, want, got, optionsflags):
    """Tries to do a 'xml-comparision' of want and got. Plain string
    comparision doesn't always work because, for example, attribute
    ordering should not be important.

    Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
    """
    _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')

    def norm_whitespace(v):
        # Collapse runs of whitespace to a single space.
        return _norm_whitespace_re.sub(' ', v)

    def child_text(element):
        return ''.join([c.data for c in element.childNodes
                        if c.nodeType == Node.TEXT_NODE])

    def children(element):
        return [c for c in element.childNodes
                if c.nodeType == Node.ELEMENT_NODE]

    def norm_child_text(element):
        return norm_whitespace(child_text(element))

    def attrs_dict(element):
        # Comparing attributes as a dict ignores attribute ordering.
        return dict(element.attributes.items())

    def check_element(want_element, got_element):
        # Elements match when tag name, normalized text, attributes and all
        # children (recursively, in order) are equal.
        if want_element.tagName != got_element.tagName:
            return False
        if norm_child_text(want_element) != norm_child_text(got_element):
            return False
        if attrs_dict(want_element) != attrs_dict(got_element):
            return False
        want_children = children(want_element)
        got_children = children(got_element)
        if len(want_children) != len(got_children):
            return False
        for want, got in zip(want_children, got_children):
            if not check_element(want, got):
                return False
        return True

    want, got = self._strip_quotes(want, got)
    want = want.replace('\\n', '\n')
    got = got.replace('\\n', '\n')

    # If the string is not a complete xml document, we may need to add a
    # root element. This allow us to compare fragments, like "<foo/><bar/>"
    if not want.startswith('<?xml'):
        wrapper = '<root>%s</root>'
        want = wrapper % want
        got = wrapper % got

    # Parse the want and got strings, and compare the parsings.
    # Catch Exception rather than using a bare except so that
    # KeyboardInterrupt and SystemExit are not swallowed here.
    try:
        want_root = parseString(want).firstChild
        got_root = parseString(got).firstChild
    except Exception:
        return False
    return check_element(want_root, got_root)
def check_output_json(self, want, got, optionsflags):
"Tries to compare want and got as if they were JSON-encoded data"
want, got = self._strip_quotes(want, got)
try:
want_json = simplejson.loads(want)
got_json = simplejson.loads(got)
except:
return False
return want_json == got_json
def _strip_quotes(self, want, got):
"""
Strip quotes of doctests output values:
>>> o = OutputChecker()
>>> o._strip_quotes("'foo'")
"foo"
>>> o._strip_quotes('"foo"')
"foo"
>>> o._strip_quotes("u'foo'")
"foo"
>>> o._strip_quotes('u"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
class DocTestRunner(doctest.DocTestRunner):
    """
    Doctest runner that turns on ELLIPSIS matching for every test and
    rolls back database state when an example raises unexpectedly.
    """

    def __init__(self, *args, **kwargs):
        doctest.DocTestRunner.__init__(self, *args, **kwargs)
        # Allow "..." wildcards in expected doctest output everywhere.
        self.optionflags = doctest.ELLIPSIS

    def report_unexpected_exception(self, out, test, example, exc_info):
        doctest.DocTestRunner.report_unexpected_exception(
            self, out, test, example, exc_info)
        # Rollback, in case of database errors. Otherwise they'd have
        # side effects on other tests.
        for alias in connections:
            transaction.rollback_unless_managed(using=alias)
class _AssertNumQueriesContext(object):
    """
    Context manager asserting that exactly ``num`` database queries are
    executed on ``connection`` within the ``with`` block.
    """

    def __init__(self, test_case, num, connection):
        self.test_case = test_case
        self.num = num
        self.connection = connection

    def __enter__(self):
        # Force the debug cursor on so queries get recorded, remembering
        # the previous setting for restoration on exit.
        self.old_debug_cursor = self.connection.use_debug_cursor
        self.connection.use_debug_cursor = True
        self.starting_queries = len(self.connection.queries)
        # Keep new requests from clearing the query log mid-block.
        request_started.disconnect(reset_queries)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.use_debug_cursor = self.old_debug_cursor
        request_started.connect(reset_queries)
        if exc_type is not None:
            # An exception escaped the block; let it propagate untouched.
            return
        executed = len(self.connection.queries) - self.starting_queries
        self.test_case.assertEqual(
            executed, self.num,
            "%d queries executed, %d expected" % (executed, self.num))
class TransactionTestCase(ut2.TestCase):
    """
    Test case that resets database state around each test by flushing the
    database (and reloading fixtures), and provides a Django test client
    plus Django-specific assertion helpers.
    """
    # The class we'll use for the test client self.client.
    # Can be overridden in derived classes.
    client_class = Client

    def _pre_setup(self):
        """Performs any pre-test setup. This includes:

            * Flushing the database.
            * If the Test Case class has a 'fixtures' member, installing the
              named fixtures.
            * If the Test Case class has a 'urls' member, replace the
              ROOT_URLCONF with it.
            * Clearing the mail test outbox.
        """
        self._fixture_setup()
        self._urlconf_setup()
        mail.outbox = []

    def _fixture_setup(self):
        # If the test case has a multi_db=True flag, flush all databases.
        # Otherwise, just flush default.
        if getattr(self, 'multi_db', False):
            databases = connections
        else:
            databases = [DEFAULT_DB_ALIAS]
        for db in databases:
            call_command('flush', verbosity=0, interactive=False, database=db)

            if hasattr(self, 'fixtures'):
                # We have to use this slightly awkward syntax due to the fact
                # that we're using *args and **kwargs together.
                call_command('loaddata', *self.fixtures, **{'verbosity': 0, 'database': db})

    def _urlconf_setup(self):
        # Swap in the test-specific URLconf, remembering the original so
        # _urlconf_teardown() can restore it.
        if hasattr(self, 'urls'):
            self._old_root_urlconf = settings.ROOT_URLCONF
            settings.ROOT_URLCONF = self.urls
            clear_url_caches()

    def __call__(self, result=None):
        """
        Wrapper around default __call__ method to perform common Django test
        set up. This means that user-defined Test Cases aren't required to
        include a call to super().setUp().
        """
        self.client = self.client_class()
        try:
            self._pre_setup()
        except (KeyboardInterrupt, SystemExit):
            # Never swallow user interrupts or interpreter shutdown.
            raise
        except Exception:
            import sys
            result.addError(self, sys.exc_info())
            return
        super(TransactionTestCase, self).__call__(result)
        try:
            self._post_teardown()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            import sys
            result.addError(self, sys.exc_info())
            return

    def _post_teardown(self):
        """ Performs any post-test things. This includes:

            * Putting back the original ROOT_URLCONF if it was changed.
            * Force closing the connection, so that the next test gets
              a clean cursor.
        """
        self._fixture_teardown()
        self._urlconf_teardown()
        # Some DB cursors include SQL statements as part of cursor
        # creation. If you have a test that does rollback, the effect
        # of these statements is lost, which can affect the operation
        # of tests (e.g., losing a timezone setting causing objects to
        # be created with the wrong time).
        # To make sure this doesn't happen, get a clean connection at the
        # start of every test.
        for connection in connections.all():
            connection.close()

    def _fixture_teardown(self):
        # Hook for subclasses (e.g. TestCase) to undo fixture setup.
        pass

    def _urlconf_teardown(self):
        # Restore the ROOT_URLCONF saved by _urlconf_setup(), if any.
        if hasattr(self, '_old_root_urlconf'):
            settings.ROOT_URLCONF = self._old_root_urlconf
            clear_url_caches()

    def save_warnings_state(self):
        """
        Saves the state of the warnings module
        """
        self._warnings_state = get_warnings_state()

    def restore_warnings_state(self):
        """
        Restores the state of the warnings module to the state
        saved by save_warnings_state()
        """
        restore_warnings_state(self._warnings_state)

    def assertRedirects(self, response, expected_url, status_code=302,
                        target_status_code=200, host=None, msg_prefix=''):
        """Asserts that a response redirected to a specific URL, and that the
        redirect URL can be loaded.

        Note that assertRedirects won't work for external links since it uses
        TestClient to do a request.
        """
        if msg_prefix:
            msg_prefix += ": "

        if hasattr(response, 'redirect_chain'):
            # The request was a followed redirect
            self.assertTrue(len(response.redirect_chain) > 0,
                msg_prefix + "Response didn't redirect as expected: Response"
                " code was %d (expected %d)" %
                    (response.status_code, status_code))

            self.assertEqual(response.redirect_chain[0][1], status_code,
                msg_prefix + "Initial response didn't redirect as expected:"
                " Response code was %d (expected %d)" %
                    (response.redirect_chain[0][1], status_code))

            url, status_code = response.redirect_chain[-1]

            self.assertEqual(response.status_code, target_status_code,
                msg_prefix + "Response didn't redirect as expected: Final"
                " Response code was %d (expected %d)" %
                    (response.status_code, target_status_code))
        else:
            # Not a followed redirect
            self.assertEqual(response.status_code, status_code,
                msg_prefix + "Response didn't redirect as expected: Response"
                " code was %d (expected %d)" %
                    (response.status_code, status_code))

            url = response['Location']
            scheme, netloc, path, query, fragment = urlsplit(url)

            redirect_response = response.client.get(path, QueryDict(query))

            # Get the redirection page, using the same client that was used
            # to obtain the original response.
            self.assertEqual(redirect_response.status_code, target_status_code,
                msg_prefix + "Couldn't retrieve redirection page '%s':"
                " response code was %d (expected %d)" %
                    (path, redirect_response.status_code, target_status_code))

        e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
        if not (e_scheme or e_netloc):
            # A relative expected_url is compared against an absolute URL
            # rooted at the test server (or the given host).
            expected_url = urlunsplit(('http', host or 'testserver', e_path,
                e_query, e_fragment))

        self.assertEqual(url, expected_url,
            msg_prefix + "Response redirected to '%s', expected '%s'" %
                (url, expected_url))

    def assertContains(self, response, text, count=None, status_code=200,
                       msg_prefix=''):
        """
        Asserts that a response indicates that some content was retrieved
        successfully, (i.e., the HTTP status code was as expected), and that
        ``text`` occurs ``count`` times in the content of the response.
        If ``count`` is None, the count doesn't matter - the assertion is true
        if the text occurs at least once in the response.
        """
        if msg_prefix:
            msg_prefix += ": "

        self.assertEqual(response.status_code, status_code,
            msg_prefix + "Couldn't retrieve content: Response code was %d"
            " (expected %d)" % (response.status_code, status_code))
        # Encode with the response's own charset so the substring search
        # compares like with like.
        text = smart_str(text, response._charset)
        real_count = response.content.count(text)
        if count is not None:
            self.assertEqual(real_count, count,
                msg_prefix + "Found %d instances of '%s' in response"
                " (expected %d)" % (real_count, text, count))
        else:
            self.assertTrue(real_count != 0,
                msg_prefix + "Couldn't find '%s' in response" % text)

    def assertNotContains(self, response, text, status_code=200,
                          msg_prefix=''):
        """
        Asserts that a response indicates that some content was retrieved
        successfully, (i.e., the HTTP status code was as expected), and that
        ``text`` doesn't occur in the content of the response.
        """
        if msg_prefix:
            msg_prefix += ": "

        self.assertEqual(response.status_code, status_code,
            msg_prefix + "Couldn't retrieve content: Response code was %d"
            " (expected %d)" % (response.status_code, status_code))
        text = smart_str(text, response._charset)
        self.assertEqual(response.content.count(text), 0,
            msg_prefix + "Response should not contain '%s'" % text)

    def assertFormError(self, response, form, field, errors, msg_prefix=''):
        """
        Asserts that a form used to render the response has a specific field
        error.
        """
        if msg_prefix:
            msg_prefix += ": "

        # Put context(s) into a list to simplify processing.
        contexts = to_list(response.context)
        if not contexts:
            self.fail(msg_prefix + "Response did not use any contexts to "
                      "render the response")

        # Put error(s) into a list to simplify processing.
        errors = to_list(errors)

        # Search all contexts for the error.
        found_form = False
        for i,context in enumerate(contexts):
            if form not in context:
                continue
            found_form = True
            for err in errors:
                if field:
                    if field in context[form].errors:
                        # Field is bound and has errors; the expected error
                        # must be among them.
                        field_errors = context[form].errors[field]
                        self.assertTrue(err in field_errors,
                            msg_prefix + "The field '%s' on form '%s' in"
                            " context %d does not contain the error '%s'"
                            " (actual errors: %s)" %
                                (field, form, i, err, repr(field_errors)))
                    elif field in context[form].fields:
                        # The field exists but carries no errors at all.
                        self.fail(msg_prefix + "The field '%s' on form '%s'"
                                  " in context %d contains no errors" %
                                      (field, form, i))
                    else:
                        # The form doesn't even define that field.
                        self.fail(msg_prefix + "The form '%s' in context %d"
                                  " does not contain the field '%s'" %
                                      (form, i, field))
                else:
                    # No field given: look among the non-field errors.
                    non_field_errors = context[form].non_field_errors()
                    self.assertTrue(err in non_field_errors,
                        msg_prefix + "The form '%s' in context %d does not"
                        " contain the non-field error '%s'"
                        " (actual errors: %s)" %
                            (form, i, err, non_field_errors))
        if not found_form:
            self.fail(msg_prefix + "The form '%s' was not used to render the"
                      " response" % form)

    def assertTemplateUsed(self, response, template_name, msg_prefix=''):
        """
        Asserts that the template with the provided name was used in rendering
        the response.
        """
        if msg_prefix:
            msg_prefix += ": "

        template_names = [t.name for t in response.templates]
        if not template_names:
            self.fail(msg_prefix + "No templates used to render the response")
        self.assertTrue(template_name in template_names,
            msg_prefix + "Template '%s' was not a template used to render"
            " the response. Actual template(s) used: %s" %
                (template_name, u', '.join(template_names)))

    def assertTemplateNotUsed(self, response, template_name, msg_prefix=''):
        """
        Asserts that the template with the provided name was NOT used in
        rendering the response.
        """
        if msg_prefix:
            msg_prefix += ": "

        template_names = [t.name for t in response.templates]
        self.assertFalse(template_name in template_names,
            msg_prefix + "Template '%s' was used unexpectedly in rendering"
            " the response" % template_name)

    def assertQuerysetEqual(self, qs, values, transform=repr):
        # Compare a queryset against a list of expected values, applying
        # `transform` (repr by default) to each object first.
        return self.assertEqual(map(transform, qs), values)

    def assertNumQueries(self, num, func=None, *args, **kwargs):
        # Assert that func(*args, **kwargs) (or, when func is None, the body
        # of a `with` block) runs exactly `num` queries on the connection
        # selected via the optional `using` keyword argument.
        using = kwargs.pop("using", DEFAULT_DB_ALIAS)
        connection = connections[using]

        context = _AssertNumQueriesContext(self, num, connection)
        if func is None:
            # Used as a context manager.
            return context

        # Basically emulate the `with` statement here.

        context.__enter__()
        try:
            func(*args, **kwargs)
        except:
            # Still report the query count, then re-raise the original
            # exception (hence the deliberately broad except clause).
            context.__exit__(*sys.exc_info())
            raise
        else:
            context.__exit__(*sys.exc_info())
def connections_support_transactions():
    """
    Returns True only when every configured database connection supports
    transactions. (Messy because e.g. MySQL pre-InnoDB may support none.)
    """
    for conn in connections.all():
        if not conn.features.supports_transactions:
            return False
    return True
class TestCase(TransactionTestCase):
    """
    Does basically the same as TransactionTestCase, but wraps every test in
    a transaction, monkey-patches the real transaction management routines
    into no-ops, and rolls the test transaction back when the test ends.
    Use TransactionTestCase when a test needs real transaction management.
    """

    def _fixture_setup(self):
        if not connections_support_transactions():
            # No transaction support: fall back to the flush-based setup.
            return super(TestCase, self)._fixture_setup()

        # With multi_db=True, set up every configured database; otherwise
        # only the default one.
        if getattr(self, 'multi_db', False):
            aliases = connections
        else:
            aliases = [DEFAULT_DB_ALIAS]

        for alias in aliases:
            transaction.enter_transaction_management(using=alias)
            transaction.managed(True, using=alias)
        disable_transaction_methods()

        from django.contrib.sites.models import Site
        Site.objects.clear_cache()

        if hasattr(self, 'fixtures'):
            for alias in aliases:
                call_command('loaddata', *self.fixtures, **{
                    'verbosity': 0,
                    'commit': False,
                    'database': alias
                })

    def _fixture_teardown(self):
        if not connections_support_transactions():
            return super(TestCase, self)._fixture_teardown()

        # Tear down the same set of databases that _fixture_setup used.
        if getattr(self, 'multi_db', False):
            aliases = connections
        else:
            aliases = [DEFAULT_DB_ALIAS]

        # Re-enable the real transaction functions before rolling back.
        restore_transaction_methods()
        for alias in aliases:
            transaction.rollback(using=alias)
            transaction.leave_transaction_management(using=alias)
def _deferredSkip(condition, reason):
    """
    Decorator factory that skips the decorated test when ``condition()``
    is true, evaluating the condition lazily at call time.
    """
    def decorator(test_func):
        is_testcase_class = (isinstance(test_func, type)
                             and issubclass(test_func, TestCase))
        if is_testcase_class:
            # Whole TestCase subclasses are handled by unittest itself via
            # the __unittest_skip_why__ attribute set below.
            test_item = test_func
        else:
            @wraps(test_func)
            def skip_wrapper(*args, **kwargs):
                if condition():
                    raise ut2.SkipTest(reason)
                return test_func(*args, **kwargs)
            test_item = skip_wrapper
        test_item.__unittest_skip_why__ = reason
        return test_item
    return decorator
def skipIfDBFeature(feature):
    """Skip the decorated test when the database reports the named feature."""
    def has_feature():
        return getattr(connection.features, feature)
    return _deferredSkip(has_feature, "Database has feature %s" % feature)
def skipUnlessDBFeature(feature):
    """Skip the decorated test unless the database reports the named feature."""
    def lacks_feature():
        return not getattr(connection.features, feature)
    return _deferredSkip(lacks_feature,
                         "Database doesn't support feature %s" % feature)
| bsd-3-clause | 70a90e08a8f1058f2c34edb2f856e0c2 | 38.308057 | 116 | 0.580701 | 4.435294 | false | true | false | false |
django-nonrel/django-nonrel | django/contrib/gis/db/backends/postgis/models.py | 403 | 1970 | """
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
    """
    The 'geometry_columns' table from the PostGIS. See the PostGIS
    documentation at Ch. 4.2.2.
    """
    # Columns mirror the PostGIS system catalog exactly; field order and
    # names must match the existing table.
    f_table_catalog = models.CharField(max_length=256)
    f_table_schema = models.CharField(max_length=256)
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    coord_dimension = models.IntegerField()
    srid = models.IntegerField(primary_key=True)
    type = models.CharField(max_length=30)

    class Meta:
        # Map onto the pre-existing PostGIS system table; Django must not
        # create or alter it.
        db_table = 'geometry_columns'
        managed = False

    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the
        feature table name.
        """
        return 'f_table_name'

    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the
        feature geometry column.
        """
        return 'f_geometry_column'

    def __unicode__(self):
        return "%s.%s - %dD %s field (SRID: %d)" % \
               (self.f_table_name, self.f_geometry_column,
                self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    The 'spatial_ref_sys' table from PostGIS. See the PostGIS
    documentation at Ch. 4.2.1.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    srtext = models.CharField(max_length=2048)
    proj4text = models.CharField(max_length=2048)

    class Meta:
        # Map onto the pre-existing PostGIS system table; not managed by
        # Django's schema machinery.
        db_table = 'spatial_ref_sys'
        managed = False

    @property
    def wkt(self):
        # SpatialRefSysMixin expects a `wkt` property; on PostGIS that is
        # the `srtext` column.
        return self.srtext

    @classmethod
    def wkt_col(cls):
        # Name of the column holding the WKT spatial reference string.
        return 'srtext'
| bsd-3-clause | 2481211257ad125d589d3c31e450be30 | 28.848485 | 70 | 0.642132 | 3.78119 | false | false | false | false |
django-nonrel/django-nonrel | django/contrib/gis/maps/google/__init__.py | 604 | 2648 | """
This module houses the GoogleMap object, used for generating
the needed javascript to embed Google Maps in a Web page.
Google(R) is a registered trademark of Google, Inc. of Mountain View, California.
Example:
* In the view:
return render_to_response('template.html', {'google' : GoogleMap(key="abcdefg")})
* In the template:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{{ google.xhtml }}
<head>
<title>Google Maps via GeoDjango</title>
{{ google.style }}
{{ google.scripts }}
</head>
{{ google.body }}
<div id="{{ google.dom_id }}" style="width:600px;height:400px;"></div>
</body>
</html>
Note: If you want to be more explicit in your templates, the following are
equivalent:
{{ google.body }} => "<body {{ google.onload }} {{ google.onunload }}>"
{{ google.xhtml }} => "<html xmlns="http://www.w3.org/1999/xhtml" {{ google.xmlns }}>"
{{ google.style }} => "<style>{{ google.vml_css }}</style>"
Explanation:
- The `xhtml` property provides the correct XML namespace needed for
Google Maps to operate in IE using XHTML. Google Maps on IE uses
VML to draw polylines. Returns, by default:
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
- The `style` property provides the correct style tag for the CSS
properties required by Google Maps on IE:
<style type="text/css">v\:* {behavior:url(#default#VML);}</style>
- The `scripts` property provides the necessary <script> tags for
including the Google Maps javascript, as well as including the
generated javascript.
- The `body` property provides the correct attributes for the
body tag to load the generated javascript. By default, returns:
<body onload="gmap_load()" onunload="GUnload()">
- The `dom_id` property returns the DOM id for the map. Defaults to "map".
The following attributes may be set or customized in your local settings:
* GOOGLE_MAPS_API_KEY: String of your Google Maps API key. These are tied to
to a domain. May be obtained from http://www.google.com/apis/maps/
* GOOGLE_MAPS_API_VERSION (optional): Defaults to using "2.x"
* GOOGLE_MAPS_URL (optional): Must have a substitution ('%s') for the API
version.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap, GoogleMapSet
from django.contrib.gis.maps.google.overlays import GEvent, GIcon, GMarker, GPolygon, GPolyline
from django.contrib.gis.maps.google.zoom import GoogleZoom
| bsd-3-clause | 0dac93eaf5ca74ead1c91216299c9a50 | 42.409836 | 126 | 0.672205 | 3.657459 | false | false | false | false |
django-nonrel/django-nonrel | django/utils/crypto.py | 245 | 1443 | """
Django's standard crypto functions and utilities.
"""
import hmac
from django.conf import settings
from django.utils.hashcompat import sha_constructor, sha_hmac
def salted_hmac(key_salt, value, secret=None):
    """
    Returns the HMAC-SHA1 of 'value', using a key derived from key_salt and
    a secret (which defaults to settings.SECRET_KEY).

    A different key_salt should be passed in for every application of HMAC.
    """
    if secret is None:
        secret = settings.SECRET_KEY

    # Derive a per-purpose key from the base secret by hashing it together
    # with the salt; SHA1 acts as the pseudo-random function here.
    derived_key = sha_constructor(key_salt + secret).digest()

    # If len(key_salt + secret) > sha_constructor().block_size, the line
    # above is redundant (hmac hashes over-long keys itself), but deriving
    # explicitly guarantees the behavior regardless of input length.
    return hmac.new(derived_key, msg=value, digestmod=sha_hmac)
def constant_time_compare(val1, val2):
    """
    Returns True if the two strings are equal, False otherwise.

    The time taken is independent of the number of characters that match,
    which guards against timing attacks on e.g. signature checks.
    """
    if len(val1) != len(val2):
        return False
    # Accumulate all per-character differences before deciding, so the
    # loop always runs to completion.
    mismatch = 0
    for ch1, ch2 in zip(val1, val2):
        mismatch |= ord(ch1) ^ ord(ch2)
    return mismatch == 0
| bsd-3-clause | 71537f085fd6f92fb5d3bb500112b742 | 31.066667 | 80 | 0.679141 | 3.837766 | false | false | false | false |
django-nonrel/django-nonrel | tests/regressiontests/forms/tests/media.py | 51 | 46165 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.forms import TextInput, Media, TextInput, CharField, Form, MultiWidget
from django.utils.unittest import TestCase
class FormsMediaTestCase(TestCase):
# Tests for the media handling on widgets and forms
def setUp(self):
super(FormsMediaTestCase, self).setUp()
self.original_media_url = settings.MEDIA_URL
settings.MEDIA_URL = 'http://media.example.com/media/'
def tearDown(self):
settings.MEDIA_URL = self.original_media_url
super(FormsMediaTestCase, self).tearDown()
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1','/path/to/css2')}, js=('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_media_inheritance_from_property(self):
        """A subclass ``Media`` declaration extends media a parent exposed via a property; ``extend=False`` opts out."""
        # If a widget extends another but defines media, it extends the parent widget's media,
        # even if the parent defined media using a property.
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget4(TextInput):
            def _media(self):
                return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
            media = property(_media)
        class MyWidget9(MyWidget4):
            class Media:
                css = {
                    'all': ('/other/path',)
                }
                js = ('/other/js',)
        w9 = MyWidget9()
        # The class-based Media is appended to what MyWidget4's property returned.
        self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
        # A widget can disable media inheritance by specifying 'extend=False'
        class MyWidget10(MyWidget1):
            class Media:
                extend = False
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        w10 = MyWidget10()
        # Only MyWidget10's own declarations appear; the relative 'path/to/css1' is URL-prefixed.
        self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_media_inheritance_extends(self):
        """``extend = True`` yields the parent's full media plus the subclass's own additions, deduplicated."""
        # A widget can explicitly enable full media inheritance by specifying 'extend=True'
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget11(MyWidget1):
            class Media:
                extend = True
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        w11 = MyWidget11()
        # Resources declared by both classes (css1, js1) are listed only once.
        self.assertEqual(str(w11.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_media_inheritance_single_type(self):
        """``extend`` given as a tuple inherits only the named media types (CSS here; JS is not inherited)."""
        # A widget can enable inheritance of one media type by specifying extend as a tuple
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget12(MyWidget1):
            class Media:
                extend = ('css',)
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        w12 = MyWidget12()
        # CSS combines parent and child; JS comes from MyWidget12 alone (no js2/js3).
        self.assertEqual(str(w12.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_multi_media(self):
        """CSS can be declared per output medium; each <link> carries the media attribute of its dict key."""
        ###############################################################
        # Multi-media handling for CSS
        ###############################################################
        # A widget can define CSS media for multiple output media types
        class MultimediaWidget(TextInput):
            class Media:
                css = {
                    'screen, print': ('/file1','/file2'),
                    'screen': ('/file3',),
                    'print': ('/file4',)
                }
                js = ('/path/to/js1','/path/to/js4')
        multimedia = MultimediaWidget()
        # One <link> per file per media key; JS is unaffected by the CSS media keys.
        self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_multi_widget(self):
        """A MultiWidget's default media is the combined, deduplicated media of its component widgets."""
        ###############################################################
        # Multiwidget media handling
        ###############################################################
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget2(TextInput):
            class Media:
                css = {
                    'all': ('/path/to/css2','/path/to/css3')
                }
                js = ('/path/to/js1','/path/to/js4')
        class MyWidget3(TextInput):
            class Media:
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        # MultiWidgets have a default media definition that gets all the
        # media from the component widgets
        class MyMultiWidget(MultiWidget):
            def __init__(self, attrs=None):
                widgets = [MyWidget1, MyWidget2, MyWidget3]
                super(MyMultiWidget, self).__init__(widgets, attrs)
        mymulti = MyMultiWidget()
        # Resources shared by several component widgets appear only once.
        self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_form_media(self):
        """Forms aggregate widget media, support ``+`` on media objects, add their own Media, and expose it to templates."""
        ###############################################################
        # Media processing for forms
        ###############################################################
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget2(TextInput):
            class Media:
                css = {
                    'all': ('/path/to/css2','/path/to/css3')
                }
                js = ('/path/to/js1','/path/to/js4')
        class MyWidget3(TextInput):
            class Media:
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        # You can ask a form for the media required by its widgets.
        class MyForm(Form):
            field1 = CharField(max_length=20, widget=MyWidget1())
            field2 = CharField(max_length=20, widget=MyWidget2())
        f1 = MyForm()
        self.assertEqual(str(f1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
        # Form media can be combined to produce a single media definition.
        class AnotherForm(Form):
            field3 = CharField(max_length=20, widget=MyWidget3())
        f2 = AnotherForm()
        self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
        # Forms can also define media, following the same rules as widgets.
        class FormWithMedia(Form):
            field1 = CharField(max_length=20, widget=MyWidget1())
            field2 = CharField(max_length=20, widget=MyWidget2())
            class Media:
                js = ('/some/form/javascript',)
                css = {
                    'all': ('/some/form/css',)
                }
        f3 = FormWithMedia()
        # The form's own Media is appended after the media gathered from its widgets.
        self.assertEqual(str(f3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
        # Media works in templates
        from django.template import Template, Context
        self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
class StaticFormsMediaTestCase(TestCase):
    """Tests for the media handling on widgets and forms, run with MEDIA_URL and STATIC_URL overridden."""
    def setUp(self):
        # Remember the real settings so tearDown can put them back exactly.
        super(StaticFormsMediaTestCase, self).setUp()
        self.original_media_url = settings.MEDIA_URL
        self.original_static_url = settings.STATIC_URL
        test_url = 'http://media.example.com/static/'
        settings.MEDIA_URL = test_url
        settings.STATIC_URL = test_url
    def tearDown(self):
        # Restore the values captured in setUp before base-class cleanup runs.
        settings.MEDIA_URL = self.original_media_url
        settings.STATIC_URL = self.original_static_url
        super(StaticFormsMediaTestCase, self).tearDown()
    def test_construction(self):
        """Media objects can be built from keyword arguments or from any class with css/js attributes."""
        # Check construction of media objects
        m = Media(css={'all': ('path/to/css1','/path/to/css2')}, js=('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3'))
        self.assertEqual(str(m), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
        # Media can also be constructed from an arbitrary object carrying css/js attributes.
        class Foo:
            css = {
                'all': ('path/to/css1','/path/to/css2')
            }
            js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        m3 = Media(Foo)
        self.assertEqual(str(m3), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
        # A widget can exist without a media definition
        class MyWidget(TextInput):
            pass
        w = MyWidget()
        self.assertEqual(str(w.media), '')
    def test_media_dsl(self):
        """Class-based Media definitions render with relative CSS paths URL-prefixed, and can be indexed by type."""
        ###############################################################
        # DSL Class-based media definitions
        ###############################################################
        # A widget can define media if it needs to.
        # Any absolute path will be preserved; relative paths are combined
        # with the value of settings.MEDIA_URL
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        w1 = MyWidget1()
        self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
        # Media objects can be interrogated by media type
        self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
        self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
    def test_combine_media(self):
        """Adding Media objects merges and deduplicates them without mutating the operands."""
        # Media objects can be combined. Any given media resource will appear only
        # once. Duplicated media definitions are ignored.
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget2(TextInput):
            class Media:
                css = {
                    'all': ('/path/to/css2','/path/to/css3')
                }
                js = ('/path/to/js1','/path/to/js4')
        class MyWidget3(TextInput):
            class Media:
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        w1 = MyWidget1()
        w2 = MyWidget2()
        w3 = MyWidget3()
        self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
        # Check that media addition hasn't affected the original objects
        self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
        # Regression check for #12879: specifying the same CSS or JS file
        # multiple times in a single Media instance should result in that file
        # only being included once.
        class MyWidget4(TextInput):
            class Media:
                css = {'all': ('/path/to/css1', '/path/to/css1')}
                js = ('/path/to/js1', '/path/to/js1')
        w4 = MyWidget4()
        self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
    def test_media_property_parent_references(self):
        """A media property can add to a parent's class-based Media via ``super(...).media + Media(...)``."""
        # Media properties can reference the media of their parents,
        # even if the parent media was defined using a class
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget6(MyWidget1):
            def _media(self):
                return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
            media = property(_media)
        w6 = MyWidget6()
        # Parent media first, then the extra resources added by the property.
        self.assertEqual(str(w6.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
    def test_media_inheritance(self):
        """Subclasses inherit parent media when they define none, and extend it by default when they do."""
        ###############################################################
        # Inheritance of media
        ###############################################################
        # If a widget extends another but provides no media definition, it inherits the parent widget's media
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget7(MyWidget1):
            pass
        w7 = MyWidget7()
        self.assertEqual(str(w7.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
        # If a widget extends another but defines media, it extends the parent widget's media by default
        class MyWidget8(MyWidget1):
            class Media:
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        w8 = MyWidget8()
        # Combined output deduplicates resources shared with the parent (css1, js1).
        self.assertEqual(str(w8.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_media_inheritance_from_property(self):
        """A subclass ``Media`` declaration extends media a parent exposed via a property; ``extend=False`` opts out."""
        # If a widget extends another but defines media, it extends the parent widget's media,
        # even if the parent defined media using a property.
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget4(TextInput):
            def _media(self):
                return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
            media = property(_media)
        class MyWidget9(MyWidget4):
            class Media:
                css = {
                    'all': ('/other/path',)
                }
                js = ('/other/js',)
        w9 = MyWidget9()
        # The class-based Media is appended to what MyWidget4's property returned.
        self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
        # A widget can disable media inheritance by specifying 'extend=False'
        class MyWidget10(MyWidget1):
            class Media:
                extend = False
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        w10 = MyWidget10()
        # Only MyWidget10's own declarations appear; the relative 'path/to/css1' is URL-prefixed.
        self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_media_inheritance_extends(self):
        """``extend = True`` yields the parent's full media plus the subclass's own additions, deduplicated."""
        # A widget can explicitly enable full media inheritance by specifying 'extend=True'
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget11(MyWidget1):
            class Media:
                extend = True
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        w11 = MyWidget11()
        # Resources declared by both classes (css1, js1) are listed only once.
        self.assertEqual(str(w11.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_media_inheritance_single_type(self):
        """``extend`` given as a tuple inherits only the named media types (CSS here; JS is not inherited)."""
        # A widget can enable inheritance of one media type by specifying extend as a tuple
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget12(MyWidget1):
            class Media:
                extend = ('css',)
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        w12 = MyWidget12()
        # CSS combines parent and child; JS comes from MyWidget12 alone (no js2/js3).
        self.assertEqual(str(w12.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_multi_media(self):
        """CSS can be declared per output medium; each <link> carries the media attribute of its dict key."""
        ###############################################################
        # Multi-media handling for CSS
        ###############################################################
        # A widget can define CSS media for multiple output media types
        class MultimediaWidget(TextInput):
            class Media:
                css = {
                    'screen, print': ('/file1','/file2'),
                    'screen': ('/file3',),
                    'print': ('/file4',)
                }
                js = ('/path/to/js1','/path/to/js4')
        multimedia = MultimediaWidget()
        # One <link> per file per media key; JS is unaffected by the CSS media keys.
        self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_multi_widget(self):
        """A MultiWidget's default media is the combined, deduplicated media of its component widgets."""
        ###############################################################
        # Multiwidget media handling
        ###############################################################
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget2(TextInput):
            class Media:
                css = {
                    'all': ('/path/to/css2','/path/to/css3')
                }
                js = ('/path/to/js1','/path/to/js4')
        class MyWidget3(TextInput):
            class Media:
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        # MultiWidgets have a default media definition that gets all the
        # media from the component widgets
        class MyMultiWidget(MultiWidget):
            def __init__(self, attrs=None):
                widgets = [MyWidget1, MyWidget2, MyWidget3]
                super(MyMultiWidget, self).__init__(widgets, attrs)
        mymulti = MyMultiWidget()
        # Resources shared by several component widgets appear only once.
        self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
    def test_form_media(self):
        """Forms aggregate widget media, support ``+`` on media objects, add their own Media, and expose it to templates."""
        ###############################################################
        # Media processing for forms
        ###############################################################
        class MyWidget1(TextInput):
            class Media:
                css = {
                    'all': ('path/to/css1','/path/to/css2')
                }
                js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
        class MyWidget2(TextInput):
            class Media:
                css = {
                    'all': ('/path/to/css2','/path/to/css3')
                }
                js = ('/path/to/js1','/path/to/js4')
        class MyWidget3(TextInput):
            class Media:
                css = {
                    'all': ('/path/to/css3','path/to/css1')
                }
                js = ('/path/to/js1','/path/to/js4')
        # You can ask a form for the media required by its widgets.
        class MyForm(Form):
            field1 = CharField(max_length=20, widget=MyWidget1())
            field2 = CharField(max_length=20, widget=MyWidget2())
        f1 = MyForm()
        self.assertEqual(str(f1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
        # Form media can be combined to produce a single media definition.
        class AnotherForm(Form):
            field3 = CharField(max_length=20, widget=MyWidget3())
        f2 = AnotherForm()
        self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
        # Forms can also define media, following the same rules as widgets.
        class FormWithMedia(Form):
            field1 = CharField(max_length=20, widget=MyWidget1())
            field2 = CharField(max_length=20, widget=MyWidget2())
            class Media:
                js = ('/some/form/javascript',)
                css = {
                    'all': ('/some/form/css',)
                }
        f3 = FormWithMedia()
        # The form's own Media is appended after the media gathered from its widgets.
        self.assertEqual(str(f3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
        # Media works in templates
        from django.template import Template, Context
        self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
from optparse import make_option
import sys
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
    def handle_noargs(self, **options):
        """
        Entry point for the ``syncdb`` management command.

        Creates the database tables for every model in INSTALLED_APPS whose
        table does not already exist, resolves pending cross-model
        references, emits the post_syncdb signal, installs per-app custom
        SQL and indexes for the newly created models, and finally loads the
        ``initial_data`` fixtures (unless disabled via the stealth
        ``load_initial_data`` option used by the test framework).
        """
        verbosity = int(options.get('verbosity', 1))
        interactive = options.get('interactive')
        show_traceback = options.get('traceback', False)

        # Stealth option -- 'load_initial_data' is used by the testing setup
        # process to disable initial fixture loading.
        load_initial_data = options.get('load_initial_data', True)

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError, exc:
                # This is slightly hackish. We want to ignore ImportErrors
                # if the "management" module itself is missing -- but we don't
                # want to ignore the exception if the management module exists
                # but raises an ImportError for some reason. The only way we
                # can do this is to check the text of the exception. Note that
                # we're a bit broad in how we check the text, because different
                # Python implementations may not use the same text.
                # CPython uses the text "No module named management"
                # PyPy uses "No module named myproject.myapp.management"
                msg = exc.args[0]
                if not msg.startswith('No module named') or 'management' not in msg:
                    raise

        db = options.get('database', DEFAULT_DB_ALIAS)
        connection = connections[db]
        cursor = connection.cursor()

        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        created_models = set()
        pending_references = {}

        # Build the manifest of apps and models that are to be synchronized:
        # (app label, [models the router allows on this database]) pairs.
        all_models = [
            (app.__name__.split('.')[-2],
                [m for m in models.get_models(app, include_auto_created=True)
                if router.allow_syncdb(db, m)])
            for app in models.get_apps()
        ]

        def model_installed(model):
            # A model counts as "not installed" only if neither its own table
            # nor the table of the model it was auto-created for exists yet.
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not ((converter(opts.db_table) in tables) or
                (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))

        manifest = SortedDict(
            (app_name, filter(model_installed, model_list))
            for app_name, model_list in all_models
        )

        # Create the tables for each model
        if verbosity >= 1:
            print "Creating tables ..."
        for app_name, model_list in manifest.items():
            for model in model_list:
                # Create the model's database table, if it doesn't already exist.
                if verbosity >= 3:
                    print "Processing %s.%s model" % (app_name, model._meta.object_name)
                sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
                seen_models.add(model)
                created_models.add(model)
                # Accumulate references to not-yet-created models; emit the
                # deferred ALTERs as soon as the referenced model is seen.
                for refto, refs in references.items():
                    pending_references.setdefault(refto, []).extend(refs)
                    if refto in seen_models:
                        sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
                sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
                if verbosity >= 1 and sql:
                    print "Creating table %s" % model._meta.db_table
                for statement in sql:
                    cursor.execute(statement)
                tables.append(connection.introspection.table_name_converter(model._meta.db_table))

        transaction.commit_unless_managed(using=db)

        # Send the post_syncdb signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_sync_signal(created_models, verbosity, interactive, db)

        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()

        # Install custom SQL for the app (but only if this
        # is a model we've just created)
        if verbosity >= 1:
            print "Installing custom SQL ..."
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    custom_sql = custom_sql_for_model(model, self.style, connection)
                    if custom_sql:
                        if verbosity >= 2:
                            print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
                        try:
                            for sql in custom_sql:
                                cursor.execute(sql)
                        except Exception, e:
                            # Custom SQL failures are reported but do not abort
                            # syncdb; the partial work is rolled back instead.
                            sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
                                    (app_name, model._meta.object_name, e))
                            if show_traceback:
                                import traceback
                                traceback.print_exc()
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)
                    else:
                        if verbosity >= 3:
                            print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)

        if verbosity >= 1:
            print "Installing indexes ..."
        # Install SQL indexes for all newly created models
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    index_sql = connection.creation.sql_indexes_for_model(model, self.style)
                    if index_sql:
                        if verbosity >= 2:
                            print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
                        try:
                            for sql in index_sql:
                                cursor.execute(sql)
                        except Exception, e:
                            # Same best-effort policy as custom SQL above.
                            sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
                                    (app_name, model._meta.object_name, e))
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)

        # Load initial_data fixtures (unless that has been disabled)
        if load_initial_data:
            from django.core.management import call_command
            call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
from django import template, templatetags
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
import inspect, os, re
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
    """Stand-in used when django.contrib.sites is not installed."""
    name = 'my site'
    domain = 'example.com'
def get_root_path():
    """Return the URL prefix of the admin site, with sensible fallbacks."""
    # Preferred: the named 'admin:index' URL of an AdminSite instance.
    try:
        return urlresolvers.reverse('admin:index')
    except urlresolvers.NoReverseMatch:
        pass
    # Fallback for the deprecated function-based admin root view.
    from django.contrib import admin
    try:
        return urlresolvers.reverse(admin.site.root, args=[''])
    except urlresolvers.NoReverseMatch:
        # Last resort: an explicit setting, defaulting to "/admin/".
        return getattr(settings, "ADMIN_SITE_ROOT_URL", "/admin/")
def doc_index(request):
    """Render the admindocs landing page (requires docutils)."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    context = {'root_path': get_root_path()}
    return render_to_response('admin_doc/index.html', context,
                              context_instance=RequestContext(request))
doc_index = staff_member_required(doc_index)
def bookmarklets(request):
    """Render the bookmarklets help page with an absolute admin URL."""
    admin_root = get_root_path()
    scheme = request.is_secure() and 'https' or 'http'
    admin_url = mark_safe("%s://%s%s" % (scheme, request.get_host(), admin_root))
    return render_to_response('admin_doc/bookmarklets.html', {
        'root_path': admin_root,
        'admin_url': admin_url,
    }, context_instance=RequestContext(request))
bookmarklets = staff_member_required(bookmarklets)
def template_tag_index(request):
    """List every registered template tag with its parsed docstring."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    load_all_installed_template_libraries()

    tags = []
    # Built-in libraries first (keyed by None), then app libraries.
    libraries = [(None, lib) for lib in template.builtins] + template.libraries.items()
    for module_name, library in libraries:
        for tag_name, tag_func in library.tags.items():
            title, body, metadata = utils.parse_docstring(tag_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
            if body:
                body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
            if library in template.builtins:
                tag_library = None
            else:
                tag_library = module_name.split('.')[-1]
            tags.append({
                'name': tag_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })

    return render_to_response('admin_doc/template_tag_index.html', {
        'root_path': get_root_path(),
        'tags': tags
    }, context_instance=RequestContext(request))
template_tag_index = staff_member_required(template_tag_index)
def template_filter_index(request):
    """List every registered template filter with its parsed docstring."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    load_all_installed_template_libraries()

    filters = []
    # Built-in libraries first (keyed by None), then app libraries.
    libraries = [(None, lib) for lib in template.builtins] + template.libraries.items()
    for module_name, library in libraries:
        for filter_name, filter_func in library.filters.items():
            title, body, metadata = utils.parse_docstring(filter_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
            if body:
                body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
            if library in template.builtins:
                tag_library = None
            else:
                tag_library = module_name.split('.')[-1]
            filters.append({
                'name': filter_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })

    return render_to_response('admin_doc/template_filter_index.html', {
        'root_path': get_root_path(),
        'filters': filters
    }, context_instance=RequestContext(request))
template_filter_index = staff_member_required(template_filter_index)
def view_index(request):
    """List every view reachable from the configured URLconf(s)."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    # ADMIN_FOR lets one admin document several sites' settings modules.
    if settings.ADMIN_FOR:
        settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
    else:
        settings_modules = [settings]

    views = []
    for settings_mod in settings_modules:
        urlconf = import_module(settings_mod.ROOT_URLCONF)
        if Site._meta.installed:
            site = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site = GenericSite()
        for func, regex in extract_views_from_urlpatterns(urlconf.urlpatterns):
            views.append({
                'name': getattr(func, '__name__', func.__class__.__name__),
                'module': func.__module__,
                'site_id': settings_mod.SITE_ID,
                'site': site,
                'url': simplify_regex(regex),
            })
    return render_to_response('admin_doc/view_index.html', {
        'root_path': get_root_path(),
        'views': views
    }, context_instance=RequestContext(request))
view_index = staff_member_required(view_index)
def view_detail(request, view):
    """Show the parsed docstring of a single view, given its dotted path."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    mod_name, func_name = urlresolvers.get_mod_func(view)
    try:
        view_func = getattr(import_module(mod_name), func_name)
    except (ImportError, AttributeError):
        raise Http404

    title, body, metadata = utils.parse_docstring(view_func.__doc__)
    if title:
        title = utils.parse_rst(title, 'view', _('view:') + view)
    if body:
        body = utils.parse_rst(body, 'view', _('view:') + view)
    for key in metadata:
        # NOTE(review): the 'model' directive name here looks inconsistent
        # with the 'view' directive used above -- confirm it is intentional.
        metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
    return render_to_response('admin_doc/view_detail.html', {
        'root_path': get_root_path(),
        'name': view,
        'summary': title,
        'body': body,
        'meta': metadata,
    }, context_instance=RequestContext(request))
view_detail = staff_member_required(view_detail)
def model_index(request):
    """List the _meta options of every installed model."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    opts_list = [m._meta for m in models.get_models()]
    return render_to_response('admin_doc/model_index.html', {
        'root_path': get_root_path(),
        'models': opts_list
    }, context_instance=RequestContext(request))
model_index = staff_member_required(model_index)
def model_detail(request, app_label, model_name):
    """
    Show a documentation page for one model: its concrete fields,
    many-to-many accessors, zero-argument methods, and related-object
    accessors, each with a parsed description.
    """
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    # Get the model class.
    try:
        app_mod = models.get_app(app_label)
    except ImproperlyConfigured:
        raise Http404(_("App %r not found") % app_label)
    model = None
    # model_name arrives lowercased from the URL, so compare case-insensitively.
    for m in models.get_models(app_mod):
        if m._meta.object_name.lower() == model_name:
            model = m
            break
    if model is None:
        raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})

    opts = model._meta

    # Gather fields/field descriptions.
    fields = []
    for field in opts.fields:
        # ForeignKey is a special case since the field will actually be a
        # descriptor that returns the other object
        if isinstance(field, models.ForeignKey):
            data_type = related_object_name = field.rel.to.__name__
            # NOTE(review): this rebinds the view's `app_label` parameter to
            # the *related* model's app label; harmless here since the
            # parameter is not used again, but worth confirming.
            app_label = field.rel.to._meta.app_label
            verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
        else:
            data_type = get_readable_field_data_type(field)
            verbose = field.verbose_name
        fields.append({
            'name': field.name,
            'data_type': data_type,
            'verbose': verbose,
            'help_text': field.help_text,
        })

    # Gather many-to-many fields: document the `.all` and `.count` accessors.
    for field in opts.many_to_many:
        data_type = related_object_name = field.rel.to.__name__
        app_label = field.rel.to._meta.app_label
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
        fields.append({
            'name': "%s.all" % field.name,
            "data_type": 'List',
            'verbose': utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name' : "%s.count" % field.name,
            'data_type' : 'Integer',
            'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
        })

    # Gather model methods (zero-argument instance methods only).
    for func_name, func in model.__dict__.items():
        if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
            # raise/except StopIteration is used as a "labeled break" out of
            # the exclusion scan straight to the next method.
            try:
                for exclude in MODEL_METHODS_EXCLUDE:
                    if func_name.startswith(exclude):
                        raise StopIteration
            except StopIteration:
                continue
            verbose = func.__doc__
            if verbose:
                verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
            fields.append({
                'name': func_name,
                'data_type': get_return_data_type(func_name),
                'verbose': verbose,
            })

    # Gather related objects (reverse FK and reverse M2M accessors).
    for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
        accessor = rel.get_accessor_name()
        fields.append({
            'name'      : "%s.all" % accessor,
            'data_type' : 'List',
            'verbose'   : utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name'      : "%s.count" % accessor,
            'data_type' : 'Integer',
            'verbose'   : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
        })
    return render_to_response('admin_doc/model_detail.html', {
        'root_path': get_root_path(),
        'name': '%s.%s' % (opts.app_label, opts.object_name),
        'summary': _("Fields on %s objects") % opts.object_name,
        'description': model.__doc__,
        'fields': fields,
    }, context_instance=RequestContext(request))
model_detail = staff_member_required(model_detail)
def template_detail(request, template):
    """
    Show where a template of the given name would be found in each
    TEMPLATE_DIRS of each site this admin documents, and its contents
    where it exists.
    """
    templates = []
    for site_settings_module in settings.ADMIN_FOR:
        settings_mod = import_module(site_settings_module)
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site_obj = GenericSite()
        for dir in settings_mod.TEMPLATE_DIRS:
            template_file = os.path.join(dir, template)
            templates.append({
                'file': template_file,
                'exists': os.path.exists(template_file),
                # Bind template_file as a default argument: a plain closure
                # here is late-binding, so every row's 'contents' would read
                # the *last* template_file assigned by this loop.
                'contents': lambda template_file=template_file: os.path.exists(template_file) and open(template_file).read() or '',
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
            })
    return render_to_response('admin_doc/template_detail.html', {
        'root_path': get_root_path(),
        'name': template,
        'templates': templates,
    }, context_instance=RequestContext(request))
template_detail = staff_member_required(template_detail)
####################
# Helper functions #
####################
def missing_docutils_page(request):
    """Display an error message for people without docutils installed."""
    # No RequestContext needed: the page is static.
    return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
    """
    Load and register every template tag library found in the
    ``templatetags`` packages of the installed apps, so the index views
    can introspect them.
    """
    for module_name in template.get_templatetags_modules():
        mod = import_module(module_name)
        # Candidate library modules: every importable .py next to the
        # templatetags package's __init__ (name must start with a letter).
        libraries = [
            os.path.splitext(p)[0]
            for p in os.listdir(os.path.dirname(mod.__file__))
            if p.endswith('.py') and p[0].isalpha()
        ]
        for library_name in libraries:
            try:
                # get_library() registers the library as a side effect; the
                # return value is not needed here.
                template.get_library(library_name)
            except template.InvalidTemplateLibrary:
                # A broken individual library shouldn't abort introspection
                # of the remaining ones; skip it deliberately.
                pass
def get_return_data_type(func_name):
    """Return a somewhat-helpful data type guessed from a function name."""
    if func_name.startswith('get_'):
        # Map conventional accessor suffixes to display types.
        for suffix, type_name in (('_list', 'List'), ('_count', 'Integer')):
            if func_name.endswith(suffix):
                return type_name
    return ''
def get_readable_field_data_type(field):
    """
    Return the human-readable description of a field's type.

    Field descriptions may contain %-style placeholders, which are
    interpolated against the field instance's attribute dictionary.
    """
    description_template = field.description
    return description_template % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
    """
    Return a list of views from a list of urlpatterns.

    Each object in the returned list is a two-tuple: (view_func, regex)
    """
    views = []
    for p in urlpatterns:
        if hasattr(p, '_get_callback'):
            # A URL pattern: resolve its callback; skip entries whose view
            # cannot be imported.
            try:
                views.append((p._get_callback(), base + p.regex.pattern))
            except ViewDoesNotExist:
                continue
        elif hasattr(p, '_get_url_patterns'):
            # A URL resolver (an include()): recurse, prefixing the
            # enclosing pattern; skip includes that fail to import.
            try:
                patterns = p.url_patterns
            except ImportError:
                continue
            views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
        else:
            raise TypeError(_("%s does not appear to be a urlpattern object") % p)
    return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')

def simplify_regex(pattern):
    r"""
    Clean up urlpattern regexes into something somewhat readable by Mere
    Humans: turns something like
    "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
    into "<sport_slug>/athletes/<athlete_slug>/".
    """
    # Named groups keep their <name>; unnamed groups become a generic <var>.
    pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
    pattern = non_named_group_matcher.sub("<var>", pattern)
    # Strip the remaining regex metacharacters (order matters for '//').
    for old, new in (('^', ''), ('$', ''), ('?', ''), ('//', '/'), ('\\', '')):
        pattern = pattern.replace(old, new)
    if not pattern.startswith('/'):
        pattern = '/' + pattern
    return pattern
from django.conf import settings
from django.db.backends.postgresql.creation import DatabaseCreation
class PostGISCreation(DatabaseCreation):
    """
    Database creation backend for PostGIS: emits the AddGeometryColumn /
    spatial-index DDL that geometry fields need on top of the regular
    PostgreSQL table creation SQL.
    """
    geom_index_type = 'GIST'
    geom_index_opts = 'GIST_GEOMETRY_OPS'

    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField

        output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)

        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table

            if f.geography:
                # Geography columns are created normally.
                pass
            else:
                # Geometry columns are created by the `AddGeometryColumn`
                # stored procedure.
                output.append(style.SQL_KEYWORD('SELECT ') +
                              style.SQL_TABLE('AddGeometryColumn') + '(' +
                              style.SQL_TABLE(gqn(db_table)) + ', ' +
                              style.SQL_FIELD(gqn(f.column)) + ', ' +
                              style.SQL_FIELD(str(f.srid)) + ', ' +
                              style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
                              style.SQL_KEYWORD(str(f.dim)) + ');')

                if not f.null:
                    # Add a NOT NULL constraint to the field
                    # (AddGeometryColumn creates the column nullable).
                    output.append(style.SQL_KEYWORD('ALTER TABLE ') +
                                  style.SQL_TABLE(qn(db_table)) +
                                  style.SQL_KEYWORD(' ALTER ') +
                                  style.SQL_FIELD(qn(f.column)) +
                                  style.SQL_KEYWORD(' SET NOT NULL') + ';')

            if f.spatial_index:
                # Spatial indexes are created the same way for both Geometry
                # and Geography columns.
                if f.geography:
                    index_opts = ''
                else:
                    index_opts = ' ' + style.SQL_KEYWORD(self.geom_index_opts)
                output.append(style.SQL_KEYWORD('CREATE INDEX ') +
                              style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
                              style.SQL_KEYWORD(' ON ') +
                              style.SQL_TABLE(qn(db_table)) +
                              style.SQL_KEYWORD(' USING ') +
                              style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
                              style.SQL_FIELD(qn(f.column)) + index_opts + ' );')
        return output

    def sql_table_creation_suffix(self):
        # Test databases are cloned from a PostGIS-enabled template database.
        qn = self.connection.ops.quote_name
        return ' TEMPLATE %s' % qn(getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis'))
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
#
# NOTE(review): several of these display formats ('N j, Y', 'P',
# FIRST_DAY_OF_WEEK = 0 / Sunday) look US-style rather than en_GB
# conventions -- confirm against the locale before changing, since any
# change here alters rendered output.
DATE_FORMAT = 'N j, Y'                  # 'Oct. 25, 2006'
TIME_FORMAT = 'P'                       # '2:30 pm'
DATETIME_FORMAT = 'N j, Y, P'           # 'Oct. 25, 2006, 2:30 pm'
YEAR_MONTH_FORMAT = 'F Y'               # 'October 2006'
MONTH_DAY_FORMAT = 'F j'                # 'October 25'
SHORT_DATE_FORMAT = 'd/m/Y'             # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P'       # '25/10/2006 2:30 pm'
FIRST_DAY_OF_WEEK = 0                   # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y',             # '25/10/2006', '25/10/06'
    '%Y-%m-%d',                         # '2006-10-25'
    # '%b %d %Y', '%b %d, %Y',          # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',          # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',          # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',          # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',                         # '14:30:59'
    '%H:%M',                            # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',                # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',                   # '2006-10-25 14:30'
    '%Y-%m-%d',                         # '2006-10-25'
    '%d/%m/%Y %H:%M:%S',                # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M',                   # '25/10/2006 14:30'
    '%d/%m/%Y',                         # '25/10/2006'
    '%d/%m/%y %H:%M:%S',                # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M',                   # '25/10/06 14:30'
    '%d/%m/%y',                         # '25/10/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
# This is a slightly modified version of the doctest.py that shipped with Python 2.4
# It incorporates changes that have been submitted to the Python ticket tracker
# as ticket #1521051. These changes allow for a DoctestRunner and Doctest base
# class to be specified when constructing a DoctestSuite.
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'

# Public API of this module, grouped to mirror the section layout below.
__all__ = [
    # 0, Option Flags
    'register_optionflag',
    'DONT_ACCEPT_TRUE_FOR_1',
    'DONT_ACCEPT_BLANKLINE',
    'NORMALIZE_WHITESPACE',
    'ELLIPSIS',
    'IGNORE_EXCEPTION_DETAIL',
    'COMPARISON_FLAGS',
    'REPORT_UDIFF',
    'REPORT_CDIFF',
    'REPORT_NDIFF',
    'REPORT_ONLY_FIRST_FAILURE',
    'REPORTING_FLAGS',
    # 1. Utility Functions
    'is_private',
    # 2. Example & DocTest
    'Example',
    'DocTest',
    # 3. Doctest Parser
    'DocTestParser',
    # 4. Doctest Finder
    'DocTestFinder',
    # 5. Doctest Runner
    'DocTestRunner',
    'OutputChecker',
    'DocTestFailure',
    'UnexpectedException',
    'DebugRunner',
    # 6. Test Functions
    'testmod',
    'testfile',
    'run_docstring_examples',
    # 7. Tester
    'Tester',
    # 8. Unittest Support
    'DocTestSuite',
    'DocFileSuite',
    'set_unittest_reportflags',
    # 9. Debugging Support
    'script_from_examples',
    'testsource',
    'debug_src',
    'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
if sys.platform.startswith('java'):
    # On Jython, isclass() reports some modules as classes. Patch it so the
    # doctest finder doesn't descend into modules as if they were classes.
    def patch_isclass(isclass):
        def patched_isclass(obj):
            # Real classes always carry __module__; Jython's fake ones don't.
            return isclass(obj) and hasattr(obj, '__module__')
        return patched_isclass
    inspect.isclass = patch_isclass(inspect.isclass)

# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
                        __name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
# Option constants: name -> flag bit, filled in registration order.
OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    """Allocate the next free bit as the flag for `name` and record it."""
    bit = 1 << len(OPTIONFLAGS_BY_NAME)
    OPTIONFLAGS_BY_NAME[name] = bit
    return bit
# NOTE: flag bits are assigned in registration order, so the order of these
# register_optionflag() calls is significant.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

# Flags that affect how expected and actual output are compared.
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    IGNORE_EXCEPTION_DETAIL)

REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')

# Flags that affect how failures are reported.
REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
    """prefix, base -> true iff name prefix + "." + base is "private".

    Prefix may be an empty string, and base does not contain a period.
    Prefix is ignored (although functions you write conforming to this
    protocol may make use of it).
    Return true iff base begins with an (at least one) underscore, but
    does not both begin and end with (at least) two underscores.

    >>> is_private("a.b", "my_func")
    False
    >>> is_private("____", "_my_func")
    True
    >>> is_private("someclass", "__init__")
    False
    >>> is_private("sometypo", "__init_")
    True
    >>> is_private("x.y.z", "_")
    True
    >>> is_private("_x.y.z", "__")
    False
    >>> is_private("", "")  # senseless but consistent
    False
    """
    warnings.warn("is_private is deprecated; it wasn't useful; "
                  "examine DocTestFinder.find() lists instead",
                  DeprecationWarning, stacklevel=2)
    has_leading_underscore = base.startswith("_")
    is_double_underscored = base.startswith("__") and base[-2:] == "__"
    return has_leading_underscore and not is_double_underscored
def _extract_future_flags(globs):
    """
    Return the compiler-flags associated with the future features that
    have been imported into the given namespace (globs).
    """
    flags = 0
    for feature_name in __future__.all_feature_names:
        feature = getattr(__future__, feature_name)
        # Only count the feature if globs actually holds that exact object.
        if globs.get(feature_name, None) is feature:
            flags |= feature.compiler_flag
    return flags
def _normalize_module(module, depth=2):
    """
    Return the module specified by `module`:

      - a module object is returned unchanged;
      - a string is imported and the resulting module is returned;
      - None means "the calling module", taken from the stack frame
        `depth` levels up.
    """
    if inspect.ismodule(module):
        return module
    if isinstance(module, (str, unicode)):
        return __import__(module, globals(), locals(), ["*"])
    if module is None:
        return sys.modules[sys._getframe(depth).f_globals['__name__']]
    raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
    """
    Prepend `indent` space characters to every non-blank line in `s`
    and return the result.
    """
    # The multiline pattern matches the start of each non-empty line only.
    return re.sub('(?m)^(?!$)', ' ' * indent, s)
def _exception_traceback(exc_info):
    """
    Return the formatted traceback message for the given exc_info tuple
    (as returned by sys.exc_info()).
    """
    buf = StringIO()
    exc_type, exc_val, exc_tb = exc_info
    traceback.print_exception(exc_type, exc_val, exc_tb, file=buf)
    return buf.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    """StringIO that guarantees a trailing newline in getvalue() and keeps
    print's `softspace` bookkeeping from leaking between examples."""

    def getvalue(self):
        result = StringIO.getvalue(self)
        # The expected output can never indicate a missing final newline,
        # so make sure captured output always ends with one.
        if result and not result.endswith("\n"):
            result = result + "\n"
        # Drop `softspace` so a trailing-comma print in one example can't
        # affect the next one.
        if hasattr(self, "softspace"):
            del self.softspace
        return result

    def truncate(self, size=None):
        StringIO.truncate(self, size)
        if hasattr(self, "softspace"):
            del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Return True iff `got` matches `want`, treating each ELLIPSIS_MARKER
    in `want` as "match any substring".  Worst-case linear time.

    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got

    # Find "the real" strings.
    ws = want.split(ELLIPSIS_MARKER)
    assert len(ws) >= 2

    # Deal with exact matches possibly needed at one or both ends.
    startpos, endpos = 0, len(got)
    w = ws[0]
    if w:   # starts with exact match
        if got.startswith(w):
            startpos = len(w)
            del ws[0]
        else:
            return False
    w = ws[-1]
    if w:   # ends with exact match
        if got.endswith(w):
            endpos -= len(w)
            del ws[-1]
        else:
            return False

    if startpos > endpos:
        # Exact end matches required more characters than we have, as in
        # _ellipsis_match('aa...aa', 'aaa')
        return False

    # For the rest, we only need to find the leftmost non-overlapping
    # match for each piece.  If there's no overall match that way alone,
    # there's no overall match period.
    for w in ws:
        # w may be '' at times, if there are consecutive ellipses, or
        # due to an ellipsis at the start or end of `want`.  That's OK.
        # Search for an empty string succeeds, and doesn't change startpos.
        startpos = got.find(w, startpos, endpos)
        if startpos < 0:
            return False
        startpos += len(w)

    return True
def _comment_line(line):
    "Return a commented form of the given line"
    stripped = line.rstrip()
    if not stripped:
        # A blank line becomes a bare comment marker.
        return '#'
    return '# ' + stripped
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream while interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        self.__redirect = out
        self.__entered = False
        pdb.Pdb.__init__(self)

    def set_trace(self):
        self.__entered = True
        pdb.Pdb.set_trace(self)

    def set_continue(self):
        # Bdb.set_continue calls sys.settrace(None); doing that when the
        # debugger was never entered would break tracing-based tools such
        # as unit-test coverage reporting.
        if self.__entered:
            pdb.Pdb.set_continue(self)

    def trace_dispatch(self, *args):
        # Swap stdout only for the debugger's own interaction.
        real_stdout = sys.stdout
        sys.stdout = self.__redirect
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            sys.stdout = real_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
    """
    Convert a '/'-separated `path`, relative to `module`'s directory,
    into an OS-specific path.  Raises TypeError for non-modules and
    ValueError for absolute or unresolvable paths.
    """
    if not inspect.ismodule(module):
        raise TypeError('Expected a module: %r' % module)
    if path.startswith('/'):
        raise ValueError('Module-relative files may not have absolute paths')

    # Pick the directory the path is relative to.
    if hasattr(module, '__file__'):
        # A normal module or package.
        basedir = os.path.dirname(module.__file__)
    elif module.__name__ == '__main__':
        # An interactive session.
        if sys.argv and sys.argv[0] != '':
            basedir = os.path.dirname(sys.argv[0])
        else:
            basedir = os.curdir
    else:
        # A module w/o __file__ (this includes builtins)
        raise ValueError("Can't resolve paths relative to the module " +
                         module + " (it has no __file__)")

    # Combine the base directory and the path.
    return os.path.join(basedir, *path.split('/'))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    One doctest example: a fragment of source code together with the
    output it is expected to produce.  Attributes:

      - source: a single Python statement; always newline-terminated
        (the constructor appends a newline when missing).
      - want: the expected output (stdout text, or a traceback when an
        exception is expected); ends with a newline unless it is the
        empty string.  The constructor appends a newline when missing.
      - exc_msg: the expected exception message (in the form produced
        by traceback.format_exception_only()), or None when no
        exception is expected; newline-terminated when not None.
      - lineno: zero-based line number of this example within the
        DocTest string it was extracted from.
      - indent: number of space characters preceding the example's
        first prompt.
      - options: mapping from option flags to True/False, overriding
        the runner's default optionflags for this example only.
        Defaults to an empty mapping.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Guarantee newline termination on the textual fields.
        if not source.endswith('\n'):
            source = source + '\n'
        if want and not want.endswith('\n'):
            want = want + '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg = exc_msg + '\n'
        # Record the (normalized) example data.
        if options is None:
            options = {}
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        self.options = options
        self.exc_msg = exc_msg
class DocTest:
    """
    A group of doctest examples that should run in one shared
    namespace.  Attributes:

      - examples: the list of Example objects.
      - globs: the globals dict (namespace) the examples run in.
      - name: an identifying name (typically the dotted name of the
        object whose docstring the examples came from).
      - filename: the file the examples were extracted from, or None
        when unknown.
      - lineno: zero-based line number within `filename` where this
        doctest begins, or None when unavailable.
      - docstring: the string the examples were parsed out of, or
        None when unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Build a DocTest holding `examples`; the test's globals are a
        shallow copy of `globs`.
        """
        assert not isinstance(examples, basestring), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        count = len(self.examples)
        if count == 0:
            examples = 'no examples'
        elif count == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % count
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))

    # Support sorting tests by (name, filename, lineno, identity).
    def __cmp__(self, other):
        if not isinstance(other, DocTest):
            return -1
        return cmp((self.name, self.filename, self.lineno, id(self)),
                   (other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
    """
    A class used to parse strings containing doctest examples.

    `parse()` splits a string into alternating plain-text strings and
    `Example` objects; `get_examples()` and `get_doctest()` are thin
    convenience wrappers around it.
    """
    # This regular expression is used to find doctest examples in a
    # string. It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
            (?:\n [ ]* \.\.\. .*)*) # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$) # Not a blank line
            (?![ ]*>>>) # Not a line starting with PS1
            .*$\n? # But any other line
            )*)
        ''', re.MULTILINE | re.VERBOSE)
    # A regular expression for handling `want` strings that contain
    # expected exceptions. It divides `want` into three pieces:
    # - the traceback header line (`hdr`)
    # - the traceback stack (`stack`)
    # - the exception message (`msg`), as generated by
    # traceback.format_exception_only()
    # `msg` may have multiple lines. We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header. Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            | innermost\ last
            ) \) :
        )
        \s* $ # toss trailing whitespace on the header.
        (?P<stack> .*?) # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based. The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        # Expand tabs first so that all column arithmetic below (indent
        # detection, prompt checks) operates on spaces only.
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
        output = []
        # charno/lineno track the current position in `string`, in
        # characters and in lines respectively.
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                    lineno=lineno,
                    indent=min_indent+len(m.group('indent')),
                    options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output
    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.
        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object. See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)
    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects. Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.
        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a tuple `(source, options, want, exc_msg)`, where
        `source` is the matched example's source code (with prompts
        and indentation stripped); `options` is the dict of option
        directives found in the source; `want` is the example's
        expected output (with indentation stripped); and `exc_msg` is
        the expected exception message, or None.
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))
        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the 4-character prompt
        # ('>>> ' or '... ') at the start of each source line.
        source = '\n'.join([sl[indent+4:] for sl in source_lines])
        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation. Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1] # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])
        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None
        # Extract options from the source.
        options = self._find_options(source, name, lineno)
        return source, options, want, exc_msg
    # This regular expression looks for option directives in the
    # source code of an example. Option directives are comments
    # starting with "doctest:". Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:". Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)
    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        Raises ValueError for an unknown option name, or for an option
        directive appearing on a line with no actual example code.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                # Each option must be '+NAME' or '-NAME' where NAME is a
                # registered option flag.
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options
    # This regular expression finds the indentation of every non-blank
    # line in a string.
    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            return 0
    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character. If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            # line[indent:indent+3] is the prompt ('>>>' or '...');
            # line[indent+3] must be the blank that separates it from code.
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))
    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects. Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """
    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, _namefilter=None, exclude_empty=True):
        """
        Create a new doctest finder.
        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest). The
        signature for this factory function should match the signature
        of the DocTest constructor.
        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.
        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
        # _namefilter is undocumented, and exists only for temporary backward-
        # compatibility support of testmod's deprecated isprivate mess.
        self._namefilter = _namefilter
    def find(self, obj, name=None, module=None, globs=None,
             extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.
        The optional parameter `module` is the module that contains
        the given object. If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module. The object's module is used:
        - As a default namespace, if `globs` is not specified.
        - To prevent the DocTestFinder from extracting DocTests
        from objects that are imported from other modules.
        - To find the name of the file containing the object.
        - To help find the line number of the object within its
        file.
        Contained objects whose module does not match `module` are ignored.
        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests: if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.
        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`). A new copy of the globals dictionary is created
        for each DocTest. If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise. If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))
        # Find the module that contains the given object (if obj is
        # a module, then module=obj.). Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)
        # Read the module's source code. This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            # Built-in objects have no source file; carry on without one.
            source_lines = None
        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        return tests
    def _filter(self, obj, prefix, base):
        """
        Return true if the given object should not be examined.
        """
        # Delegates to the deprecated _namefilter hook when one was given.
        return (self._namefilter is not None and
                self._namefilter(prefix, base))
    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.isfunction(object):
            # func_globals is the Python 2 spelling of __globals__.
            return module.__dict__ is object.func_globals
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print 'Finding tests in %s' % name
        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, basestring):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, basestring)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                # Special handling for staticmethod/classmethod:
                # __dict__ holds the raw descriptor, so fetch the
                # underlying function via getattr instead.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    # im_func is the Python 2 spelling of __func__.
                    val = getattr(obj, valname).im_func
                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring. If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, basestring):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, basestring):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''
        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)
        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None
        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Point at the source file rather than a compiled .pyc/.pyo.
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring. Note:
        this method assumes that the object has a docstring.
        """
        lineno = None
        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0
        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break
        # Find the line number for functions & methods.
        # (im_func/func_code are the Python 2 attribute names.)
        if inspect.ismethod(obj): obj = obj.im_func
        if inspect.isfunction(obj): obj = obj.func_code
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            # co_firstlineno is 1-based; our line numbers are 0-based.
            lineno = getattr(obj, 'co_firstlineno', None)-1
        # Find the line number where the docstring starts. Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno
        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> for test in tests:
... print runner.run(test)
(0, 2)
(0, 1)
(0, 2)
(0, 2)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
(0, 7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out) can be passed to
`TestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
else:
if sys.version_info < (2, 5, 0):
return self.save_linecache_getlines(filename)
else:
return self.save_linecache_getlines(filename, module_globals)
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.
        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.
        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.
        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        # Remembered so __patched_linecache_getlines can recognize this
        # test's synthetic filenames.
        self.test = test
        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        # Redirect example output into our capture buffer.
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        try:
            return self.__run(test, compileflags, out)
        finally:
            # Undo all three patches even if the run raised; the globs
            # are likewise cleared on both success and failure here.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()
    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.
        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        # Partition the recorded tests into untested / all-passed / failed.
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print " ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        # Failures are always reported, regardless of verbosity.
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return totalf, totalt
    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        """Fold `other`'s per-test (failures, tries) counts into this
        runner; used by testmod/testfile to keep the module-level
        `master` runner up to date."""
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                # Same test recorded by both runners: warn and sum.
                print "*** DocTestRunner.merge: '" + name + "' in both" \
                    " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    Compares the actual output of a doctest example against the
    expected output.  Two public entry points: `check_output`, which
    returns true when a pair of outputs match (possibly inexactly,
    depending on option flags); and `output_difference`, which renders
    a human-readable description of how two outputs differ.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  Identical strings always
        match; beyond that, `optionflags` enables several inexact match
        types (True/1 equivalence, <BLANKLINE> markers, whitespace
        normalization, and ellipsis).  See `TestRunner`'s documentation
        for details on the flags.
        """
        # Cheapest test first: exact equality.
        if want == got:
            return True
        # Unless disabled, let "True"/"False" match "1"/"0" -- bool
        # results replaced ints for comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got, want) in (("True\n", "1\n"), ("False\n", "0\n")):
                return True
        # Unless disabled, <BLANKLINE> in the expected output stands in
        # for an empty line; all-space lines in `got` count as blank.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            got = re.sub('(?m)^\s*?$', '', got)
            if want == got:
                return True
        # Collapse every run of whitespace to a single space before
        # comparing; combines usefully with ELLIPSIS.
        if optionflags & NORMALIZE_WHITESPACE:
            want = ' '.join(want.split())
            got = ' '.join(got.split())
            if want == got:
                return True
        # "..." in the expected output matches any substring of `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True
        # Every strategy failed.
        return False
    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        """Return True when a difflib-style diff should be rendered for
        this (want, got) pair under the given flags."""
        # Only when one of the diff-style report flags was requested.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False
        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False
        # ndiff marks differences within a line, so it pays off even
        # for single-line outputs.
        if optionflags & REPORT_NDIFF:
            return True
        # The other diff styles need a few lines each to be readable.
        return want.count('\n') > 2 and got.count('\n') > 2
    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE> markers are in play, reinsert them into the
        # actual output so the two sides line up visually.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
        if self._do_a_fancy_diff(want, got, optionflags):
            # Diff line-by-line, keeping line endings.
            want_lines = want.splitlines(True)
            got_lines = got.splitlines(True)
            if optionflags & REPORT_UDIFF:
                kind = 'unified diff with -expected +actual'
                # [2:] drops difflib's two-line file header.
                diff = list(difflib.unified_diff(want_lines, got_lines,
                                                 n=2))[2:]
            elif optionflags & REPORT_CDIFF:
                kind = 'context diff with expected followed by actual'
                diff = list(difflib.context_diff(want_lines, got_lines,
                                                 n=2))[2:]
            elif optionflags & REPORT_NDIFF:
                kind = 'ndiff with -expected +actual'
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
            else:
                assert 0, 'Bad diff option'
            # Strip trailing whitespace from each diff line.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
        # No fancy diff: just show expected, then actual.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        if want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        if got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """A DocTest example has failed in debugging mode.
    Instance attributes:
    - test: the DocTest object being run
    - example: the Example object that failed
    - got: the actual output
    """
    def __init__(self, test, example, got):
        # Carry the full context of the failure so callers can inspect
        # exactly which example produced which unexpected output.
        self.test = test
        self.example = example
        self.got = got
    def __str__(self):
        return str(self.test)
class UnexpectedException(Exception):
    """A DocTest example has encountered an unexpected exception.
    Instance attributes:
    - test: the DocTest object being run
    - example: the Example object that failed
    - exc_info: the (type, value, traceback) tuple of the exception
    """
    def __init__(self, test, example, exc_info):
        # Preserve the original exception info so callers can re-raise
        # or post-mortem debug it.
        self.test = test
        self.example = example
        self.exc_info = exc_info
    def __str__(self):
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
    >>> runner = DebugRunner(verbose=False)
    >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    ...                                    {}, 'foo', 'foo.py', 0)
    >>> try:
    ...     runner.run(test)
    ... except UnexpectedException, failure:
    ...     pass
    >>> failure.test is test
    True
    >>> failure.example.want
    '42\n'
    >>> exc_info = failure.exc_info
    >>> raise exc_info[0], exc_info[1], exc_info[2]
    Traceback (most recent call last):
    ...
    KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 1
    ...      >>> x
    ...      2
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> try:
    ...    runner.run(test)
    ... except DocTestFailure, failure:
    ...    pass
    DocTestFailure objects provide access to the test:
    >>> failure.test is test
    True
    As well as to the example:
    >>> failure.example.want
    '2\n'
    and the actual output:
    >>> failure.got
    '1\n'
    If a failure or error occurs, the globals are left intact:
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 1}
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 2
    ...      >>> raise KeyError
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    Traceback (most recent call last):
    ...
    UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 2}
    But the globals are cleared if there is no error:
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 2
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    (0, 1)
    >>> test.globs
    {}
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Always pass clear_globs=False to the base run and clear here
        # instead: if a report_* hook raised, we never reach the clear,
        # which deliberately leaves test.globs intact for post-mortem
        # inspection (see the class docstring).
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r
    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run immediately, wrapping the original exception.
        raise UnexpectedException(test, example, exc_info)
    def report_failure(self, out, test, example, got):
        # Abort the run immediately on the first output mismatch.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.  testmod/testfile merge their results
# into this runner so repeated calls accumulate a single summary.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, isprivate=None,
       report=True, optionflags=0, extraglobs=None, raise_on_error=False,
       exclude_empty=False
    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.  Unless isprivate is specified, private names
    are not skipped.
    Also test examples reachable from dict m.__test__ if it exists and is
    not None.  m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.
    Return (#failures, #tests).
    See doctest.__doc__ for an overview.
    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__.  A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.  This is new in 2.4.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  This is new in 2.3.  Possible values (see the
    docs for details):
        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.
    Deprecated in Python 2.4:
    Optional keyword arg "isprivate" specifies a function used to
    determine whether a name is private.  The default function is
    treat all functions as public.  Optionally, "isprivate" can be
    set to doctest.is_private to skip over functions marked as private
    using the underscore naming convention; see its docs for details.
    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master
    if isprivate is not None:
        warnings.warn("the isprivate argument is deprecated; "
                      "examine DocTestFinder.find() lists instead",
                      DeprecationWarning)
    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')
    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))
    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__
    # Find, parse, and run all tests in the given module.
    finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
    # DebugRunner raises on the first problem; DocTestRunner reports.
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)
    if report:
        runner.summarize()
    # Keep the backward-compatibility module-level `master` runner in
    # sync with this run's results.
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return runner.failures, runner.tries
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
    """
    Test examples in the given file.  Return (#failures, #tests).
    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:
      - If "module_relative" is True (the default), then "filename"
         specifies a module-relative path.  By default, this path is
         relative to the calling module's directory; but if the
         "package" argument is specified, then it is relative to that
         package.  To ensure os-independence, "filename" should use
         "/" characters to separate path segments, and should not
         be an absolute path (i.e., it may not begin with "/").
      - If "module_relative" is False, then "filename" specifies an
        os-specific path.  The path may be absolute or relative (to
        the current working directory).
    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.
    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify "package" if "module_relative" is False.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):
        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.
    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.
    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path
    if module_relative:
        package = _normalize_module(package)
        filename = _module_relative_path(package, filename)
    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)
    # Assemble the globals.  Copy the caller's dict so the run can't
    # mutate it.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    # Read the file, convert it to a test, and run it.
    s = open(filename).read()
    test = parser.get_doctest(s, globs, name, filename, 0)
    runner.run(test)
    if report:
        runner.summarize()
    # Merge this run into the backward-compatibility `master` runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Test examples in the given object's docstring (`f`), using `globs`
    as globals.  Optional argument `name` is used in failure messages.
    If the optional argument `verbose` is true, then generate output
    even if there are no failures.
    `compileflags` gives the set of flags that should be used by the
    Python compiler when running the examples.  If not specified, then
    it will default to the set of future-import flags that apply to
    `globs`.
    Optional keyword arg `optionflags` specifies options for the
    testing and output.  See the documentation for `testmod` for more
    information.
    """
    # Only f's own docstring is examined: the finder does not recurse
    # into contained objects.
    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    finder = DocTestFinder(verbose=verbose, recurse=False)
    for test in finder.find(f, name, globs=globs):
        runner.run(test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility.  It's not
# actually used in any way.
class Tester:
    """Deprecated facade kept for backward compatibility; delegates to
    DocTestFinder/DocTestRunner.  Use DocTestRunner directly instead."""
    def __init__(self, mod=None, globs=None, verbose=None,
                 isprivate=None, optionflags=0):
        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs
        self.verbose = verbose
        self.isprivate = isprivate
        self.optionflags = optionflags
        self.testfinder = DocTestFinder(_namefilter=isprivate)
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)
    def runstring(self, s, name):
        # Parse `s` as a doctest and run it under `name`.
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return (f,t)
    def rundoc(self, object, name=None, module=None):
        # Run every doctest found in `object`, summing the counts.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return (f,t)
    def rundict(self, d, name, module=None):
        # Wrap the dict in a throwaway module so the finder can walk it.
        import new
        m = new.module(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)
    def run__test__(self, d, name):
        # Run the entries of a __test__-style mapping.
        import new
        m = new.module(name)
        m.__test__ = d
        return self.rundoc(m, name)
    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)
    def merge(self, other):
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
# Default reporting flags applied by DocTestCase.runTest when the case
# was built without any reporting flags of its own; changed via
# set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
      >>> old = _unittest_reportflags
      >>> set_unittest_reportflags(REPORT_NDIFF |
      ...                          REPORT_ONLY_FIRST_FAILURE) == old
      True
      >>> import doctest
      >>> doctest._unittest_reportflags == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True
    Only reporting flags can be set:
      >>> set_unittest_reportflags(ELLIPSIS)
      Traceback (most recent call last):
      ...
      ValueError: ('Only reporting flags allowed', 8)
      >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True
    """
    global _unittest_reportflags
    # Refuse any bit that is not part of the reporting-flag mask.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    previous = _unittest_reportflags
    _unittest_reportflags = flags
    return previous
class DocTestCase(unittest.TestCase):
    # Wraps a single DocTest in a unittest.TestCase so doctests can run
    # inside any unittest-based harness.
    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None, runner=DocTestRunner):
        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown
        self._dt_runner = runner
    def setUp(self):
        # User-supplied setUp hook receives the DocTest object itself.
        test = self._dt_test
        if self._dt_setUp is not None:
            self._dt_setUp(test)
    def tearDown(self):
        test = self._dt_test
        if self._dt_tearDown is not None:
            self._dt_tearDown(test)
        # Always drop the test's globals to help garbage collection.
        test.globs.clear()
    def runTest(self):
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags
        runner = self._dt_runner(optionflags=optionflags,
                                 checker=self._dt_checker, verbose=False)
        # NOTE(review): sys.stdout is never actually set to `new` here;
        # the runner does its own stdout redirection, and output is
        # captured via out=new.write.  Restoring `old` below is
        # defensive.  Also, if runner.run raises (e.g. a DebugRunner),
        # `failures` is unbound after the finally -- presumably only
        # reporting runners are used here; confirm before relying on it.
        try:
            runner.DIVIDER = "-"*70
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old
        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))
    def format_failure(self, err):
        # Render a unittest-style failure header for the captured
        # doctest report `err`.
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                '  File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )
    def debug(self):
        r"""Run the test case without results and without catching exceptions
        The unit test framework includes a debug method on test cases
        and test suites to support post-mortem debugging.  The test code
        is run in such a way that errors are not caught.  This way a
        caller can catch the errors and initiate post-mortem debugging.
        The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexepcted
        exception:
        >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
        ...                                    {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...     case.debug()
        ... except UnexpectedException, failure:
        ...     pass
        The UnexpectedException contains the test, the example, and
        the original exception:
        >>> failure.test is test
        True
        >>> failure.example.want
        '42\n'
        >>> exc_info = failure.exc_info
        >>> raise exc_info[0], exc_info[1], exc_info[2]
        Traceback (most recent call last):
        ...
        KeyError
        If the output doesn't match, then a DocTestFailure is raised:
        >>> test = DocTestParser().get_doctest('''
        ...      >>> x = 1
        ...      >>> x
        ...      2
        ...      ''', {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...    case.debug()
        ... except DocTestFailure, failure:
        ...    pass
        DocTestFailure objects provide access to the test:
        >>> failure.test is test
        True
        As well as to the example:
        >>> failure.example.want
        '2\n'
        and the actual output:
        >>> failure.got
        '1\n'
        """
        self.setUp()
        # DebugRunner raises instead of reporting, so errors propagate
        # to the caller for post-mortem debugging.
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test)
        self.tearDown()
    def id(self):
        return self._dt_test.name
    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
    __str__ = __repr__
    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 test_class=DocTestCase, **options):
    """
    Convert doctest tests for a module to a unittest test suite.
    This converts each documentation string in a module that
    contains doctest tests to a unittest test case.  If any of the
    tests in a doc string fail, then the test case fails.  An exception
    is raised showing the name of the file containing the test and a
    (sometimes approximate) line number.
    The `module` argument provides the module to be tested.  The argument
    can be either a module or a module name.
    If no argument is given, the calling module is used.
    A number of options may be provided as keyword arguments:
    setUp
      A set-up function.  This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.
    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.
    globs
      A dictionary containing initial global variables for the tests.
    optionflags
       A set of doctest option flags expressed as an integer.
    """
    if test_finder is None:
        test_finder = DocTestFinder()
    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if globs is None:
        globs = module.__dict__
    if not tests:
        # Why do we want to do this? Because it reveals a bug that might
        # otherwise be hidden.
        raise ValueError(module, "has no tests")
    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        if len(test.examples) == 0:
            continue
        if not test.filename:
            # Fall back to the module's file; point at the .py source
            # rather than a compiled .pyc/.pyo.
            filename = module.__file__
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(test_class(test, **options))
    return suite
class DocFileCase(DocTestCase):
    """A DocTestCase whose doctest came from a file rather than a
    docstring; identification and failure headers use the file name."""
    def id(self):
        return '_'.join(self._dt_test.name.split('.'))
    def __repr__(self):
        return self._dt_test.filename
    __str__ = __repr__
    def format_failure(self, err):
        test = self._dt_test
        return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
                % (test.name, test.filename, err))
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(), **options):
    """Build a DocFileCase from the doctest file at `path`.  `path` is
    module-relative unless `module_relative` is false; extra keyword
    options are forwarded to DocFileCase."""
    if globs is None:
        globs = {}
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    if module_relative:
        # Resolve the module-relative path against the given package.
        package = _normalize_module(package)
        path = _module_relative_path(package, path)
    # Read the file and name the test after its basename.
    name = os.path.basename(path)
    doc = open(path).read()
    # Parse the file contents into a doctest and wrap it in a case.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.
    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".
    A number of options may be provided as keyword arguments:
    module_relative
      If "module_relative" is True, then the given file paths are
      interpreted as os-independent module-relative paths.  By
      default, these paths are relative to the calling module's
      directory; but if the "package" argument is specified, then
      they are relative to that package.  To ensure os-independence,
      "filename" should use "/" characters to separate path
      segments, and may not be an absolute path (i.e., it may not
      begin with "/").
      If "module_relative" is False, then the given file paths are
      interpreted as os-specific paths.  These paths may be absolute
      or relative (to the current working directory).
    package
      A Python package or the name of a Python package whose directory
      should be used as the base directory for module relative paths.
      If "package" is not specified, then the calling module's
      directory is used as the base directory for module relative
      filenames.  It is an error to specify "package" if
      "module_relative" is False.
    setUp
      A set-up function.  This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.
    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.
    globs
      A dictionary containing initial global variables for the tests.
    optionflags
      A set of doctest option flags expressed as an integer.
    parser
      A DocTestParser (or subclass) that should be used to extract
      tests from the files.
    """
    suite = unittest.TestSuite()
    # We do this here so that _normalize_module is called at the right
    # level.  If it were called in DocFileTest, then this function
    # would be the caller and we might guess the package incorrectly.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))
    for path in paths:
        suite.addTest(DocFileTest(path, **kw))
    return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
    Converts text with examples to a Python script. Example input is
    converted to regular code. Example output and all other words
    are converted to comments:
    >>> text = '''
    ... Here are examples of simple math.
    ...
    ... Python has super accurate integer addition
    ...
    ... >>> 2 + 2
    ... 5
    ...
    ... And very friendly error messages:
    ...
    ... >>> 1/0
    ... To Infinity
    ... And
    ... Beyond
    ...
    ... You can use logic if you want:
    ...
    ... >>> if 0:
    ... ... blah
    ... ... blah
    ... ...
    ...
    ... Ho hum
    ... '''
    >>> print script_from_examples(text)
    # Here are examples of simple math.
    #
    # Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    # And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    # You can use logic if you want:
    #
    if 0:
        blah
        blah
    #
    # Ho hum
    """
    script_lines = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Example source becomes real code; drop its trailing newline.
            script_lines.append(piece.source[:-1])
            # Expected output is preserved as '##' comments under a marker.
            if piece.want:
                script_lines.append('# Expected:')
                script_lines.extend('## ' + line
                                    for line in piece.want.split('\n')[:-1])
        else:
            # Narrative text between examples turns into '#' comments.
            script_lines.extend(_comment_line(line)
                                for line in piece.split('\n')[:-1])
    # Trim bare '#' comment markers from both ends of the script.
    while script_lines and script_lines[-1] == '#':
        script_lines.pop()
    while script_lines and script_lines[0] == '#':
        script_lines.pop(0)
    return '\n'.join(script_lines)
def testsource(module, name):
    """Return the doctest examples for one object as a debuggable script.

    `module` is the module (or its dotted name) containing the test, and
    `name` is the name, within that module, of the object whose docstring
    holds the examples.  Raises ValueError when no matching test exists.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug the doctest examples contained in the string `src`.

    The examples are converted into a script (see script_from_examples)
    which is then run under pdb via debug_script with the same
    `pm`/`globs` arguments.
    """
    debug_script(script_from_examples(src), pm, globs)
def debug_script(src, pm=False, globs=None):
    """Debug a test script.

    `src` is the script, as a string.  It is written to a temporary file
    and executed.  With `pm` true the script runs first and pdb performs a
    post-mortem on any exception; otherwise the entire script is run under
    pdb.run().  `globs`, if given, is copied so the debugged code cannot
    mutate the caller's namespace.
    """
    import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used. As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and execfile() needs to open it.
    srcfilename = tempfile.mktemp(".py", "doctestdebug")
    f = open(srcfilename, 'w')
    f.write(src)
    f.close()
    try:
        if globs:
            # Copy so the debugged script cannot mutate the caller's dict.
            globs = globs.copy()
        else:
            globs = {}
        if pm:
            try:
                execfile(srcfilename, globs, globs)
            except:
                print sys.exc_info()[1]
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # Note that %r is vital here. '%s' instead can, e.g., cause
            # backslashes to get treated as metacharacters on Windows.
            pdb.run("execfile(%r)" % srcfilename, globs, globs)
    finally:
        # Always remove the temp file, even if the debugged code raised.
        os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    `module` is the module (or its dotted name) containing the test and
    `name` is the name, within that module, of the object whose docstring
    holds the examples to debug.  The module's own namespace is used as
    globals, so the examples see the names they would normally see.
    """
    module = _normalize_module(module)
    script = testsource(module, name)
    debug_script(script, pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
# A tiny example class: its docstrings double as the doctests this module
# runs on itself (it is registered under __test__ below), so the embedded
# examples - including the Python 2 `print` statements - must stay intact.
class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.
    Methods:
        square()
        get()
    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """
    def __init__(self, val):
        """val -> _TestClass object with associated value val.
        >>> t = _TestClass(123)
        >>> print t.get()
        123
        """
        self.val = val
    def square(self):
        """square() -> square TestClass's associated value
        >>> _TestClass(13).square().get()
        169
        """
        # Squares in place and returns self so calls can be chained.
        self.val = self.val ** 2
        return self
    def get(self):
        """get() -> return TestClass's associated value.
        >>> x = _TestClass(-42)
        >>> print x.get()
        -42
        """
        return self.val
# doctest's __test__ protocol: extra name -> object/string pairs whose
# docstrings (or raw text) are collected and run as additional tests when
# this module tests itself.
__test__ = {"_TestClass": _TestClass,
            "string": r"""
                      Example of a string object, searched as-is.
                      >>> x = 1; y = 2
                      >>> x + y, x * y
                      (3, 2)
                      """,
            "bool-int equivalence": r"""
                                    In 2.2, boolean expressions displayed
                                    0 or 1. By default, we still accept
                                    them. This can be disabled by passing
                                    DONT_ACCEPT_TRUE_FOR_1 to the new
                                    optionflags argument.
                                    >>> 4 == 4
                                    1
                                    >>> 4 == 4
                                    True
                                    >>> 4 > 4
                                    0
                                    >>> 4 > 4
                                    False
                                    """,
            "blank lines": r"""
                Blank lines can be marked with <BLANKLINE>:
                    >>> print 'foo\n\nbar\n'
                    foo
                    <BLANKLINE>
                    bar
                    <BLANKLINE>
            """,
            "ellipsis": r"""
                If the ellipsis flag is used, then '...' can be used to
                elide substrings in the desired output:
                    >>> print range(1000) #doctest: +ELLIPSIS
                    [0, 1, 2, ..., 999]
            """,
            "whitespace normalization": r"""
                If the whitespace normalization flag is used, then
                differences in whitespace are ignored.
                    >>> print range(30) #doctest: +NORMALIZE_WHITESPACE
                    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                    15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
                    27, 28, 29]
            """,
           }
# Run this module's own doctests through unittest's text runner.
def _test():
    r = unittest.TextTestRunner()
    r.run(DocTestSuite())
if __name__ == "__main__":
    _test()
| bsd-3-clause | 19bb416d515e0d642c233f6c1e36dd43 | 36.363906 | 84 | 0.571382 | 4.450878 | false | true | false | false |
django-nonrel/django-nonrel | django/contrib/contenttypes/management.py | 315 | 2458 | from django.contrib.contenttypes.models import ContentType
from django.db.models import get_apps, get_models, signals
from django.utils.encoding import smart_unicode
def update_contenttypes(app, created_models, verbosity=2, **kwargs):
    """
    Creates content types for models in the given app, removing any model
    entries that no longer have a matching model class.

    `app` is an application's models module; `created_models` is accepted
    for post_syncdb signal compatibility but unused here.  When the
    'interactive' kwarg is true, the user is asked to confirm before stale
    content types (and their FK-related objects) are deleted.
    """
    ContentType.objects.clear_cache()
    # The app_label is the next-to-last component of the models module path.
    content_types = list(ContentType.objects.filter(app_label=app.__name__.split('.')[-2]))
    app_models = get_models(app)
    if not app_models:
        return
    for klass in app_models:
        opts = klass._meta
        try:
            # Existing entries are removed from the list; whatever is left
            # afterwards no longer matches a model class.
            ct = ContentType.objects.get(app_label=opts.app_label,
                model=opts.object_name.lower())
            content_types.remove(ct)
        except ContentType.DoesNotExist:
            ct = ContentType(name=smart_unicode(opts.verbose_name_raw),
                app_label=opts.app_label, model=opts.object_name.lower())
            ct.save()
            if verbosity >= 2:
                print "Adding content type '%s | %s'" % (ct.app_label, ct.model)
    # The presence of any remaining content types means the supplied app has an
    # undefined model. Confirm that the content type is stale before deletion.
    if content_types:
        if kwargs.get('interactive', False):
            content_type_display = '\n'.join(['    %s | %s' % (ct.app_label, ct.model) for ct in content_types])
            ok_to_delete = raw_input("""The following content types are stale and need to be deleted:
%s
Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.
    Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
        else:
            # Non-interactive runs never delete; they only report below.
            ok_to_delete = False
        if ok_to_delete == 'yes':
            for ct in content_types:
                if verbosity >= 2:
                    print "Deleting stale content type '%s | %s'" % (ct.app_label, ct.model)
                ct.delete()
        else:
            if verbosity >= 2:
                print "Stale content types remain."
def update_all_contenttypes(verbosity=2, **kwargs):
    """Run update_contenttypes for every installed application."""
    for installed_app in get_apps():
        update_contenttypes(installed_app, None, verbosity, **kwargs)
# Keep content types in sync automatically after every syncdb run.
signals.post_syncdb.connect(update_contenttypes)
# Allow invoking this module directly to refresh every installed app.
if __name__ == "__main__":
    update_all_contenttypes()
| bsd-3-clause | 338f81a32d2746dcdd6f91091f263f82 | 39.966667 | 112 | 0.620016 | 4.042763 | false | false | false | false |
django-nonrel/django-nonrel | docs/_ext/literals_to_xrefs.py | 143 | 4814 | """
Runs through a reST file looking for old-style literals, and helps replace them
with new-style references.
"""
import re
import sys
import shelve
# Matches old-style double-backquote literals, e.g. ``Model.save``.
refre = re.compile(r'``([^`\s]+?)``')
# Sphinx cross-reference roles offered at the interactive prompt.
ROLES = (
    'attr',
    'class',
    "djadmin",
    'data',
    'exc',
    'file',
    'func',
    'lookup',
    'meth',
    'mod' ,
    "djadminopt",
    "ref",
    "setting",
    "term",
    "tfilter",
    "ttag",
    # special
    "skip"
)
# Literals never offered for replacement.  fixliterals() appends to this
# list at runtime whenever the user answers "skip".
ALWAYS_SKIP = [
    "NULL",
    "True",
    "False",
]
def fixliterals(fname):
    """
    Interactively replace ``literal`` markup in the reST file `fname`.

    Each double-backquote literal is shown in context; the user picks a
    Sphinx role (empty input keeps the literal, "skip" always keeps that
    literal for the rest of the run) and a target.  Chosen targets are
    remembered in a shelve file across runs so repeated literals get a
    sensible default.  The file is rewritten in place.
    """
    data = open(fname).read()
    last = 0
    new = []
    # Persistent memory of previous literal -> target choices.
    storage = shelve.open("/tmp/literals_to_xref.shelve")
    lastvalues = storage.get("lastvalues", {})
    for m in refre.finditer(data):
        # Copy over everything between the previous match and this one.
        new.append(data[last:m.start()])
        last = m.end()
        line_start = data.rfind("\n", 0, m.start())
        line_end = data.find("\n", m.end())
        prev_start = data.rfind("\n", 0, line_start)
        next_end = data.find("\n", line_end + 1)
        # Skip always-skip stuff
        if m.group(1) in ALWAYS_SKIP:
            new.append(m.group(0))
            continue
        # skip when the next line is a title
        next_line = data[m.end():next_end].strip()
        if next_line[0] in "!-/:-@[-`{-~" and all(c == next_line[0] for c in next_line):
            new.append(m.group(0))
            continue
        # Show the literal (highlighted in red) with a line of context
        # on either side.
        sys.stdout.write("\n"+"-"*80+"\n")
        sys.stdout.write(data[prev_start+1:m.start()])
        sys.stdout.write(colorize(m.group(0), fg="red"))
        sys.stdout.write(data[m.end():next_end])
        sys.stdout.write("\n\n")
        # Prompt until the user enters a known role (or nothing).
        replace_type = None
        while replace_type is None:
            replace_type = raw_input(
                colorize("Replace role: ", fg="yellow")
            ).strip().lower()
            if replace_type and replace_type not in ROLES:
                replace_type = None
        if replace_type == "":
            new.append(m.group(0))
            continue
        if replace_type == "skip":
            new.append(m.group(0))
            ALWAYS_SKIP.append(m.group(1))
            continue
        # Default target: last value used for this literal, or the literal
        # itself; callables drop their trailing "()" for these roles.
        default = lastvalues.get(m.group(1), m.group(1))
        if default.endswith("()") and replace_type in ("class", "func", "meth"):
            default = default[:-2]
        replace_value = raw_input(
            colorize("Text <target> [", fg="yellow") + default + colorize("]: ", fg="yellow")
        ).strip()
        if not replace_value:
            replace_value = default
        new.append(":%s:`%s`" % (replace_type, replace_value))
        lastvalues[m.group(1)] = replace_value
    # Copy the tail after the final match and write everything back.
    new.append(data[last:])
    open(fname, "w").write("".join(new))
    storage["lastvalues"] = lastvalues
    storage.close()
#
# The following is taken from django.utils.termcolors and is copied here to
# avoid the dependency.
#
def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
    """
    color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
    foreground = dict([(color_names[x], '3%s' % x) for x in range(8)])
    background = dict([(color_names[x], '4%s' % x) for x in range(8)])
    RESET = '0'
    opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
    text = str(text)
    code_list = []
    # Special case: colorize('', opts=('reset',)) yields a bare RESET code.
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # BUGFIX: dict.iteritems() is Python 2-only; items() behaves the same
    # there and also works on Python 3, where iteritems() raises
    # AttributeError.
    for k, v in kwargs.items():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        text = text + '\x1b[%sm' % RESET
    return ('\x1b[%sm' % ';'.join(code_list)) + text
if __name__ == '__main__':
try:
fixliterals(sys.argv[1])
except (KeyboardInterrupt, SystemExit):
print | bsd-3-clause | be649f56d629c0b28094dd58df090378 | 27.157895 | 93 | 0.524096 | 3.534508 | false | false | false | false |
batiste/django-page-cms | pages/management/commands/pages_demo.py | 1 | 2856 | from django.core.management.base import BaseCommand, CommandError
from pages.models import Page
from pages.tests.testcase import new_page
from django.contrib.auth import get_user_model
# Filler text assigned to the "lead" and "content" of every demo page below.
lorem = """Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Quisque tempus tellus enim, quis tempus dui pretium non.
Cras eget enim vel magna fringilla cursus ut sit amet mi.
Curabitur id pharetra turpis. Pellentesque quis eros nunc.
Etiam interdum nisi ut sapien facilisis ornare. Mauris in tellus elit.
Integer vulputate venenatis odio. Vivamus in diam vitae magna gravida
sodales sit amet id ex. Aliquam commodo massa at mollis blandit.
Donec auctor sapien et risus gravida, ultrices imperdiet est laoreet."""
class Command(BaseCommand):
    """Management command that populates the CMS with example pages."""
    help = 'Create a couple of dummy pages for a demo'

    def handle(self, *args, **options):
        """Build the demo page tree, creating a fallback user first."""
        UserModel = get_user_model()
        # new_page needs at least one user to attribute content to.
        if UserModel.objects.count() == 0:
            UserModel.objects.create(username='demo', password='demo')

        def make(title, slug, lead, **extra):
            # Every demo page shares the same lorem body text.
            return new_page(
                content={'title': title, 'slug': slug,
                         'lead': lead, 'content': lorem},
                **extra)

        make('Home', 'home', 'Welcome to the Gerbi CMS',
             template='index.html')
        products = make('Products', 'products', 'Our products',
                        template='index.html')
        make('Poney', 'poney', 'Our shiny poney',
             parent=products, template='index.html')
        make('Hat', 'hat', 'Fedora for elegance',
             parent=products, template='index.html')
        # The blog section delegates rendering to the "blog" application.
        blog = make('Blog', 'blog', 'Blog example',
                    template='blog-home.html', delegate_to='blog')
        for index in range(1, 5):
            make('Blog post %d' % index, 'blog-post-%d' % index,
                 'Blog post example',
                 parent=blog, template='blog-post.html')
        make('Contact', 'contact', 'Contact us at gerbi@example.com',
             template='index.html')
| bsd-3-clause | f99b9c2a4a56a8583cb746466f689538 | 44.333333 | 106 | 0.59944 | 3.412186 | false | false | false | false |
django-nonrel/django-nonrel | tests/modeltests/files/tests.py | 48 | 4113 | import shutil
import sys
from django.core.cache import cache
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from models import Storage, temp_storage, temp_storage_location
if sys.version_info >= (2, 5):
from tests_25 import FileObjTests
class FileTests(TestCase):
    """
    End-to-end checks of FileField/FieldFile behaviour (naming, reading,
    duplicate-name suffixing, pickling, defaults) against the temporary
    file-system storage configured in models.py.
    """
    def tearDown(self):
        # Remove everything the temporary storage wrote during the test.
        shutil.rmtree(temp_storage_location)
    def test_files(self):
        # Attempting to access a FileField from the class raises a descriptive
        # error
        self.assertRaises(AttributeError, lambda: Storage.normal)
        # An object without a file has limited functionality.
        obj1 = Storage()
        self.assertEqual(obj1.normal.name, "")
        self.assertRaises(ValueError, lambda: obj1.normal.size)
        # Saving a file enables full functionality.
        obj1.normal.save("django_test.txt", ContentFile("content"))
        self.assertEqual(obj1.normal.name, "tests/django_test.txt")
        self.assertEqual(obj1.normal.size, 7)
        self.assertEqual(obj1.normal.read(), "content")
        obj1.normal.close()
        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model it's attached to is saved.
        obj1.normal = SimpleUploadedFile("assignment.txt", "content")
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(dirs, [])
        self.assertEqual(sorted(files), ["default.txt", "django_test.txt"])
        obj1.save()
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(
            sorted(files), ["assignment.txt", "default.txt", "django_test.txt"]
        )
        # Files can be read in a little at a time, if necessary.
        obj1.normal.open()
        self.assertEqual(obj1.normal.read(3), "con")
        self.assertEqual(obj1.normal.read(), "tent")
        self.assertEqual(list(obj1.normal.chunks(chunk_size=2)), ["co", "nt", "en", "t"])
        obj1.normal.close()
        # Save another file with the same name.
        obj2 = Storage()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertEqual(obj2.normal.name, "tests/django_test_1.txt")
        self.assertEqual(obj2.normal.size, 12)
        # Push the objects into the cache to make sure they pickle properly
        cache.set("obj1", obj1)
        cache.set("obj2", obj2)
        self.assertEqual(cache.get("obj2").normal.name, "tests/django_test_1.txt")
        # Deleting an object does not delete the file it uses.
        obj2.delete()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertEqual(obj2.normal.name, "tests/django_test_2.txt")
        # Multiple files with the same name get _N appended to them.
        objs = [Storage() for i in range(3)]
        for o in objs:
            o.normal.save("multiple_files.txt", ContentFile("Same Content"))
        self.assertEqual(
            [o.normal.name for o in objs],
            ["tests/multiple_files.txt", "tests/multiple_files_1.txt", "tests/multiple_files_2.txt"]
        )
        for o in objs:
            o.delete()
        # Default values allow an object to access a single file.
        obj3 = Storage.objects.create()
        self.assertEqual(obj3.default.name, "tests/default.txt")
        self.assertEqual(obj3.default.read(), "default content")
        obj3.default.close()
        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj3.delete()
        obj3 = Storage()
        self.assertEqual(obj3.default.read(), "default content")
        obj3.default.close()
        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj4 = Storage()
        obj4.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj4.random.name.endswith("/random_file"))
        # Clean up the temporary files and dir.
        obj1.normal.delete()
        obj2.normal.delete()
        obj3.default.delete()
        obj4.random.delete()
| bsd-3-clause | c447953a4ed5e0e9ae9669015a342fae | 38.171429 | 100 | 0.637977 | 3.898578 | false | true | false | false |
django-nonrel/django-nonrel | django/db/models/fields/files.py | 156 | 16228 | import datetime
import os
import django.utils.copycompat as copy
from django.conf import settings
from django.db.models.fields import Field
from django.core.files.base import File, ContentFile
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile, get_image_dimensions
from django.core.files.uploadedfile import UploadedFile
from django.utils.functional import curry
from django.db.models import signals
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext_lazy, ugettext as _
from django import forms
from django.db.models.loading import cache
class FieldFile(File):
    """
    File-like value held by a model instance for a FileField.

    Delegates path/url/size lookups and save/delete operations to the
    field's storage backend, while keeping the owning model instance's
    attribute in sync.  `_committed` tracks whether the file content has
    actually been written to storage (as opposed to merely assigned).
    """
    def __init__(self, instance, field, name):
        super(FieldFile, self).__init__(None, name)
        self.instance = instance
        self.field = field
        self.storage = field.storage
        self._committed = True
    def __eq__(self, other):
        # Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, such code keeps working: we compare
        # by name against both FieldFiles and plain strings.
        if hasattr(other, 'name'):
            return self.name == other.name
        return self.name == other
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Required because we defined a custom __eq__.
        return hash(self.name)
    # The standard File contains most of the necessary properties, but
    # FieldFiles can be instantiated without a name, so that needs to
    # be checked for here.
    def _require_file(self):
        # Raise if no file name is set; used by every accessor below.
        if not self:
            raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
    def _get_file(self):
        self._require_file()
        # Lazily open the file from storage the first time it is needed.
        if not hasattr(self, '_file') or self._file is None:
            self._file = self.storage.open(self.name, 'rb')
        return self._file
    def _set_file(self, file):
        self._file = file
    def _del_file(self):
        del self._file
    file = property(_get_file, _set_file, _del_file)
    def _get_path(self):
        self._require_file()
        return self.storage.path(self.name)
    path = property(_get_path)
    def _get_url(self):
        self._require_file()
        return self.storage.url(self.name)
    url = property(_get_url)
    def _get_size(self):
        self._require_file()
        # Before commit the bytes only exist on the in-memory file object.
        if not self._committed:
            return self.file.size
        return self.storage.size(self.name)
    size = property(_get_size)
    def open(self, mode='rb'):
        self._require_file()
        self.file.open(mode)
    # open() doesn't alter the file's contents, but it does reset the pointer
    open.alters_data = True
    # In addition to the standard File API, FieldFiles have extra methods
    # to further manipulate the underlying file, as well as update the
    # associated model instance.
    def save(self, name, content, save=True):
        """Write `content` to storage under `name` and sync the instance."""
        name = self.field.generate_filename(self.instance, name)
        self.name = self.storage.save(name, content)
        setattr(self.instance, self.field.name, self.name)
        # Update the filesize cache
        self._size = content.size
        self._committed = True
        # Save the object because it has changed, unless save is False
        if save:
            self.instance.save()
    save.alters_data = True
    def delete(self, save=True):
        """Remove the file from storage and clear the instance attribute."""
        # Only close the file if it's already open, which we know by the
        # presence of self._file
        if hasattr(self, '_file'):
            self.close()
            del self.file
        self.storage.delete(self.name)
        self.name = None
        setattr(self.instance, self.field.name, self.name)
        # Delete the filesize cache
        if hasattr(self, '_size'):
            del self._size
        self._committed = False
        if save:
            self.instance.save()
    delete.alters_data = True
    def _get_closed(self):
        file = getattr(self, '_file', None)
        return file is None or file.closed
    closed = property(_get_closed)
    def close(self):
        file = getattr(self, '_file', None)
        if file is not None:
            file.close()
    def __getstate__(self):
        # FieldFile needs access to its associated model field and an instance
        # it's attached to in order to work properly, but the only necessary
        # data to be pickled is the file's name itself. Everything else will
        # be restored later, by FileDescriptor below.
        return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
    """
    The descriptor for the file attribute on the model instance. Returns a
    FieldFile when accessed so you can do stuff like::
        >>> instance.file.size
    Assigns a file object on assignment so you can do::
        >>> instance.file = File(...)
    """
    def __init__(self, field):
        # The FileField this descriptor serves.
        self.field = field
    def __get__(self, instance=None, owner=None):
        if instance is None:
            raise AttributeError(
                "The '%s' attribute can only be accessed from %s instances."
                % (self.field.name, owner.__name__))
        # This is slightly complicated, so worth an explanation.
        # `instance.file` needs to ultimately return some instance of `File`,
        # probably a subclass. Additionally, this returned object needs to have
        # the FieldFile API so that users can easily do things like
        # instance.file.path and have that delegated to the file storage engine.
        # Easy enough if we're strict about assignment in __set__, but if you
        # peek below you can see that we're not. So depending on the current
        # value of the field we have to dynamically construct some sort of
        # "thing" to return.
        # The instance dict contains whatever was originally assigned
        # in __set__.
        file = instance.__dict__[self.field.name]
        # If this value is a string (instance.file = "path/to/file") or None
        # then we simply wrap it with the appropriate attribute class according
        # to the file field. [This is FieldFile for FileFields and
        # ImageFieldFile for ImageFields; it's also conceivable that user
        # subclasses might also want to subclass the attribute class]. This
        # object understands how to convert a path to a file, and also how to
        # handle None.
        if isinstance(file, basestring) or file is None:
            attr = self.field.attr_class(instance, self.field, file)
            instance.__dict__[self.field.name] = attr
        # Other types of files may be assigned as well, but they need to have
        # the FieldFile interface added to them. Thus, we wrap any other type of
        # File inside a FieldFile (well, the field's attr_class, which is
        # usually FieldFile).
        elif isinstance(file, File) and not isinstance(file, FieldFile):
            file_copy = self.field.attr_class(instance, self.field, file.name)
            file_copy.file = file
            file_copy._committed = False
            instance.__dict__[self.field.name] = file_copy
        # Finally, because of the (some would say boneheaded) way pickle works,
        # the underlying FieldFile might not actually itself have an associated
        # file. So we need to reset the details of the FieldFile in those cases.
        elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
            file.instance = instance
            file.field = self.field
            file.storage = self.field.storage
        # That was fun, wasn't it?
        return instance.__dict__[self.field.name]
    def __set__(self, instance, value):
        # Store raw; __get__ normalizes the value lazily on next access.
        instance.__dict__[self.field.name] = value
class FileField(Field):
    """
    Model field whose value is a file kept in a pluggable storage backend;
    the database column itself stores only the file's name/path (a string
    of at most `max_length` characters, default 100).
    """
    # The class to wrap instance attributes in. Accessing the file object off
    # the instance will always return an instance of attr_class.
    attr_class = FieldFile
    # The descriptor to use for accessing the attribute off of the class.
    descriptor_class = FileDescriptor
    description = ugettext_lazy("File path")
    def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
        # primary_key/unique make no sense for file columns; fail loudly.
        for arg in ('primary_key', 'unique'):
            if arg in kwargs:
                raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))
        self.storage = storage or default_storage
        self.upload_to = upload_to
        # A callable upload_to replaces the default filename generation.
        if callable(upload_to):
            self.generate_filename = upload_to
        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FileField, self).__init__(verbose_name, name, **kwargs)
    def get_internal_type(self):
        return "FileField"
    def get_prep_lookup(self, lookup_type, value):
        # Compare by stored name when given a File-like object.
        if hasattr(value, 'name'):
            value = value.name
        return super(FileField, self).get_prep_lookup(lookup_type, value)
    def get_prep_value(self, value):
        "Returns field's value prepared for saving into a database."
        # Need to convert File objects provided via a form to unicode for database insertion
        if value is None:
            return None
        return unicode(value)
    def pre_save(self, model_instance, add):
        "Returns field's value just before saving."
        file = super(FileField, self).pre_save(model_instance, add)
        if file and not file._committed:
            # Commit the file to storage prior to saving the model
            file.save(file.name, file, save=False)
        return file
    def contribute_to_class(self, cls, name):
        super(FileField, self).contribute_to_class(cls, name)
        # Install the descriptor so attribute access yields FieldFiles.
        setattr(cls, self.name, self.descriptor_class(self))
    def get_directory_name(self):
        # upload_to may contain strftime() patterns, expanded against "now".
        return os.path.normpath(force_unicode(datetime.datetime.now().strftime(smart_str(self.upload_to))))
    def get_filename(self, filename):
        return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
    def generate_filename(self, instance, filename):
        return os.path.join(self.get_directory_name(), self.get_filename(filename))
    def save_form_data(self, instance, data):
        # Important: None means "no change", other false value means "clear"
        # This subtle distinction (rather than a more explicit marker) is
        # needed because we need to consume values that are also sane for a
        # regular (non Model-) Form to find in its cleaned_data dictionary.
        if data is not None:
            # This value will be converted to unicode and stored in the
            # database, so leaving False as-is is not acceptable.
            if not data:
                data = ''
            setattr(instance, self.name, data)
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
        # If a file has been provided previously, then the form doesn't require
        # that a new file is provided this time.
        # The code to mark the form field as not required is used by
        # form_for_instance, but can probably be removed once form_for_instance
        # is gone. ModelForm uses a different method to check for an existing file.
        if 'initial' in kwargs:
            defaults['required'] = False
        defaults.update(kwargs)
        return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
    """
    Descriptor for ImageField attributes.  Identical to FileDescriptor,
    except that assignment also refreshes the model's width_field and
    height_field, when those are configured.
    """
    def __set__(self, instance, value):
        had_previous_value = instance.__dict__.get(self.field.name) is not None
        super(ImageFileDescriptor, self).__set__(instance, value)
        # Only recompute dimensions when the field already held a value.
        # While Model.__init__ is populating fields (bug #11084) the
        # previous value is None and the post_init signal handler
        # (ImageField.update_dimension_fields) covers that case instead;
        # any later assignment triggers the update right here.
        if had_previous_value:
            self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
    """FieldFile for ImageFields; drops cached dimensions on delete."""
    def delete(self, save=True):
        # Discard any memoized (width, height) pair so a later access does
        # not report dimensions of a file that no longer exists.
        try:
            del self._dimensions_cache
        except AttributeError:
            pass
        super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
    """
    FileField variant for images.  Optionally keeps two sibling model
    fields (`width_field`/`height_field`) synchronized with the stored
    image's dimensions.
    """
    attr_class = ImageFieldFile
    descriptor_class = ImageFileDescriptor
    description = ugettext_lazy("File path")
    def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs):
        # Names of sibling model fields that mirror the image's dimensions.
        self.width_field, self.height_field = width_field, height_field
        FileField.__init__(self, verbose_name, name, **kwargs)
    def contribute_to_class(self, cls, name):
        super(ImageField, self).contribute_to_class(cls, name)
        # Attach update_dimension_fields so that dimension fields declared
        # after their corresponding image field don't stay cleared by
        # Model.__init__, see bug #11196.
        signals.post_init.connect(self.update_dimension_fields, sender=cls)
    def update_dimension_fields(self, instance, force=False, *args, **kwargs):
        """
        Updates field's width and height fields, if defined.
        This method is hooked up to model's post_init signal to update
        dimensions after instantiating a model instance. However, dimensions
        won't be updated if the dimensions fields are already populated. This
        avoids unnecessary recalculation when loading an object from the
        database.
        Dimensions can be forced to update with force=True, which is how
        ImageFileDescriptor.__set__ calls this method.
        """
        # Nothing to update if the field doesn't have dimension fields.
        has_dimension_fields = self.width_field or self.height_field
        if not has_dimension_fields:
            return
        # getattr will call the ImageFileDescriptor's __get__ method, which
        # coerces the assigned value into an instance of self.attr_class
        # (ImageFieldFile in this case).
        file = getattr(instance, self.attname)
        # Nothing to update if we have no file and not being forced to update.
        if not file and not force:
            return
        dimension_fields_filled = not(
            (self.width_field and not getattr(instance, self.width_field))
            or (self.height_field and not getattr(instance, self.height_field))
        )
        # When both dimension fields have values, we are most likely loading
        # data from the database or updating an image field that already had
        # an image stored. In the first case, we don't want to update the
        # dimension fields because we are already getting their values from the
        # database. In the second case, we do want to update the dimensions
        # fields and will skip this return because force will be True since we
        # were called from ImageFileDescriptor.__set__.
        if dimension_fields_filled and not force:
            return
        # file should be an instance of ImageFieldFile or should be None.
        if file:
            width = file.width
            height = file.height
        else:
            # No file, so clear dimensions fields.
            width = None
            height = None
        # Update the width and height fields.
        if self.width_field:
            setattr(instance, self.width_field, width)
        if self.height_field:
            setattr(instance, self.height_field, height)
    def formfield(self, **kwargs):
        # Use an image-validating form field by default.
        defaults = {'form_class': forms.ImageField}
        defaults.update(kwargs)
        return super(ImageField, self).formfield(**defaults)
| bsd-3-clause | 57adf6777ee2693a9107413f2c1d3f05 | 39.773869 | 107 | 0.645243 | 4.315957 | false | false | false | false |
aparo/pyes | performance/utils.py | 6 | 1454 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import os
from django.contrib.webdesign.lorem_ipsum import words as li_words
import shelve
import codecs
def get_names():
    """Return the list of names stored in ``data/names.txt``.

    Returns
    -------
    list of unicode
        One whitespace-stripped name per line of the UTF-8 encoded file.
    """
    path = os.path.join("data", "names.txt")
    # use a context manager so the file handle is closed deterministically
    # (the previous implementation leaked the open handle)
    with codecs.open(path, "rb", 'utf8') as fid:
        return [line.strip() for line in fid]
def generate_dataset(number_items=1000):
    """Build a list of ``number_items`` random person records.

    Each record is a dict with ``name``, ``age`` and ``description`` keys;
    names are drawn from ``data/names.txt`` and descriptions are lorem-ipsum
    text.
    """
    names = get_names()
    last_index = len(names) - 1
    # reseed the PRNG from system entropy
    random.seed()
    return [{"name": names[random.randint(0, last_index)],
             "age": random.randint(1, 100),
             "description": li_words(50, False)}
            for _ in range(number_items)]
def generate_dataset_shelve(filename, number_items=1000):
    """Write ``number_items`` random person records into a shelve file.

    Any pre-existing file at ``filename`` is removed first.  Keys are the
    string form of 1-based item indices; values are dicts with ``name``,
    ``age`` and ``description`` keys.
    """
    if os.path.exists(filename):
        os.remove(filename)
    db = shelve.open(filename)
    names = get_names()
    last_index = len(names) - 1
    # reseed the PRNG from system entropy
    random.seed()
    for item_number in range(number_items):
        db[str(item_number + 1)] = {
            "name": names[random.randint(0, last_index)],
            "age": random.randint(1, 100),
            "description": li_words(50, False)}
    db.close()
| bsd-3-clause | 488197886360ae8a801bb6ec404563b3 | 27.509804 | 102 | 0.611417 | 3.581281 | false | false | false | false |
mne-tools/mne-python | mne/preprocessing/nirs/_optical_density.py | 8 | 1719 | # Authors: Robert Luke <mail@robertluke.net>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import numpy as np
from ...io import BaseRaw
from ...io.constants import FIFF
from ...utils import _validate_type, warn, verbose
from ..nirs import _validate_nirs_info
@verbose
def optical_density(raw, *, verbose=None):
    r"""Convert NIRS raw data to optical density.

    Parameters
    ----------
    raw : instance of Raw
        The raw data.
    %(verbose)s

    Returns
    -------
    raw : instance of Raw
        The modified raw instance.
    """
    raw = raw.copy().load_data()
    _validate_type(raw, BaseRaw, 'raw')
    picks = _validate_nirs_info(raw.info, fnirs='cw_amplitude')
    # Light intensities should be strictly positive; negative readings are
    # likely hardware or movement artifacts.  Rectify them, then clip any
    # exact zeros up to the smallest positive value so the per-channel means
    # used below stay strictly positive.
    if np.any(raw._data[picks] <= 0):
        warn("Negative intensities encountered. Setting to abs(x)")
        smallest = np.inf
        for pick in picks:
            channel = raw._data[pick]
            np.abs(channel, out=channel)
            # `or` skips an exact-zero minimum (avoid clipping to 0)
            smallest = min(smallest, channel.min() or smallest)
        for pick in picks:
            np.maximum(raw._data[pick], smallest, out=raw._data[pick])
    # OD = -log(intensity / mean(intensity)), computed in place per channel.
    for pick in picks:
        channel = raw._data[pick]  # view into the raw data buffer
        channel /= np.mean(channel)
        np.log(channel, out=channel)
        channel *= -1
        raw.info['chs'][pick]['coil_type'] = FIFF.FIFFV_COIL_FNIRS_OD
    return raw
| bsd-3-clause | 0e24ba865745c6c60d7e84b4100b5341 | 30.254545 | 78 | 0.610239 | 3.363992 | false | false | false | false |
mne-tools/mne-python | tutorials/time-freq/20_sensors_time_frequency.py | 2 | 7731 | # -*- coding: utf-8 -*-
"""
.. _tut-sensors-time-freq:
============================================
Frequency and time-frequency sensor analysis
============================================
The objective is to show you how to explore the spectral content
of your data (frequency and time-frequency). Here we'll work on Epochs.
We will use this dataset: :ref:`somato-dataset`. It contains so-called event
related synchronizations (ERS) / desynchronizations (ERD) in the beta band.
""" # noqa: E501
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Richard Höchenberger <richard.hoechenberger@gmail.com>
#
# License: BSD-3-Clause
# %%
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import somato
from mne.time_frequency import tfr_morlet
# %%
# Set parameters
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = (data_path / f'sub-{subject}' / 'meg' /
             f'sub-{subject}_task-{task}_meg.fif')
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
# crop and resample just to reduce computation time
raw.crop(120, 360).load_data().resample(200)
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
# Construct Epochs
event_id, tmin, tmax = 1, -1., 3.
baseline = (None, 0)  # baseline runs from the start of each epoch to t=0
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
                    preload=True)
# %%
# Frequency analysis
# ------------------
#
# We start by exploring the frequency content of our epochs.
# %%
# Let's first check out all channel types by averaging across epochs.
epochs.plot_psd(fmin=2., fmax=40., average=True)
# %%
# Now, let's take a look at the spatial distributions of the PSD, averaged
# across epochs and frequency bands.
epochs.plot_psd_topomap(ch_type='grad', normalize=False)
# %%
# Alternatively, you can also create PSDs from `~mne.Epochs` methods directly.
#
# .. note::
#     In contrast to the methods for visualization, the ``compute_psd`` methods
#     do **not** scale the data from SI units to more "convenient" values. So
#     when e.g. calculating the PSD of gradiometers via
#     :meth:`~mne.Epochs.compute_psd`, you will get the power as
#     ``(T/m)²/Hz`` (instead of ``(fT/cm)²/Hz`` via
#     :meth:`~mne.Epochs.plot_psd`).
_, ax = plt.subplots()
spectrum = epochs.compute_psd(fmin=2., fmax=40., tmax=3., n_jobs=None)
# average across epochs first
mean_spectrum = spectrum.average()
psds, freqs = mean_spectrum.get_data(return_freqs=True)
# then convert to dB and take mean & standard deviation across channels
psds = 10 * np.log10(psds)
psds_mean = psds.mean(axis=0)
psds_std = psds.std(axis=0)
# plot the mean PSD with a +/- 1 SD shaded band across channels
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
                color='k', alpha=.5, edgecolor='none')
ax.set(title='Multitaper PSD (gradiometers)', xlabel='Frequency (Hz)',
       ylabel='Power Spectral Density (dB)')
# %%
# Notably, :meth:`mne.Epochs.compute_psd` supports the keyword argument
# ``average``, which specifies how to estimate the PSD based on the individual
# windowed segments. The default is ``average='mean'``, which simply calculates
# the arithmetic mean across segments. Specifying ``average='median'``, in
# contrast, returns the PSD based on the median of the segments (corrected for
# bias relative to the mean), which is a more robust measure.
# Estimate PSDs based on "mean" and "median" averaging for comparison.
kwargs = dict(fmin=2, fmax=40, n_jobs=None)
psds_welch_mean, freqs_mean = epochs.compute_psd(
    'welch', average='mean', **kwargs).get_data(return_freqs=True)
psds_welch_median, freqs_median = epochs.compute_psd(
    'welch', average='median', **kwargs).get_data(return_freqs=True)
# Convert power to dB scale.
psds_welch_mean = 10 * np.log10(psds_welch_mean)
psds_welch_median = 10 * np.log10(psds_welch_median)
# We will only plot the PSD for a single sensor in the first epoch.
ch_name = 'MEG 0122'
ch_idx = epochs.info['ch_names'].index(ch_name)
epo_idx = 0
_, ax = plt.subplots()
ax.plot(freqs_mean, psds_welch_mean[epo_idx, ch_idx, :], color='k',
        ls='-', label='mean of segments')
ax.plot(freqs_median, psds_welch_median[epo_idx, ch_idx, :], color='k',
        ls='--', label='median of segments')
ax.set(title=f'Welch PSD ({ch_name}, Epoch {epo_idx})',
       xlabel='Frequency (Hz)', ylabel='Power Spectral Density (dB)')
ax.legend(loc='upper right')
# %%
# Lastly, we can also retrieve the unaggregated segments by passing
# ``average=None`` to :meth:`mne.Epochs.compute_psd`. The dimensions of
# the returned array are ``(n_epochs, n_sensors, n_freqs, n_segments)``.
welch_unagg = epochs.compute_psd('welch', average=None, **kwargs)
print(welch_unagg.shape)
# %%
# .. _inter-trial-coherence:
#
# Time-frequency analysis: power and inter-trial coherence
# --------------------------------------------------------
#
# We now compute time-frequency representations (TFRs) from our Epochs.
# We'll look at power and inter-trial coherence (ITC).
#
# To this we'll use the function :func:`mne.time_frequency.tfr_morlet`
# but you can also use :func:`mne.time_frequency.tfr_multitaper`
# or :func:`mne.time_frequency.tfr_stockwell`.
#
# .. note::
#     The ``decim`` parameter reduces the sampling rate of the time-frequency
#     decomposition by the defined factor. This is usually done to reduce
#     memory usage. For more information refer to the documentation of
#     :func:`mne.time_frequency.tfr_morlet`.
#
# define frequencies of interest (log-spaced)
freqs = np.logspace(*np.log10([6, 35]), num=8)
n_cycles = freqs / 2.  # different number of cycle per frequency
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
                        return_itc=True, decim=3, n_jobs=None)
# %%
# Inspect power
# -------------
#
# .. note::
#     The generated figures are interactive. In the topo you can click
#     on an image to visualize the data for one sensor.
#     You can also select a portion in the time-frequency plane to
#     obtain a topomap for a certain time-frequency region.
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
power.plot([82], baseline=(-0.5, 0), mode='logratio', title=power.ch_names[82])
# show band-limited power as topomaps for canonical alpha and beta bands
fig, axes = plt.subplots(1, 2, figsize=(7, 4))
topomap_kw = dict(ch_type='grad', tmin=0.5, tmax=1.5, baseline=(-0.5, 0),
                  mode='logratio', show=False)
plot_dict = dict(Alpha=dict(fmin=8, fmax=12), Beta=dict(fmin=13, fmax=25))
for ax, (title, fmin_fmax) in zip(axes, plot_dict.items()):
    power.plot_topomap(**fmin_fmax, axes=ax, **topomap_kw)
    ax.set_title(title)
fig.tight_layout()
fig.show()
# %%
# Joint Plot
# ----------
# You can also create a joint plot showing both the aggregated TFR
# across channels and topomaps at specific times and frequencies to obtain
# a quick overview regarding oscillatory effects across time and space.
power.plot_joint(baseline=(-0.5, 0), mode='mean', tmin=-.5, tmax=2,
                 timefreqs=[(0.5, 10), (1.3, 8)])
# %%
# Inspect ITC
# -----------
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
# %%
# .. note::
#     Baseline correction can be applied to power or done in plots.
#     To illustrate the baseline correction in plots, the next line is
#     commented::
#
#         # power.apply_baseline(baseline=(-0.5, 0), mode='logratio')
#
# Exercise
# --------
#
# - Visualize the inter-trial coherence values as topomaps as done with
#   power.
| bsd-3-clause | 41291cd9dd11c6119a86ee6d60944fb4 | 35.45283 | 79 | 0.668866 | 3.054545 | false | false | false | false |
mne-tools/mne-python | mne/io/kit/coreg.py | 5 | 7386 | """Coordinate Point Extractor for KIT system."""
# Author: Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD-3-Clause
from collections import OrderedDict
from os import SEEK_CUR, path as op
import pickle
import re
import numpy as np
from .constants import KIT, FIFF
from .._digitization import _make_dig_points
from ...transforms import (Transform, apply_trans, get_ras_to_neuromag_trans,
als_ras_trans)
from ...utils import warn, _check_option
INT32 = '<i4'
FLOAT64 = '<f8'
def read_mrk(fname):
    r"""Marker Point Extraction in MEG space directly from sqd.

    Parameters
    ----------
    fname : str
        Absolute path to Marker file.
        File formats allowed: \*.sqd, \*.mrk, \*.txt, \*.pickled.

    Returns
    -------
    mrk_points : ndarray, shape (n_points, 3)
        Marker points in MEG space [m].
    """
    from .kit import _read_dirs
    ext = op.splitext(fname)[-1]
    if ext in ('.sqd', '.mrk'):
        # binary KIT file: seek to the coregistration directory and walk the
        # fixed-size marker records
        with open(fname, 'rb', buffering=0) as fid:
            dirs = _read_dirs(fid)
            fid.seek(dirs[KIT.DIR_INDEX_COREG]['offset'])
            # skips match_done, meg_to_mri and mri_to_meg
            fid.seek(KIT.INT + (2 * KIT.DOUBLE * 16), SEEK_CUR)
            mrk_count = np.fromfile(fid, INT32, 1)[0]
            pts = []
            for _ in range(mrk_count):
                # mri_type, meg_type, mri_done, meg_done
                _, _, _, meg_done = np.fromfile(fid, INT32, 4)
                # each record holds an MRI point and a MEG point (3 floats
                # each); only the MEG point of completed markers is kept
                _, meg_pts = np.fromfile(fid, FLOAT64, 6).reshape(2, 3)
                if meg_done:
                    pts.append(meg_pts)
            mrk_points = np.array(pts)
    elif ext == '.txt':
        mrk_points = _read_dig_kit(fname, unit='m')
    elif ext == '.pickled':
        # NOTE(review): pickle.load can execute arbitrary code; only load
        # marker files from trusted sources
        with open(fname, 'rb') as fid:
            food = pickle.load(fid)
        try:
            mrk_points = food['mrk']
        except Exception:
            err = ("%r does not contain marker points." % fname)
            raise ValueError(err)
    else:
        raise ValueError('KIT marker file must be *.sqd, *.mrk, *.txt or '
                         '*.pickled, *%s is not supported.' % ext)
    # check output: exactly five 3D marker coil positions are expected
    mrk_points = np.asarray(mrk_points)
    if mrk_points.shape != (5, 3):
        err = ("%r is no marker file, shape is "
               "%s" % (fname, mrk_points.shape))
        raise ValueError(err)
    return mrk_points
def read_sns(fname):
    """Sensor coordinate extraction in MEG space.

    Parameters
    ----------
    fname : str
        Absolute path to sensor definition file.

    Returns
    -------
    locs : numpy.array, shape = (n_points, 5)
        Sensor coil data: the five numeric fields captured per sensor line
        (presumably position and orientation angles as stored in the file —
        TODO confirm against the KIT file format).
    """
    # one sensor per line: "<digit>,<name>,<v1>,<v2>,<v3>,<v4>,<v5>"
    p = re.compile(r'\d,[A-Za-z]*,([\.\-0-9]+),' +
                   r'([\.\-0-9]+),([\.\-0-9]+),' +
                   r'([\.\-0-9]+),([\.\-0-9]+)')
    with open(fname) as fid:
        locs = np.array(p.findall(fid.read()), dtype=float)
    return locs
def _set_dig_kit(mrk, elp, hsp, eeg):
    """Add landmark points and head shape data to the KIT instance.

    Digitizer data (elp and hsp) are represented in [mm] in the Polhemus
    ALS coordinate system. This is converted to [m].

    Parameters
    ----------
    mrk : None | str | array_like, shape (5, 3)
        Marker points representing the location of the marker coils with
        respect to the MEG Sensors, or path to a marker file.
    elp : None | str | array_like, shape (8, 3)
        Digitizer points representing the location of the fiducials and the
        marker coils with respect to the digitized head shape, or path to a
        file containing these points.
    hsp : None | str | array, shape (n_points, 3)
        Digitizer head shape points, or path to head shape file. If more
        than 10,000 points are in the head shape, they are automatically
        decimated.
    eeg : dict
        Ordered dict of EEG dig points.

    Returns
    -------
    dig_points : list
        List of digitizer points for info['dig'].
    dev_head_t : Transform
        A dictionary describing the device-head transformation.
    hpi_results : list
        The hpi results.
    """
    from ...coreg import fit_matched_points, _decimate_points
    if isinstance(hsp, str):
        hsp = _read_dig_kit(hsp)
    n_pts = len(hsp)
    # decimate overly dense head shapes to keep downstream processing fast
    if n_pts > KIT.DIG_POINTS:
        hsp = _decimate_points(hsp, res=0.005)
        n_new = len(hsp)
        warn("The selected head shape contained {n_in} points, which is "
             "more than recommended ({n_rec}), and was automatically "
             "downsampled to {n_new} points. The preferred way to "
             "downsample is using FastScan.".format(
                 n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new))
    if isinstance(elp, str):
        elp_points = _read_dig_kit(elp)
        if len(elp_points) != 8:
            raise ValueError("File %r should contain 8 points; got shape "
                             "%s." % (elp, elp_points.shape))
        elp = elp_points
    elif len(elp) not in (6, 7, 8):
        raise ValueError("ELP should contain 6 ~ 8 points; got shape "
                         "%s." % (elp.shape,))
    if isinstance(mrk, str):
        mrk = read_mrk(mrk)
    # convert marker coil coordinates from ALS to RAS
    mrk = apply_trans(als_ras_trans, mrk)
    # move digitizer data (fiducials, HPI coils, head shape, EEG) into the
    # Neuromag head frame defined by the first three ELP points
    # (nasion, LPA, RPA)
    nasion, lpa, rpa = elp[:3]
    nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
    elp = apply_trans(nmtrans, elp)
    hsp = apply_trans(nmtrans, hsp)
    eeg = OrderedDict((k, apply_trans(nmtrans, p)) for k, p in eeg.items())
    # device head transform: fit the digitized HPI coil positions (elp[3:])
    # to the marker coil positions measured by the MEG device
    trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out='trans')
    nasion, lpa, rpa = elp[:3]
    elp = elp[3:]
    dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp, dig_ch_pos=eeg)
    dev_head_t = Transform('meg', 'head', trans)
    hpi_results = [dict(dig_points=[
        dict(ident=ci, r=r, kind=FIFF.FIFFV_POINT_HPI,
             coord_frame=FIFF.FIFFV_COORD_UNKNOWN)
        for ci, r in enumerate(mrk)], coord_trans=dev_head_t)]
    return dig_points, dev_head_t, hpi_results
def _read_dig_kit(fname, unit='auto'):
    """Read digitizer points from a file and return an ndarray.

    ``.txt`` files are read as FastSCAN exports; ``.hsp``/``.elp`` as
    Polhemus Isotrak files; ``.mat`` via the custom montage reader.
    """
    from ...channels.montage import (
        read_polhemus_fastscan, read_dig_polhemus_isotrak, read_custom_montage,
        _check_dig_shape)
    assert unit in ('auto', 'm', 'mm')
    ext = op.splitext(fname)[1]
    _check_option('file extension', ext[1:], ('hsp', 'elp', 'mat', 'txt'))
    if ext == '.txt':
        points = read_polhemus_fastscan(
            fname, unit=('mm' if unit == 'auto' else unit),
            on_header_missing='ignore')
    elif ext == '.mat':
        points = np.array([d['r'] for d in read_custom_montage(fname).dig])
    else:  # .hsp or .elp (Polhemus Isotrak)
        montage = read_dig_polhemus_isotrak(
            fname, unit=('m' if unit == 'auto' else unit))
        if fname.endswith('.hsp'):
            # head shape files: drop the cardinal (fiducial) points
            coords = [d['r'] for d in montage.dig
                      if d['kind'] != FIFF.FIFFV_POINT_CARDINAL]
        else:
            coords = [d['r'] for d in montage.dig]
        if coords:
            first = montage.dig[0]
            if first['kind'] == FIFF.FIFFV_POINT_CARDINAL and \
                    first['ident'] == FIFF.FIFFV_POINT_LPA:
                # reorder cardinals from LPA, Nasion, RPA to Nasion, LPA, RPA
                coords[:3] = [coords[1], coords[0], coords[2]]
        points = np.array(coords, float)
    _check_dig_shape(points)
    return points
| bsd-3-clause | 34f0caf27648fab15f6005e6317cf8ff | 33.839623 | 79 | 0.559572 | 3.310623 | false | false | false | false |
mne-tools/mne-python | mne/commands/mne_coreg.py | 8 | 3851 | #!/usr/bin/env python
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
"""Open the coregistration GUI.
Examples
--------
.. code-block:: console
$ mne coreg
"""
import os.path as op
import mne
def run():
    """Parse command line options and launch the coregistration GUI."""
    from mne.commands.utils import get_optparser, _add_verbose_flag
    parser = get_optparser(__file__)
    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
                      default=None, help="Subjects directory")
    parser.add_option("-s", "--subject", dest="subject", default=None,
                      help="Subject name")
    parser.add_option("-f", "--fiff", dest="inst", default=None,
                      help="FIFF file with digitizer data for coregistration")
    parser.add_option("-t", "--tabbed", dest="tabbed", action="store_true",
                      default=False, help="Option for small screens: Combine "
                      "the data source panel and the coregistration panel "
                      "into a single panel with tabs.")
    parser.add_option("--no-guess-mri", dest="guess_mri_subject",
                      action='store_false', default=None,
                      help="Prevent the GUI from automatically guessing and "
                      "changing the MRI subject when a new head shape source "
                      "file is selected.")
    parser.add_option("--head-opacity", type=float, default=None,
                      dest="head_opacity",
                      help="The opacity of the head surface, in the range "
                      "[0, 1].")
    parser.add_option("--high-res-head",
                      action='store_true', default=False, dest="high_res_head",
                      help="Use a high-resolution head surface.")
    parser.add_option("--low-res-head",
                      action='store_true', default=False, dest="low_res_head",
                      help="Use a low-resolution head surface.")
    parser.add_option('--trans', dest='trans', default=None,
                      help='Head<->MRI transform FIF file ("-trans.fif")')
    parser.add_option('--interaction',
                      type=str, default=None, dest='interaction',
                      help='Interaction style to use, can be "trackball" or '
                      '"terrain".')
    parser.add_option('--scale',
                      type=float, default=None, dest='scale',
                      help='Scale factor for the scene.')
    parser.add_option('--simple-rendering', action='store_false',
                      dest='advanced_rendering',
                      help='Use simplified OpenGL rendering')
    _add_verbose_flag(parser)
    options, args = parser.parse_args()
    # --high-res-head and --low-res-head are mutually exclusive; None means
    # "let the GUI decide"
    if options.low_res_head:
        if options.high_res_head:
            raise ValueError("Can't specify --high-res-head and "
                             "--low-res-head at the same time.")
        head_high_res = False
    elif options.high_res_head:
        head_high_res = True
    else:
        head_high_res = None
    # expanduser allows ~ for --subjects-dir
    subjects_dir = options.subjects_dir
    if subjects_dir is not None:
        subjects_dir = op.expanduser(subjects_dir)
    trans = options.trans
    if trans is not None:
        trans = op.expanduser(trans)
    # dump Python tracebacks if the process crashes hard (e.g. a segfault)
    import faulthandler
    faulthandler.enable()
    mne.gui.coregistration(
        options.tabbed, inst=options.inst, subject=options.subject,
        subjects_dir=subjects_dir,
        guess_mri_subject=options.guess_mri_subject,
        head_opacity=options.head_opacity, head_high_res=head_high_res,
        trans=trans, scrollable=True,
        interaction=options.interaction,
        scale=options.scale,
        advanced_rendering=options.advanced_rendering,
        show=True, block=True,
        verbose=options.verbose)


mne.utils.run_command_if_main()
| bsd-3-clause | e267bdc859117abe3edd8f5ee473058e | 38.295918 | 79 | 0.576474 | 4.131974 | false | false | false | false |
mne-tools/mne-python | examples/time_frequency/source_label_time_frequency.py | 11 | 3703 | # -*- coding: utf-8 -*-
"""
.. _ex-source-space-power-phase-locking:
=========================================================
Compute power and phase lock in label of the source space
=========================================================
Compute time-frequency maps of power and phase lock in the source space.
The inverse method is linear based on dSPM inverse operator.
The example also shows the difference in the time-frequency maps
when they are computed with and without subtracting the evoked response
from each epoch. The former results in induced activity only while the
latter also includes evoked (stimulus-locked) activity.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
# %%
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
# %%
# Set parameters
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_raw.fif'
fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = meg_path / 'labels' / f'{label_name}.label'
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
                       stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=reject,
                    preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
freqs = np.arange(7, 30, 2)  # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = freqs / 3.  # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
                                              ['evoked + induced',
                                               'induced only'])):
    # compute the source space power and the inter-trial coherence
    power, itc = source_induced_power(
        this_epochs, inverse_operator, freqs, label, baseline=(-0.1, 0),
        baseline_mode='percent', n_cycles=n_cycles, n_jobs=None)
    power = np.mean(power, axis=0)  # average over sources
    itc = np.mean(itc, axis=0)  # average over sources
    times = epochs.times
    ##########################################################################
    # View time-frequency plots
    # one subplot row per condition: power on the left, ITC on the right
    plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
    plt.subplot(2, 2, 2 * ii + 1)
    plt.imshow(20 * power,
               extent=[times[0], times[-1], freqs[0], freqs[-1]],
               aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')
    plt.title('Power (%s)' % title)
    plt.colorbar()
    plt.subplot(2, 2, 2 * ii + 2)
    plt.imshow(itc,
               extent=[times[0], times[-1], freqs[0], freqs[-1]],
               aspect='auto', origin='lower', vmin=0, vmax=0.7,
               cmap='RdBu_r')
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')
    plt.title('ITC (%s)' % title)
    plt.colorbar()
plt.show()
| bsd-3-clause | af9eb78f0eb0ef2d62a14668afa89abb | 33.607477 | 79 | 0.611396 | 3.342058 | false | false | false | false |
mne-tools/mne-python | mne/viz/misc.py | 5 | 53706 | # -*- coding: utf-8 -*-
"""Functions to make simple plots with M/EEG data."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import copy
import io
from glob import glob
from itertools import cycle
import os.path as op
import warnings
from collections import defaultdict
import numpy as np
from ..defaults import DEFAULTS
from .._freesurfer import (_reorient_image, _read_mri_info, _check_mri,
_mri_orientation)
from ..rank import compute_rank
from ..surface import read_surface
from ..io.constants import FIFF
from ..io.proj import make_projector
from ..io.pick import (_DATA_CH_TYPES_SPLIT, pick_types, pick_info,
pick_channels)
from ..source_space import read_source_spaces, SourceSpaces, _ensure_src
from ..transforms import apply_trans, _frame_to_str
from ..utils import (logger, verbose, warn, _check_option, get_subjects_dir,
_mask_to_onsets_offsets, _pl, _on_missing, fill_doc)
from ..io.pick import _picks_by_type
from ..filter import estimate_ringing_samples
from .utils import (tight_layout, _get_color_list, _prepare_trellis, plt_show,
_figure_agg, _validate_type)
def _index_info_cov(info, cov, exclude):
    # Restrict ``info`` to the channels present in ``cov`` and build, per
    # channel type, the (indices, title, unit, scaling, type) tuples used by
    # the covariance plotting routines.
    if exclude == 'bads':
        exclude = info['bads']
    info = pick_info(info, pick_channels(info['ch_names'], cov['names'],
                                         exclude))
    del exclude
    picks_by_type = dict(_picks_by_type(info, meg_combined=False,
                                        ref_meg=False, exclude=()))
    ch_names = [name for name in cov.ch_names if name in info['ch_names']]
    ch_idx = [cov.ch_names.index(name) for name in ch_names]
    info_ch_names = info['ch_names']
    idx_by_type = defaultdict(list)
    for ch_type, sel in picks_by_type.items():
        idx_by_type[ch_type] = [
            ch_names.index(info_ch_names[c]) for c in sel
            if info_ch_names[c] in ch_names]
    idx_names = []
    for key in _DATA_CH_TYPES_SPLIT:
        if not idx_by_type[key]:
            continue
        idx_names.append((idx_by_type[key],
                          '%s covariance' % DEFAULTS['titles'][key],
                          DEFAULTS['units'][key],
                          DEFAULTS['scalings'][key],
                          key))
    C = cov.data[ch_idx][:, ch_idx]
    return info, C, ch_names, idx_names
@verbose
def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True,
             show=True, verbose=None):
    """Plot Covariance data.

    Parameters
    ----------
    cov : instance of Covariance
        The covariance matrix.
    %(info_not_none)s
    exclude : list of str | str
        List of channels to exclude. If empty do not exclude any channel.
        If 'bads', exclude info['bads'].
    colorbar : bool
        Show colorbar or not.
    proj : bool
        Apply projections or not.
    show_svd : bool
        Plot also singular values of the noise covariance for each sensor
        type. We show square roots ie. standard deviations.
    show : bool
        Show figure if True.
    %(verbose)s

    Returns
    -------
    fig_cov : instance of matplotlib.figure.Figure
        The covariance plot.
    fig_svd : instance of matplotlib.figure.Figure | None
        The SVD spectra plot of the covariance.

    See Also
    --------
    mne.compute_rank

    Notes
    -----
    For each channel type, the rank is estimated using
    :func:`mne.compute_rank`.

    .. versionchanged:: 0.19
        Approximate ranks for each channel type are shown with red dashed lines.
    """
    import matplotlib.pyplot as plt
    from matplotlib.colors import Normalize
    from scipy import linalg
    from ..cov import Covariance
    info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude)
    del cov, exclude
    projs = []
    if proj:
        projs = copy.deepcopy(info['projs'])
        # Activate the projection items
        for p in projs:
            p['active'] = True
        P, ncomp, _ = make_projector(projs, ch_names)
        if ncomp > 0:
            logger.info(' Created an SSP operator (subspace dimension'
                        ' = %d)' % ncomp)
            C = np.dot(P, np.dot(C, P.T))
        else:
            logger.info(' The projection vectors do not apply to these '
                        'channels.')
    # for complex-valued covariances, plot the magnitude of each entry
    if np.iscomplexobj(C):
        C = np.sqrt((C * C.conj()).real)
    # one covariance image per channel type, with a symmetric color scale
    fig_cov, axes = plt.subplots(1, len(idx_names), squeeze=False,
                                 figsize=(3.8 * len(idx_names), 3.7))
    for k, (idx, name, _, _, _) in enumerate(idx_names):
        vlim = np.max(np.abs(C[idx][:, idx]))
        im = axes[0, k].imshow(C[idx][:, idx], interpolation="nearest",
                               norm=Normalize(vmin=-vlim, vmax=vlim),
                               cmap='RdBu_r')
        axes[0, k].set(title=name)
        if colorbar:
            from mpl_toolkits.axes_grid1 import make_axes_locatable
            divider = make_axes_locatable(axes[0, k])
            cax = divider.append_axes("right", size="5.5%", pad=0.05)
            cax.grid(False)  # avoid mpl warning about auto-removal
            plt.colorbar(im, cax=cax, format='%.0e')
    fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
    tight_layout(fig=fig_cov)
    fig_svd = None
    if show_svd:
        fig_svd, axes = plt.subplots(1, len(idx_names), squeeze=False,
                                     figsize=(3.8 * len(idx_names), 3.7))
        for k, (idx, name, unit, scaling, key) in enumerate(idx_names):
            this_C = C[idx][:, idx]
            s = linalg.svd(this_C, compute_uv=False)
            this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx],
                                [], [], 0)
            this_info = pick_info(info, idx)
            with this_info._unlock():
                this_info['projs'] = []
            this_rank = compute_rank(this_C, info=this_info)
            # Protect against true zero singular values
            s[s <= 0] = 1e-10 * s[s > 0].min()
            # show standard deviations, i.e. square roots of the eigenvalues
            s = np.sqrt(s) * scaling
            axes[0, k].plot(s, color='k', zorder=3)
            this_rank = this_rank[key]
            # mark the estimated rank with a dashed red line and a label
            axes[0, k].axvline(this_rank - 1, ls='--', color='r',
                               alpha=0.5, zorder=4, clip_on=False)
            axes[0, k].text(this_rank - 1, axes[0, k].get_ylim()[1],
                            'rank ≈ %d' % (this_rank,), ha='right', va='top',
                            color='r', alpha=0.5, zorder=4)
            axes[0, k].set(ylabel=u'Noise σ (%s)' % unit, yscale='log',
                           xlabel='Eigenvalue index', title=name,
                           xlim=[0, len(s) - 1])
        tight_layout(fig=fig_svd)
    plt_show(show)
    return fig_cov, fig_svd
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
                            source_index=None, colorbar=False, show=True):
    """Plot source power in time-frequency grid.

    Parameters
    ----------
    stcs : list of SourceEstimate
        Source power for consecutive time windows, one SourceEstimate object
        should be provided for each frequency bin.
    freq_bins : list of tuples of float
        Start and end points of frequency bins of interest.
    tmin : float
        Minimum time instant to show.
    tmax : float
        Maximum time instant to show.
    source_index : int | None
        Index of source for which the spectrogram will be plotted. If None,
        the source with the largest activation will be selected.
    colorbar : bool
        If true, a colorbar will be added to the plot.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of Figure
        The figure.
    """
    import matplotlib.pyplot as plt
    # Input checks
    if len(stcs) == 0:
        raise ValueError('cannot plot spectrogram if len(stcs) == 0')
    stc = stcs[0]
    if tmin is not None and tmin < stc.times[0]:
        raise ValueError('tmin cannot be smaller than the first time point '
                         'provided in stcs')
    if tmax is not None and tmax > stc.times[-1] + stc.tstep:
        raise ValueError('tmax cannot be larger than the sum of the last time '
                         'point and the time step, which are provided in stcs')
    # Preparing time-frequency cell boundaries for plotting
    if tmin is None:
        tmin = stc.times[0]
    if tmax is None:
        tmax = stc.times[-1] + stc.tstep
    time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
    freq_bounds = sorted(set(np.ravel(freq_bins)))
    freq_ticks = copy.deepcopy(freq_bounds)
    # Reject time points that will not be plotted and gather results
    source_power = []
    for stc in stcs:
        stc = stc.copy()  # copy since crop modifies inplace
        stc.crop(tmin, tmax - stc.tstep)
        source_power.append(stc.data)
    source_power = np.array(source_power)
    # Finding the source with maximum source power
    if source_index is None:
        source_index = np.unravel_index(source_power.argmax(),
                                        source_power.shape)[1]
    # If there is a gap in the frequency bins record its locations so that it
    # can be covered with a gray horizontal bar
    gap_bounds = []
    for i in range(len(freq_bins) - 1):
        lower_bound = freq_bins[i][1]
        upper_bound = freq_bins[i + 1][0]
        if lower_bound != upper_bound:
            freq_bounds.remove(lower_bound)
            gap_bounds.append((lower_bound, upper_bound))
    # Preparing time-frequency grid for plotting
    time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
    # Plotting the results
    fig = plt.figure(figsize=(9, 6))
    plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
               cmap='Reds')
    ax = plt.gca()
    ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')
    time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
    # thin out the x tick labels so at most ~10 remain visible
    n_skip = 1 + len(time_bounds) // 10
    for i in range(len(time_bounds)):
        if i % n_skip != 0:
            time_tick_labels[i] = ''
    ax.set_xticks(time_bounds)
    ax.set_xticklabels(time_tick_labels)
    plt.xlim(time_bounds[0], time_bounds[-1])
    plt.yscale('log')
    ax.set_yticks(freq_ticks)
    ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
    plt.ylim(freq_bounds[0], freq_bounds[-1])
    plt.grid(True, ls='-')
    if colorbar:
        plt.colorbar()
    tight_layout(fig=fig)
    # Covering frequency gaps with horizontal bars
    for lower_bound, upper_bound in gap_bounds:
        plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
                 lower_bound, time_bounds[0], color='#666666')
    plt_show(show)
    return fig
def _plot_mri_contours(*, mri_fname, surfaces, src, orientation='coronal',
                       slices=None, show=True, show_indices=False,
                       show_orientation=False, width=512,
                       slices_as_subplots=True):
    """Plot BEM contours on anatomical MRI slices.

    Parameters
    ----------
    mri_fname : path-like
        The MRI volume to slice.
    surfaces : list of (path-like, color)
        Surface files and the colors to draw their contours with.
    src : instance of SourceSpaces | None
        If given, in-use source points are scattered on their nearest slice.
    orientation : str
        'coronal' | 'axial' | 'sagittal'.
    slices : array-like of int | None
        Slice indices to plot; chosen automatically when ``None``.
    show : bool
        Show the figure (only meaningful with ``slices_as_subplots=True``).
    show_indices : bool
        Annotate each slice with its index.
    show_orientation : bool | str
        Label slice edges with anatomical directions; ``'always'`` labels
        every subplot instead of only the outer ones.
    width : int
        Approximate pixel width of each per-slice image when
        ``slices_as_subplots=False``.
    slices_as_subplots : bool
        Whether to add all slices as subplots to a single figure, or to
        create a new figure for each slice. If ``False``, return NumPy
        arrays instead of Matplotlib figures.

    Returns
    -------
    matplotlib.figure.Figure | list of array
        The plotted slices.
    """
    import matplotlib.pyplot as plt
    from matplotlib import patheffects
    # For ease of plotting, we will do everything in voxel coordinates.
    _validate_type(show_orientation, (bool, str), 'show_orientation')
    if isinstance(show_orientation, str):
        _check_option('show_orientation', show_orientation, ('always',),
                      extra='when str')
    _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
    # Load the T1 data
    _, _, _, _, _, nim = _read_mri_info(
        mri_fname, units='mm', return_img=True)
    data, rasvox_mri_t = _reorient_image(nim)
    # Inverse transform: MRI coords (mm) -> voxel indices.
    mri_rasvox_t = np.linalg.inv(rasvox_mri_t)
    # axis: the dimension we slice along; x/y: the in-plane dimensions.
    axis, x, y = _mri_orientation(orientation)
    n_slices = data.shape[axis]
    # if no slices were specified, pick some equally-spaced ones automatically
    if slices is None:
        slices = np.round(
            np.linspace(
                start=0,
                stop=n_slices - 1,
                num=14
            )
        ).astype(int)
        # omit first and last one (not much brain visible there anyway…)
        slices = slices[1:-1]
    slices = np.atleast_1d(slices).copy()
    slices[slices < 0] += n_slices  # allow negative indexing
    # Validate: sorted, 1D, unique, in-range integer indices.
    if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \
            slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \
            slices.dtype.kind not in 'iu':
        raise ValueError('slices must be a sorted 1D array of int with unique '
                         'elements, at least one element, and no elements '
                         'greater than %d, got %s' % (n_slices - 1, slices))
    # create of list of surfaces
    surfs = list()
    for file_name, color in surfaces:
        surf = dict()
        surf['rr'], surf['tris'] = read_surface(file_name)
        # move surface to voxel coordinate system
        surf['rr'] = apply_trans(mri_rasvox_t, surf['rr'])
        surfs.append((surf, color))
    # Collect in-use source points (if any), also in voxel coords.
    sources = list()
    if src is not None:
        _ensure_src(src, extra=' or None')
        # Eventually we can relax this by allowing ``trans`` if need be
        if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:
            raise ValueError(
                'Source space must be in MRI coordinates, got '
                f'{_frame_to_str[src[0]["coord_frame"]]}')
        for src_ in src:
            points = src_['rr'][src_['inuse'].astype(bool)]
            # source positions are stored in meters; convert to mm first
            sources.append(apply_trans(mri_rasvox_t, points * 1e3))
        sources = np.concatenate(sources, axis=0)
    # get the figure dimensions right
    if slices_as_subplots:
        n_col = 4
        fig, axs, _, _ = _prepare_trellis(len(slices), n_col)
        fig.set_facecolor('k')
        dpi = fig.get_dpi()
        n_axes = len(axs)
    else:
        n_col = n_axes = 1
        dpi = 96
        # 2x standard MRI resolution is probably good enough for the
        # traces
        w = width / dpi
        figsize = (w, w / data.shape[x] * data.shape[y])
    # Midpoints between consecutive slices: each source point is assigned
    # to the slice whose bin [lower, upper) contains it.
    bounds = np.concatenate(
        [[-np.inf], slices[:-1] + np.diff(slices) / 2.,
         [np.inf]]
    )  # float
    slicer = [slice(None)] * 3
    ori_labels = dict(R='LR', A='PA', S='IS')
    xlabels, ylabels = ori_labels['RAS'[x]], ori_labels['RAS'[y]]
    path_effects = [patheffects.withStroke(linewidth=4, foreground="k",
                                           alpha=0.75)]
    figs = []
    for ai, (sl, lower, upper) in enumerate(
        zip(slices, bounds[:-1], bounds[1:])
    ):
        if slices_as_subplots:
            ax = axs[ai]
        else:
            # One standalone (Agg) figure per slice, rendered off-screen.
            fig = _figure_agg(figsize=figsize, dpi=dpi, facecolor='k')
            ax = fig.add_axes([0, 0, 1, 1], frame_on=False, facecolor='k')
        # adjust the orientations for good view
        slicer[axis] = sl
        dat = data[tuple(slicer)].T
        # First plot the anatomical data
        ax.imshow(dat, cmap=plt.cm.gray, origin='lower')
        ax.set_autoscale_on(False)
        ax.axis('off')
        ax.set_aspect('equal')  # XXX eventually could deal with zooms
        # and then plot the contours on top
        for surf, color in surfs:
            with warnings.catch_warnings(record=True):  # ignore contour warn
                warnings.simplefilter('ignore')
                # intersect the surface with the slice plane via tricontour
                ax.tricontour(surf['rr'][:, x], surf['rr'][:, y],
                              surf['tris'], surf['rr'][:, axis],
                              levels=[sl], colors=color, linewidths=1.0,
                              zorder=1)
        if len(sources):
            # show sources whose position along `axis` falls in this bin
            in_slice = (sources[:, axis] >= lower) & (sources[:, axis] < upper)
            ax.scatter(sources[in_slice, x], sources[in_slice, y],
                       marker='.', color='#FF00FF', s=1, zorder=2)
        if show_indices:
            ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl),
                    color='w', fontsize='x-small', va='bottom', ha='left')
        # label the axes
        kwargs = dict(
            color='#66CCEE', fontsize='medium', path_effects=path_effects,
            family='monospace', clip_on=False, zorder=5, weight='bold')
        always = (show_orientation == 'always')
        if show_orientation:
            # Only the outermost subplots get edge labels unless 'always'.
            if ai % n_col == 0 or always:  # left
                ax.text(0, dat.shape[0] / 2., xlabels[0],
                        va='center', ha='left', **kwargs)
            if ai % n_col == n_col - 1 or ai == n_axes - 1 or always:  # right
                ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1],
                        va='center', ha='right', **kwargs)
            if ai >= n_axes - n_col or always:  # bottom
                ax.text(dat.shape[1] / 2., 0, ylabels[0],
                        ha='center', va='bottom', **kwargs)
            if ai < n_col or n_col == 1 or always:  # top
                ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1],
                        ha='center', va='top', **kwargs)
        if not slices_as_subplots:
            # convert to NumPy array
            with io.BytesIO() as buff:
                fig.savefig(
                    buff, format='raw', bbox_inches='tight', pad_inches=0,
                    dpi=dpi
                )
                w_, h_ = fig.canvas.get_width_height()
                plt.close(fig)
                buff.seek(0)
                # 'raw' format is RGBA bytes; reshape to (h, w, 4)
                fig_array = np.frombuffer(buff.getvalue(), dtype=np.uint8)
                fig = fig_array.reshape((int(h_), int(w_), -1))
                figs.append(fig)
    if slices_as_subplots:
        fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
                            hspace=0.)
        plt_show(show, fig=fig)
        return fig
    else:
        return figs
@fill_doc
def plot_bem(subject, subjects_dir=None, orientation='coronal',
             slices=None, brain_surfaces=None, src=None, show=True,
             show_indices=True, mri='T1.mgz', show_orientation=True):
    """Plot BEM contours on anatomical MRI slices.
    Parameters
    ----------
    %(subject)s
    %(subjects_dir)s
    orientation : str
        'coronal' or 'axial' or 'sagittal'.
    slices : list of int | None
        The indices of the MRI slices to plot. If ``None``, automatically
        pick 12 equally-spaced slices.
    brain_surfaces : None | str | list of str
        One or more brain surface to plot (optional). Entries should correspond
        to files in the subject's ``surf`` directory (e.g. ``"white"``).
    src : None | SourceSpaces | str
        SourceSpaces instance or path to a source space to plot individual
        sources as scatter-plot. Sources will be shown on exactly one slice
        (whichever slice is closest to each source in the given orientation
        plane). Path can be absolute or relative to the subject's ``bem``
        folder.
        .. versionchanged:: 0.20
           All sources are shown on the nearest slice rather than some
           being omitted.
    show : bool
        Show figure if True.
    show_indices : bool
        Show slice indices if True.
        .. versionadded:: 0.20
    mri : str
        The name of the MRI to use. Can be a standard FreeSurfer MRI such as
        ``'T1.mgz'``, or a full path to a custom MRI file.
        .. versionadded:: 0.21
    show_orientation : bool | str
        Show the orientation (L/R, P/A, I/S) of the data slices.
        True (default) will only show it on the outside most edges of the
        figure, False will never show labels, and "always" will label each
        plot.
        .. versionadded:: 0.21
        .. versionchanged:: 0.24
           Added support for "always".
    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure.
    See Also
    --------
    mne.viz.plot_alignment
    Notes
    -----
    Images are plotted in MRI voxel coordinates.
    If ``src`` is not None, for a given slice index, all source points are
    shown that are halfway between the previous slice and the given slice,
    and halfway between the given slice and the next slice.
    For large slice decimations, this can
    make some source points appear outside the BEM contour, which is shown
    for the given slice index. For example, in the case where the single
    midpoint slice is used ``slices=[128]``, all source points will be shown
    on top of the midpoint MRI slice with the BEM boundary drawn for that
    slice.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    mri_fname = _check_mri(mri, subject, subjects_dir)
    # Locate the subject's BEM directory and gather the BEM surface files.
    bem_path = op.join(subjects_dir, subject, 'bem')
    if not op.isdir(bem_path):
        raise IOError(f'Subject bem directory "{bem_path}" does not exist')
    surfaces = _get_bem_plotting_surfaces(bem_path)
    # Optionally append brain surfaces (one file per hemisphere, in green).
    if brain_surfaces is not None:
        surf_names = ((brain_surfaces,) if isinstance(brain_surfaces, str)
                      else brain_surfaces)
        for surf_name in surf_names:
            for hemi in ('lh', 'rh'):
                surf_fname = op.join(subjects_dir, subject, 'surf',
                                     hemi + '.' + surf_name)
                if not op.exists(surf_fname):
                    raise IOError("Surface %s does not exist." % surf_fname)
                surfaces.append((surf_fname, '#00DD00'))
    # Resolve ``src``: a path (absolute or bem-relative) is loaded from disk.
    if isinstance(src, str):
        src_path = src
        if not op.exists(src_path):
            src_path = op.join(subjects_dir, subject, 'bem', src)
            if not op.exists(src_path):
                raise IOError("%s does not exist" % src)
        src = read_source_spaces(src_path)
    elif src is not None and not isinstance(src, SourceSpaces):
        raise TypeError("src needs to be None, str or SourceSpaces instance, "
                        "not %s" % repr(src))
    if not surfaces:
        raise IOError('No surface files found. Surface files must end with '
                      'inner_skull.surf, outer_skull.surf or outer_skin.surf')
    # Plot the contours
    fig = _plot_mri_contours(
        mri_fname=mri_fname, surfaces=surfaces, src=src,
        orientation=orientation, slices=slices, show=show,
        show_indices=show_indices, show_orientation=show_orientation,
        slices_as_subplots=True
    )
    return fig
def _get_bem_plotting_surfaces(bem_path):
    """Collect the BEM surface files present in ``bem_path``.

    Looks for inner-skull, outer-skull, and outer-skin surfaces and pairs
    each found file with its fixed plotting color.
    """
    patterns = (('*inner_skull', '#FF0000'),
                ('*outer_skull', '#FFFF00'),
                ('*outer_skin', '#FFAA80'))
    found = list()
    for pattern, color in patterns:
        matches = glob(op.join(bem_path, pattern + '.surf'))
        if matches:
            # Use the first match (same choice as before for multiple hits).
            fname = matches[0]
            logger.info("Using surface: %s" % fname)
            found.append((fname, color))
    return found
@verbose
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
                axes=None, equal_spacing=True, show=True, on_missing='raise',
                verbose=None):
    """Plot :term:`events` to get a visual display of the paradigm.
    Parameters
    ----------
    %(events)s
    sfreq : float | None
        The sample frequency. If None, data will be displayed in samples (not
        seconds).
    first_samp : int
        The index of the first sample. Recordings made on Neuromag systems
        number samples relative to the system start (not relative to the
        beginning of the recording). In such cases the ``raw.first_samp``
        attribute can be passed here. Default is 0.
    color : dict | None
        Dictionary of event_id integers as keys and colors as values. If None,
        colors are automatically drawn from a default list (cycled through if
        number of events longer than list of default colors). Color can be any
        valid :doc:`matplotlib color <matplotlib:tutorials/colors/colors>`.
    event_id : dict | None
        Dictionary of event labels (e.g. 'aud_l') as keys and their associated
        event_id values. Labels are used to plot a legend. If None, no legend
        is drawn.
    axes : instance of Axes
        The subplot handle.
    equal_spacing : bool
        Use equal spacing between events in y-axis.
    show : bool
        Show figure if True.
    %(on_missing_events)s
    %(verbose)s
    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object containing the plot.
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # With no sampling rate, plot the x axis in raw sample numbers.
    if sfreq is None:
        sfreq = 1.0
        xlabel = 'Samples'
    else:
        xlabel = 'Time (s)'
    events = np.asarray(events)
    if len(events) == 0:
        raise ValueError('No events in events array, cannot plot.')
    unique_events = np.unique(events[:, 2])
    if event_id is not None:
        # get labels and unique event ids from event_id dict,
        # sorted by value
        event_id_rev = {v: k for k, v in event_id.items()}
        conditions, unique_events_id = zip(*sorted(event_id.items(),
                                                   key=lambda x: x[1]))
        # Drop event_id entries that never occur in ``events`` (behavior on
        # such a mismatch is controlled by ``on_missing``).
        keep = np.ones(len(unique_events_id), bool)
        for ii, this_event in enumerate(unique_events_id):
            if this_event not in unique_events:
                msg = f'{this_event} from event_id is not present in events.'
                _on_missing(on_missing, msg)
                keep[ii] = False
        # ``conditions`` is kept in step with the ids; only the ids are used
        # below (labels come from event_id_rev).
        conditions = [cond for cond, k in zip(conditions, keep) if k]
        unique_events_id = [id_ for id_, k in zip(unique_events_id, keep) if k]
        if len(unique_events_id) == 0:
            raise RuntimeError('No usable event IDs found')
        # Conversely, warn about events present in the data but absent from
        # event_id: those will simply not be plotted.
        for this_event in unique_events:
            if this_event not in unique_events_id:
                warn('event %s missing from event_id will be ignored'
                     % this_event)
    else:
        unique_events_id = unique_events
    # Resolve per-event colors (user-specified merged over defaults).
    color = _handle_event_colors(color, unique_events, event_id)
    import matplotlib.pyplot as plt
    fig = None
    if axes is None:
        fig = plt.figure()
    ax = axes if axes else plt.gca()
    unique_events_id = np.array(unique_events_id)
    min_event = np.min(unique_events_id)
    max_event = np.max(unique_events_id)
    # Right edge of the x axis: latest plotted event, in seconds (or samples).
    max_x = (events[np.in1d(events[:, 2], unique_events_id), 0].max() -
             first_samp) / sfreq
    handles, labels = list(), list()
    for idx, ev in enumerate(unique_events_id):
        ev_mask = events[:, 2] == ev
        count = ev_mask.sum()
        if count == 0:
            continue
        # y position: ordinal row when equally spaced, else the raw event id.
        y = np.full(count, idx + 1 if equal_spacing else events[ev_mask, 2][0])
        if event_id is not None:
            event_label = '%s (%s)' % (event_id_rev[ev], count)
        else:
            event_label = 'N=%d' % (count,)
        labels.append(event_label)
        kwargs = {}
        if ev in color:
            kwargs['color'] = color[ev]
        handles.append(
            ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
                    y, '.', clip_on=False, **kwargs)[0])
    if equal_spacing:
        ax.set_ylim(0, unique_events_id.size + 1)
        ax.set_yticks(1 + np.arange(unique_events_id.size))
        ax.set_yticklabels(unique_events_id)
    else:
        ax.set_ylim([min_event - 1, max_event + 1])
    ax.set(xlabel=xlabel, ylabel='Event id', xlim=[0, max_x])
    ax.grid(True)
    fig = fig if fig is not None else plt.gcf()
    # reverse order so that the highest numbers are at the top
    # (match plot order)
    handles, labels = handles[::-1], labels[::-1]
    # Shrink the axes to make room for the legend on the right.
    box = ax.get_position()
    factor = 0.8 if event_id is not None else 0.9
    ax.set_position([box.x0, box.y0, box.width * factor, box.height])
    ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5),
              fontsize='small')
    fig.canvas.draw()
    plt_show(show)
    return fig
def _get_presser(fig):
"""Get our press callback."""
callbacks = fig.canvas.callbacks.callbacks['button_press_event']
func = None
for key, val in callbacks.items():
func = val()
if func.__class__.__name__ == 'partial':
break
else:
func = None
assert func is not None
return func
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
    """Plot the amplitude traces of a set of dipoles.
    Parameters
    ----------
    dipoles : list of instance of Dipole
        The dipoles whose amplitudes should be shown.
    colors : list of color | None
        Color to plot with each dipole. If None default colors are used.
    show : bool
        Show figure if True.
    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object containing the plot.
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    import matplotlib.pyplot as plt
    if colors is None:
        # Cycle through the default palette if there are many dipoles.
        colors = cycle(_get_color_list())
    fig, ax = plt.subplots(1, 1)
    tmin, tmax = np.inf, -np.inf
    for dip, color in zip(dipoles, colors):
        # Amplitudes are stored in Am; display in nAm.
        ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5)
        # Track the overall time span across all dipoles.
        tmin = min(tmin, dip.times[0])
        tmax = max(tmax, dip.times[-1])
    ax.set(xlim=[tmin, tmax], xlabel='Time (s)', ylabel='Amplitude (nAm)')
    if show:
        fig.show(warn=False)
    return fig
def adjust_axes(axes, remove_spines=('top', 'right'), grid=True):
    """Adjust some properties of axes.

    Parameters
    ----------
    axes : list
        List of axes to process (a single axes object is also accepted).
    remove_spines : list of str
        Which axis spines to remove.
    grid : bool
        Turn grid on (True) or off (False).
    """
    # Accept a bare axes object by wrapping it in a list.
    if not isinstance(axes, (list, tuple, np.ndarray)):
        axes = [axes]
    for ax in axes:
        if grid:
            ax.grid(zorder=0)
        for spine in remove_spines:
            ax.spines[spine].set_visible(False)
def _filter_ticks(lims, fscale):
"""Create approximately spaced ticks between lims."""
if fscale == 'linear':
return None, None # let matplotlib handle it
lims = np.array(lims)
ticks = list()
if lims[1] > 20 * lims[0]:
base = np.array([1, 2, 4])
else:
base = np.arange(1, 11)
for exp in range(int(np.floor(np.log10(lims[0]))),
int(np.floor(np.log10(lims[1]))) + 1):
ticks += (base * (10 ** exp)).tolist()
ticks = np.array(ticks)
ticks = ticks[(ticks >= lims[0]) & (ticks <= lims[1])]
ticklabels = [('%g' if t < 1 else '%d') % t for t in ticks]
return ticks, ticklabels
def _get_flim(flim, fscale, freq, sfreq=None):
"""Get reasonable frequency limits."""
if flim is None:
if freq is None:
flim = [0.1 if fscale == 'log' else 0., sfreq / 2.]
else:
if fscale == 'linear':
flim = [freq[0]]
else:
flim = [freq[0] if freq[0] > 0 else 0.1 * freq[1]]
flim += [freq[-1]]
if fscale == 'log':
if flim[0] <= 0:
raise ValueError('flim[0] must be positive, got %s' % flim[0])
elif flim[0] < 0:
raise ValueError('flim[0] must be non-negative, got %s' % flim[0])
return flim
def _check_fscale(fscale):
"""Check for valid fscale."""
if not isinstance(fscale, str) or fscale not in ('log', 'linear'):
raise ValueError('fscale must be "log" or "linear", got %s'
% (fscale,))
_DEFAULT_ALIM = (-80, 10)
def plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4',
                flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True,
                compensate=False, plot=('time', 'magnitude', 'delay'),
                axes=None, *, dlim=None):
    """Plot properties of a filter.
    Parameters
    ----------
    h : dict or ndarray
        An IIR dict or 1D ndarray of coefficients (for FIR filter).
    sfreq : float
        Sample rate of the data (Hz).
    freq : array-like or None
        The ideal response frequencies to plot (must be in ascending order).
        If None (default), do not plot the ideal response.
    gain : array-like or None
        The ideal response gains to plot.
        If None (default), do not plot the ideal response.
    title : str | None
        The title to use. If None (default), determine the title based
        on the type of the system.
    color : color object
        The color to use (default '#1f77b4').
    flim : tuple or None
        If not None, the x-axis frequency limits (Hz) to use.
        If None, freq will be used. If None (default) and freq is None,
        ``(0.1, sfreq / 2.)`` will be used.
    fscale : str
        Frequency scaling to use, can be "log" (default) or "linear".
    alim : tuple
        The y-axis amplitude limits (dB) to use (default: (-80, 10)).
    show : bool
        Show figure if True (default).
    compensate : bool
        If True, compensate for the filter delay (phase will not be shown).
        - For linear-phase FIR filters, this visualizes the filter coefficients
          assuming that the output will be shifted by ``N // 2``.
        - For IIR filters, this changes the filter coefficient display
          by filtering backward and forward, and the frequency response
          by squaring it.
        .. versionadded:: 0.18
    plot : list | tuple | str
        A list of the requested plots from ``time``, ``magnitude`` and
        ``delay``. Default is to plot all three filter properties
        ('time', 'magnitude', 'delay').
        .. versionadded:: 0.21.0
    axes : instance of Axes | list | None
        The axes to plot to. If list, the list must be a list of Axes of
        the same length as the number of requested plot types. If instance of
        Axes, there must be only one filter property plotted.
        Defaults to ``None``.
        .. versionadded:: 0.21.0
    dlim : None | tuple
        The y-axis delay limits (sec) to use (default:
        ``(-tmax / 2., tmax / 2.)``).
        .. versionadded:: 1.1.0
    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure containing the plots.
    See Also
    --------
    mne.filter.create_filter
    plot_ideal_filter
    Notes
    -----
    .. versionadded:: 0.14
    """
    from scipy.signal import (
        freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt)
    import matplotlib.pyplot as plt
    sfreq = float(sfreq)
    _check_option('fscale', fscale, ['log', 'linear'])
    # Accept a single plot name as shorthand for a one-element list.
    if isinstance(plot, str):
        plot = [plot]
    for xi, x in enumerate(plot):
        _check_option('plot[%d]' % xi, x, ('magnitude', 'delay', 'time'))
    flim = _get_flim(flim, fscale, freq, sfreq)
    # Frequency evaluation grid: log- or lin-spaced across flim.
    if fscale == 'log':
        omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)
    else:
        omega = np.linspace(flim[0], flim[1], 1000)
    xticks, xticklabels = _filter_ticks(flim, fscale)
    # Convert Hz to normalized angular frequency (rad/sample).
    omega /= sfreq / (2 * np.pi)
    if isinstance(h, dict):  # IIR filter given as dict ('sos' or 'b'/'a')
        if 'sos' in h:  # second-order sections
            H = np.ones(len(omega), np.complex128)
            gd = np.zeros(len(omega))
            # Accumulate response (and group delay) section by section.
            for section in h['sos']:
                this_H = freqz(section[:3], section[3:], omega)[1]
                H *= this_H
                if compensate:
                    H *= this_H.conj()  # time reversal is freq conj
                else:
                    # Assume the forward-backward delay zeros out, which it
                    # mostly should
                    with warnings.catch_warnings(record=True):  # singular GD
                        warnings.simplefilter('ignore')
                        gd += group_delay((section[:3], section[3:]), omega)[1]
            # Replace ``h`` with the impulse response for the time plot.
            n = estimate_ringing_samples(h['sos'])
            delta = np.zeros(n)
            delta[0] = 1
            if compensate:
                delta = np.pad(delta, [(n - 1, 0)], 'constant')
                func = sosfiltfilt
                gd += (len(delta) - 1) // 2
            else:
                func = sosfilt
            h = func(h['sos'], delta)
        else:
            H = freqz(h['b'], h['a'], omega)[1]
            if compensate:
                H *= H.conj()
            with warnings.catch_warnings(record=True):  # singular GD
                warnings.simplefilter('ignore')
                gd = group_delay((h['b'], h['a']), omega)[1]
                if compensate:
                    gd += group_delay((h['b'].conj(), h['a'].conj()), omega)[1]
            # Impulse response for the time plot.
            n = estimate_ringing_samples((h['b'], h['a']))
            delta = np.zeros(n)
            delta[0] = 1
            if compensate:
                delta = np.pad(delta, [(n - 1, 0)], 'constant')
                func = filtfilt
            else:
                func = lfilter
            h = func(h['b'], h['a'], delta)
        if title is None:
            title = 'SOS (IIR) filter'
        if compensate:
            title += ' (forward-backward)'
    else:
        # FIR: coefficients are the impulse response already.
        H = freqz(h, worN=omega)[1]
        with warnings.catch_warnings(record=True):  # singular GD
            warnings.simplefilter('ignore')
            gd = group_delay((h, [1.]), omega)[1]
        title = 'FIR filter' if title is None else title
        if compensate:
            title += ' (delay-compensated)'
    fig = None
    if axes is None:
        fig, axes = plt.subplots(len(plot), 1)
    if isinstance(axes, plt.Axes):
        axes = [axes]
    elif isinstance(axes, np.ndarray):
        axes = list(axes)
    if fig is None:
        fig = axes[0].get_figure()
    if len(axes) != len(plot):
        raise ValueError('Length of axes (%d) must be the same as number of '
                         'requested filter properties (%d)'
                         % (len(axes), len(plot)))
    t = np.arange(len(h))
    if dlim is None:
        # NOTE(review): ``t`` is still in samples here (it is divided by
        # sfreq below), so the default dlim appears to be in samples while
        # the delay axis is labeled in seconds — confirm intended units.
        dlim = np.abs(t).max() / 2.
        dlim = [-dlim, dlim]
    if compensate:
        # Center the impulse response around 0 for a compensated filter.
        n_shift = (len(h) - 1) // 2
        t -= n_shift
        assert t[0] == -t[-1]
        gd -= n_shift
    # Convert sample units to seconds / Hz for plotting.
    t = t / sfreq
    gd = gd / sfreq
    f = omega * sfreq / (2 * np.pi)
    # On a log axis, skip the first sample (often 0 Hz).
    sl = slice(0 if fscale == 'linear' else 1, None, None)
    # Power in dB, floored to avoid log10(0).
    mag = 10 * np.log10(np.maximum((H * H.conj()).real, 1e-20))
    if 'time' in plot:
        ax_time_idx = np.where([p == 'time' for p in plot])[0][0]
        axes[ax_time_idx].plot(t, h, color=color)
        axes[ax_time_idx].set(xlim=t[[0, -1]], xlabel='Time (s)',
                              ylabel='Amplitude', title=title)
    # Magnitude
    if 'magnitude' in plot:
        ax_mag_idx = np.where([p == 'magnitude' for p in plot])[0][0]
        axes[ax_mag_idx].plot(f[sl], mag[sl], color=color,
                              linewidth=2, zorder=4)
        if freq is not None and gain is not None:
            plot_ideal_filter(freq, gain, axes[ax_mag_idx],
                              fscale=fscale, show=False)
        axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)
        if xticks is not None:
            axes[ax_mag_idx].set(xticks=xticks)
            axes[ax_mag_idx].set(xticklabels=xticklabels)
        axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)',
                             ylabel='Amplitude (dB)')
    # Delay
    if 'delay' in plot:
        ax_delay_idx = np.where([p == 'delay' for p in plot])[0][0]
        axes[ax_delay_idx].plot(f[sl], gd[sl], color=color,
                                linewidth=2, zorder=4)
        # shade nulled regions
        for start, stop in zip(*_mask_to_onsets_offsets(mag <= -39.9)):
            axes[ax_delay_idx].axvspan(f[start], f[stop - 1],
                                       facecolor='k', alpha=0.05,
                                       zorder=5)
        axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)',
                               xlabel='Frequency (Hz)',
                               xscale=fscale)
        if xticks is not None:
            axes[ax_delay_idx].set(xticks=xticks)
            axes[ax_delay_idx].set(xticklabels=xticklabels)
        axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)',
                               ylabel='Delay (s)')
    adjust_axes(axes)
    tight_layout()
    plt_show(show)
    return fig
def plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log',
                      alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--',
                      show=True):
    """Plot an ideal filter response.
    Parameters
    ----------
    freq : array-like
        The ideal response frequencies to plot (must be in ascending order).
    gain : array-like or None
        The ideal response gains to plot.
    axes : instance of Axes | None
        The subplot handle. With None (default), axes are created.
    title : str
        The title to use, (default: '').
    flim : tuple or None
        If not None, the x-axis frequency limits (Hz) to use.
        If None (default), freq used.
    fscale : str
        Frequency scaling to use, can be "log" (default) or "linear".
    alim : tuple
        If not None (default), the y-axis limits (dB) to use.
    color : color object
        The color to use (default: 'r').
    alpha : float
        The alpha to use (default: 0.5).
    linestyle : str
        The line style to use (default: '--').
    show : bool
        Show figure if True (default).
    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure.
    See Also
    --------
    plot_filter
    Notes
    -----
    .. versionadded:: 0.14
    Examples
    --------
    Plot a simple ideal band-pass filter::
        >>> from mne.viz import plot_ideal_filter
        >>> freq = [0, 1, 40, 50]
        >>> gain = [0, 1, 1, 0]
        >>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +SKIP
        <...Figure...>
    """
    import matplotlib.pyplot as plt
    my_freq, my_gain = list(), list()
    if freq[0] != 0:
        raise ValueError('freq should start with DC (zero) and end with '
                         'Nyquist, but got %s for DC' % (freq[0],))
    freq = np.array(freq)
    # deal with semilogx problems @ x=0
    _check_option('fscale', fscale, ['log', 'linear'])
    if fscale == 'log':
        freq[0] = 0.1 * freq[1] if flim is None else min(flim[0], freq[1])
    flim = _get_flim(flim, fscale, freq)
    # Densify transition bands so they render as sloped segments; constant
    # bands keep their endpoints only.
    transitions = list()
    for ii in range(len(freq)):
        if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:
            transitions += [[freq[ii], freq[ii + 1]]]
            my_freq += np.linspace(freq[ii], freq[ii + 1], 20,
                                   endpoint=False).tolist()
            my_gain += np.linspace(gain[ii], gain[ii + 1], 20,
                                   endpoint=False).tolist()
        else:
            my_freq.append(freq[ii])
            my_gain.append(gain[ii])
    # Convert gains to dB, floored at the bottom of the amplitude limits.
    my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.)))
    if axes is None:
        axes = plt.subplots(1)[1]
    # Highlight the transition bands with a light vertical span.
    for transition in transitions:
        axes.axvspan(*transition, color=color, alpha=0.1)
    # BUGFIX: pass the user-supplied ``alpha`` through to the line plot
    # (previously hard-coded to 0.5, silently ignoring the parameter).
    axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=alpha,
              linewidth=4, zorder=3)
    xticks, xticklabels = _filter_ticks(flim, fscale)
    axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)',
             xscale=fscale)
    if xticks is not None:
        axes.set(xticks=xticks)
        axes.set(xticklabels=xticklabels)
    axes.set(xlim=flim)
    if title:
        axes.set(title=title)
    adjust_axes(axes)
    tight_layout()
    plt_show(show)
    return axes.figure
def _handle_event_colors(color_dict, unique_events, event_id):
    """Create event-integer-to-color mapping, assigning defaults as needed."""
    # Start from the default palette, cycled over the sorted event ids.
    colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))
    if color_dict is None:
        # No user colors: warn if the default palette must repeat.
        if len(unique_events) > len(_get_color_list()):
            warn('More events than default colors available. You should pass '
                 'a list of unique colors.')
        return colors
    # Translate user-specified keys (event ints or event_id labels) into
    # event-integer keys, warning about unrecognized ones.
    custom = dict()
    for key, value in color_dict.items():
        if key in unique_events:
            custom[key] = value
        elif key in event_id:
            custom[event_id[key]] = value
        else:
            warn('Event ID %s is in the color dict but is not '
                 'present in events or event_id.' % str(key))
    # warn if color_dict is missing any entries
    missing = sorted(set(unique_events) - set(custom))
    if len(missing):
        missing_str = ', '.join(str(e) for e in missing)
        warn('Color was not assigned for event%s %s. Default colors will '
             'be used.' % (_pl(missing), missing_str))
    colors.update(custom)
    return colors
@fill_doc
def plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None,
             n_cols=None, show=True):
    """Plot CSD matrices.
    A sub-plot is created for each frequency. If an info object is passed to
    the function, different channel types are plotted in different figures.
    Parameters
    ----------
    csd : instance of CrossSpectralDensity
        The CSD matrix to plot.
    %(info)s
        Used to split the figure by channel-type, if provided.
        By default, the CSD matrix is plotted as a whole.
    mode : 'csd' | 'coh'
        Whether to plot the cross-spectral density ('csd', the default), or
        the coherence ('coh') between the channels.
    colorbar : bool
        Whether to show a colorbar. Defaults to ``True``.
    cmap : str | None
        The matplotlib colormap to use. Defaults to None, which means the
        colormap will default to matplotlib's default.
    n_cols : int | None
        CSD matrices are plotted in a grid. This parameter controls how
        many matrix to plot side by side before starting a new row. By
        default, a number will be chosen to make the grid as square as
        possible.
    show : bool
        Whether to show the figure. Defaults to ``True``.
    Returns
    -------
    fig : list of Figure
        The figures created by this function.
    """
    import matplotlib.pyplot as plt
    if mode not in ['csd', 'coh']:
        raise ValueError('"mode" should be either "csd" or "coh".')
    if info is not None:
        # Split channels by type; each type gets its own figure.
        info_ch_names = info['ch_names']
        sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
                             exclude=[])
        sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
                             exclude=[])
        sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
                              exclude=[])
        # Map info channel indices to CSD matrix row/column indices.
        idx_eeg = [csd.ch_names.index(info_ch_names[c])
                   for c in sel_eeg if info_ch_names[c] in csd.ch_names]
        idx_mag = [csd.ch_names.index(info_ch_names[c])
                   for c in sel_mag if info_ch_names[c] in csd.ch_names]
        idx_grad = [csd.ch_names.index(info_ch_names[c])
                    for c in sel_grad if info_ch_names[c] in csd.ch_names]
        indices = [idx_eeg, idx_mag, idx_grad]
        titles = ['EEG', 'Magnetometers', 'Gradiometers']
        if mode == 'csd':
            # The units in which to plot the CSD
            units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')
            scalings = dict(eeg=1e12, grad=1e26, mag=1e30)
    else:
        # No info: plot the full CSD matrix as a single figure.
        indices = [np.arange(len(csd.ch_names))]
        if mode == 'csd':
            titles = ['Cross-spectral density']
            # Units and scaling unknown
            units = dict()
            scalings = dict()
        elif mode == 'coh':
            titles = ['Coherence']
    # Grid layout: as square as possible unless n_cols was given.
    n_freqs = len(csd.frequencies)
    if n_cols is None:
        n_cols = int(np.ceil(np.sqrt(n_freqs)))
    n_rows = int(np.ceil(n_freqs / float(n_cols)))
    figs = []
    for ind, title, ch_type in zip(indices, titles, ['eeg', 'mag', 'grad']):
        if len(ind) == 0:
            continue
        fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,
                                 figsize=(2 * n_cols + 1, 2.2 * n_rows))
        # First pass: compute all matrices so vmax is shared across freqs.
        csd_mats = []
        for i in range(len(csd.frequencies)):
            cm = csd.get_data(index=i)[ind][:, ind]
            if mode == 'csd':
                cm = np.abs(cm) * scalings.get(ch_type, 1)
            elif mode == 'coh':
                # Compute coherence from the CSD matrix
                psd = np.diag(cm).real
                cm = np.abs(cm) ** 2 / psd[np.newaxis, :] / psd[:, np.newaxis]
            csd_mats.append(cm)
        vmax = np.max(csd_mats)
        # Second pass: draw one image per frequency (bin).
        for i, (freq, mat) in enumerate(zip(csd.frequencies, csd_mats)):
            ax = axes[i // n_cols][i % n_cols]
            im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0,
                           vmax=vmax)
            ax.set_xticks([])
            ax.set_yticks([])
            # Summed CSD objects carry a frequency range per bin.
            if csd._is_sum:
                ax.set_title('%.1f-%.1f Hz.' % (np.min(freq),
                                                np.max(freq)))
            else:
                ax.set_title('%.1f Hz.' % freq)
        plt.suptitle(title)
        plt.subplots_adjust(top=0.8)
        if colorbar:
            cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])
            if mode == 'csd':
                label = u'CSD'
                if ch_type in units:
                    label += u' (%s)' % units[ch_type]
                cb.set_label(label)
            elif mode == 'coh':
                cb.set_label('Coherence')
        figs.append(fig)
    plt_show(show)
    return figs
def plot_chpi_snr(snr_dict, axes=None):
    """Plot time-varying SNR estimates of the HPI coils.
    Parameters
    ----------
    snr_dict : dict
        The dictionary returned by `~mne.chpi.compute_chpi_snr`. Must have keys
        ``times``, ``freqs``, ``TYPE_snr``, ``TYPE_power``, and ``TYPE_resid``
        (where ``TYPE`` can be ``mag`` or ``grad`` or both).
    axes : None | list of matplotlib.axes.Axes
        Figure axes in which to draw the SNR, power, and residual plots. The
        number of axes should be 3× the number of MEG sensor types present in
        ``snr_dict``. If ``None`` (the default), a new
        `~matplotlib.figure.Figure` is created with the required number of
        axes.
    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        A figure with subplots for SNR, power, and residual variance,
        separately for magnetometers and/or gradiometers (depending on what is
        present in ``snr_dict``).
    Notes
    -----
    If you supply a list of existing `~matplotlib.axes.Axes`, then the figure
    legend will not be drawn automatically. If you still want it, running
    ``fig.legend(loc='right', title='cHPI frequencies')`` will recreate it,
    though you may also need to manually adjust the margin to make room for it
    (e.g., using ``fig.subplots_adjust(right=0.8)``).
    .. versionadded:: 0.24
    """
    import matplotlib.pyplot as plt
    # First two keys are 'times' and 'freqs'; the remaining
    # '<ch_type>_<kind>' keys are the data to plot (relies on dict
    # insertion order from compute_chpi_snr).
    valid_keys = list(snr_dict)[2:]
    titles = dict(snr='SNR', power='cHPI power', resid='Residual variance')
    full_names = dict(mag='magnetometers', grad='gradiometers')
    axes_was_none = axes is None
    if axes_was_none:
        fig, axes = plt.subplots(len(valid_keys), 1, sharex=True)
    else:
        fig = axes[0].get_figure()
    if len(axes) != len(valid_keys):
        raise ValueError(f'axes must be a list of {len(valid_keys)} axes, got '
                         f'length {len(axes)} ({axes}).')
    fig.set_size_inches(10, 10)
    legend_labels_exist = False
    # One subplot per (ch_type, kind) combination.
    for key, ax in zip(valid_keys, axes):
        ch_type, kind = key.split('_')
        # SNR is unitless (dB); power/resid are scaled to display units
        # (squared, hence ``scaling ** 2`` below).
        scaling = 1 if kind == 'snr' else DEFAULTS['scalings'][ch_type]
        plot_kwargs = dict(color='k') if kind == 'resid' else dict()
        lines = ax.plot(snr_dict['times'], snr_dict[key] * scaling ** 2,
                        **plot_kwargs)
        # the freqs should be the same for all sensor types (and for SNR and
        # power subplots), so we only need to label the lines on one axes
        # (otherwise we get duplicate legend entries).
        if not legend_labels_exist:
            for line, freq in zip(lines, snr_dict['freqs']):
                line.set_label(f'{freq} Hz')
            legend_labels_exist = True
        unit = DEFAULTS['units'][ch_type]
        unit = f'({unit})' if '/' in unit else unit
        set_kwargs = dict(title=f'{titles[kind]}, {full_names[ch_type]}',
                          ylabel='dB' if kind == 'snr' else f'{unit}²')
        if not axes_was_none:
            # User-provided axes don't share an x axis; label each one.
            set_kwargs.update(xlabel='Time (s)')
        ax.set(**set_kwargs)
    if axes_was_none:
        # Shared x axis: only the bottom subplot needs an x label.
        ax.set(xlabel='Time (s)')
        fig.align_ylabels()
        fig.subplots_adjust(left=0.1, right=0.825, bottom=0.075, top=0.95,
                            hspace=0.7)
        fig.legend(loc='right', title='cHPI frequencies')
    return fig
| bsd-3-clause | 8a77dcbbed9a7a15381f7fa6a5763634 | 36.417422 | 79 | 0.557586 | 3.634112 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/window/mouse.py | 7 | 2508 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Mouse constants and utilities for pyglet.window.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: mouse.py 1579 2008-01-15 14:47:19Z Alex.Holkner $'
def buttons_string(buttons):
    '''Return a string describing a set of active mouse buttons.

    Example::

        >>> buttons_string(LEFT | RIGHT)
        'LEFT|RIGHT'

    :Parameters:
        `buttons` : int
            Bitwise combination of mouse button constants.

    :rtype: str
    '''
    # Walk the buttons in canonical order and keep the names whose bit is set.
    pressed = [name
               for flag, name in ((LEFT, 'LEFT'),
                                  (MIDDLE, 'MIDDLE'),
                                  (RIGHT, 'RIGHT'))
               if buttons & flag]
    return '|'.join(pressed)

# Symbolic names for the mouse buttons
LEFT = 1 << 0
MIDDLE = 1 << 1
RIGHT = 1 << 2
| bsd-3-clause | d957a3f4ce86b8e8d1d2fdce37c4aee7 | 36.432836 | 78 | 0.672249 | 4.462633 | false | false | false | false |
mattpap/sympy-polys | sympy/solvers/solvers.py | 1 | 28681 | """ This module contain solvers for all kinds of equations:
- algebraic, use solve()
- recurrence, use rsolve()
- differential, use dsolve()
- transcendental, use tsolve()
- nonlinear (numerically), use nsolve()
(you will need a good starting point)
"""
from sympy.core.sympify import sympify
from sympy.core import S, Mul, Add, Pow, Symbol, Wild, Equality
from sympy.core.numbers import ilcm
from sympy.functions import log, exp, LambertW
from sympy.simplify import simplify, collect
from sympy.matrices import Matrix, zeros
from sympy.polys import roots
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.utilities import any, all
from sympy.utilities.iterables import iff
from sympy.utilities.lambdify import lambdify
from sympy.mpmath import findroot
from sympy.solvers.polysys import solve_poly_system
from warnings import warn
# Codes for guess solve strategy.  These are returned by
# guess_solve_strategy() and dispatched on inside solve().
GS_POLY = 0
GS_RATIONAL = 1
GS_POLY_CV_1 = 2 # can be converted to a polynomial equation via the change of variable y -> x**a, a real
GS_POLY_CV_2 = 3 # can be converted to a polynomial equation multiplying on both sides by x**m
                 # for example, x + 1/x == 0. Multiplying by x yields x**2 + x == 0
GS_RATIONAL_CV_1 = 4 # can be converted to a rational equation via the change of variable y -> x**n
GS_PIECEWISE = 5
GS_TRANSCENDENTAL = 6
def guess_solve_strategy(expr, symbol):
    """
    Tries to guess what approach should be used to solve a specific equation

    Returns
    =======
       - -1: could not guess
       - integer > 0: code representing certain type of equation. See GS_* fields
         on this module for a complete list

    Examples
    ========
    >>> from sympy import Symbol, Rational
    >>> from sympy.solvers.solvers import guess_solve_strategy
    >>> from sympy.abc import x
    >>> guess_solve_strategy(x**2 + 1, x)
    0
    >>> guess_solve_strategy(x**Rational(1,2) + 1, x)
    2
    """
    best = -1

    if expr.is_Add:
        # a sum is as hard as its hardest term
        return max(guess_solve_strategy(term, symbol) for term in expr.args)

    if expr.is_Mul:
        numer, denom = expr.as_numer_denom()
        if denom != 1 and denom.has(symbol):
            # genuine quotient in the unknown: promote the polynomial
            # strategies to their rational counterparts
            sub = max(guess_solve_strategy(numer, symbol),
                      guess_solve_strategy(denom, symbol))
            if sub == GS_POLY:
                return GS_RATIONAL
            if sub == GS_POLY_CV_1:
                return GS_RATIONAL_CV_1
            raise NotImplementedError
        # plain product: as hard as the hardest factor
        return max(guess_solve_strategy(factor, symbol) for factor in expr.args)

    if expr.is_Symbol:
        return GS_POLY

    if expr.is_Pow:
        if expr.exp.has(symbol):
            # unknown in the exponent: transcendental
            return GS_TRANSCENDENTAL
        if expr.base.has(symbol):
            exponent = expr.exp
            if exponent.is_Integer and exponent > 0:
                best = max(best, GS_POLY)
            elif exponent.is_Integer and exponent < 0:
                # negative integer power: clear denominators first
                best = max(best, GS_POLY_CV_2)
            elif exponent.is_Rational:
                # fractional power: substitute y -> x**n first
                best = max(best, GS_POLY_CV_1)
            else:
                return GS_TRANSCENDENTAL
    elif expr.is_Piecewise:
        return GS_PIECEWISE
    elif expr.is_Function and expr.has(symbol):
        return GS_TRANSCENDENTAL
    elif not expr.has(symbol):
        # constant w.r.t. the unknown: treat as (trivial) polynomial
        return GS_POLY

    return best
def solve(f, *symbols, **flags):
    """Solves equations and systems of equations.

    Currently supported are univariate polynomial, transcendental
    equations, piecewise combinations thereof and systems of linear
    and polynomial equations.  Input is formed as a single expression
    or an equation, or an iterable container in case of an equation
    system.  The type of output may vary and depends heavily on the
    input.  For more details refer to more problem specific functions.

    By default all solutions are simplified to make the output more
    readable.  If this is not the expected behavior (e.g., because of
    speed issues) set simplified=False in function arguments.

    To solve equations and systems of equations like recurrence relations
    or differential equations, use rsolve() or dsolve(), respectively.

    >>> from sympy import I, solve
    >>> from sympy.abc import x, y

    Solve a polynomial equation:

    >>> solve(x**4-1, x)
    [1, -1, -I, I]

    Solve a linear system:

    >>> solve((x+5*y-2, -3*x+6*y-15), x, y)
    {x: -3, y: 1}

    """
    def sympit(w):
        # coerce w to a list and sympify every element
        return map(sympify, iff(isinstance(w,(list, tuple, set)), w, [w]))
    # make f and symbols into lists of sympified quantities
    # keeping track of how f was passed since if it is a list
    # a dictionary of results will be returned.
    bare_f = not isinstance(f, (list, tuple, set))
    f, symbols = (sympit(w) for w in [f, symbols])
    # normalize Eq(lhs, rhs) to lhs - rhs == 0
    for i, fi in enumerate(f):
        if isinstance(fi, Equality):
            f[i] = fi.lhs - fi.rhs

    if not symbols:
        #get symbols from equations or supply dummy symbols since
        #solve(3,x) returns []...though it seems that it should raise some sort of error TODO
        symbols = set([])
        for fi in f:
            symbols |= fi.atoms(Symbol) or set([Symbol('x',dummy=True)])
        symbols = list(symbols)

    if bare_f:
        f=f[0]
    if len(symbols) == 1:
        if isinstance(symbols[0], (list, tuple, set)):
            symbols = symbols[0]

    result = list()

    # Begin code handling for Function and Derivative instances
    # Basic idea:  store all the passed symbols in symbols_passed, check to see
    # if any of them are Function or Derivative types, if so, use a dummy
    # symbol in their place, and set symbol_swapped = True so that other parts
    # of the code can be aware of the swap.  Once all swapping is done, the
    # continue on with regular solving as usual, and swap back at the end of
    # the routine, so that whatever was passed in symbols is what is returned.
    symbols_new = []
    symbol_swapped = False

    symbols_passed = list(symbols)

    for i, s in enumerate(symbols):
        if s.is_Symbol:
            s_new = s
        elif s.is_Function:
            symbol_swapped = True
            s_new = Symbol('F%d' % i, dummy=True)
        elif s.is_Derivative:
            symbol_swapped = True
            s_new = Symbol('D%d' % i, dummy=True)
        else:
            raise TypeError('not a Symbol or a Function')
        symbols_new.append(s_new)

        if symbol_swapped:
            swap_back_dict = dict(zip(symbols_new, symbols))
    # End code for handling of Function and Derivative instances

    if not isinstance(f, (tuple, list, set)):
        # single equation branch

        # Create a swap dictionary for storing the passed symbols to be solved
        # for, so that they may be swapped back.
        if symbol_swapped:
            swap_dict = zip(symbols, symbols_new)
            f = f.subs(swap_dict)
            symbols = symbols_new

        # Any embedded piecewise functions need to be brought out to the
        # top level so that the appropriate strategy gets selected.
        f = piecewise_fold(f)

        if len(symbols) != 1:
            # several unknowns for a single equation: solve for each in turn
            result = {}
            for s in symbols:
                result[s] = solve(f, s, **flags)
            if flags.get('simplified', True):
                for s, r in result.items():
                    result[s] = map(simplify, r)
            return result

        symbol = symbols[0]
        strategy = guess_solve_strategy(f, symbol)

        if strategy == GS_POLY:
            poly = f.as_poly( symbol )
            if poly is None:
                raise NotImplementedError("Cannot solve equation " + str(f) + " for "
                    + str(symbol))
            # for cubics and quartics, if the flag wasn't set, DON'T do it
            # by default since the results are quite long. Perhaps one could
            # base this decision on a certain crtical length of the roots.
            if poly.degree > 2:
                flags['simplified'] = flags.get('simplified', False)
            result = roots(poly, cubics=True, quartics=True).keys()

        elif strategy == GS_RATIONAL:
            P, Q = f.as_numer_denom()
            #TODO: check for Q != 0
            result = solve(P, symbol, **flags)

        elif strategy == GS_POLY_CV_1:
            args = list(f.args)
            if isinstance(f, Add):
                # we must search for a suitable change of variable
                # collect exponents
                exponents_denom = list()
                for arg in args:
                    if isinstance(arg, Pow):
                        exponents_denom.append(arg.exp.q)
                    elif isinstance(arg, Mul):
                        for mul_arg in arg.args:
                            if isinstance(mul_arg, Pow):
                                exponents_denom.append(mul_arg.exp.q)
                assert len(exponents_denom) > 0
                if len(exponents_denom) == 1:
                    m = exponents_denom[0]
                else:
                    # get the LCM of the denominators
                    m = reduce(ilcm, exponents_denom)
                # x -> y**m.
                # we assume positive for simplification purposes
                t = Symbol('t', positive=True, dummy=True)
                f_ = f.subs(symbol, t**m)
                if guess_solve_strategy(f_, t) != GS_POLY:
                    raise NotImplementedError("Could not convert to a polynomial equation: %s" % f_)
                cv_sols = solve(f_, t)
                for sol in cv_sols:
                    # undo the change of variable
                    result.append(sol**m)

            elif isinstance(f, Mul):
                # solve each factor separately
                for mul_arg in args:
                    result.extend(solve(mul_arg, symbol))

        elif strategy == GS_POLY_CV_2:
            # find the most negative power m, then multiply through by x**-m
            m = 0
            args = list(f.args)
            if isinstance(f, Add):
                for arg in args:
                    if isinstance(arg, Pow):
                        m = min(m, arg.exp)
                    elif isinstance(arg, Mul):
                        for mul_arg in arg.args:
                            if isinstance(mul_arg, Pow):
                                m = min(m, mul_arg.exp)
            elif isinstance(f, Mul):
                for mul_arg in args:
                    if isinstance(mul_arg, Pow):
                        m = min(m, mul_arg.exp)
            f1 = simplify(f*symbol**(-m))
            result = solve(f1, symbol)
            # TODO: we might have introduced unwanted solutions
            # when multiplied by x**-m

        elif strategy == GS_PIECEWISE:
            result = set()
            for expr, cond in f.args:
                candidates = solve(expr, *symbols)
                if isinstance(cond, bool) or cond.is_Number:
                    if not cond:
                        continue
                    # Only include solutions that do not match the condition
                    # of any of the other pieces.
                    for candidate in candidates:
                        matches_other_piece = False
                        for other_expr, other_cond in f.args:
                            if isinstance(other_cond, bool) \
                               or other_cond.is_Number:
                                continue
                            if bool(other_cond.subs(symbol, candidate)):
                                matches_other_piece = True
                                break
                        if not matches_other_piece:
                            result.add(candidate)
                else:
                    # keep only candidates that satisfy this piece's condition
                    for candidate in candidates:
                        if bool(cond.subs(symbol, candidate)):
                            result.add(candidate)
            result = list(result)

        elif strategy == GS_TRANSCENDENTAL:
            #a, b = f.as_numer_denom()
            # Let's throw away the denominator for now. When we have robust
            # assumptions, it should be checked, that for the solution,
            # b!=0.
            result = tsolve(f, *symbols)
        elif strategy == -1:
            raise ValueError('Could not parse expression %s' % f)
        else:
            raise NotImplementedError("No algorithms are implemented to solve equation %s" % f)

        # This symbol swap should not be necessary for the single symbol case: if you've
        # solved for the symbol the it will not appear in the solution. Right now, however
        # ode's are getting solutions for solve (even though they shouldn't be -- see the
        # swap_back test in test_solvers).
        if symbol_swapped:
            result = [ri.subs(swap_back_dict) for ri in result]

        if flags.get('simplified', True) and strategy != GS_RATIONAL:
            return map(simplify, result)
        else:
            return result
    else:
        # system-of-equations branch
        if not f:
            return {}
        else:
            # Create a swap dictionary for storing the passed symbols to be
            # solved for, so that they may be swapped back.
            if symbol_swapped:
                swap_dict = zip(symbols, symbols_new)
                f = [fi.subs(swap_dict) for fi in f]
                symbols = symbols_new

            polys = []

            for g in f:
                poly = g.as_poly(*symbols)

                if poly is not None:
                    polys.append(poly)
                else:
                    raise NotImplementedError()

            if all(p.is_linear for p in polys):
                # build the augmented coefficient matrix and use Gaussian
                # elimination (solve_linear_system)
                n, m = len(f), len(symbols)
                matrix = zeros((n, m + 1))

                for i, poly in enumerate(polys):
                    for monom, coeff in poly.terms():
                        try:
                            j = list(monom).index(1)
                            matrix[i, j] = coeff
                        except ValueError:
                            # constant term goes (negated) into the
                            # augmentation column
                            matrix[i, m] = -coeff

                soln = solve_linear_system(matrix, *symbols, **flags)
            else:
                soln = solve_poly_system(polys)

            # Use swap_dict to ensure we return the same type as what was
            # passed
            if symbol_swapped:
                if isinstance(soln, dict):
                    res = {}
                    for k in soln.keys():
                        res.update({swap_back_dict[k]: soln[k]})
                    return res
                else:
                    return soln
            else:
                return soln
def solve_linear_system(system, *symbols, **flags):
    """Solve system of N linear equations with M variables, which means
    both Cramer and over defined systems are supported. The possible
    number of solutions is zero, one or infinite. Respectively this
    procedure will return None or dictionary with solutions. In the
    case of over defined system all arbitrary parameters are skipped.
    This may cause situation in with empty dictionary is returned.
    In this case it means all symbols can be assigned arbitrary values.

    Input to this functions is a Nx(M+1) matrix, which means it has
    to be in augmented form. If you are unhappy with such setting
    use 'solve' method instead, where you can input equations
    explicitly. And don't worry about the matrix, this function
    is persistent and will make a local copy of it.

    The algorithm used here is fraction free Gaussian elimination,
    which results, after elimination, in upper-triangular matrix.
    Then solutions are found using back-substitution. This approach
    is more efficient and compact than the Gauss-Jordan method.

    >>> from sympy import Matrix, solve_linear_system
    >>> from sympy.abc import x, y

    Solve the following system:

           x + 4 y ==  2
        -2 x +   y == 14

    >>> system = Matrix(( (1, 4, 2), (-2, 1, 14)))
    >>> solve_linear_system(system, x, y)
    {x: -6, y: 2}

    """
    # work on a copy so the caller's matrix is left untouched
    matrix = system[:,:]
    syms = list(symbols)

    i, m = 0, matrix.cols-1 # don't count augmentation

    while i < matrix.rows:
        if i == m:
            # an overdetermined system
            if any(matrix[i:,m]):
                return None   # no solutions
            else:
                # remove trailing rows
                matrix = matrix[:i,:]
                break

        if not matrix[i, i]:
            # there is no pivot in current column
            # so try to find one in other columns
            for k in xrange(i+1, m):
                if matrix[i, k]:
                    break
            else:
                if matrix[i, m]:
                    return None   # no solutions
                else:
                    # zero row or was a linear combination of
                    # other rows so now we can safely skip it
                    matrix.row_del(i)
                    continue

            # we want to change the order of colums so
            # the order of variables must also change
            syms[i], syms[k] = syms[k], syms[i]
            matrix.col_swap(i, k)

        pivot_inv = S.One / matrix [i, i]

        # divide all elements in the current row by the pivot
        matrix.row(i, lambda x, _: x * pivot_inv)

        for k in xrange(i+1, matrix.rows):
            if matrix[k, i]:
                coeff = matrix[k, i]

                # subtract from the current row the row containing
                # pivot and multiplied by extracted coefficient
                matrix.row(k, lambda x, j: simplify(x - matrix[i, j]*coeff))

        i += 1

    # if there weren't any problems, augmented matrix is now
    # in row-echelon form so we can check how many solutions
    # there are and extract them using back substitution
    simplified = flags.get('simplified', True)

    if len(syms) == matrix.rows:
        # this system is Cramer equivalent so there is
        # exactly one solution to this system of equations
        k, solutions = i-1, {}

        while k >= 0:
            content = matrix[k, m]

            # run back-substitution for variables
            for j in xrange(k+1, m):
                content -= matrix[k, j]*solutions[syms[j]]

            if simplified:
                solutions[syms[k]] = simplify(content)
            else:
                solutions[syms[k]] = content

            k -= 1

        return solutions
    elif len(syms) > matrix.rows:
        # this system will have infinite number of solutions
        # dependent on exactly len(syms) - i parameters
        k, solutions = i-1, {}

        while k >= 0:
            content = matrix[k, m]

            # run back-substitution for variables
            for j in xrange(k+1, i):
                content -= matrix[k, j]*solutions[syms[j]]

            # run back-substitution for parameters
            for j in xrange(i, m):
                content -= matrix[k, j]*syms[j]

            if simplified:
                solutions[syms[k]] = simplify(content)
            else:
                solutions[syms[k]] = content

            k -= 1

        return solutions
    else:
        return None   # no solutions
def solve_undetermined_coeffs(equ, coeffs, sym, **flags):
    """Solve equation of a type p(x; a_1, ..., a_k) == q(x) where both
    p, q are univariate polynomials and f depends on k parameters.
    The result of this functions is a dictionary with symbolic
    values of those parameters with respect to coefficients in q.

    This functions accepts both Equations class instances and ordinary
    SymPy expressions. Specification of parameters and variable is
    obligatory for efficiency and simplicity reason.

    >>> from sympy import Eq
    >>> from sympy.abc import a, b, c, x
    >>> from sympy.solvers import solve_undetermined_coeffs

    >>> solve_undetermined_coeffs(Eq(2*a*x + a+b, x), [a, b], x)
    {a: 1/2, b: -1/2}

    >>> solve_undetermined_coeffs(Eq(a*c*x + a+b, x), [a, b], x)
    {a: 1/c, b: -1/c}

    """
    if isinstance(equ, Equality):
        # normalize Eq(lhs, rhs) to the expression lhs - rhs
        equ = equ.lhs - equ.rhs

    # group the expanded equation by powers of sym; each collected
    # coefficient yields one equation in the unknown parameters
    system = collect(equ.expand(), sym, evaluate=False).values()

    if any(eq_i.has(sym) for eq_i in system):
        # powers of sym could not be fully collected out, so the
        # parameter system cannot be formed
        return None

    # the collected coefficients form a system that is solved with
    # Gaussian elimination via solve()
    return solve(system, *coeffs, **flags)
def solve_linear_system_LU(matrix, syms):
    """Solve an augmented linear system using LU decomposition.

    ``matrix`` must be an N x (N+1) Matrix in augmented form whose
    coefficient block is invertible (LU decomposition only works for
    invertible systems).  ``syms`` are the N symbols to solve for.

    Returns a dictionary mapping each symbol to its solution value.

    Raises ValueError if the matrix is not in augmented (N x N+1) form.
    """
    # Validate explicitly rather than with `assert`, which is silently
    # stripped when Python runs with -O and would let malformed input
    # through to LUsolve with a confusing failure.
    if matrix.rows != matrix.cols - 1:
        raise ValueError("expected an augmented N x (N+1) matrix, got "
                         "%d x %d" % (matrix.rows, matrix.cols))
    # split the augmented matrix into coefficients A and right-hand side b
    A = matrix[:matrix.rows, :matrix.rows]
    b = matrix[:, matrix.cols - 1:]
    soln = A.LUsolve(b)
    solutions = {}
    for i in range(soln.rows):
        solutions[syms[i]] = soln[i, 0]
    return solutions
# Dummy unknown used internally by tsolve(): every input equation is first
# rewritten in terms of this symbol before pattern matching.
x = Symbol('x', dummy=True)
# Wildcards used to build the transcendental pattern table; each one is
# excluded from matching the dummy unknown x itself.
a,b,c,d,e,f,g,h = [Wild(t, exclude=[x]) for t in 'abcdefgh']
# Pattern table for tsolve(); filled in lazily by _generate_patterns().
patterns = None
def _generate_patterns():
    """
    Generates patterns for transcendental equations.

    This is lazily calculated (called) in the tsolve() function and stored in
    the patterns global variable.
    """
    # common subexpressions for the last (mixed power/exponential) pattern
    tmp1 = f ** (h-(c*g/b))
    tmp2 = (-e*tmp1/a)**(1/d)
    global patterns
    # Each entry is a pair (template, solution): when an equation (rewritten
    # in the module-level dummy x) matches the template, substituting the
    # matched wildcard values a..h into `solution` yields one root in x.
    patterns = [
        (a*(b*x+c)**d + e , ((-(e/a))**(1/d)-c)/b),
        ( b+c*exp(d*x+e) , (log(-b/c)-e)/d),
        (a*x+b+c*exp(d*x+e) , -b/a-LambertW(c*d*exp(e-b*d/a)/a)/d),
        ( b+c*f**(d*x+e) , (log(-b/c)-e*log(f))/d/log(f)),
        (a*x+b+c*f**(d*x+e) , -b/a-LambertW(c*d*f**(e-b*d/a)*log(f)/a)/d/log(f)),
        ( b+c*log(d*x+e) , (exp(-b/c)-e)/d),
        (a*x+b+c*log(d*x+e) , -e/d+c/a*LambertW(a/c/d*exp(-b/c+a*e/c/d))),
        (a*(b*x+c)**d + e*f**(g*x+h) , -c/b-d*LambertW(-tmp2*g*log(f)/b/d)/g/log(f))
    ]
def tsolve(eq, sym):
    """
    Solves a transcendental equation with respect to the given
    symbol. Various equations containing mixed linear terms, powers,
    and logarithms, can be solved.

    Only a single solution is returned. This solution is generally
    not unique. In some cases, a complex solution may be returned
    even though a real solution exists.

    >>> from sympy import tsolve, log
    >>> from sympy.abc import x

    >>> tsolve(3**(2*x+5)-4, x)
    [(-5*log(3) + log(4))/(2*log(3))]

    >>> tsolve(log(x) + 2*x, x)
    [LambertW(2)/2]

    """
    # build the pattern table on first use
    if patterns is None:
        _generate_patterns()
    eq = sympify(eq)
    if isinstance(eq, Equality):
        # normalize Eq(lhs, rhs) to lhs - rhs == 0
        eq = eq.lhs - eq.rhs
    sym = sympify(sym)
    # rewrite in the module-level dummy x that the patterns are built on
    eq2 = eq.subs(sym, x)
    # First see if the equation has a linear factor
    # In that case, the other factor can contain x in any way (as long as it
    # is finite), and we have a direct solution to which we add others that
    # may be found for the remaining portion.
    r = Wild('r')
    m = eq2.match((a*x+b)*r)
    if m and m[a]:
        return [(-b/a).subs(m).subs(x, sym)] + solve(m[r], x)

    # try each known transcendental pattern in turn
    for p, sol in patterns:
        m = eq2.match(p)
        if m:
            return [sol.subs(m).subs(x, sym)]

    # let's also try to inverse the equation
    lhs = eq
    rhs = S.Zero

    while True:
        # split off the part that does not contain the unknown and move it
        # to the right-hand side, peeling the equation like an onion
        indep, dep = lhs.as_independent(sym)

        # dep + indep == rhs
        if lhs.is_Add:
            # this indicates we have done it all
            if indep is S.Zero:
                break

            lhs = dep
            rhs-= indep

        # dep * indep == rhs
        else:
            # this indicates we have done it all
            if indep is S.One:
                break

            lhs = dep
            rhs/= indep

    #                    -1
    # f(x) = g  ->  x = f  (g)
    if lhs.is_Function and lhs.nargs==1 and hasattr(lhs, 'inverse'):
        rhs = lhs.inverse() (rhs)
        lhs = lhs.args[0]

        sol = solve(lhs-rhs, sym)
        return sol

    elif lhs.is_Add:
        # just a simple case - we do variable substitution for first function,
        # and if it removes all functions - let's call solve.
        #      x    -x                   -1
        # UC: e  + e   = y      ->  t + t   = y
        t = Symbol('t', dummy=True)
        terms = lhs.args

        # find first term which is Function
        for f1 in lhs.args:
            if f1.is_Function:
                break
        else:
            raise NotImplementedError("Unable to solve the equation" + \
                "(tsolve: at least one Function expected at this point")

        # perform the substitution
        lhs_ = lhs.subs(f1, t)

        # if no Functions left, we can proceed with usual solve
        if not (lhs_.is_Function or
                any(term.is_Function for term in lhs_.args)):
            cv_sols = solve(lhs_ - rhs, t)
            # the substitution must have eliminated the unknown entirely
            for sol in cv_sols:
                if sol.has(sym):
                    raise NotImplementedError("Unable to solve the equation")
            # invert the change of variable t = f1(sym) and map back
            cv_inv = solve( t - f1, sym )[0]
            sols = list()
            for sol in cv_sols:
                sols.append(cv_inv.subs(t, sol))
            return sols

    raise NotImplementedError("Unable to solve the equation.")
def msolve(*args, **kwargs):
    """
    Compatibility wrapper pointing to nsolve().

    msolve() has been renamed to nsolve(), please use nsolve() directly.

    msolve() took its arguments as (x0, f, ...) while nsolve() takes
    (f, x0, ...), so the first two positional arguments are swapped before
    delegating.
    """
    warn('msolve() has been renamed, please use nsolve() instead',
         DeprecationWarning)
    # *args arrives as a tuple, which does not support item assignment;
    # convert to a list before swapping the first two arguments (the
    # original in-place tuple swap raised TypeError on every call).
    args = list(args)
    args[0], args[1] = args[1], args[0]
    return nsolve(*args, **kwargs)
# TODO: option for calculating J numerically
def nsolve(*args, **kwargs):
    """
    Solve a nonlinear equation system numerically.

    nsolve(f, [args,] x0, modules=['mpmath'], **kwargs)

    f is a vector function of symbolic expressions representing the system.
    args are the variables. If there is only one variable, this argument can be
    omitted.
    x0 is a starting vector close to a solution.

    Use the modules keyword to specify which modules should be used to evaluate
    the function and the Jacobian matrix. Make sure to use a module that
    supports matrices. For more information on the syntax, please see the
    docstring of lambdify.

    Overdetermined systems are supported.

    >>> from sympy import Symbol, nsolve
    >>> import sympy
    >>> sympy.mpmath.mp.dps = 15
    >>> x1 = Symbol('x1')
    >>> x2 = Symbol('x2')
    >>> f1 = 3 * x1**2 - 2 * x2**2 - 1
    >>> f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
    >>> print nsolve((f1, f2), (x1, x2), (-1, 1))
    [-1.19287309935246]
    [ 1.27844411169911]

    For one-dimensional functions the syntax is simplified:

    >>> from sympy import sin, nsolve
    >>> from sympy.abc import x
    >>> nsolve(sin(x), x, 2)
    3.14159265358979
    >>> nsolve(sin(x), 2)
    3.14159265358979

    mpmath.findroot is used, you can find there more extensive documentation,
    especially concerning keyword parameters and available solvers.
    """
    # interpret arguments
    if len(args) == 3:
        # nsolve(f, fargs, x0)
        f = args[0]
        fargs = args[1]
        x0 = args[2]
    elif len(args) == 2:
        # nsolve(f, x0): the single variable is inferred from f below
        f = args[0]
        fargs = None
        x0 = args[1]
    elif len(args) < 2:
        raise TypeError('nsolve expected at least 2 arguments, got %i'
                        % len(args))
    else:
        raise TypeError('nsolve expected at most 3 arguments, got %i'
                        % len(args))
    modules = kwargs.get('modules', ['mpmath'])
    if isinstance(f, (list, tuple)):
        f = Matrix(f).T
    if not isinstance(f, Matrix):
        # assume it's a sympy expression (one-dimensional case)
        if isinstance(f, Equality):
            f = f.lhs - f.rhs
        f = f.evalf()
        atoms = f.atoms(Symbol)
        if fargs is None:
            # infer the single unknown from the expression
            fargs = atoms.copy().pop()
        if not (len(atoms) == 1 and (fargs in atoms or fargs[0] in atoms)):
            raise ValueError('expected a one-dimensional and numerical function')

        # the function is much better behaved if there is no denominator
        f = f.as_numer_denom()[0]

        f = lambdify(fargs, f, modules)
        return findroot(f, x0, **kwargs)

    # multidimensional case: f is a row Matrix of expressions
    if len(fargs) > f.cols:
        raise NotImplementedError('need at least as many equations as variables')
    verbose = kwargs.get('verbose', False)
    if verbose:
        print 'f(x):'
        print f
    # derive Jacobian
    J = f.jacobian(fargs)
    if verbose:
        print 'J(x):'
        print J
    # create functions
    f = lambdify(fargs, f.T, modules)
    J = lambdify(fargs, J, modules)
    # solve the system numerically
    x = findroot(f, x0, J=J, **kwargs)
    return x
| bsd-3-clause | 931301688252a7f296baa826c9c25a91 | 34.102815 | 105 | 0.552948 | 3.938341 | false | false | false | false |
mattpap/sympy-polys | examples/intermediate/mplot3d.py | 6 | 1197 | #!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
from sympy import Basic, sin, Symbol
from sample import sample
def mplot3d(f, var1, var2, show=True):
    """
    Plot a 3d function using matplotlib/Tk.

    f is a SymPy expression in the two plot variables; var1 and var2 are
    (symbol, start, stop, steps) tuples passed through to sample().
    If show is True the plot window is displayed immediately.
    """
    import warnings
    warnings.filterwarnings("ignore", "Could not match \S")

    try:
        import pylab as p
        import mpl_toolkits.mplot3d as p3
    except ImportError:
        # NOTE(review): this fallback only covers the case where pylab
        # imported but mpl_toolkits is missing; if pylab itself failed,
        # `p` is undefined below.
        try:
            import matplotlib.axes3d as p3 # older matplotlib
        except ImportError:
            raise ImportError("Matplotlib is required to use mplot3d.")

    # evaluate f on the grid defined by var1 x var2
    x, y, z = sample(f, var1, var2)

    fig = p.figure()
    ax = p3.Axes3D(fig)

    #ax.plot_surface(x,y,z) #seems to be a bug in matplotlib
    ax.plot_wireframe(x,y,z)

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')

    if show:
        p.show()
def main():
    """Draw a sample saddle surface x**2 - y**2 over [-10, 10] x [-10, 10]."""
    sym_x = Symbol('x')
    sym_y = Symbol('y')
    ranges = ((sym_x, -10.0, 10.0, 20), (sym_y, -10.0, 10.0, 20))
    mplot3d(sym_x**2 - sym_y**2, *ranges)
    #mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
    #mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))

if __name__ == "__main__":
    main()
| bsd-3-clause | 59969c55749c4adfd975d41297d3adec | 22.470588 | 71 | 0.57477 | 2.803279 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/__init__.py | 5 | 12300 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''pyglet is a cross-platform games and multimedia package.
Detailed documentation is available at http://www.pyglet.org
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 2444 2009-03-22 08:51:01Z Alex.Holkner $'
import os
import sys
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
#: The release version of this pyglet installation.
#:
#: Valid only if pyglet was installed from a source or binary distribution
#: (i.e. not in a checked-out copy from SVN).
#:
#: Use setuptools if you need to check for a specific release version, e.g.::
#:
#: >>> import pyglet
#: >>> from pkg_resources import parse_version
#: >>> parse_version(pyglet.version) >= parse_version('1.1')
#: True
#:
version = '1.1.3'
def _require_ctypes_version(version):
    '''Raise ImportError unless the installed ctypes is at least `version`.

    `version` is a dotted string such as '1.0.0'; components are compared
    numerically, left to right.
    '''
    import ctypes
    required = tuple(int(part) for part in version.split('.'))
    installed = tuple(int(part) for part in ctypes.__version__.split('.'))
    if installed < required:
        raise ImportError('pyglet requires ctypes %s or later.' % version)
_require_ctypes_version('1.0.0')

# Optimisations (e.g. disabling the debug_gl checks below) are enabled when
# Python itself is run with -O (__debug__ is False), or when running from a
# py2exe/py2app "frozen" executable.
_enable_optimisations = not __debug__
if getattr(sys, 'frozen', None):
    _enable_optimisations = True
#: Global dict of pyglet options. To change an option from its default, you
#: must import ``pyglet`` before any sub-packages. For example::
#:
#: import pyglet
#: pyglet.options['debug_gl'] = False
#:
#: The default options can be overridden from the OS environment. The
#: corresponding environment variable for each option key is prefaced by
#: ``PYGLET_``. For example, in Bash you can set the ``debug_gl`` option with::
#:
#: PYGLET_DEBUG_GL=True; export PYGLET_DEBUG_GL
#:
#: For options requiring a tuple of values, separate each value with a comma.
#:
#: The non-development options are:
#:
#: audio
#: A sequence of the names of audio modules to attempt to load, in
#: order of preference. Valid driver names are:
#:
#: * directsound, the Windows DirectSound audio module (Windows only)
#: * alsa, the ALSA audio module (Linux only)
#: * openal, the OpenAL audio module
#: * silent, no audio
#: debug_lib
#: If True, prints the path of each dynamic library loaded.
#: debug_gl
#: If True, all calls to OpenGL functions are checked afterwards for
#: errors using ``glGetError``. This will severely impact performance,
#: but provides useful exceptions at the point of failure. By default,
#: this option is enabled if ``__debug__`` is (i.e., if Python was not run
#: with the -O option). It is disabled by default when pyglet is "frozen"
#: within a py2exe or py2app library archive.
#: shadow_window
#: By default, pyglet creates a hidden window with a GL context when
#: pyglet.gl is imported. This allows resources to be loaded before
#: the application window is created, and permits GL objects to be
#: shared between windows even after they've been closed. You can
#: disable the creation of the shadow window by setting this option to
#: False. Recommended for advanced devlopers only.
#:
#: **Since:** pyglet 1.1
#: vsync
#: If set, the `pyglet.window.Window.vsync` property is ignored, and
#: this option overrides it (to either force vsync on or off). If unset,
#: or set to None, the `pyglet.window.Window.vsync` property behaves
#: as documented.
#: xsync
#: If set (the default), pyglet will attempt to synchronise the drawing of
#: double-buffered windows to the border updates of the X11 window
#: manager. This improves the appearance of the window during resize
#: operations. This option only affects double-buffered windows on
#: X11 servers supporting the Xsync extension with a window manager
#: that implements the _NET_WM_SYNC_REQUEST protocol.
#:
#: **Since:** pyglet 1.1
#:
# Default option values; see the comment block above for the documented
# (non-development) keys.  Overridden by PYGLET_* environment variables in
# _read_environment() below.
options = {
    'audio': ('directsound', 'openal', 'alsa', 'silent'),
    'font': ('gdiplus', 'win32'), # ignored outside win32; win32 is deprecated
    'debug_font': False,
    'debug_gl': not _enable_optimisations,
    'debug_gl_trace': False,
    'debug_gl_trace_args': False,
    'debug_graphics_batch': False,
    'debug_lib': False,
    'debug_media': False,
    'debug_texture': False,
    'debug_trace': False,
    'debug_trace_args': False,
    'debug_trace_depth': 1,
    'debug_trace_flush': True,
    'debug_win32': False,
    'debug_x11': False,
    'graphics_vbo': True,
    'shadow_window': True,
    'vsync': None,
    'xsync': True,
}

# Type of each option, used by _read_environment() to coerce the string
# value of the corresponding PYGLET_* environment variable.
_option_types = {
    'audio': tuple,
    'font': tuple,
    'debug_font': bool,
    'debug_gl': bool,
    'debug_gl_trace': bool,
    'debug_gl_trace_args': bool,
    'debug_graphics_batch': bool,
    'debug_lib': bool,
    'debug_media': bool,
    'debug_texture': bool,
    'debug_trace': bool,
    'debug_trace_args': bool,
    'debug_trace_depth': int,
    'debug_trace_flush': bool,
    'debug_win32': bool,
    'debug_x11': bool,
    'graphics_vbo': bool,
    'shadow_window': bool,
    'vsync': bool,
    'xsync': bool,
}
def _read_environment():
    '''Read defaults for options from environment.

    Each option key maps to an environment variable named ``PYGLET_<KEY>``
    (upper-cased).  Values are coerced according to `_option_types`:
    comma-separated for tuples, the strings 'true', 'TRUE', 'True' or '1'
    for booleans, and int() for integers.
    '''
    for key in options:
        # Build the variable name once; the original computed `env` and
        # then never used it, recomputing the same string for the lookup.
        env = 'PYGLET_%s' % key.upper()
        try:
            value = os.environ[env]
        except KeyError:
            # Option not overridden in the environment; keep the default.
            continue
        # Keep the try block to the environment lookup only, so a genuine
        # bug (e.g. a key missing from _option_types) raises instead of
        # being silently swallowed as a KeyError.
        if _option_types[key] is tuple:
            options[key] = value.split(',')
        elif _option_types[key] is bool:
            options[key] = value in ('true', 'TRUE', 'True', '1')
        elif _option_types[key] is int:
            options[key] = int(value)
# Apply any PYGLET_* environment overrides immediately, before submodules
# read `options`.
_read_environment()

if sys.platform == 'cygwin':
    # This hack pretends that the posix-like ctypes provides windows
    # functionality. COM does not work with this hack, so there is no
    # DirectSound support.
    import ctypes
    ctypes.windll = ctypes.cdll
    ctypes.oledll = ctypes.cdll
    ctypes.WINFUNCTYPE = ctypes.CFUNCTYPE
    ctypes.HRESULT = ctypes.c_long

# Call tracing
# ------------

# Cache of shortened source-file paths used by _trace_frame, keyed by the
# original full path.
_trace_filename_abbreviations = {}
def _trace_repr(value, size=40):
    '''Return repr(value), middle-ellipsized when longer than `size`.

    The head and tail of the repr are kept and joined with '...'.
    '''
    text = repr(value)
    if len(text) <= size:
        return text
    head = text[:size//2-2]
    tail = text[-size//2-1:]
    return head + '...' + tail
def _trace_frame(frame, indent):
    '''Print a one-line description of `frame` prefixed with `indent`.

    Used by the debug_trace profile hook.  Ctypes trampoline frames (calls
    going through pyglet.lib._TraceFunction) are reported by the wrapped
    function's name with the location '[ctypes]'; ordinary Python frames
    are reported as 'name (abbreviated/path.py:line)'.  Reads the module
    globals _trace_args and _trace_flush.
    '''
    from pyglet import lib
    import os
    if frame.f_code is lib._TraceFunction.__call__.func_code:
        # A ctypes call routed through the _TraceFunction wrapper; the real
        # callable is stashed on the wrapper instance.
        is_ctypes = True
        func = frame.f_locals['self']._func
        name = func.__name__
        location = '[ctypes]'
    else:
        is_ctypes = False
        code = frame.f_code
        name = code.co_name
        path = code.co_filename
        line = code.co_firstlineno
        try:
            filename = _trace_filename_abbreviations[path]
        except KeyError:
            # Trim path down: keep appending trailing path components until
            # roughly 30 characters, then prefix with '...' if truncated.
            dir = ''
            path, filename = os.path.split(path)
            while len(dir + filename) < 30:
                filename = os.path.join(dir, filename)
                path, dir = os.path.split(path)
                if not dir:
                    # Reached the filesystem root: the whole path fits.
                    filename = os.path.join('', filename)
                    break
            else:
                # Loop ended without break: path was truncated.
                filename = os.path.join('...', filename)
            _trace_filename_abbreviations[path] = filename
        location = '(%s:%d)' % (filename, line)
    if indent:
        name = 'Called from %s' % name
    print '%s%s %s' % (indent, name, location)
    if _trace_args:
        if is_ctypes:
            args = [_trace_repr(arg) for arg in frame.f_locals['args']]
            print ' %sargs=(%s)' % (indent, ', '.join(args))
        else:
            # Report each named argument of the Python frame.
            for argname in code.co_varnames[:code.co_argcount]:
                try:
                    argvalue = _trace_repr(frame.f_locals[argname])
                    print ' %s%s=%s' % (indent, argname, argvalue)
                except:
                    # NOTE(review): bare except deliberately left as-is;
                    # repr() of arbitrary locals may raise anything.
                    pass
    if _trace_flush:
        sys.stdout.flush()
def _trace_func(frame, event, arg):
    '''Profile hook installed by _install_trace.

    On each 'call' event, prints the current frame and up to _trace_depth-1
    of its callers, indenting each level.  On 'exception' events, reports
    the exception as it is first raised (before any handler runs).
    '''
    if event == 'call':
        indent = ''
        for i in range(_trace_depth):
            _trace_frame(frame, indent)
            indent += ' '
            frame = frame.f_back
            if not frame:
                # Fewer caller frames than _trace_depth; stop early.
                break
    elif event == 'exception':
        (exception, value, traceback) = arg
        print 'First chance exception raised:', repr(exception)
def _install_trace():
    '''Install _trace_func as the global profile hook.

    The hook is invoked on every call, return and exception event.
    '''
    sys.setprofile(_trace_func)
# Snapshot the trace-related options into module globals so the profile
# callback (which runs on every function call) avoids dict lookups.
_trace_args = options['debug_trace_args']
_trace_depth = options['debug_trace_depth']
_trace_flush = options['debug_trace_flush']
if options['debug_trace']:
    _install_trace()
# Lazy loading
# ------------
class _ModuleProxy(object):
    '''Lazy-loading stand-in for a pyglet submodule.

    The real module ``pyglet.<name>`` is imported on first attribute
    access (get or set); the proxy then caches it and replaces itself in
    the package namespace so subsequent accesses go to the module
    directly.  The original duplicated the import-and-cache logic in both
    `__getattr__` and `__setattr__`; it is factored into `_load_module`.
    '''
    # The real module once loaded; None while still lazy.
    _module = None

    def __init__(self, name):
        # Write through __dict__ to avoid triggering our own __setattr__.
        self.__dict__['_module_name'] = name

    def _load_module(self):
        '''Import the real submodule, cache it and patch the namespace.'''
        import_name = 'pyglet.%s' % self._module_name
        __import__(import_name)
        module = sys.modules[import_name]
        # Bypass __setattr__ so the assignment is not forwarded.
        object.__setattr__(self, '_module', module)
        # Replace the proxy in this package's globals.
        globals()[self._module_name] = module
        return module

    def __getattr__(self, name):
        try:
            return getattr(self._module, name)
        except AttributeError:
            if self._module is not None:
                # Module is loaded; the attribute genuinely doesn't exist.
                raise
            return getattr(self._load_module(), name)

    def __setattr__(self, name, value):
        try:
            setattr(self._module, name, value)
        except AttributeError:
            if self._module is not None:
                raise
            setattr(self._load_module(), name, value)
# Instantiate a lazy proxy for each public pyglet submodule so that e.g.
# ``pyglet.window`` is usable without an explicit import.  Under epydoc the
# real modules are documented instead (see below).
if not _is_epydoc:
    app = _ModuleProxy('app')
    clock = _ModuleProxy('clock')
    com = _ModuleProxy('com')
    event = _ModuleProxy('event')
    font = _ModuleProxy('font')
    gl = _ModuleProxy('gl')
    graphics = _ModuleProxy('graphics')
    image = _ModuleProxy('image')
    lib = _ModuleProxy('lib')
    media = _ModuleProxy('media')
    resource = _ModuleProxy('resource')
    sprite = _ModuleProxy('sprite')
    text = _ModuleProxy('text')
    window = _ModuleProxy('window')
# Fool py2exe, py2app into including all top-level modules (doesn't understand
# lazy loading)
if False:
    import app
    import clock
    import com
    import event
    import font
    import gl
    import graphics
    import image
    import lib
    import media
    import resource
    import sprite
    import text
    import window
# Hack around some epydoc bug that causes it to think pyglet.window is None.
if _is_epydoc:
    import window
| bsd-3-clause | 212d08d062d3f389f2f8efe8b2fc1ca2 | 32.884298 | 80 | 0.616016 | 3.82939 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/text/formats/html.py | 5 | 13147 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Decode HTML into attributed text.
A subset of HTML 4.01 Transitional is implemented. The following elements are
supported fully::
B BLOCKQUOTE BR CENTER CODE DD DIR DL EM FONT H1 H2 H3 H4 H5 H6 I IMG KBD
LI MENU OL P PRE Q SAMP STRONG SUB SUP TT U UL VAR
The mark (bullet or number) of a list item is separated from the body of the
list item with a tab, as the pyglet document model does not allow
out-of-stream text. This means lists display as expected, but behave a little
oddly if edited.
No CSS styling is supported.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import HTMLParser
import htmlentitydefs
import os
import re
import pyglet
from pyglet.text.formats import structured
def _hex_color(val):
    '''Expand a 24-bit ``0xRRGGBB`` integer into an [r, g, b, a] list.

    Alpha is always fully opaque (255).
    '''
    red = (val >> 16) & 0xff
    green = (val >> 8) & 0xff
    blue = val & 0xff
    return [red, green, blue, 255]
# Mapping of HTML 4.01 color keyword -> RGBA list.  Values follow the
# HTML 4.01 specification (section 6.5).  The original table was buggy:
# 'fucsia' (misspelled) carried green's value, 'green' carried lime's,
# 'lime' carried yellow's, and 'yellow' duplicated red's.
_color_names = {
    'black':    _hex_color(0x000000),
    'silver':   _hex_color(0xc0c0c0),
    'gray':     _hex_color(0x808080),
    'white':    _hex_color(0xffffff),
    'maroon':   _hex_color(0x800000),
    'red':      _hex_color(0xff0000),
    'purple':   _hex_color(0x800080),
    'fuchsia':  _hex_color(0xff00ff),
    'fucsia':   _hex_color(0xff00ff),   # misspelling kept for compatibility
    'green':    _hex_color(0x008000),
    'lime':     _hex_color(0x00ff00),
    'olive':    _hex_color(0x808000),
    'yellow':   _hex_color(0xffff00),
    'navy':     _hex_color(0x000080),
    'blue':     _hex_color(0x0000ff),
    'teal':     _hex_color(0x008080),
    'aqua':     _hex_color(0x00ffff),
}
def _parse_color(value):
    '''Parse an HTML color attribute value.

    Accepts '#rrggbb' hexadecimal notation or an HTML 4.01 color keyword
    (case-insensitive).  Returns an [r, g, b, a] list.

    Raises ValueError for unrecognised values (the original raised a bare
    ValueError() with no message, discarding the offending value).
    '''
    if value.startswith('#'):
        # int() itself raises ValueError for malformed hex digits.
        return _hex_color(int(value[1:], 16))
    try:
        return _color_names[value.lower()]
    except KeyError:
        raise ValueError('unknown color name: %r' % value)
# Runs of whitespace (including zero-width space U+200B) collapse to a
# single space outside <pre> elements.
_whitespace_re = re.compile(u'[\u0020\u0009\u000c\u200b\r\n]+', re.DOTALL)

# Elements whose character data is document metadata, not content.
_metadata_elements = ['head', 'title']

# Elements that start a new block (paragraph-level) in the document.
_block_elements = ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                   'ul', 'ol', 'dir', 'menu',
                   'pre', 'dl', 'div', 'center',
                   'noscript', 'noframes', 'blockquote', 'form',
                   'isindex', 'hr', 'table', 'fieldset', 'address',
                   # Incorrect, but we treat list items as blocks:
                   'li', 'dd', 'dt', ]

# Elements that may directly contain block elements; used to implicitly
# close open elements when a new block begins.
_block_containers = ['_top_block',
                     'body', 'div', 'center', 'object', 'applet',
                     'blockquote', 'ins', 'del', 'dd', 'li', 'form',
                     'fieldset', 'button', 'th', 'td', 'iframe', 'noscript',
                     'noframes',
                     # Incorrect, but we treat list items as blocks:
                     'ul', 'ol', 'dir', 'menu', 'dl']
class HTMLDecoder(HTMLParser.HTMLParser, structured.StructuredTextDecoder):
    '''Decoder for HTML documents.

    Combines Python's HTMLParser (which drives the handle_* callbacks) with
    pyglet's StructuredTextDecoder (which provides add_text/add_element and
    the push_style/pop_style stack).
    '''

    #: Default style attributes for unstyled text in the HTML document.
    #:
    #: :type: dict
    default_style = {
        'font_name': 'Times New Roman',
        'font_size': 12,
        'margin_bottom': '12pt',
    }

    #: Map HTML font sizes to actual font sizes, in points.
    #:
    #: :type: dict
    font_sizes = {
        1: 8,
        2: 10,
        3: 12,
        4: 14,
        5: 18,
        6: 24,
        7: 48
    }

    def decode_structured(self, text, location):
        '''Feed `text` through the HTML parser, resolving resources
        (e.g. images) relative to `location`.'''
        self.location = location
        # Stack of <font size=...> values; HTML default size is 3.
        self._font_size_stack = [3]
        # NOTE(review): list_stack is presumably initialised by the
        # StructuredTextDecoder base class — confirm against structured.py.
        self.list_stack.append(structured.UnorderedListBuilder({}))
        self.strip_leading_space = True
        self.block_begin = True
        self.need_block_begin = False
        self.element_stack = ['_top_block']
        self.in_metadata = False
        self.in_pre = False
        self.push_style('_default', self.default_style)
        self.feed(text)
        self.close()

    def get_image(self, filename):
        '''Load an image for <img>, resolved via the document location.'''
        return pyglet.image.load(filename, file=self.location.open(filename))

    def prepare_for_data(self):
        '''Emit the pending block separator before adding inline content.'''
        if self.need_block_begin:
            self.add_text('\n')
            self.block_begin = True
            self.need_block_begin = False

    def handle_data(self, data):
        '''Receive character data; collapse whitespace unless in <pre>.'''
        if self.in_metadata:
            return

        if self.in_pre:
            self.add_text(data)
        else:
            data = _whitespace_re.sub(' ', data)
            if data.strip():
                self.prepare_for_data()
                if self.block_begin or self.strip_leading_space:
                    data = data.lstrip()
                    self.block_begin = False
                self.add_text(data)
            # Remember trailing space so the next chunk doesn't double it.
            self.strip_leading_space = data.endswith(' ')

    def handle_starttag(self, tag, case_attrs):
        '''Translate a start tag into style pushes and document structure.'''
        if self.in_metadata:
            return

        element = tag.lower()
        # Normalise attribute names to lowercase.
        attrs = {}
        for key, value in case_attrs:
            attrs[key.lower()] = value

        if element in _metadata_elements:
            self.in_metadata = True
        elif element in _block_elements:
            # Pop off elements until we get to a block container.
            while self.element_stack[-1] not in _block_containers:
                self.handle_endtag(self.element_stack[-1])
            if not self.block_begin:
                self.add_text('\n')
                self.block_begin = True
                self.need_block_begin = False
        self.element_stack.append(element)

        style = {}
        if element in ('b', 'strong'):
            style['bold'] = True
        elif element in ('i', 'em', 'var'):
            style['italic'] = True
        elif element in ('tt', 'code', 'samp', 'kbd'):
            style['font_name'] = 'Courier New'
        elif element == 'u':
            # Underline in the current text color (black if unset).
            color = self.current_style.get('color')
            if color is None:
                color = [0, 0, 0, 255]
            style['underline'] = color
        elif element == 'font':
            if 'face' in attrs:
                style['font_name'] = attrs['face'].split(',')
            if 'size' in attrs:
                size = attrs['size']
                try:
                    # '+n'/'-n' are relative to the enclosing font size.
                    if size.startswith('+'):
                        size = self._font_size_stack[-1] + int(size[1:])
                    elif size.startswith('-'):
                        size = self._font_size_stack[-1] - int(size[1:])
                    else:
                        size = int(size)
                except ValueError:
                    size = 3
                self._font_size_stack.append(size)
                if size in self.font_sizes:
                    # NOTE(review): the .get default 3 is unreachable given
                    # the 'in' check; sizes outside 1-7 leave font_size
                    # unchanged.
                    style['font_size'] = self.font_sizes.get(size, 3)
            else:
                # No size attribute: keep the current size on the stack so
                # the matching </font> pop stays balanced.
                self._font_size_stack.append(self._font_size_stack[-1])
            if 'color' in attrs:
                try:
                    style['color'] = _parse_color(attrs['color'])
                except ValueError:
                    # Ignore malformed color values.
                    pass
        elif element == 'sup':
            size = self._font_size_stack[-1] - 1
            style['font_size'] = self.font_sizes.get(size, 1)
            style['baseline'] = '3pt'
        elif element == 'sub':
            size = self._font_size_stack[-1] - 1
            style['font_size'] = self.font_sizes.get(size, 1)
            style['baseline'] = '-3pt'
        elif element == 'h1':
            style['font_size'] = 24
            style['bold'] = True
            style['align'] = 'center'
        elif element == 'h2':
            style['font_size'] = 18
            style['bold'] = True
        elif element == 'h3':
            style['font_size'] = 16
            style['bold'] = True
        elif element == 'h4':
            style['font_size'] = 14
            style['bold'] = True
        elif element == 'h5':
            style['font_size'] = 12
            style['bold'] = True
        elif element == 'h6':
            style['font_size'] = 12
            style['italic'] = True
        elif element == 'br':
            # U+2028 LINE SEPARATOR: a line break within the paragraph.
            self.add_text(u'\u2028')
            self.strip_leading_space = True
        elif element == 'p':
            if attrs.get('align') in ('left', 'center', 'right'):
                style['align'] = attrs['align']
        elif element == 'center':
            style['align'] = 'center'
        elif element == 'pre':
            style['font_name'] = 'Courier New'
            style['margin_bottom'] = 0
            self.in_pre = True
        elif element == 'blockquote':
            left_margin = self.current_style.get('margin_left') or 0
            right_margin = self.current_style.get('margin_right') or 0
            style['margin_left'] = left_margin + 60
            style['margin_right'] = right_margin + 60
        elif element == 'q':
            # Opening curly quote; the closing one is emitted in
            # handle_endtag.
            self.handle_data(u'\u201c')
        elif element == 'ol':
            try:
                start = int(attrs.get('start', 1))
            except ValueError:
                start = 1
            format = attrs.get('type', '1') + '.'
            builder = structured.OrderedListBuilder(start, format)
            builder.begin(self, style)
            self.list_stack.append(builder)
        elif element in ('ul', 'dir', 'menu'):
            type = attrs.get('type', 'disc').lower()
            if type == 'circle':
                mark = u'\u25cb'
            elif type == 'square':
                mark = u'\u25a1'
            else:
                mark = u'\u25cf'
            builder = structured.UnorderedListBuilder(mark)
            builder.begin(self, style)
            self.list_stack.append(builder)
        elif element == 'li':
            self.list_stack[-1].item(self, style)
            self.strip_leading_space = True
        elif element == 'dl':
            style['margin_bottom'] = 0
        elif element == 'dd':
            left_margin = self.current_style.get('margin_left') or 0
            style['margin_left'] = left_margin + 30
        elif element == 'img':
            image = self.get_image(attrs.get('src'))
            if image:
                width = attrs.get('width')
                if width:
                    width = int(width)
                height = attrs.get('height')
                if height:
                    height = int(height)
                self.prepare_for_data()
                self.add_element(structured.ImageElement(image, width, height))
                self.strip_leading_space = False
        self.push_style(element, style)

    def handle_endtag(self, tag):
        '''Pop the style and element stacks for a closing tag.'''
        element = tag.lower()
        if element not in self.element_stack:
            # Unmatched close tag; ignore.
            return
        self.pop_style(element)
        # Pop implicitly-closed elements up to and including this one.
        while self.element_stack.pop() != element:
            pass

        if element in _metadata_elements:
            self.in_metadata = False
        elif element in _block_elements:
            self.block_begin = False
            self.need_block_begin = True

        if element == 'font' and len(self._font_size_stack) > 1:
            self._font_size_stack.pop()
        elif element == 'pre':
            self.in_pre = False
        elif element == 'q':
            self.handle_data(u'\u201d')
        elif element in ('ul', 'ol'):
            if len(self.list_stack) > 1:
                self.list_stack.pop()

    def handle_entityref(self, name):
        '''Expand a named entity such as &amp; to its character.'''
        if name in htmlentitydefs.name2codepoint:
            self.handle_data(unichr(htmlentitydefs.name2codepoint[name]))

    def handle_charref(self, name):
        '''Expand a numeric character reference (&#nnn; or &#xhh;).'''
        name = name.lower()
        try:
            if name.startswith('x'):
                self.handle_data(unichr(int(name[1:], 16)))
            else:
                self.handle_data(unichr(int(name)))
        except ValueError:
            # Malformed reference; ignore.
            pass
| bsd-3-clause | 3af061642ce2f271676adbc52a44e8fd | 35.118132 | 79 | 0.531224 | 3.953985 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/font/base.py | 5 | 14445 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Abstract classes used by pyglet.font implementations.
These classes should not be constructed directly. Instead, use the functions
in `pyglet.font` to obtain platform-specific instances. You can use these
classes as a documented interface to the concrete classes.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: base.py 2278 2008-09-23 12:18:50Z Alex.Holkner $'
import unicodedata
from pyglet.gl import *
from pyglet import image
_other_grapheme_extend = \
map(unichr, [0x09be, 0x09d7, 0x0be3, 0x0b57, 0x0bbe, 0x0bd7, 0x0cc2,
0x0cd5, 0x0cd6, 0x0d3e, 0x0d57, 0x0dcf, 0x0ddf, 0x200c,
0x200d, 0xff9e, 0xff9f]) # skip codepoints above U+10000
_logical_order_exception = \
map(unichr, range(0xe40, 0xe45) + range(0xec0, 0xec4))
_grapheme_extend = lambda c, cc: \
cc in ('Me', 'Mn') or c in _other_grapheme_extend
_CR = u'\u000d'
_LF = u'\u000a'
_control = lambda c, cc: cc in ('ZI', 'Zp', 'Cc', 'Cf') and not \
c in map(unichr, [0x000d, 0x000a, 0x200c, 0x200d])
_extend = lambda c, cc: _grapheme_extend(c, cc) or \
c in map(unichr, [0xe30, 0xe32, 0xe33, 0xe45, 0xeb0, 0xeb2, 0xeb3])
_prepend = lambda c, cc: c in _logical_order_exception
_spacing_mark = lambda c, cc: cc == 'Mc' and c not in _other_grapheme_extend
def _grapheme_break(left, right):
    '''Return True if a grapheme cluster boundary falls between the
    characters `left` and `right` (UAX #29 rules GB1-GB10).

    `left` is None at the start of text (rule GB1).
    '''
    # GB1: break at start of text.
    if left is None:
        return True

    # GB2 not required, see end of get_grapheme_clusters

    # GB3: do not break between CR and LF.  (Bug fix: the original
    # compared against the undefined name ``LF``, raising NameError
    # whenever `left` was a carriage return.)
    if left == _CR and right == _LF:
        return False

    left_cc = unicodedata.category(left)

    # GB4: break after control characters.
    if _control(left, left_cc):
        return True

    right_cc = unicodedata.category(right)

    # GB5: break before control characters.
    if _control(right, right_cc):
        return True

    # GB6, GB7, GB8 (Hangul syllable sequences) not implemented

    # GB9: do not break before extending characters.
    if _extend(right, right_cc):
        return False

    # GB9a: do not break before spacing marks.
    if _spacing_mark(right, right_cc):
        return False

    # GB9b: do not break after prepend characters.
    if _prepend(left, left_cc):
        return False

    # GB10: otherwise, break everywhere.
    return True
def get_grapheme_clusters(text):
    '''Implements Table 2 of UAX #29: Grapheme Cluster Boundaries.

    Does not currently implement Hangul syllable rules.

    :Parameters:
        `text` : unicode
            String to cluster.

    :since: pyglet 1.1.2

    :rtype: List of `unicode`
    :return: List of Unicode grapheme clusters
    '''
    clusters = []
    pending = ''
    prev = None
    for ch in text:
        if pending:
            if _grapheme_break(prev, ch):
                # Boundary reached: commit the accumulated cluster.
                clusters.append(pending)
                pending = ''
            else:
                # Placeholder entry (zero-width space) so that
                # len(clusters) stays equal to len(text).
                clusters.append(u'\u200b')
        pending += ch
        prev = ch
    # GB2: the final cluster ends at end-of-text.
    if pending:
        clusters.append(pending)
    return clusters
class Glyph(image.TextureRegion):
    '''A single glyph located within a larger texture.

    Glyphs are drawn most efficiently using the higher level APIs, for example
    `GlyphString`.

    :Ivariables:
        `advance` : int
            The horizontal advance of this glyph, in pixels.
        `vertices` : (int, int, int, int)
            The vertices of this glyph, with (0,0) originating at the
            left-side bearing at the baseline.

    '''
    # Default metrics until set_bearings is called by the glyph renderer.
    advance = 0
    vertices = (0, 0, 0, 0)

    def set_bearings(self, baseline, left_side_bearing, advance):
        '''Set metrics for this glyph.

        :Parameters:
            `baseline` : int
                Distance from the bottom of the glyph to its baseline;
                typically negative.
            `left_side_bearing` : int
                Distance to add to the left edge of the glyph.
            `advance` : int
                Distance to move the horizontal advance to the next glyph.

        '''
        self.advance = advance
        # (x1, y1, x2, y2) quad relative to the pen position on the
        # baseline; width/height come from the texture region.
        self.vertices = (
            left_side_bearing,
            -baseline,
            left_side_bearing + self.width,
            -baseline + self.height)

    def draw(self):
        '''Debug method.

        Use the higher level APIs for performance and kerning.
        '''
        glBindTexture(GL_TEXTURE_2D, self.owner.id)
        glBegin(GL_QUADS)
        self.draw_quad_vertices()
        glEnd()

    def draw_quad_vertices(self):
        '''Debug method.

        Use the higher level APIs for performance and kerning.
        '''
        # Emit the four corners counter-clockwise; tex_coords holds four
        # 3-component texture coordinates packed in one 12-tuple.
        glTexCoord3f(*self.tex_coords[:3])
        glVertex2f(self.vertices[0], self.vertices[1])
        glTexCoord3f(*self.tex_coords[3:6])
        glVertex2f(self.vertices[2], self.vertices[1])
        glTexCoord3f(*self.tex_coords[6:9])
        glVertex2f(self.vertices[2], self.vertices[3])
        glTexCoord3f(*self.tex_coords[9:12])
        glVertex2f(self.vertices[0], self.vertices[3])

    def get_kerning_pair(self, right_glyph):
        '''Not implemented.
        '''
        return 0
class GlyphTextureAtlas(image.Texture):
    '''A texture within which glyphs can be drawn.

    Glyph images are packed left-to-right in rows ("shelves"); `x`, `y`
    and `line_height` track the current packing position.
    '''
    region_class = Glyph
    x = 0
    y = 0
    line_height = 0

    def apply_blend_state(self):
        '''Set the OpenGL blend state for the glyphs in this texture.
        '''
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glEnable(GL_BLEND)

    def fit(self, image):
        '''Place `image` within this texture.

        :Parameters:
            `image` : `pyglet.image.AbstractImage`
                Image to place within the texture.

        :rtype: `Glyph`
        :return: The glyph representing the image from this texture, or None
            if the image doesn't fit.
        '''
        # Start a new row if the image doesn't fit on the current one.
        if self.x + image.width > self.width:
            self.x = 0
            self.y += self.line_height
            self.line_height = 0
        # Out of vertical space: caller must allocate a new atlas.
        if self.y + image.height > self.height:
            return None

        self.line_height = max(self.line_height, image.height)
        region = self.get_region(
            self.x, self.y, image.width, image.height)
        if image.width > 0:
            region.blit_into(image, 0, 0, 0)
            # +1 pixel gap to avoid texture-filtering bleed between glyphs.
            self.x += image.width + 1
        return region
class GlyphRenderer(object):
    '''Abstract class for creating glyph images.

    Platform subclasses implement `render` to rasterise text (typically a
    single grapheme cluster) into a `Glyph`.
    '''
    def __init__(self, font):
        # `font` is accepted for subclass use; the base class keeps no state.
        pass

    def render(self, text):
        raise NotImplementedError('Subclass must override')
class FontException(Exception):
    '''Generic exception related to errors from the font module.  Typically
    these relate to invalid font data.'''
    pass
class Font(object):
    '''Abstract font class able to produce glyphs.

    To construct a font, use `pyglet.font.load`, which will instantiate the
    platform-specific font class.

    Internally, this class is used by the platform classes to manage the set
    of textures into which glyphs are written.

    :Ivariables:
        `ascent` : int
            Maximum ascent above the baseline, in pixels.
        `descent` : int
            Maximum descent below the baseline, in pixels. Usually negative.
    '''
    # Dimensions for newly created glyph atlas textures.
    texture_width = 256
    texture_height = 256
    texture_internalformat = GL_ALPHA

    # These should also be set by subclass when known
    ascent = 0
    descent = 0

    # Hooks overridden by platform subclasses.
    glyph_renderer_class = GlyphRenderer
    texture_class = GlyphTextureAtlas

    def __init__(self):
        # Atlas textures, most recently created first (see create_glyph).
        self.textures = []
        # Cache of rendered glyphs keyed by grapheme cluster.
        self.glyphs = {}

    @classmethod
    def add_font_data(cls, data):
        '''Add font data to the font loader.

        This is a class method and affects all fonts loaded.  Data must be
        some byte string of data, for example, the contents of a TrueType font
        file. Subclasses can override this method to add the font data into
        the font registry.

        There is no way to instantiate a font given the data directly, you
        must use `pyglet.font.load` specifying the font name.
        '''
        pass

    @classmethod
    def have_font(cls, name):
        '''Determine if a font with the given name is installed.

        :Parameters:
            `name` : str
                Name of a font to search for

        :rtype: bool
        '''
        # Base class optimistically reports every font as available;
        # platform subclasses are expected to override.
        return True

    def create_glyph(self, image):
        '''Create a glyph using the given image.

        This is used internally by `Font` subclasses to add glyph data
        to the font. Glyphs are packed within large textures maintained by
        `Font`.  This method inserts the image into a font texture and returns
        a glyph reference; it is up to the subclass to add metadata to the
        glyph.

        Applications should not use this method directly.

        :Parameters:
            `image` : `pyglet.image.AbstractImage`
                The image to write to the font texture.

        :rtype: `Glyph`
        '''
        glyph = None
        # Try to fit the image into an existing atlas first.
        for texture in self.textures:
            glyph = texture.fit(image)
            if glyph:
                break
        if not glyph:
            # No room: allocate a new atlas.  If the image is larger than
            # the standard atlas size, grow the atlas (and the default for
            # subsequent atlases) to twice the image size.
            if image.width > self.texture_width or \
               image.height > self.texture_height:
                texture = self.texture_class.create_for_size(GL_TEXTURE_2D,
                    image.width * 2, image.height * 2,
                    self.texture_internalformat)
                self.texture_width = texture.width
                self.texture_height = texture.height
            else:
                texture = self.texture_class.create_for_size(GL_TEXTURE_2D,
                    self.texture_width, self.texture_height,
                    self.texture_internalformat)
            # Newest texture first, so future fits try it before the
            # (likely fuller) older atlases.
            self.textures.insert(0, texture)
            glyph = texture.fit(image)
        return glyph

    def get_glyphs(self, text):
        '''Create and return a list of Glyphs for `text`.

        If any characters do not have a known glyph representation in this
        font, a substitution will be made.

        :Parameters:
            `text` : str or unicode
                Text to render.

        :rtype: list of `Glyph`
        '''
        glyph_renderer = None
        glyphs = [] # glyphs that are committed.
        for c in get_grapheme_clusters(unicode(text)):
            # Get the glyph for 'c'. Hide tabs (Windows and Linux render
            # boxes)
            if c == '\t':
                c = ' '
            if c not in self.glyphs:
                # Lazily create the renderer on first cache miss.
                if not glyph_renderer:
                    glyph_renderer = self.glyph_renderer_class(self)
                self.glyphs[c] = glyph_renderer.render(c)
            glyphs.append(self.glyphs[c])
        return glyphs

    def get_glyphs_for_width(self, text, width):
        '''Return a list of glyphs for `text` that fit within the given width.

        If the entire text is larger than 'width', as much as possible will be
        used while breaking after a space or zero-width space character.  If a
        newline is enountered in text, only text up to that newline will be
        used.  If no break opportunities (newlines or spaces) occur within
        `width`, the text up to the first break opportunity will be used (this
        will exceed `width`).  If there are no break opportunities, the entire
        text will be used.

        You can assume that each character of the text is represented by
        exactly one glyph; so the amount of text "used up" can be determined
        by examining the length of the returned glyph list.

        :Parameters:
            `text` : str or unicode
                Text to render.
            `width` : int
                Maximum width of returned glyphs.

        :rtype: list of `Glyph`

        :see: `GlyphString`
        '''
        glyph_renderer = None
        glyph_buffer = [] # next glyphs to be added, as soon as a BP is found
        glyphs = [] # glyphs that are committed.
        for c in text:
            if c == '\n':
                # Stop at the newline, committing everything before it.
                glyphs += glyph_buffer
                break

            # Get the glyph for 'c'
            if c not in self.glyphs:
                if not glyph_renderer:
                    glyph_renderer = self.glyph_renderer_class(self)
                self.glyphs[c] = glyph_renderer.render(c)
            glyph = self.glyphs[c]

            # Add to holding buffer and measure
            glyph_buffer.append(glyph)
            width -= glyph.advance

            # If over width and have some committed glyphs, finish.
            if width <= 0 and len(glyphs) > 0:
                break

            # If a valid breakpoint (space or zero-width space), commit
            # holding buffer
            if c in u'\u0020\u200b':
                glyphs += glyph_buffer
                glyph_buffer = []

        # If nothing was committed, commit everything (no breakpoints found).
        if len(glyphs) == 0:
            glyphs = glyph_buffer

        return glyphs
| bsd-3-clause | 62be8de6ae9df0f71212d7e8e3e05d9e | 31.680995 | 79 | 0.598131 | 4.070161 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/gl/glu_info.py | 7 | 5690 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Information about version and extensions of current GLU implementation.
Usage::
from pyglet.gl import glu_info
if glu_info.have_extension('GLU_EXT_nurbs_tessellator'):
# ...
If multiple contexts are in use you can use a separate GLUInfo object for each
context. Call `set_active_context` after switching to the desired context for
each GLUInfo::
from pyglet.gl.glu_info import GLUInfo
info = GLUInfo()
info.set_active_context()
if info.have_version(1, 3):
# ...
Note that GLUInfo only returns meaningful information if a context has been
created.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: glu_info.py 1979 2008-03-28 15:23:51Z Alex.Holkner $'
from ctypes import *
import warnings
from pyglet.gl.glu import *
class GLUInfo(object):
    '''Information interface for the GLU library.

    A default instance is created automatically when the first OpenGL
    context is created; the module-level functions are conveniences bound
    to that default instance.

    When several contexts are in use, create one `GLUInfo` per context and
    call `set_active_context` while that context is current.
    '''
    #: True once `set_active_context` has been called.
    have_context = False
    #: Version string reported by GLU (meaningless until a context exists).
    version = '0.0.0'
    #: Extension name strings reported by GLU.
    extensions = []
    # Internal guard so the GLU strings are only queried once.
    _have_info = False

    def set_active_context(self):
        '''Store information for the currently active context.

        This method is called automatically for the default context.
        '''
        self.have_context = True
        if self._have_info:
            return
        raw_extensions = cast(gluGetString(GLU_EXTENSIONS), c_char_p).value
        self.extensions = raw_extensions.split()
        self.version = cast(gluGetString(GLU_VERSION), c_char_p).value
        self._have_info = True

    def _warn_without_context(self):
        # Queries made before a context exists return the stale defaults;
        # warn (as the original interface did) but do not raise.
        if not self.have_context:
            warnings.warn('No GL context created yet.')

    def have_version(self, major, minor=0, release=0):
        '''Determine if a version of GLU is supported.

        :Parameters:
            `major` : int
                The major revision number (typically 1).
            `minor` : int
                The minor revision number.
            `release` : int
                The release number.

        :rtype: bool
        :return: True if the requested or a later version is supported.
        '''
        self._warn_without_context()
        # The first space-separated token is the numeric version; padding
        # with '.0.0' lets short forms such as '1.3' yield three components.
        token = self.version.split(' ', 1)[0]
        padded = '%s.0.0' % token
        installed = tuple(int(part) for part in padded.split('.', 3)[:3])
        # Lexicographic tuple comparison == (major, minor, release) ordering.
        return installed >= (major, minor, release)

    def get_version(self):
        '''Get the current GLU version.

        :return: the GLU version
        :rtype: str
        '''
        self._warn_without_context()
        return self.version

    def have_extension(self, extension):
        '''Determine if a GLU extension is available.

        :Parameters:
            `extension` : str
                The name of the extension to test for, including its
                ``GLU_`` prefix.

        :return: True if the extension is provided by the implementation.
        :rtype: bool
        '''
        self._warn_without_context()
        return extension in self.extensions

    def get_extensions(self):
        '''Get a list of available GLU extensions.

        :return: a list of the available extensions.
        :rtype: list of str
        '''
        self._warn_without_context()
        return self.extensions
# Single instance useful for apps with only a single context (or all contexts
# have same GLU driver, common case).
_glu_info = GLUInfo()
# Module-level convenience functions: each name is a bound method of the
# shared default instance, so callers can use e.g.
# ``glu_info.have_version(1, 3)`` without constructing a GLUInfo themselves.
set_active_context = _glu_info.set_active_context
have_version = _glu_info.have_version
get_version = _glu_info.get_version
have_extension = _glu_info.have_extension
get_extensions = _glu_info.get_extensions
| bsd-3-clause | b2f6ca23e5ea76e187c13fa4d6ef12cb | 34.5625 | 78 | 0.647276 | 4.287867 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/app/carbon.py | 7 | 5804 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
from pyglet.app import windows, BaseEventLoop
from pyglet.window.carbon import carbon, types, constants, _oscheck
# ctypes prototype for Carbon's EventLoopTimerProcPtr callback:
# void (*)(EventLoopTimerRef inTimer, void *inUserData).
EventLoopTimerProc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
# The Carbon constant re-wrapped as a ctypes double so it can be passed
# directly to calls expecting an EventTimerInterval/EventDuration argument.
kEventDurationForever = ctypes.c_double(constants.kEventDurationForever)
class CarbonEventLoop(BaseEventLoop):
    # Carbon (Mac OS X) implementation of the pyglet event loop.  Events are
    # pulled from the main Carbon event queue; an event-loop timer drives the
    # idle machinery via `_timer_proc`.
    def run(self):
        '''Run the event loop until `has_exit` becomes true.

        Receives Carbon events and forwards them to the standard event
        dispatcher; fires a manual idle pass whenever the queue drains or
        an idle pass has been forced by `_timer_proc`.
        '''
        self._setup()
        e = ctypes.c_void_p()
        event_dispatcher = carbon.GetEventDispatcherTarget()
        self._event_loop = event_loop = carbon.GetMainEventLoop()
        event_queue = carbon.GetMainEventQueue()
        self._timer = timer = ctypes.c_void_p()
        # NOTE: idle_event_proc (the ctypes callback wrapper) must stay
        # referenced while the timer is installed; it lives in this frame
        # for the whole loop, and the timer is removed before we return.
        idle_event_proc = EventLoopTimerProc(self._timer_proc)
        carbon.InstallEventLoopTimer(event_loop,
                                     ctypes.c_double(0.1), #?
                                     kEventDurationForever,
                                     idle_event_proc,
                                     None,
                                     ctypes.byref(timer))
        self._force_idle = False
        self._allow_polling = True
        self.dispatch_event('on_enter')
        while not self.has_exit:
            # A forced idle pass polls (duration 0) so control returns
            # immediately; otherwise block until an event arrives.
            if self._force_idle:
                duration = 0
            else:
                duration = kEventDurationForever
            if carbon.ReceiveNextEvent(0, None, duration,
                                       True, ctypes.byref(e)) == 0:
                carbon.SendEventToEventTarget(e, event_dispatcher)
                carbon.ReleaseEvent(e)
            # Manual idle event
            if carbon.GetNumEventsInQueue(event_queue) == 0 or self._force_idle:
                self._force_idle = False
                self._timer_proc(timer, None, False)
        carbon.RemoveEventLoopTimer(self._timer)
        self.dispatch_event('on_exit')
    def _stop_polling(self):
        # Fire the idle timer immediately so _timer_proc recomputes the
        # next sleep interval.
        carbon.SetEventLoopTimerNextFireTime(self._timer, ctypes.c_double(0.0))
    def _enter_blocking(self):
        # The OS is about to run a modal loop (e.g. live resize/drag):
        # disable polling and let the timer fire at once.
        carbon.SetEventLoopTimerNextFireTime(self._timer, ctypes.c_double(0.0))
        self._allow_polling = False
    def _exit_blocking(self):
        # Modal OS loop finished; polling may resume.
        self._allow_polling = True
    def _timer_proc(self, timer, data, in_events=True):
        '''Idle callback fired by the Carbon event-loop timer.

        Handles per-window live-resize/drag bookkeeping and deferred window
        recreation, then asks ``self.idle()`` how long to sleep and
        reschedules the timer.  `in_events` is True when Carbon invokes the
        callback during ReceiveNextEvent, False when called manually from
        `run`.
        '''
        allow_polling = True
        for window in windows:
            # Check for live resizing
            if window._resizing is not None:
                allow_polling = False
                old_width, old_height = window._resizing
                rect = types.Rect()
                carbon.GetWindowBounds(window._window,
                                       constants.kWindowContentRgn,
                                       ctypes.byref(rect))
                width = rect.right - rect.left
                height = rect.bottom - rect.top
                if width != old_width or height != old_height:
                    window._resizing = width, height
                    window.switch_to()
                    window.dispatch_event('on_resize', width, height)
            # Check for live dragging
            if window._dragging:
                allow_polling = False
            # Check for deferred recreate
            if window._recreate_deferred:
                if in_events:
                    # Break out of ReceiveNextEvent so it can be processed
                    # in next iteration.
                    carbon.QuitEventLoop(self._event_loop)
                    self._force_idle = True
                else:
                    # Do it now.
                    window._recreate_immediate()
        sleep_time = self.idle()
        if sleep_time is None:
            sleep_time = constants.kEventDurationForever
        elif sleep_time < 0.01 and allow_polling and self._allow_polling:
            # Switch event loop to polling.
            if in_events:
                carbon.QuitEventLoop(self._event_loop)
                self._force_idle = True
            sleep_time = constants.kEventDurationForever
        carbon.SetEventLoopTimerNextFireTime(timer, ctypes.c_double(sleep_time))
| bsd-3-clause | 862dcb6004b12f18ae79f3c74799551c | 39.305556 | 80 | 0.592695 | 4.509713 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/media/drivers/openal/lib_openal.py | 5 | 27903 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for openal
Generated with:
../tools/wraptypes/wrap.py /usr/include/AL/al.h -lopenal -olib_openal.py
.. Hacked to remove non-existent library functions.
TODO add alGetError check.
.. alListener3i and alListeneriv are present in my OS X 10.4 but not another
10.4 user's installation. They've also been removed for compatibility.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: lib_openal.py 2270 2008-09-21 08:01:58Z Alex.Holkner $'
import ctypes
from ctypes import *
import sys
import pyglet.lib
_lib = pyglet.lib.load_library('openal', win32='openal32',
framework='/System/Library/Frameworks/OpenAL.framework')
# Candidate fixed-width integer types used to alias a pointer-difference
# type.  Some builds of ctypes apparently do not define c_int64; it is a
# pretty good bet that those builds do not have 64-bit pointers either.
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
    _int_types = _int_types + (ctypes.c_int64,)
# Bind c_ptrdiff_t to whichever candidate matches the width of size_t.
for _candidate in _int_types:
    if sizeof(_candidate) == sizeof(c_size_t):
        c_ptrdiff_t = _candidate
class c_void(Structure):
    '''Stand-in for C ``void`` as a pointer target.

    ``c_void_p`` is a buggy return type that converts to ``int``, so
    what would be ``POINTER(None)`` (i.e. ``c_void_p``) is written as
    ``POINTER(c_void)`` instead, which can be treated as a real pointer.
    '''
    _fields_ = [('dummy', c_int)]
# --- Export macros and legacy (pre-1.0) tokens from al.h --------------------
AL_API = 0 # /usr/include/AL/al.h:39
ALAPI = 0 # /usr/include/AL/al.h:59
AL_INVALID = -1 # /usr/include/AL/al.h:61
AL_ILLEGAL_ENUM = 0 # /usr/include/AL/al.h:62
AL_ILLEGAL_COMMAND = 0 # /usr/include/AL/al.h:63
# --- ctypes aliases for the OpenAL scalar typedefs --------------------------
ALboolean = c_int # Better return type than c_char, as generated
ALchar = c_char # /usr/include/AL/al.h:73
ALbyte = c_char # /usr/include/AL/al.h:76
ALubyte = c_ubyte # /usr/include/AL/al.h:79
ALshort = c_short # /usr/include/AL/al.h:82
ALushort = c_ushort # /usr/include/AL/al.h:85
ALint = c_int # /usr/include/AL/al.h:88
ALuint = c_uint # /usr/include/AL/al.h:91
ALsizei = c_int # /usr/include/AL/al.h:94
ALenum = c_int # /usr/include/AL/al.h:97
ALfloat = c_float # /usr/include/AL/al.h:100
ALdouble = c_double # /usr/include/AL/al.h:103
ALvoid = None # /usr/include/AL/al.h:106
# --- Boolean values ---------------------------------------------------------
AL_NONE = 0 # /usr/include/AL/al.h:112
AL_FALSE = 0 # /usr/include/AL/al.h:115
AL_TRUE = 1 # /usr/include/AL/al.h:118
# --- Source / listener attribute enums --------------------------------------
AL_SOURCE_RELATIVE = 514 # /usr/include/AL/al.h:121
AL_CONE_INNER_ANGLE = 4097 # /usr/include/AL/al.h:130
AL_CONE_OUTER_ANGLE = 4098 # /usr/include/AL/al.h:137
AL_PITCH = 4099 # /usr/include/AL/al.h:145
AL_POSITION = 4100 # /usr/include/AL/al.h:157
AL_DIRECTION = 4101 # /usr/include/AL/al.h:160
AL_VELOCITY = 4102 # /usr/include/AL/al.h:163
AL_LOOPING = 4103 # /usr/include/AL/al.h:171
AL_BUFFER = 4105 # /usr/include/AL/al.h:178
AL_GAIN = 4106 # /usr/include/AL/al.h:191
AL_MIN_GAIN = 4109 # /usr/include/AL/al.h:200
AL_MAX_GAIN = 4110 # /usr/include/AL/al.h:209
AL_ORIENTATION = 4111 # /usr/include/AL/al.h:216
# --- Source state (query via AL_SOURCE_STATE) -------------------------------
AL_SOURCE_STATE = 4112 # /usr/include/AL/al.h:221
AL_INITIAL = 4113 # /usr/include/AL/al.h:222
AL_PLAYING = 4114 # /usr/include/AL/al.h:223
AL_PAUSED = 4115 # /usr/include/AL/al.h:224
AL_STOPPED = 4116 # /usr/include/AL/al.h:225
# --- Buffer-queue bookkeeping -----------------------------------------------
AL_BUFFERS_QUEUED = 4117 # /usr/include/AL/al.h:230
AL_BUFFERS_PROCESSED = 4118 # /usr/include/AL/al.h:231
# --- Playback position queries ----------------------------------------------
AL_SEC_OFFSET = 4132 # /usr/include/AL/al.h:236
AL_SAMPLE_OFFSET = 4133 # /usr/include/AL/al.h:237
AL_BYTE_OFFSET = 4134 # /usr/include/AL/al.h:238
# --- Source type -------------------------------------------------------------
AL_SOURCE_TYPE = 4135 # /usr/include/AL/al.h:246
AL_STATIC = 4136 # /usr/include/AL/al.h:247
AL_STREAMING = 4137 # /usr/include/AL/al.h:248
AL_UNDETERMINED = 4144 # /usr/include/AL/al.h:249
# --- PCM buffer formats -------------------------------------------------------
AL_FORMAT_MONO8 = 4352 # /usr/include/AL/al.h:252
AL_FORMAT_MONO16 = 4353 # /usr/include/AL/al.h:253
AL_FORMAT_STEREO8 = 4354 # /usr/include/AL/al.h:254
AL_FORMAT_STEREO16 = 4355 # /usr/include/AL/al.h:255
# --- Distance attenuation parameters -----------------------------------------
AL_REFERENCE_DISTANCE = 4128 # /usr/include/AL/al.h:265
AL_ROLLOFF_FACTOR = 4129 # /usr/include/AL/al.h:273
AL_CONE_OUTER_GAIN = 4130 # /usr/include/AL/al.h:282
AL_MAX_DISTANCE = 4131 # /usr/include/AL/al.h:292
# --- Buffer attribute queries -------------------------------------------------
AL_FREQUENCY = 8193 # /usr/include/AL/al.h:300
AL_BITS = 8194 # /usr/include/AL/al.h:301
AL_CHANNELS = 8195 # /usr/include/AL/al.h:302
AL_SIZE = 8196 # /usr/include/AL/al.h:303
# --- Buffer states ------------------------------------------------------------
AL_UNUSED = 8208 # /usr/include/AL/al.h:310
AL_PENDING = 8209 # /usr/include/AL/al.h:311
AL_PROCESSED = 8210 # /usr/include/AL/al.h:312
# --- Error codes returned by alGetError ---------------------------------------
AL_NO_ERROR = 0 # /usr/include/AL/al.h:316
AL_INVALID_NAME = 40961 # /usr/include/AL/al.h:321
AL_INVALID_ENUM = 40962 # /usr/include/AL/al.h:326
AL_INVALID_VALUE = 40963 # /usr/include/AL/al.h:331
AL_INVALID_OPERATION = 40964 # /usr/include/AL/al.h:336
AL_OUT_OF_MEMORY = 40965 # /usr/include/AL/al.h:342
# --- alGetString tokens -------------------------------------------------------
AL_VENDOR = 45057 # /usr/include/AL/al.h:346
AL_VERSION = 45058 # /usr/include/AL/al.h:347
AL_RENDERER = 45059 # /usr/include/AL/al.h:348
AL_EXTENSIONS = 45060 # /usr/include/AL/al.h:349
# --- Global state -------------------------------------------------------------
AL_DOPPLER_FACTOR = 49152 # /usr/include/AL/al.h:356
AL_DOPPLER_VELOCITY = 49153 # /usr/include/AL/al.h:361
AL_SPEED_OF_SOUND = 49155 # /usr/include/AL/al.h:366
# --- Distance attenuation models ----------------------------------------------
AL_DISTANCE_MODEL = 53248 # /usr/include/AL/al.h:375
AL_INVERSE_DISTANCE = 53249 # /usr/include/AL/al.h:376
AL_INVERSE_DISTANCE_CLAMPED = 53250 # /usr/include/AL/al.h:377
AL_LINEAR_DISTANCE = 53251 # /usr/include/AL/al.h:378
AL_LINEAR_DISTANCE_CLAMPED = 53252 # /usr/include/AL/al.h:379
AL_EXPONENT_DISTANCE = 53253 # /usr/include/AL/al.h:380
AL_EXPONENT_DISTANCE_CLAMPED = 53254 # /usr/include/AL/al.h:381
# ---------------------------------------------------------------------------
# Entry-point bindings (al.h:386-657).  Each module-level name is the ctypes
# function from the OpenAL shared library, with its C signature attached.
def _bind(name, restype, argtypes):
    '''Look up *name* in the OpenAL library and attach its ctypes signature.'''
    func = getattr(_lib, name)
    func.restype = restype
    func.argtypes = argtypes
    return func

# State management and queries (al.h:386-412)
alEnable = _bind('alEnable', None, [ALenum])
alDisable = _bind('alDisable', None, [ALenum])
alIsEnabled = _bind('alIsEnabled', ALboolean, [ALenum])
alGetString = _bind('alGetString', POINTER(ALchar), [ALenum])
alGetBooleanv = _bind('alGetBooleanv', None, [ALenum, POINTER(ALboolean)])
alGetIntegerv = _bind('alGetIntegerv', None, [ALenum, POINTER(ALint)])
alGetFloatv = _bind('alGetFloatv', None, [ALenum, POINTER(ALfloat)])
alGetDoublev = _bind('alGetDoublev', None, [ALenum, POINTER(ALdouble)])
alGetBoolean = _bind('alGetBoolean', ALboolean, [ALenum])
alGetInteger = _bind('alGetInteger', ALint, [ALenum])
alGetFloat = _bind('alGetFloat', ALfloat, [ALenum])
alGetDouble = _bind('alGetDouble', ALdouble, [ALenum])
# Error handling (al.h:419)
alGetError = _bind('alGetError', ALenum, [])
# Extension support (al.h:427-431)
alIsExtensionPresent = _bind('alIsExtensionPresent', ALboolean,
                             [POINTER(ALchar)])
alGetProcAddress = _bind('alGetProcAddress', POINTER(c_void), [POINTER(ALchar)])
alGetEnumValue = _bind('alGetEnumValue', ALenum, [POINTER(ALchar)])
# Listener attributes (al.h:450-475)
alListenerf = _bind('alListenerf', None, [ALenum, ALfloat])
alListener3f = _bind('alListener3f', None, [ALenum, ALfloat, ALfloat, ALfloat])
alListenerfv = _bind('alListenerfv', None, [ALenum, POINTER(ALfloat)])
alListeneri = _bind('alListeneri', None, [ALenum, ALint])
# alListener3i and alListeneriv are deliberately not bound: they are absent
# from some OS X installations (see module docstring).
alGetListenerf = _bind('alGetListenerf', None, [ALenum, POINTER(ALfloat)])
alGetListener3f = _bind('alGetListener3f', None,
                        [ALenum, POINTER(ALfloat), POINTER(ALfloat),
                         POINTER(ALfloat)])
alGetListenerfv = _bind('alGetListenerfv', None, [ALenum, POINTER(ALfloat)])
alGetListeneri = _bind('alGetListeneri', None, [ALenum, POINTER(ALint)])
alGetListener3i = _bind('alGetListener3i', None,
                        [ALenum, POINTER(ALint), POINTER(ALint),
                         POINTER(ALint)])
alGetListeneriv = _bind('alGetListeneriv', None, [ALenum, POINTER(ALint)])
# Source management (al.h:512-548)
alGenSources = _bind('alGenSources', None, [ALsizei, POINTER(ALuint)])
alDeleteSources = _bind('alDeleteSources', None, [ALsizei, POINTER(ALuint)])
alIsSource = _bind('alIsSource', ALboolean, [ALuint])
alSourcef = _bind('alSourcef', None, [ALuint, ALenum, ALfloat])
alSource3f = _bind('alSource3f', None,
                   [ALuint, ALenum, ALfloat, ALfloat, ALfloat])
alSourcefv = _bind('alSourcefv', None, [ALuint, ALenum, POINTER(ALfloat)])
alSourcei = _bind('alSourcei', None, [ALuint, ALenum, ALint])
# alSource3i and alSourceiv are deliberately not bound (compatibility; see
# module docstring).
alGetSourcef = _bind('alGetSourcef', None, [ALuint, ALenum, POINTER(ALfloat)])
alGetSource3f = _bind('alGetSource3f', None,
                      [ALuint, ALenum, POINTER(ALfloat), POINTER(ALfloat),
                       POINTER(ALfloat)])
alGetSourcefv = _bind('alGetSourcefv', None, [ALuint, ALenum, POINTER(ALfloat)])
alGetSourcei = _bind('alGetSourcei', None, [ALuint, ALenum, POINTER(ALint)])
# alGetSource3i is deliberately not bound (compatibility; see module docstring).
alGetSourceiv = _bind('alGetSourceiv', None, [ALuint, ALenum, POINTER(ALint)])
# Source playback, vector forms (al.h:556-565)
alSourcePlayv = _bind('alSourcePlayv', None, [ALsizei, POINTER(ALuint)])
alSourceStopv = _bind('alSourceStopv', None, [ALsizei, POINTER(ALuint)])
alSourceRewindv = _bind('alSourceRewindv', None, [ALsizei, POINTER(ALuint)])
alSourcePausev = _bind('alSourcePausev', None, [ALsizei, POINTER(ALuint)])
# Source playback, scalar forms (al.h:572-581)
alSourcePlay = _bind('alSourcePlay', None, [ALuint])
alSourceStop = _bind('alSourceStop', None, [ALuint])
alSourceRewind = _bind('alSourceRewind', None, [ALuint])
alSourcePause = _bind('alSourcePause', None, [ALuint])
# Buffer queueing (al.h:586-588)
alSourceQueueBuffers = _bind('alSourceQueueBuffers', None,
                             [ALuint, ALsizei, POINTER(ALuint)])
alSourceUnqueueBuffers = _bind('alSourceUnqueueBuffers', None,
                               [ALuint, ALsizei, POINTER(ALuint)])
# Buffer management (al.h:606-645)
alGenBuffers = _bind('alGenBuffers', None, [ALsizei, POINTER(ALuint)])
alDeleteBuffers = _bind('alDeleteBuffers', None, [ALsizei, POINTER(ALuint)])
alIsBuffer = _bind('alIsBuffer', ALboolean, [ALuint])
alBufferData = _bind('alBufferData', None,
                     [ALuint, ALenum, POINTER(ALvoid), ALsizei, ALsizei])
alBufferf = _bind('alBufferf', None, [ALuint, ALenum, ALfloat])
alBuffer3f = _bind('alBuffer3f', None,
                   [ALuint, ALenum, ALfloat, ALfloat, ALfloat])
alBufferfv = _bind('alBufferfv', None, [ALuint, ALenum, POINTER(ALfloat)])
alBufferi = _bind('alBufferi', None, [ALuint, ALenum, ALint])
alBuffer3i = _bind('alBuffer3i', None, [ALuint, ALenum, ALint, ALint, ALint])
alBufferiv = _bind('alBufferiv', None, [ALuint, ALenum, POINTER(ALint)])
alGetBufferf = _bind('alGetBufferf', None, [ALuint, ALenum, POINTER(ALfloat)])
alGetBuffer3f = _bind('alGetBuffer3f', None,
                      [ALuint, ALenum, POINTER(ALfloat), POINTER(ALfloat),
                       POINTER(ALfloat)])
alGetBufferfv = _bind('alGetBufferfv', None, [ALuint, ALenum, POINTER(ALfloat)])
alGetBufferi = _bind('alGetBufferi', None, [ALuint, ALenum, POINTER(ALint)])
alGetBuffer3i = _bind('alGetBuffer3i', None,
                      [ALuint, ALenum, POINTER(ALint), POINTER(ALint),
                       POINTER(ALint)])
alGetBufferiv = _bind('alGetBufferiv', None, [ALuint, ALenum, POINTER(ALint)])
# Global parameters (al.h:651-657)
alDopplerFactor = _bind('alDopplerFactor', None, [ALfloat])
alDopplerVelocity = _bind('alDopplerVelocity', None, [ALfloat])
alSpeedOfSound = _bind('alSpeedOfSound', None, [ALfloat])
alDistanceModel = _bind('alDistanceModel', None, [ALenum])
# ctypes prototypes mirroring the LPAL* function-pointer typedefs in al.h;
# useful for wrapping entry points obtained through alGetProcAddress.
LPALENABLE = CFUNCTYPE(None, ALenum) # /usr/include/AL/al.h:662
LPALDISABLE = CFUNCTYPE(None, ALenum) # /usr/include/AL/al.h:663
LPALISENABLED = CFUNCTYPE(ALboolean, ALenum) # /usr/include/AL/al.h:664
LPALGETSTRING = CFUNCTYPE(POINTER(ALchar), ALenum) # /usr/include/AL/al.h:665
LPALGETBOOLEANV = CFUNCTYPE(None, ALenum, POINTER(ALboolean)) # /usr/include/AL/al.h:666
LPALGETINTEGERV = CFUNCTYPE(None, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:667
LPALGETFLOATV = CFUNCTYPE(None, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:668
LPALGETDOUBLEV = CFUNCTYPE(None, ALenum, POINTER(ALdouble)) # /usr/include/AL/al.h:669
LPALGETBOOLEAN = CFUNCTYPE(ALboolean, ALenum) # /usr/include/AL/al.h:670
LPALGETINTEGER = CFUNCTYPE(ALint, ALenum) # /usr/include/AL/al.h:671
LPALGETFLOAT = CFUNCTYPE(ALfloat, ALenum) # /usr/include/AL/al.h:672
LPALGETDOUBLE = CFUNCTYPE(ALdouble, ALenum) # /usr/include/AL/al.h:673
LPALGETERROR = CFUNCTYPE(ALenum) # /usr/include/AL/al.h:674
LPALISEXTENSIONPRESENT = CFUNCTYPE(ALboolean, POINTER(ALchar)) # /usr/include/AL/al.h:675
LPALGETPROCADDRESS = CFUNCTYPE(POINTER(c_void), POINTER(ALchar)) # /usr/include/AL/al.h:676
LPALGETENUMVALUE = CFUNCTYPE(ALenum, POINTER(ALchar)) # /usr/include/AL/al.h:677
LPALLISTENERF = CFUNCTYPE(None, ALenum, ALfloat) # /usr/include/AL/al.h:678
LPALLISTENER3F = CFUNCTYPE(None, ALenum, ALfloat, ALfloat, ALfloat) # /usr/include/AL/al.h:679
LPALLISTENERFV = CFUNCTYPE(None, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:680
LPALLISTENERI = CFUNCTYPE(None, ALenum, ALint) # /usr/include/AL/al.h:681
LPALLISTENER3I = CFUNCTYPE(None, ALenum, ALint, ALint, ALint) # /usr/include/AL/al.h:682
LPALLISTENERIV = CFUNCTYPE(None, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:683
LPALGETLISTENERF = CFUNCTYPE(None, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:684
LPALGETLISTENER3F = CFUNCTYPE(None, ALenum, POINTER(ALfloat), POINTER(ALfloat), POINTER(ALfloat)) # /usr/include/AL/al.h:685
LPALGETLISTENERFV = CFUNCTYPE(None, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:686
LPALGETLISTENERI = CFUNCTYPE(None, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:687
LPALGETLISTENER3I = CFUNCTYPE(None, ALenum, POINTER(ALint), POINTER(ALint), POINTER(ALint)) # /usr/include/AL/al.h:688
LPALGETLISTENERIV = CFUNCTYPE(None, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:689
LPALGENSOURCES = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:690
LPALDELETESOURCES = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:691
LPALISSOURCE = CFUNCTYPE(ALboolean, ALuint) # /usr/include/AL/al.h:692
LPALSOURCEF = CFUNCTYPE(None, ALuint, ALenum, ALfloat) # /usr/include/AL/al.h:693
LPALSOURCE3F = CFUNCTYPE(None, ALuint, ALenum, ALfloat, ALfloat, ALfloat) # /usr/include/AL/al.h:694
LPALSOURCEFV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:695
LPALSOURCEI = CFUNCTYPE(None, ALuint, ALenum, ALint) # /usr/include/AL/al.h:696
LPALSOURCE3I = CFUNCTYPE(None, ALuint, ALenum, ALint, ALint, ALint) # /usr/include/AL/al.h:697
LPALSOURCEIV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:698
LPALGETSOURCEF = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:699
LPALGETSOURCE3F = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat), POINTER(ALfloat), POINTER(ALfloat)) # /usr/include/AL/al.h:700
LPALGETSOURCEFV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:701
LPALGETSOURCEI = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:702
LPALGETSOURCE3I = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint), POINTER(ALint), POINTER(ALint)) # /usr/include/AL/al.h:703
LPALGETSOURCEIV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:704
LPALSOURCEPLAYV = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:705
LPALSOURCESTOPV = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:706
LPALSOURCEREWINDV = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:707
LPALSOURCEPAUSEV = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:708
LPALSOURCEPLAY = CFUNCTYPE(None, ALuint) # /usr/include/AL/al.h:709
LPALSOURCESTOP = CFUNCTYPE(None, ALuint) # /usr/include/AL/al.h:710
LPALSOURCEREWIND = CFUNCTYPE(None, ALuint) # /usr/include/AL/al.h:711
LPALSOURCEPAUSE = CFUNCTYPE(None, ALuint) # /usr/include/AL/al.h:712
LPALSOURCEQUEUEBUFFERS = CFUNCTYPE(None, ALuint, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:713
LPALSOURCEUNQUEUEBUFFERS = CFUNCTYPE(None, ALuint, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:714
LPALGENBUFFERS = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:715
LPALDELETEBUFFERS = CFUNCTYPE(None, ALsizei, POINTER(ALuint)) # /usr/include/AL/al.h:716
LPALISBUFFER = CFUNCTYPE(ALboolean, ALuint) # /usr/include/AL/al.h:717
LPALBUFFERDATA = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALvoid), ALsizei, ALsizei) # /usr/include/AL/al.h:718
LPALBUFFERF = CFUNCTYPE(None, ALuint, ALenum, ALfloat) # /usr/include/AL/al.h:719
LPALBUFFER3F = CFUNCTYPE(None, ALuint, ALenum, ALfloat, ALfloat, ALfloat) # /usr/include/AL/al.h:720
LPALBUFFERFV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:721
LPALBUFFERI = CFUNCTYPE(None, ALuint, ALenum, ALint) # /usr/include/AL/al.h:722
LPALBUFFER3I = CFUNCTYPE(None, ALuint, ALenum, ALint, ALint, ALint) # /usr/include/AL/al.h:723
LPALBUFFERIV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:724
LPALGETBUFFERF = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:725
LPALGETBUFFER3F = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat), POINTER(ALfloat), POINTER(ALfloat)) # /usr/include/AL/al.h:726
LPALGETBUFFERFV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALfloat)) # /usr/include/AL/al.h:727
LPALGETBUFFERI = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:728
LPALGETBUFFER3I = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint), POINTER(ALint), POINTER(ALint)) # /usr/include/AL/al.h:729
LPALGETBUFFERIV = CFUNCTYPE(None, ALuint, ALenum, POINTER(ALint)) # /usr/include/AL/al.h:730
LPALDOPPLERFACTOR = CFUNCTYPE(None, ALfloat) # /usr/include/AL/al.h:731
LPALDOPPLERVELOCITY = CFUNCTYPE(None, ALfloat) # /usr/include/AL/al.h:732
LPALSPEEDOFSOUND = CFUNCTYPE(None, ALfloat) # /usr/include/AL/al.h:733
LPALDISTANCEMODEL = CFUNCTYPE(None, ALenum) # /usr/include/AL/al.h:734
__all__ = ['AL_API', 'ALAPI', 'AL_INVALID', 'AL_ILLEGAL_ENUM',
'AL_ILLEGAL_COMMAND', 'ALboolean', 'ALchar', 'ALbyte', 'ALubyte', 'ALshort',
'ALushort', 'ALint', 'ALuint', 'ALsizei', 'ALenum', 'ALfloat', 'ALdouble',
'ALvoid', 'AL_NONE', 'AL_FALSE', 'AL_TRUE', 'AL_SOURCE_RELATIVE',
'AL_CONE_INNER_ANGLE', 'AL_CONE_OUTER_ANGLE', 'AL_PITCH', 'AL_POSITION',
'AL_DIRECTION', 'AL_VELOCITY', 'AL_LOOPING', 'AL_BUFFER', 'AL_GAIN',
'AL_MIN_GAIN', 'AL_MAX_GAIN', 'AL_ORIENTATION', 'AL_SOURCE_STATE',
'AL_INITIAL', 'AL_PLAYING', 'AL_PAUSED', 'AL_STOPPED', 'AL_BUFFERS_QUEUED',
'AL_BUFFERS_PROCESSED', 'AL_SEC_OFFSET', 'AL_SAMPLE_OFFSET', 'AL_BYTE_OFFSET',
'AL_SOURCE_TYPE', 'AL_STATIC', 'AL_STREAMING', 'AL_UNDETERMINED',
'AL_FORMAT_MONO8', 'AL_FORMAT_MONO16', 'AL_FORMAT_STEREO8',
'AL_FORMAT_STEREO16', 'AL_REFERENCE_DISTANCE', 'AL_ROLLOFF_FACTOR',
'AL_CONE_OUTER_GAIN', 'AL_MAX_DISTANCE', 'AL_FREQUENCY', 'AL_BITS',
'AL_CHANNELS', 'AL_SIZE', 'AL_UNUSED', 'AL_PENDING', 'AL_PROCESSED',
'AL_NO_ERROR', 'AL_INVALID_NAME', 'AL_INVALID_ENUM', 'AL_INVALID_VALUE',
'AL_INVALID_OPERATION', 'AL_OUT_OF_MEMORY', 'AL_VENDOR', 'AL_VERSION',
'AL_RENDERER', 'AL_EXTENSIONS', 'AL_DOPPLER_FACTOR', 'AL_DOPPLER_VELOCITY',
'AL_SPEED_OF_SOUND', 'AL_DISTANCE_MODEL', 'AL_INVERSE_DISTANCE',
'AL_INVERSE_DISTANCE_CLAMPED', 'AL_LINEAR_DISTANCE',
'AL_LINEAR_DISTANCE_CLAMPED', 'AL_EXPONENT_DISTANCE',
'AL_EXPONENT_DISTANCE_CLAMPED', 'alEnable', 'alDisable', 'alIsEnabled',
'alGetString', 'alGetBooleanv', 'alGetIntegerv', 'alGetFloatv',
'alGetDoublev', 'alGetBoolean', 'alGetInteger', 'alGetFloat', 'alGetDouble',
'alGetError', 'alIsExtensionPresent', 'alGetProcAddress', 'alGetEnumValue',
'alListenerf', 'alListener3f', 'alListenerfv', 'alListeneri', 'alListener3i',
'alListeneriv', 'alGetListenerf', 'alGetListener3f', 'alGetListenerfv',
'alGetListeneri', 'alGetListener3i', 'alGetListeneriv', 'alGenSources',
'alDeleteSources', 'alIsSource', 'alSourcef', 'alSource3f', 'alSourcefv',
'alSourcei', 'alSource3i', 'alSourceiv', 'alGetSourcef', 'alGetSource3f',
'alGetSourcefv', 'alGetSourcei', 'alGetSource3i', 'alGetSourceiv',
'alSourcePlayv', 'alSourceStopv', 'alSourceRewindv', 'alSourcePausev',
'alSourcePlay', 'alSourceStop', 'alSourceRewind', 'alSourcePause',
'alSourceQueueBuffers', 'alSourceUnqueueBuffers', 'alGenBuffers',
'alDeleteBuffers', 'alIsBuffer', 'alBufferData', 'alBufferf', 'alBuffer3f',
'alBufferfv', 'alBufferi', 'alBuffer3i', 'alBufferiv', 'alGetBufferf',
'alGetBuffer3f', 'alGetBufferfv', 'alGetBufferi', 'alGetBuffer3i',
'alGetBufferiv', 'alDopplerFactor', 'alDopplerVelocity', 'alSpeedOfSound',
'alDistanceModel', 'LPALENABLE', 'LPALDISABLE', 'LPALISENABLED',
'LPALGETSTRING', 'LPALGETBOOLEANV', 'LPALGETINTEGERV', 'LPALGETFLOATV',
'LPALGETDOUBLEV', 'LPALGETBOOLEAN', 'LPALGETINTEGER', 'LPALGETFLOAT',
'LPALGETDOUBLE', 'LPALGETERROR', 'LPALISEXTENSIONPRESENT',
'LPALGETPROCADDRESS', 'LPALGETENUMVALUE', 'LPALLISTENERF', 'LPALLISTENER3F',
'LPALLISTENERFV', 'LPALLISTENERI', 'LPALLISTENER3I', 'LPALLISTENERIV',
'LPALGETLISTENERF', 'LPALGETLISTENER3F', 'LPALGETLISTENERFV',
'LPALGETLISTENERI', 'LPALGETLISTENER3I', 'LPALGETLISTENERIV',
'LPALGENSOURCES', 'LPALDELETESOURCES', 'LPALISSOURCE', 'LPALSOURCEF',
'LPALSOURCE3F', 'LPALSOURCEFV', 'LPALSOURCEI', 'LPALSOURCE3I', 'LPALSOURCEIV',
'LPALGETSOURCEF', 'LPALGETSOURCE3F', 'LPALGETSOURCEFV', 'LPALGETSOURCEI',
'LPALGETSOURCE3I', 'LPALGETSOURCEIV', 'LPALSOURCEPLAYV', 'LPALSOURCESTOPV',
'LPALSOURCEREWINDV', 'LPALSOURCEPAUSEV', 'LPALSOURCEPLAY', 'LPALSOURCESTOP',
'LPALSOURCEREWIND', 'LPALSOURCEPAUSE', 'LPALSOURCEQUEUEBUFFERS',
'LPALSOURCEUNQUEUEBUFFERS', 'LPALGENBUFFERS', 'LPALDELETEBUFFERS',
'LPALISBUFFER', 'LPALBUFFERDATA', 'LPALBUFFERF', 'LPALBUFFER3F',
'LPALBUFFERFV', 'LPALBUFFERI', 'LPALBUFFER3I', 'LPALBUFFERIV',
'LPALGETBUFFERF', 'LPALGETBUFFER3F', 'LPALGETBUFFERFV', 'LPALGETBUFFERI',
'LPALGETBUFFER3I', 'LPALGETBUFFERIV', 'LPALDOPPLERFACTOR',
'LPALDOPPLERVELOCITY', 'LPALSPEEDOFSOUND', 'LPALDISTANCEMODEL']
| bsd-3-clause | 2677964b8e1a835152c2148e1a173b3c | 41.6 | 131 | 0.740422 | 2.579074 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/__init__.py | 10 | 1047 | """Thirdparty Packages for internal use.
"""
import sys
import os
def import_thirdparty(lib):
"""
Imports a thirdparty package "lib" by setting all paths correctly.
At the moment, there is only the "pyglet" library, so we just put
pyglet to sys.path temporarily, then import "lib" and then restore the path.
With more packages, we'll just put them to sys.path as well.
"""
seen = set()
def new_import(name, globals={}, locals={}, fromlist=[]):
if name in seen:
return old_import(name, globals, locals, fromlist)
seen.add(name)
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname( \
__file__)), "pyglet"))
try:
m = old_import(name, globals, locals, fromlist)
finally:
del sys.path[0]
return m
import __builtin__
old_import = __builtin__.__import__
__builtin__.__import__ = new_import
try:
m = __import__(lib)
finally:
__builtin__.__import__ = old_import
return m
| bsd-3-clause | 964f5c7cc2e5d251c445798775bb5050 | 28.083333 | 80 | 0.588348 | 3.892193 | false | false | false | false |
mattpap/sympy-polys | sympy/polys/tests/test_polyclasses.py | 2 | 15621 | """Tests for OO layer of several polynomial representations. """
from sympy.polys.polyclasses import (
GFP, init_normal_GFP,
DUP, init_normal_DUP,
DMP, init_normal_DMP,
SDP, init_normal_SDP,
DMF, init_normal_DMF,
ANP, init_normal_ANP,
)
from sympy.polys.algebratools import ZZ, QQ
from sympy.polys.specialpolys import f_4
from sympy.polys.polyerrors import (
ExactQuotientFailed,
)
from sympy import raises
def test_DUP___init__():
f = DUP([0,0,1,2,3], ZZ)
assert f.rep == [1,2,3]
assert f.dom == ZZ
f = DUP({2: QQ(1), 0: QQ(1)}, QQ)
assert f.rep == [QQ(1),QQ(0),QQ(1)]
assert f.dom == QQ
f = DUP(1, QQ)
assert f.rep == [QQ(1)]
assert f.dom == QQ
def test_DUP___eq__():
assert DUP([ZZ(1),ZZ(2),ZZ(3)], ZZ) == \
DUP([ZZ(1),ZZ(2),ZZ(3)], ZZ)
assert DUP([QQ(1),QQ(2),QQ(3)], QQ) == \
DUP([ZZ(1),ZZ(2),ZZ(3)], ZZ)
assert DUP([ZZ(1),ZZ(2),ZZ(3)], ZZ) == \
DUP([QQ(1),QQ(2),QQ(3)], QQ)
assert DUP([ZZ(1),ZZ(2),ZZ(4)], ZZ) != \
DUP([ZZ(1),ZZ(2),ZZ(3)], ZZ)
assert DUP([QQ(1),QQ(2),QQ(4)], QQ) != \
DUP([ZZ(1),ZZ(2),ZZ(3)], ZZ)
def test_DUP___bool__():
assert bool(DUP([], ZZ)) == False
assert bool(DUP([1], ZZ)) == True
def test_DUP_to_dict():
f = DUP([3,0,0,2,0,0,0,0,8], ZZ)
assert f.to_dict() == \
{8: 3, 5: 2, 0: 8}
assert f.to_sympy_dict() == \
{8: ZZ.to_sympy(3), 5: ZZ.to_sympy(2), 0: ZZ.to_sympy(8)}
def test_DUP_properties():
assert DUP([QQ(0)], QQ).is_zero == True
assert DUP([QQ(1)], QQ).is_zero == False
assert DUP([QQ(1)], QQ).is_one == True
assert DUP([QQ(2)], QQ).is_one == False
assert DUP([1], ZZ).is_ground == True
assert DUP([1,2,1], ZZ).is_ground == False
assert DUP([1,2,2], ZZ).is_sqf == True
assert DUP([1,2,1], ZZ).is_sqf == False
assert DUP([1,2,3], ZZ).is_monic == True
assert DUP([2,2,3], ZZ).is_monic == False
assert DUP([1,2,3], ZZ).is_primitive == True
assert DUP([2,4,6], ZZ).is_primitive == False
def test_DUP_arithmetics():
f = DUP([1,1,1], ZZ)
assert f.add_term(2, 1) == DUP([1,3,1], ZZ)
assert f.sub_term(2, 1) == DUP([1,-1,1], ZZ)
assert f.mul_term(2, 1) == DUP([2,2,2,0], ZZ)
raises(TypeError, "f.add_term(2, 'x')")
raises(TypeError, "f.sub_term(2, 'x')")
raises(TypeError, "f.mul_term(2, 'x')")
g = DUP([3,3,3], ZZ)
assert f.mul_ground(3) == g
assert g.exquo_ground(3) == f
raises(ExactQuotientFailed, "f.quo_ground(4)")
f = DUP([1,-2,3,-4], ZZ)
g = DUP([1,2,3,4], ZZ)
h = DUP([-1,2,-3,4], ZZ)
assert f.abs() == g
assert abs(f) == g
assert f.neg() == h
assert -f == h
h = DUP([2,0,6,0], ZZ)
assert f.add(g) == h
assert f + g == h
assert g + f == h
h = DUP([1,-2,3,1], ZZ)
assert f + 5 == h
assert 5 + f == h
h = DUP([-4,0,-8], ZZ)
assert f.sub(g) == h
assert f - g == h
assert g - f == -h
h = DUP([1,-2,3,-9], ZZ)
assert f - 5 == h
assert 5 - f == -h
g = DUP([2], ZZ)
h = DUP([2,-4,6,-8], ZZ)
assert f.mul(g) == h
assert f * g == h
assert g * f == h
assert f * 2 == h
assert 2 * f == h
h = DUP([4], ZZ)
assert g.sqr() == h
assert g.pow(2) == h
assert g**2 == h
raises(TypeError, "f.pow('x')")
f = DUP([3,1,1,5], ZZ)
g = DUP([5,-3,1], ZZ)
q = DUP([15, 14], ZZ)
r = DUP([52, 111], ZZ)
assert f.pdiv(g) == (q, r)
assert f.pexquo(g) == q
assert f.prem(g) == r
raises(ExactQuotientFailed, 'f.pquo(g)')
q, r = DUP([], ZZ), f
assert f.div(g) == (q, r)
assert f.exquo(g) == q
assert f.rem(g) == r
raises(ExactQuotientFailed, 'f.quo(g)')
def test_DUP_functionality():
f = DUP([1,2,3,4], ZZ)
g = DUP([3,4,3], ZZ)
assert f.degree() == 3
assert f.LC() == ZZ(1)
assert f.TC() == ZZ(4)
assert f.nth(1) == ZZ(3)
raises(TypeError, "f.nth('x')")
assert f.max_norm() == ZZ(4)
assert f.l1_norm() == ZZ(10)
assert f.diff(1) == g
assert f.eval(1) == ZZ(10)
raises(TypeError, "f.diff('x')")
f = DUP([QQ(2),QQ(0)], QQ)
g = DUP([QQ(1),QQ(0),QQ(-16)], QQ)
s = DUP([QQ(1,32),QQ(0)], QQ)
t = DUP([QQ(-1,16)], QQ)
h = DUP([QQ(1)], QQ)
assert f.half_gcdex(g) == (s, h)
assert f.gcdex(g) == (s, t, h)
assert f.invert(g) == s
f = DUP([1,-2,1], ZZ)
g = DUP([1,0,-1], ZZ)
a = DUP([2,-2], ZZ)
assert f.subresultants(g) == [f, g, a]
assert f.resultant(g) == 0
f = DUP([1,3,9,-13], ZZ)
assert f.discriminant() == -11664
f = DUP([1,2,1], ZZ)
g = DUP([1,1], ZZ)
h = DUP([1], ZZ)
assert f.cofactors(g) == (g, g, h)
assert f.gcd(g) == g
assert f.lcm(g) == f
assert f.sqf_part() == g
assert f.sqf_list() == (ZZ(1), [(g, 2)])
f = DUP([1,2,3,4,5,6], ZZ)
assert f.trunc(3) == DUP([1,-1,0,1,-1,0], ZZ)
f = DUP([QQ(3),QQ(-6)], QQ)
g = DUP([QQ(1),QQ(-2)], QQ)
assert f.monic() == g
f = DUP([3,-6], ZZ)
g = DUP([1,-2], ZZ)
assert f.content() == ZZ(3)
assert f.primitive() == (ZZ(3), g)
f = DUP([1,0,20,0,150,0,500,0,625,-2,0,-10,9], ZZ)
g = DUP([1,0,0,-2,9], ZZ)
h = DUP([1,0,5,0], ZZ)
assert g.compose(h) == f
assert f.decompose() == [g, h]
f = DUP([QQ(1),QQ(0)], QQ)
assert f.sturm() == [f, DUP([QQ(1)], QQ)]
def test_DMP___init__():
f = DMP([[0],[],[0,1,2],[3]], ZZ)
assert f.rep == [[1,2],[3]]
assert f.dom == ZZ
assert f.lev == 1
f = DMP([[1,2],[3]], ZZ, 1)
assert f.rep == [[1,2],[3]]
assert f.dom == ZZ
assert f.lev == 1
f = DMP({(1,1): 1, (0,0): 2}, ZZ, 1)
assert f.rep == [[1,0],[2]]
assert f.dom == ZZ
assert f.lev == 1
def test_DMP___eq__():
assert DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ) == \
DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ)
assert DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ) == \
DMP([[QQ(1),QQ(2)],[QQ(3)]], QQ)
assert DMP([[QQ(1),QQ(2)],[QQ(3)]], QQ) == \
DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ)
assert DMP([[[ZZ(1)]]], ZZ) != DMP([[ZZ(1)]], ZZ)
assert DMP([[ZZ(1)]], ZZ) != DMP([[[ZZ(1)]]], ZZ)
def test_DMP___bool__():
assert bool(DMP([[]], ZZ)) == False
assert bool(DMP([[1]], ZZ)) == True
def test_DUP_to_dict():
f = DMP([[3],[],[2],[],[8]], ZZ)
assert f.to_dict() == \
{(4, 0): 3, (2, 0): 2, (0, 0): 8}
assert f.to_sympy_dict() == \
{(4, 0): ZZ.to_sympy(3), (2, 0): ZZ.to_sympy(2), (0, 0): ZZ.to_sympy(8)}
def test_DMP_properties():
assert DMP([[]], ZZ).is_zero == True
assert DMP([[1]], ZZ).is_zero == False
assert DMP([[1]], ZZ).is_one == True
assert DMP([[2]], ZZ).is_one == False
assert DUP([[1]], ZZ).is_ground == True
assert DUP([[1],[2],[1]], ZZ).is_ground == False
assert DMP([[1],[2,0],[1,0]], ZZ).is_sqf == True
assert DMP([[1],[2,0],[1,0,0]], ZZ).is_sqf == False
assert DMP([[1,2],[3]], ZZ).is_monic == True
assert DMP([[2,2],[3]], ZZ).is_monic == False
assert DMP([[1,2],[3]], ZZ).is_primitive == True
assert DMP([[2,4],[6]], ZZ).is_primitive == False
def test_DMP_arithmetics():
f = DMP([[2],[2,0]], ZZ)
assert f.mul_ground(2) == DMP([[4],[4,0]], ZZ)
assert f.exquo_ground(2) == DMP([[1],[1,0]], ZZ)
raises(ExactQuotientFailed, 'f.quo_ground(3)')
f = DMP([[-5]], ZZ)
g = DMP([[5]], ZZ)
assert f.abs() == g
assert abs(f) == g
assert g.neg() == f
assert -g == f
h = DMP([[]], ZZ)
assert f.add(g) == h
assert f + g == h
assert g + f == h
assert f + 5 == h
assert 5 + f == h
h = DMP([[-10]], ZZ)
assert f.sub(g) == h
assert f - g == h
assert g - f == -h
assert f - 5 == h
assert 5 - f == -h
h = DMP([[-25]], ZZ)
assert f.mul(g) == h
assert f * g == h
assert g * f == h
assert f * 5 == h
assert 5 * f == h
h = DMP([[25]], ZZ)
assert f.sqr() == h
assert f.pow(2) == h
assert f**2 == h
raises(TypeError, "f.pow('x')")
f = DMP([[1],[],[1,0,0]], ZZ)
g = DMP([[2],[-2,0]], ZZ)
q = DMP([[2],[2,0]], ZZ)
r = DMP([[8,0,0]], ZZ)
assert f.pdiv(g) == (q, r)
assert f.pexquo(g) == q
assert f.prem(g) == r
raises(ExactQuotientFailed, 'f.pquo(g)')
f = DMP([[1],[],[1,0,0]], ZZ)
g = DMP([[1],[-1,0]], ZZ)
q = DMP([[1],[1,0]], ZZ)
r = DMP([[2,0,0]], ZZ)
assert f.div(g) == (q, r)
assert f.exquo(g) == q
assert f.rem(g) == r
assert divmod(f, g) == (q, r)
assert f // g == q
assert f % g == r
raises(ExactQuotientFailed, 'f.quo(g)')
def test_DMP_functionality():
f = DMP([[1],[2,0],[1,0,0]], ZZ)
g = DMP([[1],[1,0]], ZZ)
h = DMP([[1]], ZZ)
assert f.degree() == 2
assert f.degree_list() == (2, 2)
assert f.total_degree() == 4
assert f.LC() == ZZ(1)
assert f.TC() == ZZ(0)
assert f.nth(1, 1) == ZZ(2)
raises(TypeError, "f.nth(0, 'x')")
assert f.max_norm() == 2
assert f.l1_norm() == 4
u = DMP([[2],[2,0]], ZZ)
assert f.diff(m=1, j=0) == u
assert f.diff(m=1, j=1) == u
raises(TypeError, "f.diff(m='x', j=0)")
u = DMP([1,2,1], ZZ)
v = DMP([1,2,1], ZZ)
assert f.eval(a=1, j=0) == u
assert f.eval(a=1, j=1) == v
assert f.eval(1).eval(1) == ZZ(4)
assert f.cofactors(g) == (g, g, h)
assert f.gcd(g) == g
assert f.lcm(g) == f
u = DMP([[QQ(45),QQ(30),QQ(5)]], QQ)
v = DMP([[QQ(1),QQ(2,3),QQ(1,9)]], QQ)
assert u.monic() == v
assert (4*f).content() == ZZ(4)
assert (4*f).primitive() == (ZZ(4), f)
f = DMP([[1],[2],[3],[4],[5],[6]], ZZ)
assert f.trunc(3) == DMP([[1],[-1],[],[1],[-1],[]], ZZ)
f = DMP(f_4, ZZ)
assert f.sqf_part() == -f
assert f.sqf_list() == (ZZ(-1), [(-f, 1)])
f = DMP([[-1],[],[],[5]], ZZ)
g = DMP([[3,1],[],[]], ZZ)
h = DMP([[45,30,5]], ZZ)
r = DMP([675,675,225,25], ZZ)
assert f.subresultants(g) == [f, g, h]
assert f.resultant(g) == r
f = DMP([1,3,9,-13], ZZ)
assert f.discriminant() == -11664
f = DMP([QQ(2),QQ(0)], QQ)
g = DMP([QQ(1),QQ(0),QQ(-16)], QQ)
s = DMP([QQ(1,32),QQ(0)], QQ)
t = DMP([QQ(-1,16)], QQ)
h = DMP([QQ(1)], QQ)
assert f.half_gcdex(g) == (s, h)
assert f.gcdex(g) == (s, t, h)
assert f.invert(g) == s
f = DMP([[1],[2],[3]], QQ)
raises(ValueError, "f.half_gcdex(f)")
raises(ValueError, "f.gcdex(f)")
raises(ValueError, "f.invert(f)")
f = DMP([1,0,20,0,150,0,500,0,625,-2,0,-10,9], ZZ)
g = DMP([1,0,0,-2,9], ZZ)
h = DMP([1,0,5,0], ZZ)
assert g.compose(h) == f
assert f.decompose() == [g, h]
f = DMP([[1],[2],[3]], QQ)
raises(ValueError, "f.decompose()")
raises(ValueError, "f.sturm()")
def test_DMF__init__():
f = DMF(([[0],[],[0,1,2],[3]], [[1,2,3]]), ZZ)
assert f.num == [[1,2],[3]]
assert f.den == [[1,2,3]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1,2],[3]], [[1,2,3]]), ZZ, 1)
assert f.num == [[1,2],[3]]
assert f.den == [[1,2,3]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[-1],[-2]],[[3],[-4]]), ZZ)
assert f.num == [[-1],[-2]]
assert f.den == [[3],[-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1],[2]],[[-3],[4]]), ZZ)
assert f.num == [[-1],[-2]]
assert f.den == [[3],[-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1],[2]],[[-3],[4]]), ZZ)
assert f.num == [[-1],[-2]]
assert f.den == [[3],[-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[]],[[-3],[4]]), ZZ)
assert f.num == [[]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(17, ZZ, 1)
assert f.num == [[17]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1],[2]]), ZZ)
assert f.num == [[1],[2]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF([[0],[],[0,1,2],[3]], ZZ)
assert f.num == [[1,2],[3]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF({(1,1): 1, (0,0): 2}, ZZ, 1)
assert f.num == [[1,0],[2]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
raises(ValueError, "DMF(([1], [[1]]), ZZ)")
raises(ZeroDivisionError, "DMF(([1], []), ZZ)")
def test_DMF__eq__():
pass
def test_DMF__bool__():
assert bool(DMF([[]], ZZ)) == False
assert bool(DMF([[1]], ZZ)) == True
def test_DMF_properties():
assert DMF([[]], ZZ).is_zero == True
assert DMF([[]], ZZ).is_one == False
assert DMF([[1]], ZZ).is_zero == False
assert DMF([[1]], ZZ).is_one == True
assert DMF(([[1]], [[2]]), ZZ).is_one == False
def test_DMF_arithmetics():
f = DMF([[7],[-9]], ZZ)
g = DMF([[-7],[9]], ZZ)
assert f.neg() == -f == g
f = DMF(([[1]], [[1],[]]), ZZ)
g = DMF(([[1]], [[1,0]]), ZZ)
h = DMF(([[1],[1,0]], [[1,0],[]]), ZZ)
assert f.add(g) == f + g == h
assert g.add(f) == g + f == h
h = DMF(([[-1],[1,0]], [[1,0],[]]), ZZ)
assert f.sub(g) == f - g == h
h = DMF(([[1]], [[1,0],[]]), ZZ)
assert f.mul(g) == f*g == h
assert g.mul(f) == g*f == h
h = DMF(([[1,0]], [[1],[]]), ZZ)
assert f.quo(g) == f/g == h
h = DMF(([[1]], [[1],[],[],[]]), ZZ)
assert f.pow(3) == f**3 == h
h = DMF(([[1]], [[1,0,0,0]]), ZZ)
assert g.pow(3) == g**3 == h
def test_ANP___init__():
rep = [QQ(1),QQ(1)]
mod = [QQ(1),QQ(0),QQ(1)]
f = ANP(rep, mod, QQ)
assert f.rep == [QQ(1),QQ(1)]
assert f.mod == [QQ(1),QQ(0),QQ(1)]
assert f.dom == QQ
rep = {1: QQ(1), 0: QQ(1)}
mod = {2: QQ(1), 0: QQ(1)}
f = ANP(rep, mod, QQ)
assert f.rep == [QQ(1),QQ(1)]
assert f.mod == [QQ(1),QQ(0),QQ(1)]
assert f.dom == QQ
f = ANP(1, mod, QQ)
assert f.rep == [QQ(1)]
assert f.mod == [QQ(1),QQ(0),QQ(1)]
assert f.dom == QQ
def test_ANP___eq__():
a = ANP([QQ(1), QQ(1)], [QQ(1),QQ(0),QQ(1)], QQ)
b = ANP([QQ(1), QQ(1)], [QQ(1),QQ(0),QQ(2)], QQ)
assert (a == a) == True
assert (a != a) == False
assert (a == b) == False
assert (a != b) == True
b = ANP([QQ(1), QQ(2)], [QQ(1),QQ(0),QQ(1)], QQ)
assert (a == b) == False
assert (a != b) == True
def test_ANP___bool__():
assert bool(ANP([], [QQ(1),QQ(0),QQ(1)], QQ)) == False
assert bool(ANP([QQ(1)], [QQ(1),QQ(0),QQ(1)], QQ)) == True
def test_ANP_properties():
mod = [QQ(1),QQ(0),QQ(1)]
assert ANP([QQ(0)], mod, QQ).is_zero == True
assert ANP([QQ(1)], mod, QQ).is_zero == False
assert ANP([QQ(1)], mod, QQ).is_one == True
assert ANP([QQ(2)], mod, QQ).is_one == False
def test_ANP_arithmetics():
mod = [QQ(1),QQ(0),QQ(0),QQ(-2)]
a = ANP([QQ(2),QQ(-1),QQ(1)], mod, QQ)
b = ANP([QQ(1),QQ(2)], mod, QQ)
c = ANP([QQ(-2), QQ(1), QQ(-1)], mod, QQ)
assert a.neg() == -a == c
c = ANP([QQ(2), QQ(0), QQ(3)], mod, QQ)
assert a.add(b) == a+b == c
assert b.add(a) == b+a == c
c = ANP([QQ(2), QQ(-2), QQ(-1)], mod, QQ)
assert a.sub(b) == a-b == c
c = ANP([QQ(-2), QQ(2), QQ(1)], mod, QQ)
assert b.sub(a) == b-a == c
c = ANP([QQ(3), QQ(-1), QQ(6)], mod, QQ)
assert a.mul(b) == a*b == c
assert b.mul(a) == b*a == c
c = ANP([QQ(-1,43), QQ(9,43), QQ(5,43)], mod, QQ)
assert a.pow(0) == a**(0) == ANP(1, mod, QQ)
assert a.pow(1) == a**(1) == a
assert a.pow(-1) == a**(-1) == c
assert a.quo(a) == a.mul(a.pow(-1)) == a*a**(-1) == ANP(1, mod, QQ)
| bsd-3-clause | 3caa51fbb2aca76a52e69a99b421c545 | 21.28388 | 80 | 0.454388 | 2.419235 | false | false | false | false |
mattpap/sympy-polys | sympy/plotting/plot_camera.py | 9 | 3674 | from pyglet.gl import *
from plot_rotation import get_spherical_rotatation
from util import get_model_matrix
from util import screen_to_model, model_to_screen
from util import vec_subs
class PlotCamera(object):
min_dist = 0.05
max_dist = 500.0
min_ortho_dist = 100.0
max_ortho_dist = 10000.0
_default_dist = 6.0
_default_ortho_dist = 600.0
rot_presets = {
'xy':(0,0,0),
'xz':(-90,0,0),
'yz':(0,90,0),
'perspective':(-45,0,-45)
}
def __init__(self, window, ortho = False):
self.window = window
self.axes = self.window.plot.axes
self.ortho = ortho
self.reset()
def init_rot_matrix(self):
glPushMatrix()
glLoadIdentity()
self._rot = get_model_matrix()
glPopMatrix()
def set_rot_preset(self, preset_name):
self.init_rot_matrix()
try: r = self.rot_presets[preset_name]
except:
raise ValueError("%s is not a valid rotation preset." % preset_name)
try:
self.euler_rotate(r[0], 1, 0, 0)
self.euler_rotate(r[1], 0, 1, 0)
self.euler_rotate(r[2], 0, 0, 1)
except: pass
def reset(self):
self._dist = 0.0
self._x, self._y = 0.0, 0.0
self._rot = None
if self.ortho:
self._dist = self._default_ortho_dist
else:
self._dist = self._default_dist
self.init_rot_matrix()
def mult_rot_matrix(self, rot):
glPushMatrix()
glLoadMatrixf(rot)
glMultMatrixf(self._rot)
self._rot = get_model_matrix()
glPopMatrix()
def setup_projection(self):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
if self.ortho:
# yep, this is pseudo ortho (don't tell anyone)
gluPerspective(0.3, float(self.window.width)/float(self.window.height),
self.min_ortho_dist-0.01, self.max_ortho_dist+0.01)
else:
gluPerspective(30.0, float(self.window.width)/float(self.window.height),
self.min_dist-0.01, self.max_dist+0.01)
glMatrixMode(GL_MODELVIEW)
def _get_scale(self):
return 1.0, 1.0, 1.0
def apply_transformation(self):
glLoadIdentity()
glTranslatef(self._x, self._y, -self._dist)
if self._rot is not None:
glMultMatrixf(self._rot)
glScalef(*self._get_scale())
def spherical_rotate(self, p1, p2, sensitivity=1.0):
mat = get_spherical_rotatation(p1, p2, self.window.width, self.window.height, sensitivity)
if mat is not None: self.mult_rot_matrix(mat)
def euler_rotate(self, angle, x, y, z):
glPushMatrix()
glLoadMatrixf(self._rot)
glRotatef(angle, x, y, z)
self._rot = get_model_matrix()
glPopMatrix()
def zoom_relative(self, clicks, sensitivity):
if self.ortho:
dist_d = clicks * sensitivity * 50.0
min_dist = self.min_ortho_dist
max_dist = self.max_ortho_dist
else:
dist_d = clicks * sensitivity
min_dist = self.min_dist
max_dist = self.max_dist
new_dist = (self._dist - dist_d)
if (clicks < 0 and new_dist < max_dist) or new_dist > min_dist:
self._dist = new_dist
def mouse_translate(self, x, y, dx, dy):
glPushMatrix()
glLoadIdentity()
glTranslatef(0,0,-self._dist)
z = model_to_screen(0,0,0)[2]
d = vec_subs(screen_to_model(x,y,z), screen_to_model(x-dx,y-dy,z))
glPopMatrix()
self._x += d[0]
self._y += d[1]
| bsd-3-clause | fb58b21f5089e5c6cbc2f103abc91a40 | 29.363636 | 98 | 0.557431 | 3.228471 | false | false | false | false |
mattpap/sympy-polys | sympy/assumptions/tests/test_assumptions_2.py | 3 | 2174 | """rename this to test_assumptions.py when the old assumptions system is deleted"""
from sympy.core import symbols
from sympy.assumptions import Assume, global_assumptions
from sympy.assumptions.assume import eliminate_assume
from sympy.printing import pretty
def test_assume():
x = symbols('x')
assump = Assume(x, 'integer')
assert assump.expr == x
assert assump.key == 'integer'
assert assump.value == True
def test_False():
"""Test Assume object with False keys"""
x = symbols('x')
assump = Assume(x, 'integer', False)
assert assump.expr == x
assert assump.key == 'integer'
assert assump.value == False
def test_equal():
"""Test for equality"""
x = symbols('x')
assert Assume(x, 'positive', True) == Assume(x, 'positive', True)
assert Assume(x, 'positive', True) != Assume(x, 'positive', False)
assert Assume(x, 'positive', False) == Assume(x, 'positive', False)
def test_pretty():
x = symbols('x')
assert pretty(Assume(x, 'positive', True)) == "Assume(x, 'positive', True)"
def test_eliminate_assumptions():
a, b, x, y = symbols('abxy')
assert eliminate_assume(Assume(x, 'a', True)) == a
assert eliminate_assume(Assume(x, 'a', True), symbol=x) == a
assert eliminate_assume(Assume(x, 'a', True), symbol=y) == None
assert eliminate_assume(Assume(x, 'a', False)) == ~a
assert eliminate_assume(Assume(x, 'a', False), symbol=y) == None
assert eliminate_assume(Assume(x, 'a', True) | Assume(x, 'b')) == a | b
assert eliminate_assume(Assume(x, 'a', True) | Assume(x, 'b', False)) == a | ~b
def test_global():
"""Test for global assumptions"""
x, y = symbols('x y')
global_assumptions.add(Assume(x>0))
assert Assume(x>0) in global_assumptions
global_assumptions.remove(Assume(x>0))
assert not Assume(x>0) in global_assumptions
# same with multiple of assumptions
global_assumptions.add(Assume(x>0), Assume(y>0))
assert Assume(x>0) in global_assumptions
assert Assume(y>0) in global_assumptions
global_assumptions.clear()
assert not Assume(x>0) in global_assumptions
assert not Assume(y>0) in global_assumptions
| bsd-3-clause | b9c2433af80612fca72a592165be9c9b | 37.821429 | 83 | 0.657774 | 3.264264 | false | true | false | false |
mattpap/sympy-polys | sympy/core/tests/test_arit.py | 3 | 29690 | from sympy import Symbol, sin, cos, exp, O, sqrt, Rational, Real, re, pi, \
sympify, sqrt, Add, Mul, Pow, I, log
from sympy.utilities.pytest import XFAIL
# Generic commutative symbols shared by the tests in this module.
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
def test_bug1():
    """Regression test: calling series() must not mutate the result of re(x)."""
    assert re(x) != x
    x.series(x,0,1)
    # re(x) has to remain unevaluated even after the series call above.
    assert re(x) != x
# Module-level fixtures used throughout the tests below; b carries a
# positivity assumption.
a = Symbol("a")
b = Symbol("b", positive=True)
c = Symbol("c")
def test_Symbol():
    """Basic automatic simplification of products of Symbols."""
    product = a*b
    assert product == a*b
    # Repeated factors are collected into powers, regardless of term order.
    assert a*b*b == a*b**2
    assert a*b*b + c == c + a*b**2
    assert a*b*b - c == -c + a*b**2
def test_arit0():
    """Exercise core Add/Mul arithmetic: collection of like terms,
    cancellation, numeric coefficients and division."""
    p = Rational(5)
    # Like terms in products and sums are collected automatically.
    e=a*b
    assert e == a*b
    e=a*b+b*a
    assert e == 2*a*b
    e=a*b+b*a+a*b+p*b*a
    assert e == 8*a*b
    e=a*b+b*a+a*b+p*b*a+a
    assert e == a+8*a*b
    e=a+a
    assert e == 2*a
    e=a+b+a
    assert e == b+2*a
    e=a+b*b+a+b*b
    assert e == 2*a+2*b**2
    e=a+Rational(2)+b*b+a+b*b+p
    assert e == 7+2*a+2*b**2
    e=(a+b*b+a+b*b)*p
    assert e == 5*(2*a+2*b**2)
    e=(a*b*c+c*b*a+b*a*c)*p
    assert e == 15*a*b*c
    # Terms that cancel must reduce all the way to zero.
    e=(a*b*c+c*b*a+b*a*c)*p-Rational(15)*a*b*c
    assert e == Rational(0)
    e = Rational(50)*(a-a)
    assert e == Rational(0)
    e=b*a-b-a*b+b
    assert e == Rational(0)
    e=a*b+c**p
    assert e == a*b+c**5
    # Division becomes multiplication by a negative power.
    e=a/b
    assert e == a*b**(-1)
    e=a*2*2
    assert e == 4*a
    e=2+a*2/2
    assert e == 2+a
    e=2-a-2
    assert e == -a
    e=2*a*2
    assert e == 4*a
    e=2/a/2
    assert e == a**(-1)
    # Exponentiation is right-associative: 2**a**2 is 2**(a**2).
    e=2**a**2
    assert e == 2**(a**2)
    # Numeric coefficients distribute over sums.
    e = -(1+a)
    assert e == -1 -a
    e = Rational(1,2)*(1+a)
    assert e == Rational(1,2) + a/2
def test_div():
    """Division is represented as multiplication by a negative power."""
    quotient = a/b
    assert quotient == a*b**(-1)
    # Division applies term-wise within a sum.
    quotient = a/b+c/2
    assert quotient == a*b**(-1)+Rational(1)/2*c
    # (1 - b)/(b - 1) keeps its unevaluated product-of-factors form.
    quotient = (1-b)/(b-1)
    assert quotient == (1+-b)*((-1)+b)**(-1)
def test_pow():
    """Power construction, automatic collection of exponents, expand(),
    and the evenness rules for (-1)**k / (-2)**k."""
    n1 = Rational(1)
    n2 = Rational(2)
    n5 = Rational(5)
    # Repeated multiplication collapses into integer powers.
    e=a*a
    assert e == a**2
    e=a*a*a
    assert e == a**3
    e=a*a*a*a**Rational(6)
    assert e == a**9
    e=a*a*a*a**Rational(6)-a**Rational(9)
    assert e == Rational(0)
    # Degenerate exponents/bases evaluate immediately.
    e=a**(b-b)
    assert e == Rational(1)
    e=(a-a)**b
    assert e == Rational(0)
    e=(a+Rational(1)-a)**b
    assert e == Rational(1)
    e=(a+b+c)**n2
    assert e == (a+b+c)**2
    assert e.expand() == 2*b*c+2*a*c+2*a*b+a**2+c**2+b**2
    e=(a+b)**n2
    assert e == (a+b)**2
    assert e.expand() == 2*a*b+a**2+b**2
    # Non-integer powers of sums must stay unexpanded.
    e=(a+b)**(n1/n2)
    assert e == (a+b)**(Rational(1)/2)
    assert e.expand() == (a+b)**(Rational(1)/2)
    n=n5**(n1/n2)
    assert n == Rational(5)**(Rational(1)/2)
    e=n*a*b-n*b*a
    assert e == Rational(0)
    e=n*a*b+n*b*a
    assert e == 2*a*b*5**(Rational(1)/2)
    assert e.diff(a) == 2*b*5**(Rational(1)/2)
    # NOTE(review): the assert below duplicates the one above — possibly a
    # deliberate check that diff caching returns the same result; confirm.
    assert e.diff(a) == 2*b*5**(Rational(1)/2)
    e=a/b**2
    assert e == a*b**(-2)
    assert sqrt(2*(1+sqrt(2))) == (2*(1+2**(Rational(1,2))))**(Rational(1,2))
    # expand() distributes powers over products and handles coefficients.
    x = Symbol('x')
    y = Symbol('y')
    assert ((x*y)**3).expand() == y**3 * x**3
    assert ((x*y)**-3).expand() == y**-3 * x**-3
    assert (x**5*(3*x)**(3)).expand() == 27 * x**8
    assert (x**5*(-3*x)**(3)).expand() == -27 * x**8
    assert (x**5*(3*x)**(-3)).expand() == Rational(1,27) * x**2
    assert (x**5*(-3*x)**(-3)).expand() == -Rational(1,27) * x**2
    # expand_power_exp
    assert (x**(y**(x+exp(x+y))+z)).expand(deep=False) == x**z*x**(y**(x + exp(x + y)))
    assert (x**(y**(x+exp(x+y))+z)).expand() == x**z*x**(y**x*y**(exp(x)*exp(y)))
    # NOTE(review): both symbols below are deliberately named 'k'; n is only
    # "not even" (not necessarily odd or integer) — confirm intent.
    n = Symbol('k', even=False)
    k = Symbol('k', even=True)
    assert (-1)**x == (-1)**x
    assert (-1)**n == (-1)**n
    assert (-2)**k == 2**k
    assert (-1)**k == 1
@XFAIL
def test_pow2():
    """Known-failing simplifications of fractional powers of -x."""
    # XXX These fail - they are maybe debatable,
    # let's see SAGE and similar.
    assert ((-x)**2)**Rational(1,3) == ((-x)**Rational(1,3))**2
    assert (-x)**Rational(2,3) == x**Rational(2,3)
    assert (-x)**Rational(5,7) == -x**Rational(5,7)
def test_pow_issue417():
    """Regression test for issue 417: 4**(1/4) auto-simplifies to sqrt(2)."""
    lhs = 4**Rational(1, 4)
    rhs = 2**Rational(1, 2)
    assert lhs == rhs
def test_pow3():
    """2**(3/2) extracts the integer part of the exponent."""
    value = 2**(Rational(3)/2)
    assert value == 2 * 2**Rational(1, 2)
    assert value == sqrt(8)
def test_expand():
    """expand() over products and powers of sums, multinomial expansion,
    series interaction, expand() flags, and an expansion speed check."""
    p = Rational(5)
    e = (a+b)*c
    assert e == c*(a+b)
    assert (e.expand()-a*c-b*c) == Rational(0)
    e=(a+b)*(a+b)
    assert e == (a+b)**2
    assert e.expand() == 2*a*b+a**2+b**2
    e=(a+b)*(a+b)**Rational(2)
    assert e == (a+b)**3
    assert e.expand() == 3*b*a**2+3*a*b**2+a**3+b**3
    # NOTE(review): the assert below duplicates the one above — possibly a
    # deliberate check that a cached expand() gives the same result; confirm.
    assert e.expand() == 3*b*a**2+3*a*b**2+a**3+b**3
    e=(a+b)*(a+c)*(b+c)
    assert e == (a+c)*(a+b)*(b+c)
    assert e.expand() == 2*a*b*c+b*a**2+c*a**2+b*c**2+a*c**2+c*b**2+a*b**2
    # Binomial expansion of (1 + a)**5.
    e=(a+Rational(1))**p
    assert e == (1+a)**5
    assert e.expand() == 1+5*a+10*a**2+10*a**3+5*a**4+a**5
    e=(a+b+c)*(a+c+p)
    assert e == (5+a+c)*(a+b+c)
    assert e.expand() == 5*a+5*b+5*c+2*a*c+b*c+a*b+a**2+c**2
    # expand() must play well with series and the order term O(x**4).
    x=Symbol("x")
    s=exp(x*x)-1
    e=s.series(x,0,3)/x**2
    assert e.expand() == 1+x**2/2+O(x**4)
    # Selective expansion via the power_exp/power_base/deep flags.
    e = (x*(y+z))**(x*(y+z))*(x+y)
    assert e.expand(power_exp=False, power_base=False) == x*(x*y + x*z)**(x*y + x*z) + y*(x*y + x*z)**(x*y + x*z)
    assert e.expand(power_exp=False, power_base=False, deep=False) == x*(x*(y + z))**(x*(y + z)) + y*(x*(y + z))**(x*(y + z))
    e = (x*(y+z))**z
    assert e.expand(power_base=True, mul=True, deep=True) in [x**z*(y + z)**z, (x*y + x*z)**z]
    # Check that this isn't too slow
    x = Symbol('x')
    W = 1
    for i in range(1, 21):
        W = W * (x-i)
    W = W.expand()
    # Spot-check one coefficient of the degree-20 product.
    assert W.has(-1672280820*x**15)
def test_power_expand():
    """Pow.expand() multiplies out integer powers of sums."""
    a = Symbol('a')
    b = Symbol('b')
    squared = (a+b)**2
    assert squared.expand() == a**2 + b**2 + 2*a*b
    # Nested sums inside the base are expanded all the way down.
    squared = (1+2*(1+a))**2
    assert squared.expand() == 9 + 4*(a**2) + 12*a
def test_real_mul():
    """Multiplication by Real (float) coefficients."""
    # NOTE(review): these three comparisons are missing the `assert`
    # keyword, so their results are evaluated and discarded — this test
    # currently verifies nothing.  Before adding `assert`, confirm each
    # comparison actually holds (in particular whether Real(1)*pi*x
    # simplifies to pi*x, i.e. whether a float coefficient of 1 is dropped).
    Real(0) * pi * x == Real(0)
    Real(1) * pi * x == pi * x
    len((Real(2) * pi * x).args) == 3
def test_ncmul():
    """Multiplication with noncommutative symbols: order of noncommutative
    factors is preserved, while commutative factors may move freely."""
    A = Symbol("A", commutative=False)
    B = Symbol("B", commutative=False)
    C = Symbol("C", commutative=False)
    assert A*B != B*A
    assert A*B*C != C*B*A
    # Commutative factors (b, 3) can be reordered; A, B, C cannot.
    assert A*b*B*3*C == 3*b*A*B*C
    assert A*b*B*3*C != 3*b*B*A*C
    assert A*b*B*3*C == 3*A*B*C*b
    # Addition stays commutative even for noncommutative symbols.
    assert A+B == B+A
    assert (A+B)*C != C*(A+B)
    assert C*(A+B)*C != C*C*(A+B)
    # expand() must distribute from the correct side.
    assert (C*(A+B)).expand() == C*A+C*B
    assert (C*(A+B)).expand() != A*C+B*C
    assert A*A == A**2
    assert (A+B)*(A+B) == (A+B)**2
    # A*B and B*A are distinct terms in the noncommutative square.
    assert ((A+B)**2).expand() == A**2 + A*B + B*A +B**2
    assert A**-1 * A == 1
    assert A/A == 1
    assert A/(A**2) == 1/A
    assert A/(1+A) == A/(1+A)
def test_ncpow():
    """Powers of noncommutative symbols must not be reordered or merged."""
    x = Symbol('x', commutative=False)
    y = Symbol('y', commutative=False)
    assert (x**2)*(y**2) != (y**2)*(x**2)
    assert (x**-2)*y != y*(x**2)
def test_powerbug():
    """(-x)**n equals x**n exactly when the exponent n is even."""
    x = Symbol("x")
    for even_exp in (2, 4, 6, 128):
        assert x**even_exp == (-x)**even_exp
    for odd_exp in (1, 3, 5, 129):
        assert x**odd_exp != (-x)**odd_exp
    # A numeric coefficient is squared away as well.
    assert (2*x)**2 == (-2*x)**2
def test_Mul_doesnt_expand_exp():
    """Mul must combine exponentials only when bases or exponents match,
    never by expanding them."""
    x = Symbol('x')
    y = Symbol('y')
    # Different exponents over the same transcendental base stay separate.
    assert exp(x)*exp(y) == exp(x)*exp(y)
    assert 2**x*2**y == 2**x*2**y
    # Same base: exponents add.  Same exponent: bases multiply.
    assert x**2*x**3 == x**5
    assert 2**x*3**x == 6**x
    assert x**(y)*x**(2*y) == x**(3*y)
    assert 2**Rational(1,2)*2**Rational(1,2) == 2
    assert 2**x*2**(2*x) == 2**(3*x)
    assert 2**Rational(1,2)*2**Rational(1,4)*5**Rational(3,4) == 10**Rational(3,4)
    assert (x**(-log(5)/log(3))*x)/(x*x**( - log(5)/log(3))) == sympify(1)
def test_Add_Mul_is_integer():
    """The is_integer assumption must propagate through Add and Mul
    (None means 'unknown')."""
    x = Symbol('x')
    k = Symbol('k', integer=True)
    n = Symbol('n', integer=True)
    assert (2*k).is_integer == True
    assert (-k).is_integer == True
    assert (k/3).is_integer == False
    assert (x*k*n).is_integer == None
    assert (k+n).is_integer == True
    assert (k+x).is_integer == None
    assert (k+n*x).is_integer == None
    assert (k+n/3).is_integer == False
def test_Add_Mul_is_bounded():
    """The is_bounded assumption must propagate through Add and Mul:
    bounded*bounded is bounded, anything combined with unbounded is not."""
    x = Symbol('x', real=True, bounded=False)
    assert sin(x).is_bounded == True
    assert (x*sin(x)).is_bounded == False
    assert (1024*sin(x)).is_bounded == True
    assert (sin(x)*exp(x)).is_bounded == False
    assert (sin(x)*cos(x)).is_bounded == True
    assert (x*sin(x)*exp(x)).is_bounded == False
    assert (sin(x)-67).is_bounded == True
    assert (sin(x)+exp(x)).is_bounded == False
def test_Mul_is_even_odd():
    """Parity (is_even / is_odd) propagation through Mul: even*anything
    integer is even, odd*odd is odd (None means 'unknown')."""
    x = Symbol('x', integer=True)
    k = Symbol('k', odd=True)
    n = Symbol('n', odd=True)
    m = Symbol('m', even=True)
    assert (2*x).is_even == True
    assert (2*x).is_odd == False
    assert (3*x).is_even == None
    assert (3*x).is_odd == None
    # An odd integer divided by 3 is not an integer, hence neither parity.
    assert (k/3).is_integer == False
    assert (k/3).is_even == False
    assert (k/3).is_odd == False
    assert (2*n).is_even == True
    assert (2*n).is_odd == False
    assert (2*m).is_even == True
    assert (2*m).is_odd == False
    assert (-n).is_even == False
    assert (-n).is_odd == True
    assert (k*n).is_even == False
    assert (k*n).is_odd == True
    assert (k*m).is_even == True
    assert (k*m).is_odd == False
    assert (k*n*m).is_even == True
    assert (k*n*m).is_odd == False
    assert (k*m*x).is_even == True
    assert (k*m*x).is_odd == False
def test_Add_is_even_odd():
    """Parity of sums of even/odd/generic integer symbols."""
    x = Symbol('x', integer=True)
    k = Symbol('k', odd=True)
    n = Symbol('n', even=True)

    assert (2 + k).is_even is False
    assert (2 + k).is_odd is True
    assert (7 - k).is_even is True
    assert (7 - k).is_odd is False
    assert (11 - n).is_even is False
    assert (11 - n).is_odd is True
    assert (-8 + n).is_even is True
    assert (-8 + n).is_odd is False

    assert (n + k).is_even is False
    assert (n + k).is_odd is True
    assert (n - k).is_even is False
    assert (n - k).is_odd is True
    assert (n + 2*k).is_even is True
    assert (n + 2*k).is_odd is False

    # An unconstrained integer term makes the parity undecidable.
    assert (k + n + x).is_odd is None
    assert (k + n - x).is_even is None
    assert (2*k + n*x).is_odd is None
    assert (2*k + n*x).is_even is None
def test_Mul_is_negative_positive():
    """Sign inference for products over negative/positive/nonneg/nonpos symbols."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=False)
    k = Symbol('k', negative=True)
    n = Symbol('n', positive=True)
    u = Symbol('u', nonnegative=True)
    v = Symbol('v', nonpositive=True)

    # --- is_negative ---
    assert k.is_negative is True
    assert (-k).is_negative is False
    assert (2*k).is_negative is True

    assert (2*n)._eval_is_negative() is False
    assert (2*n).is_negative is False
    assert n.is_negative is False
    assert (-n).is_negative is True
    assert (2*n).is_negative is False

    assert (n*k).is_negative is True
    assert (2*n*k).is_negative is True
    assert (-n*k).is_negative is False
    assert (n*k*y).is_negative is False  # y.is_real=F; !real -> !neg

    assert u.is_negative is False
    assert (-u).is_negative is None
    assert (2*u).is_negative is False

    assert v.is_negative is None
    assert (-v).is_negative is False
    assert (2*v).is_negative is None

    assert (u*v).is_negative is None

    assert (k*u).is_negative is None
    assert (k*v).is_negative is False

    assert (n*u).is_negative is False
    assert (n*v).is_negative is None

    assert (v*k*u).is_negative is False
    assert (v*n*u).is_negative is None
    assert (-v*k*u).is_negative is None
    assert (-v*n*u).is_negative is False
    assert (17*v*k*u).is_negative is False
    assert (17*v*n*u).is_negative is None

    assert (k*v*n*u).is_negative is False

    assert (x*k).is_negative is None
    assert (u*v*n*x*k).is_negative is None

    # --- is_positive ---
    assert k.is_positive is False
    assert (-k).is_positive is True
    assert (2*k).is_positive is False

    assert n.is_positive is True
    assert (-n).is_positive is False
    assert (2*n).is_positive is True

    assert (n*k).is_positive is False
    assert (2*n*k).is_positive is False
    assert (-n*k).is_positive is True
    assert (-n*k*y).is_positive is False  # y.is_real=F; !real -> !neg

    assert u.is_positive is None
    assert (-u).is_positive is False
    assert (2*u).is_positive is None

    assert v.is_positive is False
    assert (-v).is_positive is None
    assert (2*v).is_positive is False

    assert (u*v).is_positive is False

    assert (k*u).is_positive is False
    assert (k*v).is_positive is None

    assert (n*u).is_positive is None
    assert (n*v).is_positive is False

    assert (v*k*u).is_positive is None
    assert (v*n*u).is_positive is False
    assert (-v*k*u).is_positive is False
    assert (-v*n*u).is_positive is None
    assert (17*v*k*u).is_positive is None
    assert (17*v*n*u).is_positive is False

    assert (k*v*n*u).is_positive is None

    assert (x*k).is_positive is None
    assert (u*v*n*x*k).is_positive is None
def test_Mul_is_negative_positive_2():
    """Products of two same-signed nonstrict symbols: sign known, zero unknown."""
    a = Symbol('a', nonnegative=True)
    b = Symbol('b', nonnegative=True)
    c = Symbol('c', nonpositive=True)
    d = Symbol('d', nonpositive=True)

    assert (a*b).is_nonnegative is True
    assert (a*b).is_negative is False
    assert (a*b).is_zero is None
    assert (a*b).is_positive is None

    assert (c*d).is_nonnegative is True
    assert (c*d).is_negative is False
    assert (c*d).is_zero is None
    assert (c*d).is_positive is None

    assert (a*c).is_nonpositive is True
    assert (a*c).is_positive is False
    assert (a*c).is_zero is None
    assert (a*c).is_negative is None
def test_Mul_is_nonpositive_nonnegative():
    """Nonstrict sign inference for products."""
    x = Symbol('x', real=True)
    k = Symbol('k', negative=True)
    n = Symbol('n', positive=True)
    u = Symbol('u', nonnegative=True)
    v = Symbol('v', nonpositive=True)

    # --- is_nonpositive ---
    assert k.is_nonpositive is True
    assert (-k).is_nonpositive is False
    assert (2*k).is_nonpositive is True

    assert n.is_nonpositive is False
    assert (-n).is_nonpositive is True
    assert (2*n).is_nonpositive is False

    assert (n*k).is_nonpositive is True
    assert (2*n*k).is_nonpositive is True
    assert (-n*k).is_nonpositive is False

    assert u.is_nonpositive is None
    assert (-u).is_nonpositive is True
    assert (2*u).is_nonpositive is None

    assert v.is_nonpositive is True
    assert (-v).is_nonpositive is None
    assert (2*v).is_nonpositive is True

    assert (u*v).is_nonpositive is True

    assert (k*u).is_nonpositive is True
    assert (k*v).is_nonpositive is None

    assert (n*u).is_nonpositive is None
    assert (n*v).is_nonpositive is True

    assert (v*k*u).is_nonpositive is None
    assert (v*n*u).is_nonpositive is True
    assert (-v*k*u).is_nonpositive is True
    assert (-v*n*u).is_nonpositive is None
    assert (17*v*k*u).is_nonpositive is None
    assert (17*v*n*u).is_nonpositive is True

    assert (k*v*n*u).is_nonpositive is None

    assert (x*k).is_nonpositive is None
    assert (u*v*n*x*k).is_nonpositive is None

    # --- is_nonnegative ---
    assert k.is_nonnegative is False
    assert (-k).is_nonnegative is True
    assert (2*k).is_nonnegative is False

    assert n.is_nonnegative is True
    assert (-n).is_nonnegative is False
    assert (2*n).is_nonnegative is True

    assert (n*k).is_nonnegative is False
    assert (2*n*k).is_nonnegative is False
    assert (-n*k).is_nonnegative is True

    assert u.is_nonnegative is True
    assert (-u).is_nonnegative is None
    assert (2*u).is_nonnegative is True

    assert v.is_nonnegative is None
    assert (-v).is_nonnegative is True
    assert (2*v).is_nonnegative is None

    assert (u*v).is_nonnegative is None

    assert (k*u).is_nonnegative is None
    assert (k*v).is_nonnegative is True

    assert (n*u).is_nonnegative is True
    assert (n*v).is_nonnegative is None

    assert (v*k*u).is_nonnegative is True
    assert (v*n*u).is_nonnegative is None
    assert (-v*k*u).is_nonnegative is None
    assert (-v*n*u).is_nonnegative is True
    assert (17*v*k*u).is_nonnegative is True
    assert (17*v*n*u).is_nonnegative is None

    assert (k*v*n*u).is_nonnegative is True

    assert (x*k).is_nonnegative is None
    assert (u*v*n*x*k).is_nonnegative is None
def test_Add_is_even_odd_2():
    """Parity of sums involving two odd symbols and one even symbol.

    Renamed from ``test_Add_is_even_odd``: a test of that exact name is
    already defined earlier in this file, so this later definition silently
    shadowed it and only one of the two ever ran under the test collector.
    """
    x = Symbol('x', integer=True)
    k = Symbol('k', odd=True)
    n = Symbol('n', odd=True)
    m = Symbol('m', even=True)

    # odd +/- odd constant
    assert (k + 7).is_even is True
    assert (k + 7).is_odd is False
    assert (-k + 7).is_even is True
    assert (-k + 7).is_odd is False
    assert (k - 12).is_even is False
    assert (k - 12).is_odd is True
    assert (-k - 12).is_even is False
    assert (-k - 12).is_odd is True

    # odd + odd is even; odd + even is odd
    assert (k + n).is_even is True
    assert (k + n).is_odd is False
    assert (k + m).is_even is False
    assert (k + m).is_odd is True
    assert (k + n + m).is_even is True
    assert (k + n + m).is_odd is False

    # an unconstrained integer term makes the parity undecidable
    assert (k + n + x + m).is_even is None
    assert (k + n + x + m).is_odd is None
def test_Add_is_negative_positive():
    """Strict sign inference for sums."""
    x = Symbol('x', real=True)
    k = Symbol('k', negative=True)
    n = Symbol('n', positive=True)
    u = Symbol('u', nonnegative=True)
    v = Symbol('v', nonpositive=True)

    # --- is_negative ---
    assert (k - 2).is_negative is True
    assert (k + 17).is_negative is None
    assert (-k - 5).is_negative is None
    assert (-k + 123).is_negative is False

    assert (k - n).is_negative is True
    assert (k + n).is_negative is None
    assert (-k - n).is_negative is None
    assert (-k + n).is_negative is False

    assert (k - n - 2).is_negative is True
    assert (k + n + 17).is_negative is None
    assert (-k - n - 5).is_negative is None
    assert (-k + n + 123).is_negative is False

    assert (-2*k + 123*n + 17).is_negative is False

    assert (k + u).is_negative is None
    assert (k + v).is_negative is True
    assert (n + u).is_negative is False
    assert (n + v).is_negative is None

    assert (u - v).is_negative is False
    assert (u + v).is_negative is None
    assert (-u - v).is_negative is None
    assert (-u + v).is_negative is None

    assert (u - v + n + 2).is_negative is False
    assert (u + v + n + 2).is_negative is None
    assert (-u - v + n + 2).is_negative is None
    assert (-u + v + n + 2).is_negative is None

    assert (k + x).is_negative is None
    assert (k + x - n).is_negative is None

    # --- is_positive ---
    assert (k - 2).is_positive is False
    assert (k + 17).is_positive is None
    assert (-k - 5).is_positive is None
    assert (-k + 123).is_positive is True

    assert (k - n).is_positive is False
    assert (k + n).is_positive is None
    assert (-k - n).is_positive is None
    assert (-k + n).is_positive is True

    assert (k - n - 2).is_positive is False
    assert (k + n + 17).is_positive is None
    assert (-k - n - 5).is_positive is None
    assert (-k + n + 123).is_positive is True

    assert (-2*k + 123*n + 17).is_positive is True

    assert (k + u).is_positive is None
    assert (k + v).is_positive is False
    assert (n + u).is_positive is True
    assert (n + v).is_positive is None

    assert (u - v).is_positive is None
    assert (u + v).is_positive is None
    assert (-u - v).is_positive is None
    assert (-u + v).is_positive is False

    assert (u - v - n - 2).is_positive is None
    assert (u + v - n - 2).is_positive is None
    assert (-u - v - n - 2).is_positive is None
    assert (-u + v - n - 2).is_positive is False

    assert (n + x).is_positive is None
    assert (n + x - k).is_positive is None
def test_Add_is_nonpositive_nonnegative():
    """Nonstrict sign inference for sums."""
    x = Symbol('x', real=True)
    k = Symbol('k', negative=True)
    n = Symbol('n', positive=True)
    u = Symbol('u', nonnegative=True)
    v = Symbol('v', nonpositive=True)

    # --- is_nonpositive ---
    assert (u - 2).is_nonpositive is None
    assert (u + 17).is_nonpositive is False
    assert (-u - 5).is_nonpositive is True
    assert (-u + 123).is_nonpositive is None

    assert (u - v).is_nonpositive is None
    assert (u + v).is_nonpositive is None
    assert (-u - v).is_nonpositive is None
    assert (-u + v).is_nonpositive is True

    assert (u - v - 2).is_nonpositive is None
    assert (u + v + 17).is_nonpositive is None
    assert (-u - v - 5).is_nonpositive is None
    assert (-u + v - 123).is_nonpositive is True

    assert (-2*u + 123*v - 17).is_nonpositive is True

    assert (k + u).is_nonpositive is None
    assert (k + v).is_nonpositive is True
    assert (n + u).is_nonpositive is False
    assert (n + v).is_nonpositive is None

    assert (k - n).is_nonpositive is True
    assert (k + n).is_nonpositive is None
    assert (-k - n).is_nonpositive is None
    assert (-k + n).is_nonpositive is False

    assert (k - n + u + 2).is_nonpositive is None
    assert (k + n + u + 2).is_nonpositive is None
    assert (-k - n + u + 2).is_nonpositive is None
    assert (-k + n + u + 2).is_nonpositive is False

    assert (u + x).is_nonpositive is None
    assert (v - x - n).is_nonpositive is None

    # --- is_nonnegative ---
    assert (u - 2).is_nonnegative is None
    assert (u + 17).is_nonnegative is True
    assert (-u - 5).is_nonnegative is False
    assert (-u + 123).is_nonnegative is None

    assert (u - v).is_nonnegative is True
    assert (u + v).is_nonnegative is None
    assert (-u - v).is_nonnegative is None
    assert (-u + v).is_nonnegative is None

    assert (u - v + 2).is_nonnegative is True
    assert (u + v + 17).is_nonnegative is None
    assert (-u - v - 5).is_nonnegative is None
    assert (-u + v - 123).is_nonnegative is False

    assert (2*u - 123*v + 17).is_nonnegative is True

    assert (k + u).is_nonnegative is None
    assert (k + v).is_nonnegative is False
    assert (n + u).is_nonnegative is True
    assert (n + v).is_nonnegative is None

    assert (k - n).is_nonnegative is False
    assert (k + n).is_nonnegative is None
    assert (-k - n).is_nonnegative is None
    assert (-k + n).is_nonnegative is True

    assert (k - n - u - 2).is_nonnegative is False
    assert (k + n - u - 2).is_nonnegative is None
    assert (-k - n - u - 2).is_nonnegative is None
    assert (-k + n - u - 2).is_nonnegative is None

    assert (u - x).is_nonnegative is None
    assert (v + x + n).is_nonnegative is None
def test_Pow_is_integer():
    """is_integer for powers of integer/nonnegative/positive exponents."""
    x = Symbol('x')
    k = Symbol('k', integer=True)
    n = Symbol('n', integer=True, nonnegative=True)
    m = Symbol('m', integer=True, positive=True)

    assert (k**2).is_integer is True
    assert (k**(-2)).is_integer is False

    assert (2**k).is_integer is None
    assert (2**(-k)).is_integer is None

    assert (2**n).is_integer is True
    assert (2**(-n)).is_integer is None

    assert (2**m).is_integer is True
    assert (2**(-m)).is_integer is False

    assert (x**2).is_integer is None
    assert (2**x).is_integer is None

    assert (k**n).is_integer is True
    assert (k**(-n)).is_integer is None

    assert (k**x).is_integer is None
    assert (x**k).is_integer is None

    assert (k**(n*m)).is_integer is True
    assert (k**(-n*m)).is_integer is None
def test_Pow_is_real():
    """Realness of powers; odd roots of negatives are not real."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True, positive=True)

    assert (x**2).is_real is True
    assert (x**3).is_real is True
    assert (x**x).is_real is None
    assert (y**x).is_real is True

    # A cube root is only known real for a positive base.
    assert (x**Rational(1, 3)).is_real is None
    assert (y**Rational(1, 3)).is_real is True

    assert sqrt(-1 - sqrt(2)).is_real is False
@XFAIL
def test_Pow_is_bounded():
    """Boundedness of powers (known broken, hence the XFAIL marker)."""
    x = Symbol('x', real=True)

    assert (x**2).is_bounded is None  # x could be oo
    assert (sin(x)**2).is_bounded is True
    assert (sin(x)**x).is_bounded is None
    assert (sin(x)**exp(x)).is_bounded is None

    # XXX This first one fails
    assert (1/sin(x)).is_bounded is False
    assert (1/exp(x)).is_bounded is False
def test_Pow_is_even_odd():
    """Parity of powers: an even/odd base keeps its parity under k**m, m >= 0."""
    x = Symbol('x')
    k = Symbol('k', even=True)
    n = Symbol('n', odd=True)
    m = Symbol('m', integer=True, nonnegative=True)

    assert (k**2).is_even is True
    assert (n**2).is_even is False
    assert (2**k).is_even is None
    assert (x**2).is_even is None

    assert (k**m).is_even is True
    assert (n**m).is_even is False

    assert (k**x).is_even is None
    assert (n**x).is_even is None

    assert (k**2).is_odd is False
    assert (n**2).is_odd is True
    assert (3**k).is_odd is None

    assert (k**m).is_odd is False
    assert (n**m).is_odd is True

    assert (k**x).is_odd is None
    assert (n**x).is_odd is None
def test_Pow_is_negative_positive():
    """Strict sign of powers; the exponent's parity decides for negative bases."""
    x = Symbol('x', real=True)
    k = Symbol('k', integer=True, positive=True)
    n = Symbol('n', even=True)
    m = Symbol('m', odd=True)
    z = Symbol('z')

    assert (2**x).is_positive is True
    assert ((-2)**x).is_positive is None
    assert ((-2)**n).is_positive is True
    assert ((-2)**m).is_positive is False

    assert (k**2).is_positive is True
    assert (k**(-2)).is_positive is True

    assert (k**x).is_positive is True
    assert ((-k)**x).is_positive is None
    assert ((-k)**n).is_positive is True
    assert ((-k)**m).is_positive is False

    assert (2**x).is_negative is False
    assert ((-2)**x).is_negative is None
    assert ((-2)**n).is_negative is False
    assert ((-2)**m).is_negative is True

    assert (k**2).is_negative is False
    assert (k**(-2)).is_negative is False

    assert (k**x).is_negative is False
    assert ((-k)**x).is_negative is None
    assert ((-k)**n).is_negative is False
    assert ((-k)**m).is_negative is True

    # With a fully unconstrained exponent nothing can be concluded.
    assert (2**z).is_positive is None
    assert (2**z).is_negative is None
def test_Pow_is_nonpositive_nonnegative():
    """Nonstrict sign of powers for nonnegative/positive integer bases."""
    x = Symbol('x', real=True)
    k = Symbol('k', integer=True, nonnegative=True)
    l = Symbol('l', integer=True, positive=True)
    n = Symbol('n', even=True)
    m = Symbol('m', odd=True)

    assert (2**x).is_nonnegative is True
    assert ((-2)**x).is_nonnegative is None
    assert ((-2)**n).is_nonnegative is True
    assert ((-2)**m).is_nonnegative is False

    assert (k**2).is_nonnegative is True
    assert (k**(-2)).is_nonnegative is True

    assert (k**x).is_nonnegative is None    # NOTE (0**x).is_real = U
    assert (l**x).is_nonnegative is True
    assert (l**x).is_positive is True

    assert ((-k)**x).is_nonnegative is None
    assert ((-k)**n).is_nonnegative is True
    assert ((-k)**m).is_nonnegative is None

    assert (2**x).is_nonpositive is False
    assert ((-2)**x).is_nonpositive is None
    assert ((-2)**n).is_nonpositive is False
    assert ((-2)**m).is_nonpositive is True

    assert (k**2).is_nonpositive is None
    assert (k**(-2)).is_nonpositive is None

    assert (k**x).is_nonpositive is None
    assert ((-k)**x).is_nonpositive is None
    assert ((-k)**n).is_nonpositive is None
    assert ((-k)**m).is_nonpositive is True
def test_Mul_is_imaginary_real():
    """Products of real and imaginary factors: two imaginaries make a real."""
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    ii = Symbol('ii', imaginary=True)
    x = Symbol('x')

    assert I.is_imaginary is True
    assert I.is_real is False
    assert (-I).is_imaginary is True
    assert (-I).is_real is False
    assert (3*I).is_imaginary is True
    assert (3*I).is_real is False
    assert (I*I).is_imaginary is False
    assert (I*I).is_real is True

    assert (r*i).is_imaginary is True
    assert (r*i).is_real is False

    assert (x*i).is_imaginary is None
    assert (x*i).is_real is None

    assert (i*ii).is_imaginary is False
    assert (i*ii).is_real is True

    assert (r*i*ii).is_imaginary is False
    assert (r*i*ii).is_real is True
def test_Add_is_comparable():
    """Sums are comparable only when fully numeric."""
    assert (x + y).is_comparable is False
    assert (x + 1).is_comparable is False
    assert (Rational(1, 3) - sqrt(8)).is_comparable is True
def test_Mul_is_comparable():
    """Products are comparable only when fully numeric."""
    assert (x*y).is_comparable is False
    assert (x*2).is_comparable is False
    assert (sqrt(2)*Rational(1, 3)).is_comparable is True
def test_Pow_is_comparable():
    """Powers are comparable only when fully numeric."""
    assert (x**y).is_comparable is False
    assert (x**2).is_comparable is False
    assert (Rational(1, 3)**Rational(1, 2)).is_comparable is True
def test_Add_is_positive_2():
    """Sign of purely numeric, irrational sums is decided by evaluation."""
    e = Rational(1, 3) - sqrt(8)
    assert e.is_positive is False
    assert e.is_negative is True

    e = pi - 1
    assert e.is_positive is True
    assert e.is_negative is False
def test_Add_is_irrational():
    """Irrational plus rational stays irrational."""
    i = Symbol('i', irrational=True)

    assert i.is_irrational is True
    assert i.is_rational is False

    assert (i + 1).is_irrational is True
    assert (i + 1).is_rational is False
def test_issue432():
    """Division by a foreign numeric type must defer to its reflected operator."""
    class MightyNumeric(tuple):
        def __rdiv__(self, other):
            # classic-division hook (Python 2)
            return "something"

        def __rtruediv__(self, other):
            # true-division hook
            return "something"

    assert sympify(1)/MightyNumeric((1, 2)) == "something"
def test_issue432b():
    """Multiplying a foreign object by a Symbol must not raise.

    Both ``__mul__`` and ``__rmul__`` below return None implicitly, so the
    final assert compares None == None; the point of the test is that the
    multiplications themselves complete without error.
    """
    class Foo:
        def __init__(self):
            self.field = 1.0

        def __mul__(self, other):
            self.field = self.field * other

        def __rmul__(self, other):
            self.field = other * self.field

    foo = Foo()
    x = Symbol("x")
    assert foo*x == x*foo
def test_bug3():
    """Addition is commutative regardless of symbol assumptions."""
    a = Symbol("a")
    b = Symbol("b", positive=True)
    assert 2*a + b == b + 2*a
def test_suppressed_evaluation():
    """evaluate=False keeps Add/Mul/Pow unevaluated with the original args."""
    a = Add(1, 3, 2, evaluate=False)
    b = Mul(1, 3, 2, evaluate=False)
    c = Pow(3, 2, evaluate=False)

    assert a != 6
    assert a.func is Add
    assert a.args == (1, 3, 2)

    assert b != 6
    assert b.func is Mul
    assert b.args == (1, 3, 2)

    assert c != 9
    assert c.func is Pow
    assert c.args == (3, 2)
def test_Add_as_coeff_terms():
    """s + c keeps coefficient +1, while s - c factors out -1 as (c - s)."""
    n = Symbol('n', integer=True)
    for s in (x, n):
        for c in (1, 2, 3):
            assert (s + c).as_coeff_terms() == (1, (s + c,))
            assert (s - c).as_coeff_terms() == (-1, (c - s,))
def test_Pow_as_coeff_terms_doesnt_expand():
    """as_coeff_terms must not force expansion of an exponential."""
    assert exp(x + exp(x + y)) != exp(x + exp(x)*exp(y))
    assert exp(x + y).as_coeff_terms() == (1, (exp(x + y),))
def test_issue974():
    # Negating numerator and denominator must cancel: -1/(-1 - x) == 1/(1 + x).
    assert -1/(-1 - x) == 1/(1 + x)
| bsd-3-clause | cb2b22778d75c2bbd1b2cac150b04321 | 26.618605 | 125 | 0.548265 | 2.745769 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/gl/glext_arb.py | 7 | 529031 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for http://oss.sgi.com/projects/ogl-sample/ABI/glext.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: glext_arb.py 1579 2008-01-15 14:47:19Z Alex.Holkner $'
from ctypes import *
from pyglet.gl.lib import link_GL as _link_function
from pyglet.gl.lib import c_ptrdiff_t
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for http://oss.sgi.com/projects/ogl-sample/ABI/glext.h
# GLEXT_LEGACY (/usr/include/GL/gl.h:70)
GL_GLEXT_VERSION = 29 # GL/glext.h:57
# VERSION_1_2 (GL/glext.h:59)
GL_UNSIGNED_BYTE_3_3_2 = 32818 # GL/glext.h:60
GL_UNSIGNED_SHORT_4_4_4_4 = 32819 # GL/glext.h:61
GL_UNSIGNED_SHORT_5_5_5_1 = 32820 # GL/glext.h:62
GL_UNSIGNED_INT_8_8_8_8 = 32821 # GL/glext.h:63
GL_UNSIGNED_INT_10_10_10_2 = 32822 # GL/glext.h:64
GL_RESCALE_NORMAL = 32826 # GL/glext.h:65
GL_TEXTURE_BINDING_3D = 32874 # GL/glext.h:66
GL_PACK_SKIP_IMAGES = 32875 # GL/glext.h:67
GL_PACK_IMAGE_HEIGHT = 32876 # GL/glext.h:68
GL_UNPACK_SKIP_IMAGES = 32877 # GL/glext.h:69
GL_UNPACK_IMAGE_HEIGHT = 32878 # GL/glext.h:70
GL_TEXTURE_3D = 32879 # GL/glext.h:71
GL_PROXY_TEXTURE_3D = 32880 # GL/glext.h:72
GL_TEXTURE_DEPTH = 32881 # GL/glext.h:73
GL_TEXTURE_WRAP_R = 32882 # GL/glext.h:74
GL_MAX_3D_TEXTURE_SIZE = 32883 # GL/glext.h:75
GL_UNSIGNED_BYTE_2_3_3_REV = 33634 # GL/glext.h:76
GL_UNSIGNED_SHORT_5_6_5 = 33635 # GL/glext.h:77
GL_UNSIGNED_SHORT_5_6_5_REV = 33636 # GL/glext.h:78
GL_UNSIGNED_SHORT_4_4_4_4_REV = 33637 # GL/glext.h:79
GL_UNSIGNED_SHORT_1_5_5_5_REV = 33638 # GL/glext.h:80
GL_UNSIGNED_INT_8_8_8_8_REV = 33639 # GL/glext.h:81
GL_UNSIGNED_INT_2_10_10_10_REV = 33640 # GL/glext.h:82
GL_BGR = 32992 # GL/glext.h:83
GL_BGRA = 32993 # GL/glext.h:84
GL_MAX_ELEMENTS_VERTICES = 33000 # GL/glext.h:85
GL_MAX_ELEMENTS_INDICES = 33001 # GL/glext.h:86
GL_CLAMP_TO_EDGE = 33071 # GL/glext.h:87
GL_TEXTURE_MIN_LOD = 33082 # GL/glext.h:88
GL_TEXTURE_MAX_LOD = 33083 # GL/glext.h:89
GL_TEXTURE_BASE_LEVEL = 33084 # GL/glext.h:90
GL_TEXTURE_MAX_LEVEL = 33085 # GL/glext.h:91
GL_LIGHT_MODEL_COLOR_CONTROL = 33272 # GL/glext.h:92
GL_SINGLE_COLOR = 33273 # GL/glext.h:93
GL_SEPARATE_SPECULAR_COLOR = 33274 # GL/glext.h:94
GL_SMOOTH_POINT_SIZE_RANGE = 2834 # GL/glext.h:95
GL_SMOOTH_POINT_SIZE_GRANULARITY = 2835 # GL/glext.h:96
GL_SMOOTH_LINE_WIDTH_RANGE = 2850 # GL/glext.h:97
GL_SMOOTH_LINE_WIDTH_GRANULARITY = 2851 # GL/glext.h:98
GL_ALIASED_POINT_SIZE_RANGE = 33901 # GL/glext.h:99
GL_ALIASED_LINE_WIDTH_RANGE = 33902 # GL/glext.h:100
# ARB_imaging (GL/glext.h:103)
GL_CONSTANT_COLOR = 32769 # GL/glext.h:104
GL_ONE_MINUS_CONSTANT_COLOR = 32770 # GL/glext.h:105
GL_CONSTANT_ALPHA = 32771 # GL/glext.h:106
GL_ONE_MINUS_CONSTANT_ALPHA = 32772 # GL/glext.h:107
GL_BLEND_COLOR = 32773 # GL/glext.h:108
GL_FUNC_ADD = 32774 # GL/glext.h:109
GL_MIN = 32775 # GL/glext.h:110
GL_MAX = 32776 # GL/glext.h:111
GL_BLEND_EQUATION = 32777 # GL/glext.h:112
GL_FUNC_SUBTRACT = 32778 # GL/glext.h:113
GL_FUNC_REVERSE_SUBTRACT = 32779 # GL/glext.h:114
GL_CONVOLUTION_1D = 32784 # GL/glext.h:115
GL_CONVOLUTION_2D = 32785 # GL/glext.h:116
GL_SEPARABLE_2D = 32786 # GL/glext.h:117
GL_CONVOLUTION_BORDER_MODE = 32787 # GL/glext.h:118
GL_CONVOLUTION_FILTER_SCALE = 32788 # GL/glext.h:119
GL_CONVOLUTION_FILTER_BIAS = 32789 # GL/glext.h:120
GL_REDUCE = 32790 # GL/glext.h:121
GL_CONVOLUTION_FORMAT = 32791 # GL/glext.h:122
GL_CONVOLUTION_WIDTH = 32792 # GL/glext.h:123
GL_CONVOLUTION_HEIGHT = 32793 # GL/glext.h:124
GL_MAX_CONVOLUTION_WIDTH = 32794 # GL/glext.h:125
GL_MAX_CONVOLUTION_HEIGHT = 32795 # GL/glext.h:126
GL_POST_CONVOLUTION_RED_SCALE = 32796 # GL/glext.h:127
GL_POST_CONVOLUTION_GREEN_SCALE = 32797 # GL/glext.h:128
GL_POST_CONVOLUTION_BLUE_SCALE = 32798 # GL/glext.h:129
GL_POST_CONVOLUTION_ALPHA_SCALE = 32799 # GL/glext.h:130
GL_POST_CONVOLUTION_RED_BIAS = 32800 # GL/glext.h:131
GL_POST_CONVOLUTION_GREEN_BIAS = 32801 # GL/glext.h:132
GL_POST_CONVOLUTION_BLUE_BIAS = 32802 # GL/glext.h:133
GL_POST_CONVOLUTION_ALPHA_BIAS = 32803 # GL/glext.h:134
GL_HISTOGRAM = 32804 # GL/glext.h:135
GL_PROXY_HISTOGRAM = 32805 # GL/glext.h:136
GL_HISTOGRAM_WIDTH = 32806 # GL/glext.h:137
GL_HISTOGRAM_FORMAT = 32807 # GL/glext.h:138
GL_HISTOGRAM_RED_SIZE = 32808 # GL/glext.h:139
GL_HISTOGRAM_GREEN_SIZE = 32809 # GL/glext.h:140
GL_HISTOGRAM_BLUE_SIZE = 32810 # GL/glext.h:141
GL_HISTOGRAM_ALPHA_SIZE = 32811 # GL/glext.h:142
GL_HISTOGRAM_LUMINANCE_SIZE = 32812 # GL/glext.h:143
GL_HISTOGRAM_SINK = 32813 # GL/glext.h:144
GL_MINMAX = 32814 # GL/glext.h:145
GL_MINMAX_FORMAT = 32815 # GL/glext.h:146
GL_MINMAX_SINK = 32816 # GL/glext.h:147
GL_TABLE_TOO_LARGE = 32817 # GL/glext.h:148
GL_COLOR_MATRIX = 32945 # GL/glext.h:149
GL_COLOR_MATRIX_STACK_DEPTH = 32946 # GL/glext.h:150
GL_MAX_COLOR_MATRIX_STACK_DEPTH = 32947 # GL/glext.h:151
GL_POST_COLOR_MATRIX_RED_SCALE = 32948 # GL/glext.h:152
GL_POST_COLOR_MATRIX_GREEN_SCALE = 32949 # GL/glext.h:153
GL_POST_COLOR_MATRIX_BLUE_SCALE = 32950 # GL/glext.h:154
GL_POST_COLOR_MATRIX_ALPHA_SCALE = 32951 # GL/glext.h:155
GL_POST_COLOR_MATRIX_RED_BIAS = 32952 # GL/glext.h:156
GL_POST_COLOR_MATRIX_GREEN_BIAS = 32953 # GL/glext.h:157
GL_POST_COLOR_MATRIX_BLUE_BIAS = 32954 # GL/glext.h:158
GL_POST_COLOR_MATRIX_ALPHA_BIAS = 32955 # GL/glext.h:159
GL_COLOR_TABLE = 32976 # GL/glext.h:160
GL_POST_CONVOLUTION_COLOR_TABLE = 32977 # GL/glext.h:161
GL_POST_COLOR_MATRIX_COLOR_TABLE = 32978 # GL/glext.h:162
GL_PROXY_COLOR_TABLE = 32979 # GL/glext.h:163
GL_PROXY_POST_CONVOLUTION_COLOR_TABLE = 32980 # GL/glext.h:164
GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE = 32981 # GL/glext.h:165
GL_COLOR_TABLE_SCALE = 32982 # GL/glext.h:166
GL_COLOR_TABLE_BIAS = 32983 # GL/glext.h:167
GL_COLOR_TABLE_FORMAT = 32984 # GL/glext.h:168
GL_COLOR_TABLE_WIDTH = 32985 # GL/glext.h:169
GL_COLOR_TABLE_RED_SIZE = 32986 # GL/glext.h:170
GL_COLOR_TABLE_GREEN_SIZE = 32987 # GL/glext.h:171
GL_COLOR_TABLE_BLUE_SIZE = 32988 # GL/glext.h:172
GL_COLOR_TABLE_ALPHA_SIZE = 32989 # GL/glext.h:173
GL_COLOR_TABLE_LUMINANCE_SIZE = 32990 # GL/glext.h:174
GL_COLOR_TABLE_INTENSITY_SIZE = 32991 # GL/glext.h:175
GL_CONSTANT_BORDER = 33105 # GL/glext.h:176
GL_REPLICATE_BORDER = 33107 # GL/glext.h:177
GL_CONVOLUTION_BORDER_COLOR = 33108 # GL/glext.h:178
# VERSION_1_3 (GL/glext.h:181)
GL_TEXTURE0 = 33984 # GL/glext.h:182
GL_TEXTURE1 = 33985 # GL/glext.h:183
GL_TEXTURE2 = 33986 # GL/glext.h:184
GL_TEXTURE3 = 33987 # GL/glext.h:185
GL_TEXTURE4 = 33988 # GL/glext.h:186
GL_TEXTURE5 = 33989 # GL/glext.h:187
GL_TEXTURE6 = 33990 # GL/glext.h:188
GL_TEXTURE7 = 33991 # GL/glext.h:189
GL_TEXTURE8 = 33992 # GL/glext.h:190
GL_TEXTURE9 = 33993 # GL/glext.h:191
GL_TEXTURE10 = 33994 # GL/glext.h:192
GL_TEXTURE11 = 33995 # GL/glext.h:193
GL_TEXTURE12 = 33996 # GL/glext.h:194
GL_TEXTURE13 = 33997 # GL/glext.h:195
GL_TEXTURE14 = 33998 # GL/glext.h:196
GL_TEXTURE15 = 33999 # GL/glext.h:197
GL_TEXTURE16 = 34000 # GL/glext.h:198
GL_TEXTURE17 = 34001 # GL/glext.h:199
GL_TEXTURE18 = 34002 # GL/glext.h:200
GL_TEXTURE19 = 34003 # GL/glext.h:201
GL_TEXTURE20 = 34004 # GL/glext.h:202
GL_TEXTURE21 = 34005 # GL/glext.h:203
GL_TEXTURE22 = 34006 # GL/glext.h:204
GL_TEXTURE23 = 34007 # GL/glext.h:205
GL_TEXTURE24 = 34008 # GL/glext.h:206
GL_TEXTURE25 = 34009 # GL/glext.h:207
GL_TEXTURE26 = 34010 # GL/glext.h:208
GL_TEXTURE27 = 34011 # GL/glext.h:209
GL_TEXTURE28 = 34012 # GL/glext.h:210
GL_TEXTURE29 = 34013 # GL/glext.h:211
GL_TEXTURE30 = 34014 # GL/glext.h:212
GL_TEXTURE31 = 34015 # GL/glext.h:213
GL_ACTIVE_TEXTURE = 34016 # GL/glext.h:214
GL_CLIENT_ACTIVE_TEXTURE = 34017 # GL/glext.h:215
GL_MAX_TEXTURE_UNITS = 34018 # GL/glext.h:216
GL_TRANSPOSE_MODELVIEW_MATRIX = 34019 # GL/glext.h:217
GL_TRANSPOSE_PROJECTION_MATRIX = 34020 # GL/glext.h:218
GL_TRANSPOSE_TEXTURE_MATRIX = 34021 # GL/glext.h:219
GL_TRANSPOSE_COLOR_MATRIX = 34022 # GL/glext.h:220
GL_MULTISAMPLE = 32925 # GL/glext.h:221
GL_SAMPLE_ALPHA_TO_COVERAGE = 32926 # GL/glext.h:222
GL_SAMPLE_ALPHA_TO_ONE = 32927 # GL/glext.h:223
GL_SAMPLE_COVERAGE = 32928 # GL/glext.h:224
GL_SAMPLE_BUFFERS = 32936 # GL/glext.h:225
GL_SAMPLES = 32937 # GL/glext.h:226
GL_SAMPLE_COVERAGE_VALUE = 32938 # GL/glext.h:227
GL_SAMPLE_COVERAGE_INVERT = 32939 # GL/glext.h:228
GL_MULTISAMPLE_BIT = 536870912 # GL/glext.h:229
GL_NORMAL_MAP = 34065 # GL/glext.h:230
GL_REFLECTION_MAP = 34066 # GL/glext.h:231
GL_TEXTURE_CUBE_MAP = 34067 # GL/glext.h:232
GL_TEXTURE_BINDING_CUBE_MAP = 34068 # GL/glext.h:233
GL_TEXTURE_CUBE_MAP_POSITIVE_X = 34069 # GL/glext.h:234
GL_TEXTURE_CUBE_MAP_NEGATIVE_X = 34070 # GL/glext.h:235
GL_TEXTURE_CUBE_MAP_POSITIVE_Y = 34071 # GL/glext.h:236
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y = 34072 # GL/glext.h:237
GL_TEXTURE_CUBE_MAP_POSITIVE_Z = 34073 # GL/glext.h:238
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z = 34074 # GL/glext.h:239
GL_PROXY_TEXTURE_CUBE_MAP = 34075 # GL/glext.h:240
GL_MAX_CUBE_MAP_TEXTURE_SIZE = 34076 # GL/glext.h:241
GL_COMPRESSED_ALPHA = 34025 # GL/glext.h:242
GL_COMPRESSED_LUMINANCE = 34026 # GL/glext.h:243
GL_COMPRESSED_LUMINANCE_ALPHA = 34027 # GL/glext.h:244
GL_COMPRESSED_INTENSITY = 34028 # GL/glext.h:245
GL_COMPRESSED_RGB = 34029 # GL/glext.h:246
GL_COMPRESSED_RGBA = 34030 # GL/glext.h:247
GL_TEXTURE_COMPRESSION_HINT = 34031 # GL/glext.h:248
GL_TEXTURE_COMPRESSED_IMAGE_SIZE = 34464 # GL/glext.h:249
GL_TEXTURE_COMPRESSED = 34465 # GL/glext.h:250
GL_NUM_COMPRESSED_TEXTURE_FORMATS = 34466 # GL/glext.h:251
GL_COMPRESSED_TEXTURE_FORMATS = 34467 # GL/glext.h:252
GL_CLAMP_TO_BORDER = 33069 # GL/glext.h:253
GL_COMBINE = 34160 # GL/glext.h:254
GL_COMBINE_RGB = 34161 # GL/glext.h:255
GL_COMBINE_ALPHA = 34162 # GL/glext.h:256
GL_SOURCE0_RGB = 34176 # GL/glext.h:257
GL_SOURCE1_RGB = 34177 # GL/glext.h:258
GL_SOURCE2_RGB = 34178 # GL/glext.h:259
GL_SOURCE0_ALPHA = 34184 # GL/glext.h:260
GL_SOURCE1_ALPHA = 34185 # GL/glext.h:261
GL_SOURCE2_ALPHA = 34186 # GL/glext.h:262
GL_OPERAND0_RGB = 34192 # GL/glext.h:263
GL_OPERAND1_RGB = 34193 # GL/glext.h:264
GL_OPERAND2_RGB = 34194 # GL/glext.h:265
GL_OPERAND0_ALPHA = 34200 # GL/glext.h:266
GL_OPERAND1_ALPHA = 34201 # GL/glext.h:267
GL_OPERAND2_ALPHA = 34202 # GL/glext.h:268
GL_RGB_SCALE = 34163 # GL/glext.h:269
GL_ADD_SIGNED = 34164 # GL/glext.h:270
GL_INTERPOLATE = 34165 # GL/glext.h:271
GL_SUBTRACT = 34023 # GL/glext.h:272
GL_CONSTANT = 34166 # GL/glext.h:273
GL_PRIMARY_COLOR = 34167 # GL/glext.h:274
GL_PREVIOUS = 34168 # GL/glext.h:275
GL_DOT3_RGB = 34478 # GL/glext.h:276
GL_DOT3_RGBA = 34479 # GL/glext.h:277
# VERSION_1_4 (GL/glext.h:280)
GL_BLEND_DST_RGB = 32968 # GL/glext.h:281
GL_BLEND_SRC_RGB = 32969 # GL/glext.h:282
GL_BLEND_DST_ALPHA = 32970 # GL/glext.h:283
GL_BLEND_SRC_ALPHA = 32971 # GL/glext.h:284
GL_POINT_SIZE_MIN = 33062 # GL/glext.h:285
GL_POINT_SIZE_MAX = 33063 # GL/glext.h:286
GL_POINT_FADE_THRESHOLD_SIZE = 33064 # GL/glext.h:287
GL_POINT_DISTANCE_ATTENUATION = 33065 # GL/glext.h:288
GL_GENERATE_MIPMAP = 33169 # GL/glext.h:289
GL_GENERATE_MIPMAP_HINT = 33170 # GL/glext.h:290
GL_DEPTH_COMPONENT16 = 33189 # GL/glext.h:291
GL_DEPTH_COMPONENT24 = 33190 # GL/glext.h:292
GL_DEPTH_COMPONENT32 = 33191 # GL/glext.h:293
GL_MIRRORED_REPEAT = 33648 # GL/glext.h:294
GL_FOG_COORDINATE_SOURCE = 33872 # GL/glext.h:295
GL_FOG_COORDINATE = 33873 # GL/glext.h:296
GL_FRAGMENT_DEPTH = 33874 # GL/glext.h:297
GL_CURRENT_FOG_COORDINATE = 33875 # GL/glext.h:298
GL_FOG_COORDINATE_ARRAY_TYPE = 33876 # GL/glext.h:299
GL_FOG_COORDINATE_ARRAY_STRIDE = 33877 # GL/glext.h:300
GL_FOG_COORDINATE_ARRAY_POINTER = 33878 # GL/glext.h:301
GL_FOG_COORDINATE_ARRAY = 33879 # GL/glext.h:302
GL_COLOR_SUM = 33880 # GL/glext.h:303
GL_CURRENT_SECONDARY_COLOR = 33881 # GL/glext.h:304
GL_SECONDARY_COLOR_ARRAY_SIZE = 33882 # GL/glext.h:305
GL_SECONDARY_COLOR_ARRAY_TYPE = 33883 # GL/glext.h:306
GL_SECONDARY_COLOR_ARRAY_STRIDE = 33884 # GL/glext.h:307
GL_SECONDARY_COLOR_ARRAY_POINTER = 33885 # GL/glext.h:308
GL_SECONDARY_COLOR_ARRAY = 33886 # GL/glext.h:309
GL_MAX_TEXTURE_LOD_BIAS = 34045 # GL/glext.h:310
GL_TEXTURE_FILTER_CONTROL = 34048 # GL/glext.h:311
GL_TEXTURE_LOD_BIAS = 34049 # GL/glext.h:312
GL_INCR_WRAP = 34055 # GL/glext.h:313
GL_DECR_WRAP = 34056 # GL/glext.h:314
GL_TEXTURE_DEPTH_SIZE = 34890 # GL/glext.h:315
GL_DEPTH_TEXTURE_MODE = 34891 # GL/glext.h:316
GL_TEXTURE_COMPARE_MODE = 34892 # GL/glext.h:317
GL_TEXTURE_COMPARE_FUNC = 34893 # GL/glext.h:318
GL_COMPARE_R_TO_TEXTURE = 34894 # GL/glext.h:319
# VERSION_1_5 (GL/glext.h:322)
GL_BUFFER_SIZE = 34660 # GL/glext.h:323
GL_BUFFER_USAGE = 34661 # GL/glext.h:324
GL_QUERY_COUNTER_BITS = 34916 # GL/glext.h:325
GL_CURRENT_QUERY = 34917 # GL/glext.h:326
GL_QUERY_RESULT = 34918 # GL/glext.h:327
GL_QUERY_RESULT_AVAILABLE = 34919 # GL/glext.h:328
GL_ARRAY_BUFFER = 34962 # GL/glext.h:329
GL_ELEMENT_ARRAY_BUFFER = 34963 # GL/glext.h:330
GL_ARRAY_BUFFER_BINDING = 34964 # GL/glext.h:331
GL_ELEMENT_ARRAY_BUFFER_BINDING = 34965 # GL/glext.h:332
GL_VERTEX_ARRAY_BUFFER_BINDING = 34966 # GL/glext.h:333
GL_NORMAL_ARRAY_BUFFER_BINDING = 34967 # GL/glext.h:334
GL_COLOR_ARRAY_BUFFER_BINDING = 34968 # GL/glext.h:335
GL_INDEX_ARRAY_BUFFER_BINDING = 34969 # GL/glext.h:336
GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING = 34970 # GL/glext.h:337
GL_EDGE_FLAG_ARRAY_BUFFER_BINDING = 34971 # GL/glext.h:338
GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING = 34972 # GL/glext.h:339
GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING = 34973 # GL/glext.h:340
GL_WEIGHT_ARRAY_BUFFER_BINDING = 34974 # GL/glext.h:341
GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING = 34975 # GL/glext.h:342
GL_READ_ONLY = 35000 # GL/glext.h:343
GL_WRITE_ONLY = 35001 # GL/glext.h:344
GL_READ_WRITE = 35002 # GL/glext.h:345
GL_BUFFER_ACCESS = 35003 # GL/glext.h:346
GL_BUFFER_MAPPED = 35004 # GL/glext.h:347
GL_BUFFER_MAP_POINTER = 35005 # GL/glext.h:348
GL_STREAM_DRAW = 35040 # GL/glext.h:349
GL_STREAM_READ = 35041 # GL/glext.h:350
GL_STREAM_COPY = 35042 # GL/glext.h:351
GL_STATIC_DRAW = 35044 # GL/glext.h:352
GL_STATIC_READ = 35045 # GL/glext.h:353
GL_STATIC_COPY = 35046 # GL/glext.h:354
GL_DYNAMIC_DRAW = 35048 # GL/glext.h:355
GL_DYNAMIC_READ = 35049 # GL/glext.h:356
GL_DYNAMIC_COPY = 35050 # GL/glext.h:357
GL_SAMPLES_PASSED = 35092 # GL/glext.h:358
GL_FOG_COORD_SRC = 33872 # GL/glext.h:359
GL_FOG_COORD = 33873 # GL/glext.h:360
GL_CURRENT_FOG_COORD = 33875 # GL/glext.h:361
GL_FOG_COORD_ARRAY_TYPE = 33876 # GL/glext.h:362
GL_FOG_COORD_ARRAY_STRIDE = 33877 # GL/glext.h:363
GL_FOG_COORD_ARRAY_POINTER = 33878 # GL/glext.h:364
GL_FOG_COORD_ARRAY = 33879 # GL/glext.h:365
GL_FOG_COORD_ARRAY_BUFFER_BINDING = 34973 # GL/glext.h:366
GL_SRC0_RGB = 34176 # GL/glext.h:367
GL_SRC1_RGB = 34177 # GL/glext.h:368
GL_SRC2_RGB = 34178 # GL/glext.h:369
GL_SRC0_ALPHA = 34184 # GL/glext.h:370
GL_SRC1_ALPHA = 34185 # GL/glext.h:371
GL_SRC2_ALPHA = 34186 # GL/glext.h:372
# VERSION_2_0 (GL/glext.h:375)
GL_BLEND_EQUATION_RGB = 32777 # GL/glext.h:376
GL_VERTEX_ATTRIB_ARRAY_ENABLED = 34338 # GL/glext.h:377
GL_VERTEX_ATTRIB_ARRAY_SIZE = 34339 # GL/glext.h:378
GL_VERTEX_ATTRIB_ARRAY_STRIDE = 34340 # GL/glext.h:379
GL_VERTEX_ATTRIB_ARRAY_TYPE = 34341 # GL/glext.h:380
GL_CURRENT_VERTEX_ATTRIB = 34342 # GL/glext.h:381
GL_VERTEX_PROGRAM_POINT_SIZE = 34370 # GL/glext.h:382
GL_VERTEX_PROGRAM_TWO_SIDE = 34371 # GL/glext.h:383
GL_VERTEX_ATTRIB_ARRAY_POINTER = 34373 # GL/glext.h:384
GL_STENCIL_BACK_FUNC = 34816 # GL/glext.h:385
GL_STENCIL_BACK_FAIL = 34817 # GL/glext.h:386
GL_STENCIL_BACK_PASS_DEPTH_FAIL = 34818 # GL/glext.h:387
GL_STENCIL_BACK_PASS_DEPTH_PASS = 34819 # GL/glext.h:388
GL_MAX_DRAW_BUFFERS = 34852 # GL/glext.h:389
GL_DRAW_BUFFER0 = 34853 # GL/glext.h:390
GL_DRAW_BUFFER1 = 34854 # GL/glext.h:391
GL_DRAW_BUFFER2 = 34855 # GL/glext.h:392
GL_DRAW_BUFFER3 = 34856 # GL/glext.h:393
GL_DRAW_BUFFER4 = 34857 # GL/glext.h:394
GL_DRAW_BUFFER5 = 34858 # GL/glext.h:395
GL_DRAW_BUFFER6 = 34859 # GL/glext.h:396
GL_DRAW_BUFFER7 = 34860 # GL/glext.h:397
GL_DRAW_BUFFER8 = 34861 # GL/glext.h:398
GL_DRAW_BUFFER9 = 34862 # GL/glext.h:399
GL_DRAW_BUFFER10 = 34863 # GL/glext.h:400
GL_DRAW_BUFFER11 = 34864 # GL/glext.h:401
GL_DRAW_BUFFER12 = 34865 # GL/glext.h:402
GL_DRAW_BUFFER13 = 34866 # GL/glext.h:403
GL_DRAW_BUFFER14 = 34867 # GL/glext.h:404
GL_DRAW_BUFFER15 = 34868 # GL/glext.h:405
GL_BLEND_EQUATION_ALPHA = 34877 # GL/glext.h:406
GL_POINT_SPRITE = 34913 # GL/glext.h:407
GL_COORD_REPLACE = 34914 # GL/glext.h:408
GL_MAX_VERTEX_ATTRIBS = 34921 # GL/glext.h:409
GL_VERTEX_ATTRIB_ARRAY_NORMALIZED = 34922 # GL/glext.h:410
GL_MAX_TEXTURE_COORDS = 34929 # GL/glext.h:411
GL_MAX_TEXTURE_IMAGE_UNITS = 34930 # GL/glext.h:412
GL_FRAGMENT_SHADER = 35632 # GL/glext.h:413
GL_VERTEX_SHADER = 35633 # GL/glext.h:414
GL_MAX_FRAGMENT_UNIFORM_COMPONENTS = 35657 # GL/glext.h:415
GL_MAX_VERTEX_UNIFORM_COMPONENTS = 35658 # GL/glext.h:416
GL_MAX_VARYING_FLOATS = 35659 # GL/glext.h:417
GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS = 35660 # GL/glext.h:418
GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS = 35661 # GL/glext.h:419
GL_SHADER_TYPE = 35663 # GL/glext.h:420
GL_FLOAT_VEC2 = 35664 # GL/glext.h:421
GL_FLOAT_VEC3 = 35665 # GL/glext.h:422
GL_FLOAT_VEC4 = 35666 # GL/glext.h:423
GL_INT_VEC2 = 35667 # GL/glext.h:424
GL_INT_VEC3 = 35668 # GL/glext.h:425
GL_INT_VEC4 = 35669 # GL/glext.h:426
GL_BOOL = 35670 # GL/glext.h:427
GL_BOOL_VEC2 = 35671 # GL/glext.h:428
GL_BOOL_VEC3 = 35672 # GL/glext.h:429
GL_BOOL_VEC4 = 35673 # GL/glext.h:430
GL_FLOAT_MAT2 = 35674 # GL/glext.h:431
GL_FLOAT_MAT3 = 35675 # GL/glext.h:432
GL_FLOAT_MAT4 = 35676 # GL/glext.h:433
GL_SAMPLER_1D = 35677 # GL/glext.h:434
GL_SAMPLER_2D = 35678 # GL/glext.h:435
GL_SAMPLER_3D = 35679 # GL/glext.h:436
GL_SAMPLER_CUBE = 35680 # GL/glext.h:437
GL_SAMPLER_1D_SHADOW = 35681 # GL/glext.h:438
GL_SAMPLER_2D_SHADOW = 35682 # GL/glext.h:439
GL_DELETE_STATUS = 35712 # GL/glext.h:440
GL_COMPILE_STATUS = 35713 # GL/glext.h:441
GL_LINK_STATUS = 35714 # GL/glext.h:442
GL_VALIDATE_STATUS = 35715 # GL/glext.h:443
GL_INFO_LOG_LENGTH = 35716 # GL/glext.h:444
GL_ATTACHED_SHADERS = 35717 # GL/glext.h:445
GL_ACTIVE_UNIFORMS = 35718 # GL/glext.h:446
GL_ACTIVE_UNIFORM_MAX_LENGTH = 35719 # GL/glext.h:447
GL_SHADER_SOURCE_LENGTH = 35720 # GL/glext.h:448
GL_ACTIVE_ATTRIBUTES = 35721 # GL/glext.h:449
GL_ACTIVE_ATTRIBUTE_MAX_LENGTH = 35722 # GL/glext.h:450
GL_FRAGMENT_SHADER_DERIVATIVE_HINT = 35723 # GL/glext.h:451
GL_SHADING_LANGUAGE_VERSION = 35724 # GL/glext.h:452
GL_CURRENT_PROGRAM = 35725 # GL/glext.h:453
GL_POINT_SPRITE_COORD_ORIGIN = 36000 # GL/glext.h:454
GL_LOWER_LEFT = 36001 # GL/glext.h:455
GL_UPPER_LEFT = 36002 # GL/glext.h:456
GL_STENCIL_BACK_REF = 36003 # GL/glext.h:457
GL_STENCIL_BACK_VALUE_MASK = 36004 # GL/glext.h:458
GL_STENCIL_BACK_WRITEMASK = 36005 # GL/glext.h:459
# ARB_multitexture (GL/glext.h:462)
GL_TEXTURE0_ARB = 33984 # GL/glext.h:463
GL_TEXTURE1_ARB = 33985 # GL/glext.h:464
GL_TEXTURE2_ARB = 33986 # GL/glext.h:465
GL_TEXTURE3_ARB = 33987 # GL/glext.h:466
GL_TEXTURE4_ARB = 33988 # GL/glext.h:467
GL_TEXTURE5_ARB = 33989 # GL/glext.h:468
GL_TEXTURE6_ARB = 33990 # GL/glext.h:469
GL_TEXTURE7_ARB = 33991 # GL/glext.h:470
GL_TEXTURE8_ARB = 33992 # GL/glext.h:471
GL_TEXTURE9_ARB = 33993 # GL/glext.h:472
GL_TEXTURE10_ARB = 33994 # GL/glext.h:473
GL_TEXTURE11_ARB = 33995 # GL/glext.h:474
GL_TEXTURE12_ARB = 33996 # GL/glext.h:475
GL_TEXTURE13_ARB = 33997 # GL/glext.h:476
GL_TEXTURE14_ARB = 33998 # GL/glext.h:477
GL_TEXTURE15_ARB = 33999 # GL/glext.h:478
GL_TEXTURE16_ARB = 34000 # GL/glext.h:479
GL_TEXTURE17_ARB = 34001 # GL/glext.h:480
GL_TEXTURE18_ARB = 34002 # GL/glext.h:481
GL_TEXTURE19_ARB = 34003 # GL/glext.h:482
GL_TEXTURE20_ARB = 34004 # GL/glext.h:483
GL_TEXTURE21_ARB = 34005 # GL/glext.h:484
GL_TEXTURE22_ARB = 34006 # GL/glext.h:485
GL_TEXTURE23_ARB = 34007 # GL/glext.h:486
GL_TEXTURE24_ARB = 34008 # GL/glext.h:487
GL_TEXTURE25_ARB = 34009 # GL/glext.h:488
GL_TEXTURE26_ARB = 34010 # GL/glext.h:489
GL_TEXTURE27_ARB = 34011 # GL/glext.h:490
GL_TEXTURE28_ARB = 34012 # GL/glext.h:491
GL_TEXTURE29_ARB = 34013 # GL/glext.h:492
GL_TEXTURE30_ARB = 34014 # GL/glext.h:493
GL_TEXTURE31_ARB = 34015 # GL/glext.h:494
GL_ACTIVE_TEXTURE_ARB = 34016 # GL/glext.h:495
GL_CLIENT_ACTIVE_TEXTURE_ARB = 34017 # GL/glext.h:496
GL_MAX_TEXTURE_UNITS_ARB = 34018 # GL/glext.h:497
# ARB_transpose_matrix (GL/glext.h:500)
GL_TRANSPOSE_MODELVIEW_MATRIX_ARB = 34019 # GL/glext.h:501
GL_TRANSPOSE_PROJECTION_MATRIX_ARB = 34020 # GL/glext.h:502
GL_TRANSPOSE_TEXTURE_MATRIX_ARB = 34021 # GL/glext.h:503
GL_TRANSPOSE_COLOR_MATRIX_ARB = 34022 # GL/glext.h:504
# ARB_multisample (GL/glext.h:507)
GL_MULTISAMPLE_ARB = 32925 # GL/glext.h:508
GL_SAMPLE_ALPHA_TO_COVERAGE_ARB = 32926 # GL/glext.h:509
GL_SAMPLE_ALPHA_TO_ONE_ARB = 32927 # GL/glext.h:510
GL_SAMPLE_COVERAGE_ARB = 32928 # GL/glext.h:511
GL_SAMPLE_BUFFERS_ARB = 32936 # GL/glext.h:512
GL_SAMPLES_ARB = 32937 # GL/glext.h:513
GL_SAMPLE_COVERAGE_VALUE_ARB = 32938 # GL/glext.h:514
GL_SAMPLE_COVERAGE_INVERT_ARB = 32939 # GL/glext.h:515
GL_MULTISAMPLE_BIT_ARB = 536870912 # GL/glext.h:516
# ARB_texture_env_add (GL/glext.h:519)
# ARB_texture_cube_map (GL/glext.h:522)
GL_NORMAL_MAP_ARB = 34065 # GL/glext.h:523
GL_REFLECTION_MAP_ARB = 34066 # GL/glext.h:524
GL_TEXTURE_CUBE_MAP_ARB = 34067 # GL/glext.h:525
GL_TEXTURE_BINDING_CUBE_MAP_ARB = 34068 # GL/glext.h:526
GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB = 34069 # GL/glext.h:527
GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB = 34070 # GL/glext.h:528
GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB = 34071 # GL/glext.h:529
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB = 34072 # GL/glext.h:530
GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB = 34073 # GL/glext.h:531
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB = 34074 # GL/glext.h:532
GL_PROXY_TEXTURE_CUBE_MAP_ARB = 34075 # GL/glext.h:533
GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB = 34076 # GL/glext.h:534
# ARB_texture_compression (GL/glext.h:537)
GL_COMPRESSED_ALPHA_ARB = 34025 # GL/glext.h:538
GL_COMPRESSED_LUMINANCE_ARB = 34026 # GL/glext.h:539
GL_COMPRESSED_LUMINANCE_ALPHA_ARB = 34027 # GL/glext.h:540
GL_COMPRESSED_INTENSITY_ARB = 34028 # GL/glext.h:541
GL_COMPRESSED_RGB_ARB = 34029 # GL/glext.h:542
GL_COMPRESSED_RGBA_ARB = 34030 # GL/glext.h:543
GL_TEXTURE_COMPRESSION_HINT_ARB = 34031 # GL/glext.h:544
GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB = 34464 # GL/glext.h:545
GL_TEXTURE_COMPRESSED_ARB = 34465 # GL/glext.h:546
GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB = 34466 # GL/glext.h:547
GL_COMPRESSED_TEXTURE_FORMATS_ARB = 34467 # GL/glext.h:548
# ARB_texture_border_clamp (GL/glext.h:551)
GL_CLAMP_TO_BORDER_ARB = 33069 # GL/glext.h:552
# ARB_point_parameters (GL/glext.h:555)
GL_POINT_SIZE_MIN_ARB = 33062 # GL/glext.h:556
GL_POINT_SIZE_MAX_ARB = 33063 # GL/glext.h:557
GL_POINT_FADE_THRESHOLD_SIZE_ARB = 33064 # GL/glext.h:558
GL_POINT_DISTANCE_ATTENUATION_ARB = 33065 # GL/glext.h:559
# ARB_vertex_blend (GL/glext.h:562)
GL_MAX_VERTEX_UNITS_ARB = 34468 # GL/glext.h:563
GL_ACTIVE_VERTEX_UNITS_ARB = 34469 # GL/glext.h:564
GL_WEIGHT_SUM_UNITY_ARB = 34470 # GL/glext.h:565
GL_VERTEX_BLEND_ARB = 34471 # GL/glext.h:566
GL_CURRENT_WEIGHT_ARB = 34472 # GL/glext.h:567
GL_WEIGHT_ARRAY_TYPE_ARB = 34473 # GL/glext.h:568
GL_WEIGHT_ARRAY_STRIDE_ARB = 34474 # GL/glext.h:569
GL_WEIGHT_ARRAY_SIZE_ARB = 34475 # GL/glext.h:570
GL_WEIGHT_ARRAY_POINTER_ARB = 34476 # GL/glext.h:571
GL_WEIGHT_ARRAY_ARB = 34477 # GL/glext.h:572
GL_MODELVIEW0_ARB = 5888 # GL/glext.h:573
GL_MODELVIEW1_ARB = 34058 # GL/glext.h:574
GL_MODELVIEW2_ARB = 34594 # GL/glext.h:575
GL_MODELVIEW3_ARB = 34595 # GL/glext.h:576
GL_MODELVIEW4_ARB = 34596 # GL/glext.h:577
GL_MODELVIEW5_ARB = 34597 # GL/glext.h:578
GL_MODELVIEW6_ARB = 34598 # GL/glext.h:579
GL_MODELVIEW7_ARB = 34599 # GL/glext.h:580
GL_MODELVIEW8_ARB = 34600 # GL/glext.h:581
GL_MODELVIEW9_ARB = 34601 # GL/glext.h:582
GL_MODELVIEW10_ARB = 34602 # GL/glext.h:583
GL_MODELVIEW11_ARB = 34603 # GL/glext.h:584
GL_MODELVIEW12_ARB = 34604 # GL/glext.h:585
GL_MODELVIEW13_ARB = 34605 # GL/glext.h:586
GL_MODELVIEW14_ARB = 34606 # GL/glext.h:587
GL_MODELVIEW15_ARB = 34607 # GL/glext.h:588
GL_MODELVIEW16_ARB = 34608 # GL/glext.h:589
GL_MODELVIEW17_ARB = 34609 # GL/glext.h:590
GL_MODELVIEW18_ARB = 34610 # GL/glext.h:591
GL_MODELVIEW19_ARB = 34611 # GL/glext.h:592
GL_MODELVIEW20_ARB = 34612 # GL/glext.h:593
GL_MODELVIEW21_ARB = 34613 # GL/glext.h:594
GL_MODELVIEW22_ARB = 34614 # GL/glext.h:595
GL_MODELVIEW23_ARB = 34615 # GL/glext.h:596
GL_MODELVIEW24_ARB = 34616 # GL/glext.h:597
GL_MODELVIEW25_ARB = 34617 # GL/glext.h:598
GL_MODELVIEW26_ARB = 34618 # GL/glext.h:599
GL_MODELVIEW27_ARB = 34619 # GL/glext.h:600
GL_MODELVIEW28_ARB = 34620 # GL/glext.h:601
GL_MODELVIEW29_ARB = 34621 # GL/glext.h:602
GL_MODELVIEW30_ARB = 34622 # GL/glext.h:603
GL_MODELVIEW31_ARB = 34623 # GL/glext.h:604
# ARB_matrix_palette (GL/glext.h:607)
GL_MATRIX_PALETTE_ARB = 34880 # GL/glext.h:608
GL_MAX_MATRIX_PALETTE_STACK_DEPTH_ARB = 34881 # GL/glext.h:609
GL_MAX_PALETTE_MATRICES_ARB = 34882 # GL/glext.h:610
GL_CURRENT_PALETTE_MATRIX_ARB = 34883 # GL/glext.h:611
GL_MATRIX_INDEX_ARRAY_ARB = 34884 # GL/glext.h:612
GL_CURRENT_MATRIX_INDEX_ARB = 34885 # GL/glext.h:613
GL_MATRIX_INDEX_ARRAY_SIZE_ARB = 34886 # GL/glext.h:614
GL_MATRIX_INDEX_ARRAY_TYPE_ARB = 34887 # GL/glext.h:615
GL_MATRIX_INDEX_ARRAY_STRIDE_ARB = 34888 # GL/glext.h:616
GL_MATRIX_INDEX_ARRAY_POINTER_ARB = 34889 # GL/glext.h:617
# ARB_texture_env_combine (GL/glext.h:620)
GL_COMBINE_ARB = 34160 # GL/glext.h:621
GL_COMBINE_RGB_ARB = 34161 # GL/glext.h:622
GL_COMBINE_ALPHA_ARB = 34162 # GL/glext.h:623
GL_SOURCE0_RGB_ARB = 34176 # GL/glext.h:624
GL_SOURCE1_RGB_ARB = 34177 # GL/glext.h:625
GL_SOURCE2_RGB_ARB = 34178 # GL/glext.h:626
GL_SOURCE0_ALPHA_ARB = 34184 # GL/glext.h:627
GL_SOURCE1_ALPHA_ARB = 34185 # GL/glext.h:628
GL_SOURCE2_ALPHA_ARB = 34186 # GL/glext.h:629
GL_OPERAND0_RGB_ARB = 34192 # GL/glext.h:630
GL_OPERAND1_RGB_ARB = 34193 # GL/glext.h:631
GL_OPERAND2_RGB_ARB = 34194 # GL/glext.h:632
GL_OPERAND0_ALPHA_ARB = 34200 # GL/glext.h:633
GL_OPERAND1_ALPHA_ARB = 34201 # GL/glext.h:634
GL_OPERAND2_ALPHA_ARB = 34202 # GL/glext.h:635
GL_RGB_SCALE_ARB = 34163 # GL/glext.h:636
GL_ADD_SIGNED_ARB = 34164 # GL/glext.h:637
GL_INTERPOLATE_ARB = 34165 # GL/glext.h:638
GL_SUBTRACT_ARB = 34023 # GL/glext.h:639
GL_CONSTANT_ARB = 34166 # GL/glext.h:640
GL_PRIMARY_COLOR_ARB = 34167 # GL/glext.h:641
GL_PREVIOUS_ARB = 34168 # GL/glext.h:642
# ARB_texture_env_crossbar (GL/glext.h:645)
# ARB_texture_env_dot3 (GL/glext.h:648)
GL_DOT3_RGB_ARB = 34478 # GL/glext.h:649
GL_DOT3_RGBA_ARB = 34479 # GL/glext.h:650
# ARB_texture_mirrored_repeat (GL/glext.h:653)
GL_MIRRORED_REPEAT_ARB = 33648 # GL/glext.h:654
# ARB_depth_texture (GL/glext.h:657)
GL_DEPTH_COMPONENT16_ARB = 33189 # GL/glext.h:658
GL_DEPTH_COMPONENT24_ARB = 33190 # GL/glext.h:659
GL_DEPTH_COMPONENT32_ARB = 33191 # GL/glext.h:660
GL_TEXTURE_DEPTH_SIZE_ARB = 34890 # GL/glext.h:661
GL_DEPTH_TEXTURE_MODE_ARB = 34891 # GL/glext.h:662
# ARB_shadow (GL/glext.h:665)
GL_TEXTURE_COMPARE_MODE_ARB = 34892 # GL/glext.h:666
GL_TEXTURE_COMPARE_FUNC_ARB = 34893 # GL/glext.h:667
GL_COMPARE_R_TO_TEXTURE_ARB = 34894 # GL/glext.h:668
# ARB_shadow_ambient (GL/glext.h:671)
GL_TEXTURE_COMPARE_FAIL_VALUE_ARB = 32959 # GL/glext.h:672
# ARB_window_pos (GL/glext.h:675)
# ARB_vertex_program (GL/glext.h:678)
GL_COLOR_SUM_ARB = 33880 # GL/glext.h:679
GL_VERTEX_PROGRAM_ARB = 34336 # GL/glext.h:680
GL_VERTEX_ATTRIB_ARRAY_ENABLED_ARB = 34338 # GL/glext.h:681
GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB = 34339 # GL/glext.h:682
GL_VERTEX_ATTRIB_ARRAY_STRIDE_ARB = 34340 # GL/glext.h:683
GL_VERTEX_ATTRIB_ARRAY_TYPE_ARB = 34341 # GL/glext.h:684
GL_CURRENT_VERTEX_ATTRIB_ARB = 34342 # GL/glext.h:685
GL_PROGRAM_LENGTH_ARB = 34343 # GL/glext.h:686
GL_PROGRAM_STRING_ARB = 34344 # GL/glext.h:687
GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB = 34350 # GL/glext.h:688
GL_MAX_PROGRAM_MATRICES_ARB = 34351 # GL/glext.h:689
GL_CURRENT_MATRIX_STACK_DEPTH_ARB = 34368 # GL/glext.h:690
GL_CURRENT_MATRIX_ARB = 34369 # GL/glext.h:691
GL_VERTEX_PROGRAM_POINT_SIZE_ARB = 34370 # GL/glext.h:692
GL_VERTEX_PROGRAM_TWO_SIDE_ARB = 34371 # GL/glext.h:693
GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB = 34373 # GL/glext.h:694
GL_PROGRAM_ERROR_POSITION_ARB = 34379 # GL/glext.h:695
GL_PROGRAM_BINDING_ARB = 34423 # GL/glext.h:696
GL_MAX_VERTEX_ATTRIBS_ARB = 34921 # GL/glext.h:697
GL_VERTEX_ATTRIB_ARRAY_NORMALIZED_ARB = 34922 # GL/glext.h:698
GL_PROGRAM_ERROR_STRING_ARB = 34932 # GL/glext.h:699
GL_PROGRAM_FORMAT_ASCII_ARB = 34933 # GL/glext.h:700
GL_PROGRAM_FORMAT_ARB = 34934 # GL/glext.h:701
GL_PROGRAM_INSTRUCTIONS_ARB = 34976 # GL/glext.h:702
GL_MAX_PROGRAM_INSTRUCTIONS_ARB = 34977 # GL/glext.h:703
GL_PROGRAM_NATIVE_INSTRUCTIONS_ARB = 34978 # GL/glext.h:704
GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB = 34979 # GL/glext.h:705
GL_PROGRAM_TEMPORARIES_ARB = 34980 # GL/glext.h:706
GL_MAX_PROGRAM_TEMPORARIES_ARB = 34981 # GL/glext.h:707
GL_PROGRAM_NATIVE_TEMPORARIES_ARB = 34982 # GL/glext.h:708
GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB = 34983 # GL/glext.h:709
GL_PROGRAM_PARAMETERS_ARB = 34984 # GL/glext.h:710
GL_MAX_PROGRAM_PARAMETERS_ARB = 34985 # GL/glext.h:711
GL_PROGRAM_NATIVE_PARAMETERS_ARB = 34986 # GL/glext.h:712
GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB = 34987 # GL/glext.h:713
GL_PROGRAM_ATTRIBS_ARB = 34988 # GL/glext.h:714
GL_MAX_PROGRAM_ATTRIBS_ARB = 34989 # GL/glext.h:715
GL_PROGRAM_NATIVE_ATTRIBS_ARB = 34990 # GL/glext.h:716
GL_MAX_PROGRAM_NATIVE_ATTRIBS_ARB = 34991 # GL/glext.h:717
GL_PROGRAM_ADDRESS_REGISTERS_ARB = 34992 # GL/glext.h:718
GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB = 34993 # GL/glext.h:719
GL_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB = 34994 # GL/glext.h:720
GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB = 34995 # GL/glext.h:721
GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB = 34996 # GL/glext.h:722
GL_MAX_PROGRAM_ENV_PARAMETERS_ARB = 34997 # GL/glext.h:723
GL_PROGRAM_UNDER_NATIVE_LIMITS_ARB = 34998 # GL/glext.h:724
GL_TRANSPOSE_CURRENT_MATRIX_ARB = 34999 # GL/glext.h:725
GL_MATRIX0_ARB = 35008 # GL/glext.h:726
GL_MATRIX1_ARB = 35009 # GL/glext.h:727
GL_MATRIX2_ARB = 35010 # GL/glext.h:728
GL_MATRIX3_ARB = 35011 # GL/glext.h:729
GL_MATRIX4_ARB = 35012 # GL/glext.h:730
GL_MATRIX5_ARB = 35013 # GL/glext.h:731
GL_MATRIX6_ARB = 35014 # GL/glext.h:732
GL_MATRIX7_ARB = 35015 # GL/glext.h:733
GL_MATRIX8_ARB = 35016 # GL/glext.h:734
GL_MATRIX9_ARB = 35017 # GL/glext.h:735
GL_MATRIX10_ARB = 35018 # GL/glext.h:736
GL_MATRIX11_ARB = 35019 # GL/glext.h:737
GL_MATRIX12_ARB = 35020 # GL/glext.h:738
GL_MATRIX13_ARB = 35021 # GL/glext.h:739
GL_MATRIX14_ARB = 35022 # GL/glext.h:740
GL_MATRIX15_ARB = 35023 # GL/glext.h:741
GL_MATRIX16_ARB = 35024 # GL/glext.h:742
GL_MATRIX17_ARB = 35025 # GL/glext.h:743
GL_MATRIX18_ARB = 35026 # GL/glext.h:744
GL_MATRIX19_ARB = 35027 # GL/glext.h:745
GL_MATRIX20_ARB = 35028 # GL/glext.h:746
GL_MATRIX21_ARB = 35029 # GL/glext.h:747
GL_MATRIX22_ARB = 35030 # GL/glext.h:748
GL_MATRIX23_ARB = 35031 # GL/glext.h:749
GL_MATRIX24_ARB = 35032 # GL/glext.h:750
GL_MATRIX25_ARB = 35033 # GL/glext.h:751
GL_MATRIX26_ARB = 35034 # GL/glext.h:752
GL_MATRIX27_ARB = 35035 # GL/glext.h:753
GL_MATRIX28_ARB = 35036 # GL/glext.h:754
GL_MATRIX29_ARB = 35037 # GL/glext.h:755
GL_MATRIX30_ARB = 35038 # GL/glext.h:756
GL_MATRIX31_ARB = 35039 # GL/glext.h:757
# ARB_fragment_program (GL/glext.h:760)
GL_FRAGMENT_PROGRAM_ARB = 34820 # GL/glext.h:761
GL_PROGRAM_ALU_INSTRUCTIONS_ARB = 34821 # GL/glext.h:762
GL_PROGRAM_TEX_INSTRUCTIONS_ARB = 34822 # GL/glext.h:763
GL_PROGRAM_TEX_INDIRECTIONS_ARB = 34823 # GL/glext.h:764
GL_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB = 34824 # GL/glext.h:765
GL_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB = 34825 # GL/glext.h:766
GL_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB = 34826 # GL/glext.h:767
GL_MAX_PROGRAM_ALU_INSTRUCTIONS_ARB = 34827 # GL/glext.h:768
GL_MAX_PROGRAM_TEX_INSTRUCTIONS_ARB = 34828 # GL/glext.h:769
GL_MAX_PROGRAM_TEX_INDIRECTIONS_ARB = 34829 # GL/glext.h:770
GL_MAX_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB = 34830 # GL/glext.h:771
GL_MAX_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB = 34831 # GL/glext.h:772
GL_MAX_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB = 34832 # GL/glext.h:773
GL_MAX_TEXTURE_COORDS_ARB = 34929 # GL/glext.h:774
GL_MAX_TEXTURE_IMAGE_UNITS_ARB = 34930 # GL/glext.h:775
# ARB_vertex_buffer_object (GL/glext.h:778)
GL_BUFFER_SIZE_ARB = 34660 # GL/glext.h:779
GL_BUFFER_USAGE_ARB = 34661 # GL/glext.h:780
GL_ARRAY_BUFFER_ARB = 34962 # GL/glext.h:781
GL_ELEMENT_ARRAY_BUFFER_ARB = 34963 # GL/glext.h:782
GL_ARRAY_BUFFER_BINDING_ARB = 34964 # GL/glext.h:783
GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB = 34965 # GL/glext.h:784
GL_VERTEX_ARRAY_BUFFER_BINDING_ARB = 34966 # GL/glext.h:785
GL_NORMAL_ARRAY_BUFFER_BINDING_ARB = 34967 # GL/glext.h:786
GL_COLOR_ARRAY_BUFFER_BINDING_ARB = 34968 # GL/glext.h:787
GL_INDEX_ARRAY_BUFFER_BINDING_ARB = 34969 # GL/glext.h:788
GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB = 34970 # GL/glext.h:789
GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB = 34971 # GL/glext.h:790
GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB = 34972 # GL/glext.h:791
GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB = 34973 # GL/glext.h:792
GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB = 34974 # GL/glext.h:793
GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING_ARB = 34975 # GL/glext.h:794
GL_READ_ONLY_ARB = 35000 # GL/glext.h:795
GL_WRITE_ONLY_ARB = 35001 # GL/glext.h:796
GL_READ_WRITE_ARB = 35002 # GL/glext.h:797
GL_BUFFER_ACCESS_ARB = 35003 # GL/glext.h:798
GL_BUFFER_MAPPED_ARB = 35004 # GL/glext.h:799
GL_BUFFER_MAP_POINTER_ARB = 35005 # GL/glext.h:800
GL_STREAM_DRAW_ARB = 35040 # GL/glext.h:801
GL_STREAM_READ_ARB = 35041 # GL/glext.h:802
GL_STREAM_COPY_ARB = 35042 # GL/glext.h:803
GL_STATIC_DRAW_ARB = 35044 # GL/glext.h:804
GL_STATIC_READ_ARB = 35045 # GL/glext.h:805
GL_STATIC_COPY_ARB = 35046 # GL/glext.h:806
GL_DYNAMIC_DRAW_ARB = 35048 # GL/glext.h:807
GL_DYNAMIC_READ_ARB = 35049 # GL/glext.h:808
GL_DYNAMIC_COPY_ARB = 35050 # GL/glext.h:809
# ARB_occlusion_query (GL/glext.h:812)
GL_QUERY_COUNTER_BITS_ARB = 34916 # GL/glext.h:813
GL_CURRENT_QUERY_ARB = 34917 # GL/glext.h:814
GL_QUERY_RESULT_ARB = 34918 # GL/glext.h:815
GL_QUERY_RESULT_AVAILABLE_ARB = 34919 # GL/glext.h:816
GL_SAMPLES_PASSED_ARB = 35092 # GL/glext.h:817
# ARB_shader_objects (GL/glext.h:820)
GL_PROGRAM_OBJECT_ARB = 35648 # GL/glext.h:821
GL_SHADER_OBJECT_ARB = 35656 # GL/glext.h:822
GL_OBJECT_TYPE_ARB = 35662 # GL/glext.h:823
GL_OBJECT_SUBTYPE_ARB = 35663 # GL/glext.h:824
GL_FLOAT_VEC2_ARB = 35664 # GL/glext.h:825
GL_FLOAT_VEC3_ARB = 35665 # GL/glext.h:826
GL_FLOAT_VEC4_ARB = 35666 # GL/glext.h:827
GL_INT_VEC2_ARB = 35667 # GL/glext.h:828
GL_INT_VEC3_ARB = 35668 # GL/glext.h:829
GL_INT_VEC4_ARB = 35669 # GL/glext.h:830
GL_BOOL_ARB = 35670 # GL/glext.h:831
GL_BOOL_VEC2_ARB = 35671 # GL/glext.h:832
GL_BOOL_VEC3_ARB = 35672 # GL/glext.h:833
GL_BOOL_VEC4_ARB = 35673 # GL/glext.h:834
GL_FLOAT_MAT2_ARB = 35674 # GL/glext.h:835
GL_FLOAT_MAT3_ARB = 35675 # GL/glext.h:836
GL_FLOAT_MAT4_ARB = 35676 # GL/glext.h:837
GL_SAMPLER_1D_ARB = 35677 # GL/glext.h:838
GL_SAMPLER_2D_ARB = 35678 # GL/glext.h:839
GL_SAMPLER_3D_ARB = 35679 # GL/glext.h:840
GL_SAMPLER_CUBE_ARB = 35680 # GL/glext.h:841
GL_SAMPLER_1D_SHADOW_ARB = 35681 # GL/glext.h:842
GL_SAMPLER_2D_SHADOW_ARB = 35682 # GL/glext.h:843
GL_SAMPLER_2D_RECT_ARB = 35683 # GL/glext.h:844
GL_SAMPLER_2D_RECT_SHADOW_ARB = 35684 # GL/glext.h:845
GL_OBJECT_DELETE_STATUS_ARB = 35712 # GL/glext.h:846
GL_OBJECT_COMPILE_STATUS_ARB = 35713 # GL/glext.h:847
GL_OBJECT_LINK_STATUS_ARB = 35714 # GL/glext.h:848
GL_OBJECT_VALIDATE_STATUS_ARB = 35715 # GL/glext.h:849
GL_OBJECT_INFO_LOG_LENGTH_ARB = 35716 # GL/glext.h:850
GL_OBJECT_ATTACHED_OBJECTS_ARB = 35717 # GL/glext.h:851
GL_OBJECT_ACTIVE_UNIFORMS_ARB = 35718 # GL/glext.h:852
GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH_ARB = 35719 # GL/glext.h:853
GL_OBJECT_SHADER_SOURCE_LENGTH_ARB = 35720 # GL/glext.h:854
# ARB_vertex_shader (GL/glext.h:857)
GL_VERTEX_SHADER_ARB = 35633 # GL/glext.h:858
GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB = 35658 # GL/glext.h:859
GL_MAX_VARYING_FLOATS_ARB = 35659 # GL/glext.h:860
GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB = 35660 # GL/glext.h:861
GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB = 35661 # GL/glext.h:862
GL_OBJECT_ACTIVE_ATTRIBUTES_ARB = 35721 # GL/glext.h:863
GL_OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB = 35722 # GL/glext.h:864
# ARB_fragment_shader (GL/glext.h:867)
GL_FRAGMENT_SHADER_ARB = 35632 # GL/glext.h:868
GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB = 35657 # GL/glext.h:869
GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB = 35723 # GL/glext.h:870
# ARB_shading_language_100 (GL/glext.h:873)
GL_SHADING_LANGUAGE_VERSION_ARB = 35724 # GL/glext.h:874
# ARB_texture_non_power_of_two (GL/glext.h:877)
# ARB_point_sprite (GL/glext.h:880)
GL_POINT_SPRITE_ARB = 34913 # GL/glext.h:881
GL_COORD_REPLACE_ARB = 34914 # GL/glext.h:882
# ARB_fragment_program_shadow (GL/glext.h:885)
# ARB_draw_buffers (GL/glext.h:888)
GL_MAX_DRAW_BUFFERS_ARB = 34852 # GL/glext.h:889
GL_DRAW_BUFFER0_ARB = 34853 # GL/glext.h:890
GL_DRAW_BUFFER1_ARB = 34854 # GL/glext.h:891
GL_DRAW_BUFFER2_ARB = 34855 # GL/glext.h:892
GL_DRAW_BUFFER3_ARB = 34856 # GL/glext.h:893
GL_DRAW_BUFFER4_ARB = 34857 # GL/glext.h:894
GL_DRAW_BUFFER5_ARB = 34858 # GL/glext.h:895
GL_DRAW_BUFFER6_ARB = 34859 # GL/glext.h:896
GL_DRAW_BUFFER7_ARB = 34860 # GL/glext.h:897
GL_DRAW_BUFFER8_ARB = 34861 # GL/glext.h:898
GL_DRAW_BUFFER9_ARB = 34862 # GL/glext.h:899
GL_DRAW_BUFFER10_ARB = 34863 # GL/glext.h:900
GL_DRAW_BUFFER11_ARB = 34864 # GL/glext.h:901
GL_DRAW_BUFFER12_ARB = 34865 # GL/glext.h:902
GL_DRAW_BUFFER13_ARB = 34866 # GL/glext.h:903
GL_DRAW_BUFFER14_ARB = 34867 # GL/glext.h:904
GL_DRAW_BUFFER15_ARB = 34868 # GL/glext.h:905
# ARB_texture_rectangle (GL/glext.h:908)
GL_TEXTURE_RECTANGLE_ARB = 34037 # GL/glext.h:909
GL_TEXTURE_BINDING_RECTANGLE_ARB = 34038 # GL/glext.h:910
GL_PROXY_TEXTURE_RECTANGLE_ARB = 34039 # GL/glext.h:911
GL_MAX_RECTANGLE_TEXTURE_SIZE_ARB = 34040 # GL/glext.h:912
# ARB_color_buffer_float (GL/glext.h:915)
GL_RGBA_FLOAT_MODE_ARB = 34848 # GL/glext.h:916
GL_CLAMP_VERTEX_COLOR_ARB = 35098 # GL/glext.h:917
GL_CLAMP_FRAGMENT_COLOR_ARB = 35099 # GL/glext.h:918
GL_CLAMP_READ_COLOR_ARB = 35100 # GL/glext.h:919
GL_FIXED_ONLY_ARB = 35101 # GL/glext.h:920
# ARB_half_float_pixel (GL/glext.h:923)
GL_HALF_FLOAT_ARB = 5131 # GL/glext.h:924
# ARB_texture_float (GL/glext.h:927)
GL_TEXTURE_RED_TYPE_ARB = 35856 # GL/glext.h:928
GL_TEXTURE_GREEN_TYPE_ARB = 35857 # GL/glext.h:929
GL_TEXTURE_BLUE_TYPE_ARB = 35858 # GL/glext.h:930
GL_TEXTURE_ALPHA_TYPE_ARB = 35859 # GL/glext.h:931
GL_TEXTURE_LUMINANCE_TYPE_ARB = 35860 # GL/glext.h:932
GL_TEXTURE_INTENSITY_TYPE_ARB = 35861 # GL/glext.h:933
GL_TEXTURE_DEPTH_TYPE_ARB = 35862 # GL/glext.h:934
GL_UNSIGNED_NORMALIZED_ARB = 35863 # GL/glext.h:935
GL_RGBA32F_ARB = 34836 # GL/glext.h:936
GL_RGB32F_ARB = 34837 # GL/glext.h:937
GL_ALPHA32F_ARB = 34838 # GL/glext.h:938
GL_INTENSITY32F_ARB = 34839 # GL/glext.h:939
GL_LUMINANCE32F_ARB = 34840 # GL/glext.h:940
GL_LUMINANCE_ALPHA32F_ARB = 34841 # GL/glext.h:941
GL_RGBA16F_ARB = 34842 # GL/glext.h:942
GL_RGB16F_ARB = 34843 # GL/glext.h:943
GL_ALPHA16F_ARB = 34844 # GL/glext.h:944
GL_INTENSITY16F_ARB = 34845 # GL/glext.h:945
GL_LUMINANCE16F_ARB = 34846 # GL/glext.h:946
GL_LUMINANCE_ALPHA16F_ARB = 34847 # GL/glext.h:947
# ARB_pixel_buffer_object (GL/glext.h:950)
GL_PIXEL_PACK_BUFFER_ARB = 35051 # GL/glext.h:951
GL_PIXEL_UNPACK_BUFFER_ARB = 35052 # GL/glext.h:952
GL_PIXEL_PACK_BUFFER_BINDING_ARB = 35053 # GL/glext.h:953
GL_PIXEL_UNPACK_BUFFER_BINDING_ARB = 35055 # GL/glext.h:954
# EXT_abgr (GL/glext.h:957)
GL_ABGR_EXT = 32768 # GL/glext.h:958
# EXT_blend_color (GL/glext.h:961)
GL_CONSTANT_COLOR_EXT = 32769 # GL/glext.h:962
GL_ONE_MINUS_CONSTANT_COLOR_EXT = 32770 # GL/glext.h:963
GL_CONSTANT_ALPHA_EXT = 32771 # GL/glext.h:964
GL_ONE_MINUS_CONSTANT_ALPHA_EXT = 32772 # GL/glext.h:965
GL_BLEND_COLOR_EXT = 32773 # GL/glext.h:966
# EXT_polygon_offset (GL/glext.h:969)
GL_POLYGON_OFFSET_EXT = 32823 # GL/glext.h:970
GL_POLYGON_OFFSET_FACTOR_EXT = 32824 # GL/glext.h:971
GL_POLYGON_OFFSET_BIAS_EXT = 32825 # GL/glext.h:972
# EXT_texture (GL/glext.h:975)
GL_ALPHA4_EXT = 32827 # GL/glext.h:976
GL_ALPHA8_EXT = 32828 # GL/glext.h:977
GL_ALPHA12_EXT = 32829 # GL/glext.h:978
GL_ALPHA16_EXT = 32830 # GL/glext.h:979
GL_LUMINANCE4_EXT = 32831 # GL/glext.h:980
GL_LUMINANCE8_EXT = 32832 # GL/glext.h:981
GL_LUMINANCE12_EXT = 32833 # GL/glext.h:982
GL_LUMINANCE16_EXT = 32834 # GL/glext.h:983
GL_LUMINANCE4_ALPHA4_EXT = 32835 # GL/glext.h:984
GL_LUMINANCE6_ALPHA2_EXT = 32836 # GL/glext.h:985
GL_LUMINANCE8_ALPHA8_EXT = 32837 # GL/glext.h:986
GL_LUMINANCE12_ALPHA4_EXT = 32838 # GL/glext.h:987
GL_LUMINANCE12_ALPHA12_EXT = 32839 # GL/glext.h:988
GL_LUMINANCE16_ALPHA16_EXT = 32840 # GL/glext.h:989
GL_INTENSITY_EXT = 32841 # GL/glext.h:990
GL_INTENSITY4_EXT = 32842 # GL/glext.h:991
GL_INTENSITY8_EXT = 32843 # GL/glext.h:992
GL_INTENSITY12_EXT = 32844 # GL/glext.h:993
GL_INTENSITY16_EXT = 32845 # GL/glext.h:994
GL_RGB2_EXT = 32846 # GL/glext.h:995
GL_RGB4_EXT = 32847 # GL/glext.h:996
GL_RGB5_EXT = 32848 # GL/glext.h:997
GL_RGB8_EXT = 32849 # GL/glext.h:998
GL_RGB10_EXT = 32850 # GL/glext.h:999
GL_RGB12_EXT = 32851 # GL/glext.h:1000
GL_RGB16_EXT = 32852 # GL/glext.h:1001
GL_RGBA2_EXT = 32853 # GL/glext.h:1002
GL_RGBA4_EXT = 32854 # GL/glext.h:1003
GL_RGB5_A1_EXT = 32855 # GL/glext.h:1004
GL_RGBA8_EXT = 32856 # GL/glext.h:1005
GL_RGB10_A2_EXT = 32857 # GL/glext.h:1006
GL_RGBA12_EXT = 32858 # GL/glext.h:1007
GL_RGBA16_EXT = 32859 # GL/glext.h:1008
GL_TEXTURE_RED_SIZE_EXT = 32860 # GL/glext.h:1009
GL_TEXTURE_GREEN_SIZE_EXT = 32861 # GL/glext.h:1010
GL_TEXTURE_BLUE_SIZE_EXT = 32862 # GL/glext.h:1011
GL_TEXTURE_ALPHA_SIZE_EXT = 32863 # GL/glext.h:1012
GL_TEXTURE_LUMINANCE_SIZE_EXT = 32864 # GL/glext.h:1013
GL_TEXTURE_INTENSITY_SIZE_EXT = 32865 # GL/glext.h:1014
GL_REPLACE_EXT = 32866 # GL/glext.h:1015
GL_PROXY_TEXTURE_1D_EXT = 32867 # GL/glext.h:1016
GL_PROXY_TEXTURE_2D_EXT = 32868 # GL/glext.h:1017
GL_TEXTURE_TOO_LARGE_EXT = 32869 # GL/glext.h:1018
# EXT_texture3D (GL/glext.h:1021)
GL_PACK_SKIP_IMAGES_EXT = 32875 # GL/glext.h:1022
GL_PACK_IMAGE_HEIGHT_EXT = 32876 # GL/glext.h:1023
GL_UNPACK_SKIP_IMAGES_EXT = 32877 # GL/glext.h:1024
GL_UNPACK_IMAGE_HEIGHT_EXT = 32878 # GL/glext.h:1025
GL_TEXTURE_3D_EXT = 32879 # GL/glext.h:1026
GL_PROXY_TEXTURE_3D_EXT = 32880 # GL/glext.h:1027
GL_TEXTURE_DEPTH_EXT = 32881 # GL/glext.h:1028
GL_TEXTURE_WRAP_R_EXT = 32882 # GL/glext.h:1029
# Machine-derived OpenGL extension tokens (values from the Khronos
# OpenGL registry, GL/glext.h).  Grouped below by extension name;
# extensions that define no tokens are noted but empty.

# tail of EXT_texture3D
GL_MAX_3D_TEXTURE_SIZE_EXT = 32883

# --- SGIS_texture_filter4 ---
GL_FILTER4_SGIS = 33094
GL_TEXTURE_FILTER4_SIZE_SGIS = 33095

# --- EXT_subtexture (no tokens) ---
# --- EXT_copy_texture (no tokens) ---

# --- EXT_histogram ---
GL_HISTOGRAM_EXT = 32804
GL_PROXY_HISTOGRAM_EXT = 32805
GL_HISTOGRAM_WIDTH_EXT = 32806
GL_HISTOGRAM_FORMAT_EXT = 32807
GL_HISTOGRAM_RED_SIZE_EXT = 32808
GL_HISTOGRAM_GREEN_SIZE_EXT = 32809
GL_HISTOGRAM_BLUE_SIZE_EXT = 32810
GL_HISTOGRAM_ALPHA_SIZE_EXT = 32811
GL_HISTOGRAM_LUMINANCE_SIZE_EXT = 32812
GL_HISTOGRAM_SINK_EXT = 32813
GL_MINMAX_EXT = 32814
GL_MINMAX_FORMAT_EXT = 32815
GL_MINMAX_SINK_EXT = 32816
GL_TABLE_TOO_LARGE_EXT = 32817

# --- EXT_convolution ---
GL_CONVOLUTION_1D_EXT = 32784
GL_CONVOLUTION_2D_EXT = 32785
GL_SEPARABLE_2D_EXT = 32786
GL_CONVOLUTION_BORDER_MODE_EXT = 32787
GL_CONVOLUTION_FILTER_SCALE_EXT = 32788
GL_CONVOLUTION_FILTER_BIAS_EXT = 32789
GL_REDUCE_EXT = 32790
GL_CONVOLUTION_FORMAT_EXT = 32791
GL_CONVOLUTION_WIDTH_EXT = 32792
GL_CONVOLUTION_HEIGHT_EXT = 32793
GL_MAX_CONVOLUTION_WIDTH_EXT = 32794
GL_MAX_CONVOLUTION_HEIGHT_EXT = 32795
GL_POST_CONVOLUTION_RED_SCALE_EXT = 32796
GL_POST_CONVOLUTION_GREEN_SCALE_EXT = 32797
GL_POST_CONVOLUTION_BLUE_SCALE_EXT = 32798
GL_POST_CONVOLUTION_ALPHA_SCALE_EXT = 32799
GL_POST_CONVOLUTION_RED_BIAS_EXT = 32800
GL_POST_CONVOLUTION_GREEN_BIAS_EXT = 32801
GL_POST_CONVOLUTION_BLUE_BIAS_EXT = 32802
GL_POST_CONVOLUTION_ALPHA_BIAS_EXT = 32803

# --- SGI_color_matrix ---
GL_COLOR_MATRIX_SGI = 32945
GL_COLOR_MATRIX_STACK_DEPTH_SGI = 32946
GL_MAX_COLOR_MATRIX_STACK_DEPTH_SGI = 32947
GL_POST_COLOR_MATRIX_RED_SCALE_SGI = 32948
GL_POST_COLOR_MATRIX_GREEN_SCALE_SGI = 32949
GL_POST_COLOR_MATRIX_BLUE_SCALE_SGI = 32950
GL_POST_COLOR_MATRIX_ALPHA_SCALE_SGI = 32951
GL_POST_COLOR_MATRIX_RED_BIAS_SGI = 32952
GL_POST_COLOR_MATRIX_GREEN_BIAS_SGI = 32953
GL_POST_COLOR_MATRIX_BLUE_BIAS_SGI = 32954
GL_POST_COLOR_MATRIX_ALPHA_BIAS_SGI = 32955

# --- SGI_color_table ---
GL_COLOR_TABLE_SGI = 32976
GL_POST_CONVOLUTION_COLOR_TABLE_SGI = 32977
GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI = 32978
GL_PROXY_COLOR_TABLE_SGI = 32979
GL_PROXY_POST_CONVOLUTION_COLOR_TABLE_SGI = 32980
GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE_SGI = 32981
GL_COLOR_TABLE_SCALE_SGI = 32982
GL_COLOR_TABLE_BIAS_SGI = 32983
GL_COLOR_TABLE_FORMAT_SGI = 32984
GL_COLOR_TABLE_WIDTH_SGI = 32985
GL_COLOR_TABLE_RED_SIZE_SGI = 32986
GL_COLOR_TABLE_GREEN_SIZE_SGI = 32987
GL_COLOR_TABLE_BLUE_SIZE_SGI = 32988
GL_COLOR_TABLE_ALPHA_SIZE_SGI = 32989
GL_COLOR_TABLE_LUMINANCE_SIZE_SGI = 32990
GL_COLOR_TABLE_INTENSITY_SIZE_SGI = 32991

# --- SGIS_pixel_texture ---
GL_PIXEL_TEXTURE_SGIS = 33619
GL_PIXEL_FRAGMENT_RGB_SOURCE_SGIS = 33620
GL_PIXEL_FRAGMENT_ALPHA_SOURCE_SGIS = 33621
GL_PIXEL_GROUP_COLOR_SGIS = 33622

# --- SGIX_pixel_texture ---
GL_PIXEL_TEX_GEN_SGIX = 33081
GL_PIXEL_TEX_GEN_MODE_SGIX = 33579

# --- SGIS_texture4D ---
GL_PACK_SKIP_VOLUMES_SGIS = 33072
GL_PACK_IMAGE_DEPTH_SGIS = 33073
GL_UNPACK_SKIP_VOLUMES_SGIS = 33074
GL_UNPACK_IMAGE_DEPTH_SGIS = 33075
GL_TEXTURE_4D_SGIS = 33076
GL_PROXY_TEXTURE_4D_SGIS = 33077
GL_TEXTURE_4DSIZE_SGIS = 33078
GL_TEXTURE_WRAP_Q_SGIS = 33079
GL_MAX_4D_TEXTURE_SIZE_SGIS = 33080
GL_TEXTURE_4D_BINDING_SGIS = 33103

# --- SGI_texture_color_table ---
GL_TEXTURE_COLOR_TABLE_SGI = 32956
GL_PROXY_TEXTURE_COLOR_TABLE_SGI = 32957

# --- EXT_cmyka ---
GL_CMYK_EXT = 32780
GL_CMYKA_EXT = 32781
GL_PACK_CMYK_HINT_EXT = 32782
GL_UNPACK_CMYK_HINT_EXT = 32783

# --- EXT_texture_object ---
GL_TEXTURE_PRIORITY_EXT = 32870
GL_TEXTURE_RESIDENT_EXT = 32871
GL_TEXTURE_1D_BINDING_EXT = 32872
GL_TEXTURE_2D_BINDING_EXT = 32873
GL_TEXTURE_3D_BINDING_EXT = 32874

# --- SGIS_detail_texture ---
GL_DETAIL_TEXTURE_2D_SGIS = 32917
GL_DETAIL_TEXTURE_2D_BINDING_SGIS = 32918
GL_LINEAR_DETAIL_SGIS = 32919
GL_LINEAR_DETAIL_ALPHA_SGIS = 32920
GL_LINEAR_DETAIL_COLOR_SGIS = 32921
GL_DETAIL_TEXTURE_LEVEL_SGIS = 32922
GL_DETAIL_TEXTURE_MODE_SGIS = 32923
GL_DETAIL_TEXTURE_FUNC_POINTS_SGIS = 32924

# --- SGIS_sharpen_texture ---
GL_LINEAR_SHARPEN_SGIS = 32941
GL_LINEAR_SHARPEN_ALPHA_SGIS = 32942
GL_LINEAR_SHARPEN_COLOR_SGIS = 32943
GL_SHARPEN_TEXTURE_FUNC_POINTS_SGIS = 32944

# --- EXT_packed_pixels ---
GL_UNSIGNED_BYTE_3_3_2_EXT = 32818
GL_UNSIGNED_SHORT_4_4_4_4_EXT = 32819
GL_UNSIGNED_SHORT_5_5_5_1_EXT = 32820
GL_UNSIGNED_INT_8_8_8_8_EXT = 32821
GL_UNSIGNED_INT_10_10_10_2_EXT = 32822

# --- SGIS_texture_lod ---
GL_TEXTURE_MIN_LOD_SGIS = 33082
GL_TEXTURE_MAX_LOD_SGIS = 33083
GL_TEXTURE_BASE_LEVEL_SGIS = 33084
GL_TEXTURE_MAX_LEVEL_SGIS = 33085

# --- SGIS_multisample ---
GL_MULTISAMPLE_SGIS = 32925
GL_SAMPLE_ALPHA_TO_MASK_SGIS = 32926
GL_SAMPLE_ALPHA_TO_ONE_SGIS = 32927
GL_SAMPLE_MASK_SGIS = 32928
GL_1PASS_SGIS = 32929
GL_2PASS_0_SGIS = 32930
GL_2PASS_1_SGIS = 32931
GL_4PASS_0_SGIS = 32932
GL_4PASS_1_SGIS = 32933
GL_4PASS_2_SGIS = 32934
GL_4PASS_3_SGIS = 32935
GL_SAMPLE_BUFFERS_SGIS = 32936
GL_SAMPLES_SGIS = 32937
GL_SAMPLE_MASK_VALUE_SGIS = 32938
GL_SAMPLE_MASK_INVERT_SGIS = 32939
GL_SAMPLE_PATTERN_SGIS = 32940

# --- EXT_rescale_normal ---
GL_RESCALE_NORMAL_EXT = 32826

# --- EXT_vertex_array ---
GL_VERTEX_ARRAY_EXT = 32884
GL_NORMAL_ARRAY_EXT = 32885
GL_COLOR_ARRAY_EXT = 32886
GL_INDEX_ARRAY_EXT = 32887
GL_TEXTURE_COORD_ARRAY_EXT = 32888
GL_EDGE_FLAG_ARRAY_EXT = 32889
GL_VERTEX_ARRAY_SIZE_EXT = 32890
GL_VERTEX_ARRAY_TYPE_EXT = 32891
GL_VERTEX_ARRAY_STRIDE_EXT = 32892
GL_VERTEX_ARRAY_COUNT_EXT = 32893
GL_NORMAL_ARRAY_TYPE_EXT = 32894
GL_NORMAL_ARRAY_STRIDE_EXT = 32895
GL_NORMAL_ARRAY_COUNT_EXT = 32896
GL_COLOR_ARRAY_SIZE_EXT = 32897
GL_COLOR_ARRAY_TYPE_EXT = 32898
GL_COLOR_ARRAY_STRIDE_EXT = 32899
GL_COLOR_ARRAY_COUNT_EXT = 32900
GL_INDEX_ARRAY_TYPE_EXT = 32901
GL_INDEX_ARRAY_STRIDE_EXT = 32902
GL_INDEX_ARRAY_COUNT_EXT = 32903
GL_TEXTURE_COORD_ARRAY_SIZE_EXT = 32904
GL_TEXTURE_COORD_ARRAY_TYPE_EXT = 32905
GL_TEXTURE_COORD_ARRAY_STRIDE_EXT = 32906
GL_TEXTURE_COORD_ARRAY_COUNT_EXT = 32907
GL_EDGE_FLAG_ARRAY_STRIDE_EXT = 32908
GL_EDGE_FLAG_ARRAY_COUNT_EXT = 32909
GL_VERTEX_ARRAY_POINTER_EXT = 32910
GL_NORMAL_ARRAY_POINTER_EXT = 32911
GL_COLOR_ARRAY_POINTER_EXT = 32912
GL_INDEX_ARRAY_POINTER_EXT = 32913
GL_TEXTURE_COORD_ARRAY_POINTER_EXT = 32914
GL_EDGE_FLAG_ARRAY_POINTER_EXT = 32915

# --- EXT_misc_attribute (no tokens) ---

# --- SGIS_generate_mipmap ---
GL_GENERATE_MIPMAP_SGIS = 33169
GL_GENERATE_MIPMAP_HINT_SGIS = 33170

# --- SGIX_clipmap ---
GL_LINEAR_CLIPMAP_LINEAR_SGIX = 33136
GL_TEXTURE_CLIPMAP_CENTER_SGIX = 33137
GL_TEXTURE_CLIPMAP_FRAME_SGIX = 33138
GL_TEXTURE_CLIPMAP_OFFSET_SGIX = 33139
GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX = 33140
GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX = 33141
GL_TEXTURE_CLIPMAP_DEPTH_SGIX = 33142
GL_MAX_CLIPMAP_DEPTH_SGIX = 33143
GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX = 33144
GL_NEAREST_CLIPMAP_NEAREST_SGIX = 33869
GL_NEAREST_CLIPMAP_LINEAR_SGIX = 33870
GL_LINEAR_CLIPMAP_NEAREST_SGIX = 33871

# --- SGIX_shadow ---
GL_TEXTURE_COMPARE_SGIX = 33178
GL_TEXTURE_COMPARE_OPERATOR_SGIX = 33179
GL_TEXTURE_LEQUAL_R_SGIX = 33180
GL_TEXTURE_GEQUAL_R_SGIX = 33181

# --- SGIS_texture_edge_clamp ---
GL_CLAMP_TO_EDGE_SGIS = 33071

# --- SGIS_texture_border_clamp ---
GL_CLAMP_TO_BORDER_SGIS = 33069

# --- EXT_blend_minmax ---
GL_FUNC_ADD_EXT = 32774
GL_MIN_EXT = 32775
GL_MAX_EXT = 32776
GL_BLEND_EQUATION_EXT = 32777

# --- EXT_blend_subtract ---
GL_FUNC_SUBTRACT_EXT = 32778
GL_FUNC_REVERSE_SUBTRACT_EXT = 32779

# --- EXT_blend_logic_op (no tokens) ---

# --- SGIX_interlace ---
GL_INTERLACE_SGIX = 32916

# --- SGIX_pixel_tiles ---
GL_PIXEL_TILE_BEST_ALIGNMENT_SGIX = 33086
GL_PIXEL_TILE_CACHE_INCREMENT_SGIX = 33087
GL_PIXEL_TILE_WIDTH_SGIX = 33088
GL_PIXEL_TILE_HEIGHT_SGIX = 33089
GL_PIXEL_TILE_GRID_WIDTH_SGIX = 33090
GL_PIXEL_TILE_GRID_HEIGHT_SGIX = 33091
GL_PIXEL_TILE_GRID_DEPTH_SGIX = 33092
GL_PIXEL_TILE_CACHE_SIZE_SGIX = 33093

# --- SGIS_texture_select ---
GL_DUAL_ALPHA4_SGIS = 33040
GL_DUAL_ALPHA8_SGIS = 33041
GL_DUAL_ALPHA12_SGIS = 33042
GL_DUAL_ALPHA16_SGIS = 33043
GL_DUAL_LUMINANCE4_SGIS = 33044
GL_DUAL_LUMINANCE8_SGIS = 33045
GL_DUAL_LUMINANCE12_SGIS = 33046
GL_DUAL_LUMINANCE16_SGIS = 33047
GL_DUAL_INTENSITY4_SGIS = 33048
GL_DUAL_INTENSITY8_SGIS = 33049
GL_DUAL_INTENSITY12_SGIS = 33050
GL_DUAL_INTENSITY16_SGIS = 33051
GL_DUAL_LUMINANCE_ALPHA4_SGIS = 33052
GL_DUAL_LUMINANCE_ALPHA8_SGIS = 33053
GL_QUAD_ALPHA4_SGIS = 33054
GL_QUAD_ALPHA8_SGIS = 33055
GL_QUAD_LUMINANCE4_SGIS = 33056
GL_QUAD_LUMINANCE8_SGIS = 33057
GL_QUAD_INTENSITY4_SGIS = 33058
GL_QUAD_INTENSITY8_SGIS = 33059
GL_DUAL_TEXTURE_SELECT_SGIS = 33060
GL_QUAD_TEXTURE_SELECT_SGIS = 33061

# --- SGIX_sprite ---
GL_SPRITE_SGIX = 33096
GL_SPRITE_MODE_SGIX = 33097
GL_SPRITE_AXIS_SGIX = 33098
GL_SPRITE_TRANSLATION_SGIX = 33099
GL_SPRITE_AXIAL_SGIX = 33100
GL_SPRITE_OBJECT_ALIGNED_SGIX = 33101
GL_SPRITE_EYE_ALIGNED_SGIX = 33102

# --- SGIX_texture_multi_buffer ---
GL_TEXTURE_MULTI_BUFFER_HINT_SGIX = 33070

# --- EXT_point_parameters ---
GL_POINT_SIZE_MIN_EXT = 33062
GL_POINT_SIZE_MAX_EXT = 33063
GL_POINT_FADE_THRESHOLD_SIZE_EXT = 33064
GL_DISTANCE_ATTENUATION_EXT = 33065

# --- SGIS_point_parameters (same token values as the EXT variant) ---
GL_POINT_SIZE_MIN_SGIS = 33062
GL_POINT_SIZE_MAX_SGIS = 33063
GL_POINT_FADE_THRESHOLD_SIZE_SGIS = 33064
GL_DISTANCE_ATTENUATION_SGIS = 33065

# --- SGIX_instruments ---
GL_INSTRUMENT_BUFFER_POINTER_SGIX = 33152
GL_INSTRUMENT_MEASUREMENTS_SGIX = 33153

# --- SGIX_texture_scale_bias ---
GL_POST_TEXTURE_FILTER_BIAS_SGIX = 33145
GL_POST_TEXTURE_FILTER_SCALE_SGIX = 33146
GL_POST_TEXTURE_FILTER_BIAS_RANGE_SGIX = 33147
GL_POST_TEXTURE_FILTER_SCALE_RANGE_SGIX = 33148

# --- SGIX_framezoom ---
GL_FRAMEZOOM_SGIX = 33163
GL_FRAMEZOOM_FACTOR_SGIX = 33164
GL_MAX_FRAMEZOOM_FACTOR_SGIX = 33165

# --- SGIX_tag_sample_buffer (no tokens) ---

# --- FfdMaskSGIX (bitmask values) ---
GL_TEXTURE_DEFORMATION_BIT_SGIX = 1
GL_GEOMETRY_DEFORMATION_BIT_SGIX = 2

# --- SGIX_polynomial_ffd ---
GL_GEOMETRY_DEFORMATION_SGIX = 33172
GL_TEXTURE_DEFORMATION_SGIX = 33173
GL_DEFORMATIONS_MASK_SGIX = 33174
GL_MAX_DEFORMATION_ORDER_SGIX = 33175

# --- SGIX_reference_plane ---
GL_REFERENCE_PLANE_SGIX = 33149
GL_REFERENCE_PLANE_EQUATION_SGIX = 33150

# --- SGIX_flush_raster (no tokens) ---

# --- SGIX_depth_texture ---
GL_DEPTH_COMPONENT16_SGIX = 33189
GL_DEPTH_COMPONENT24_SGIX = 33190
GL_DEPTH_COMPONENT32_SGIX = 33191

# --- SGIS_fog_function ---
GL_FOG_FUNC_SGIS = 33066
GL_FOG_FUNC_POINTS_SGIS = 33067
GL_MAX_FOG_FUNC_POINTS_SGIS = 33068

# --- SGIX_fog_offset ---
GL_FOG_OFFSET_SGIX = 33176
GL_FOG_OFFSET_VALUE_SGIX = 33177

# --- HP_image_transform ---
GL_IMAGE_SCALE_X_HP = 33109
GL_IMAGE_SCALE_Y_HP = 33110
GL_IMAGE_TRANSLATE_X_HP = 33111
GL_IMAGE_TRANSLATE_Y_HP = 33112
GL_IMAGE_ROTATE_ANGLE_HP = 33113
GL_IMAGE_ROTATE_ORIGIN_X_HP = 33114
GL_IMAGE_ROTATE_ORIGIN_Y_HP = 33115
GL_IMAGE_MAG_FILTER_HP = 33116
GL_IMAGE_MIN_FILTER_HP = 33117
GL_IMAGE_CUBIC_WEIGHT_HP = 33118
GL_CUBIC_HP = 33119
GL_AVERAGE_HP = 33120
GL_IMAGE_TRANSFORM_2D_HP = 33121
GL_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP = 33122
GL_PROXY_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP = 33123

# --- HP_convolution_border_modes ---
GL_IGNORE_BORDER_HP = 33104
GL_CONSTANT_BORDER_HP = 33105
GL_REPLICATE_BORDER_HP = 33107
GL_CONVOLUTION_BORDER_COLOR_HP = 33108

# --- INGR_palette_buffer (no tokens) ---

# --- SGIX_texture_add_env ---
GL_TEXTURE_ENV_BIAS_SGIX = 32958

# --- EXT_color_subtable (no tokens) ---

# --- PGI_vertex_hints ---
GL_VERTEX_DATA_HINT_PGI = 107050
GL_VERTEX_CONSISTENT_HINT_PGI = 107051
GL_MATERIAL_SIDE_HINT_PGI = 107052
GL_MAX_VERTEX_HINT_PGI = 107053
GL_COLOR3_BIT_PGI = 65536
GL_COLOR4_BIT_PGI = 131072
GL_EDGEFLAG_BIT_PGI = 262144
GL_INDEX_BIT_PGI = 524288
GL_MAT_AMBIENT_BIT_PGI = 1048576
GL_MAT_AMBIENT_AND_DIFFUSE_BIT_PGI = 2097152
GL_MAT_DIFFUSE_BIT_PGI = 4194304
GL_MAT_EMISSION_BIT_PGI = 8388608
GL_MAT_COLOR_INDEXES_BIT_PGI = 16777216
GL_MAT_SHININESS_BIT_PGI = 33554432
GL_MAT_SPECULAR_BIT_PGI = 67108864
GL_NORMAL_BIT_PGI = 134217728
GL_TEXCOORD1_BIT_PGI = 268435456
GL_TEXCOORD2_BIT_PGI = 536870912
GL_TEXCOORD3_BIT_PGI = 1073741824
GL_TEXCOORD4_BIT_PGI = 2147483648
GL_VERTEX23_BIT_PGI = 4
GL_VERTEX4_BIT_PGI = 8

# --- PGI_misc_hints ---
GL_PREFER_DOUBLEBUFFER_HINT_PGI = 107000
GL_CONSERVE_MEMORY_HINT_PGI = 107005
GL_RECLAIM_MEMORY_HINT_PGI = 107006
GL_NATIVE_GRAPHICS_HANDLE_PGI = 107010
GL_NATIVE_GRAPHICS_BEGIN_HINT_PGI = 107011
GL_NATIVE_GRAPHICS_END_HINT_PGI = 107012
GL_ALWAYS_FAST_HINT_PGI = 107020
GL_ALWAYS_SOFT_HINT_PGI = 107021
GL_ALLOW_DRAW_OBJ_HINT_PGI = 107022
GL_ALLOW_DRAW_WIN_HINT_PGI = 107023
GL_ALLOW_DRAW_FRG_HINT_PGI = 107024
GL_ALLOW_DRAW_MEM_HINT_PGI = 107025
GL_STRICT_DEPTHFUNC_HINT_PGI = 107030
GL_STRICT_LIGHTING_HINT_PGI = 107031
GL_STRICT_SCISSOR_HINT_PGI = 107032
GL_FULL_STIPPLE_HINT_PGI = 107033
GL_CLIP_NEAR_HINT_PGI = 107040
GL_CLIP_FAR_HINT_PGI = 107041
GL_WIDE_LINE_HINT_PGI = 107042
GL_BACK_NORMALS_HINT_PGI = 107043

# --- EXT_paletted_texture ---
GL_COLOR_INDEX1_EXT = 32994
GL_COLOR_INDEX2_EXT = 32995
GL_COLOR_INDEX4_EXT = 32996
GL_COLOR_INDEX8_EXT = 32997
GL_COLOR_INDEX12_EXT = 32998
GL_COLOR_INDEX16_EXT = 32999
GL_TEXTURE_INDEX_SIZE_EXT = 33005

# --- EXT_clip_volume_hint ---
GL_CLIP_VOLUME_CLIPPING_HINT_EXT = 33008

# --- SGIX_list_priority ---
GL_LIST_PRIORITY_SGIX = 33154

# --- SGIX_ir_instrument1 ---
GL_IR_INSTRUMENT1_SGIX = 33151

# --- SGIX_calligraphic_fragment ---
GL_CALLIGRAPHIC_FRAGMENT_SGIX = 33155

# --- SGIX_texture_lod_bias ---
GL_TEXTURE_LOD_BIAS_S_SGIX = 33166
GL_TEXTURE_LOD_BIAS_T_SGIX = 33167
GL_TEXTURE_LOD_BIAS_R_SGIX = 33168

# --- SGIX_shadow_ambient ---
GL_SHADOW_AMBIENT_SGIX = 32959

# --- EXT_index_texture (no tokens) ---

# --- EXT_index_material ---
GL_INDEX_MATERIAL_EXT = 33208
GL_INDEX_MATERIAL_PARAMETER_EXT = 33209
GL_INDEX_MATERIAL_FACE_EXT = 33210

# --- EXT_index_func ---
GL_INDEX_TEST_EXT = 33205
GL_INDEX_TEST_FUNC_EXT = 33206
GL_INDEX_TEST_REF_EXT = 33207

# --- EXT_index_array_formats ---
GL_IUI_V2F_EXT = 33197
GL_IUI_V3F_EXT = 33198
GL_IUI_N3F_V2F_EXT = 33199
GL_IUI_N3F_V3F_EXT = 33200
GL_T2F_IUI_V2F_EXT = 33201
GL_T2F_IUI_V3F_EXT = 33202
GL_T2F_IUI_N3F_V2F_EXT = 33203
GL_T2F_IUI_N3F_V3F_EXT = 33204

# --- EXT_compiled_vertex_array ---
GL_ARRAY_ELEMENT_LOCK_FIRST_EXT = 33192
GL_ARRAY_ELEMENT_LOCK_COUNT_EXT = 33193

# --- EXT_cull_vertex ---
GL_CULL_VERTEX_EXT = 33194
GL_CULL_VERTEX_EYE_POSITION_EXT = 33195
GL_CULL_VERTEX_OBJECT_POSITION_EXT = 33196

# --- SGIX_ycrcb ---
GL_YCRCB_422_SGIX = 33211
GL_YCRCB_444_SGIX = 33212

# --- SGIX_fragment_lighting ---
GL_FRAGMENT_LIGHTING_SGIX = 33792
GL_FRAGMENT_COLOR_MATERIAL_SGIX = 33793
GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX = 33794
GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX = 33795
GL_MAX_FRAGMENT_LIGHTS_SGIX = 33796
GL_MAX_ACTIVE_LIGHTS_SGIX = 33797
GL_CURRENT_RASTER_NORMAL_SGIX = 33798
GL_LIGHT_ENV_MODE_SGIX = 33799
GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX = 33800
GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX = 33801
GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX = 33802
GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX = 33803
GL_FRAGMENT_LIGHT0_SGIX = 33804
GL_FRAGMENT_LIGHT1_SGIX = 33805
GL_FRAGMENT_LIGHT2_SGIX = 33806
GL_FRAGMENT_LIGHT3_SGIX = 33807
GL_FRAGMENT_LIGHT4_SGIX = 33808
GL_FRAGMENT_LIGHT5_SGIX = 33809
GL_FRAGMENT_LIGHT6_SGIX = 33810
GL_FRAGMENT_LIGHT7_SGIX = 33811

# --- IBM_rasterpos_clip ---
GL_RASTER_POSITION_UNCLIPPED_IBM = 103010

# --- HP_texture_lighting ---
GL_TEXTURE_LIGHTING_MODE_HP = 33127
GL_TEXTURE_POST_SPECULAR_HP = 33128
GL_TEXTURE_PRE_SPECULAR_HP = 33129

# --- EXT_draw_range_elements ---
GL_MAX_ELEMENTS_VERTICES_EXT = 33000
GL_MAX_ELEMENTS_INDICES_EXT = 33001

# --- WIN_phong_shading ---
GL_PHONG_WIN = 33002
GL_PHONG_HINT_WIN = 33003

# --- WIN_specular_fog ---
GL_FOG_SPECULAR_TEXTURE_WIN = 33004

# --- EXT_light_texture ---
GL_FRAGMENT_MATERIAL_EXT = 33609
GL_FRAGMENT_NORMAL_EXT = 33610
GL_FRAGMENT_COLOR_EXT = 33612
GL_ATTENUATION_EXT = 33613
GL_SHADOW_ATTENUATION_EXT = 33614
GL_TEXTURE_APPLICATION_MODE_EXT = 33615
GL_TEXTURE_LIGHT_EXT = 33616
GL_TEXTURE_MATERIAL_FACE_EXT = 33617
GL_TEXTURE_MATERIAL_PARAMETER_EXT = 33618

# --- SGIX_blend_alpha_minmax ---
GL_ALPHA_MIN_SGIX = 33568
GL_ALPHA_MAX_SGIX = 33569

# --- SGIX_impact_pixel_texture ---
GL_PIXEL_TEX_GEN_Q_CEILING_SGIX = 33156
GL_PIXEL_TEX_GEN_Q_ROUND_SGIX = 33157
GL_PIXEL_TEX_GEN_Q_FLOOR_SGIX = 33158
GL_PIXEL_TEX_GEN_ALPHA_REPLACE_SGIX = 33159
GL_PIXEL_TEX_GEN_ALPHA_NO_REPLACE_SGIX = 33160
GL_PIXEL_TEX_GEN_ALPHA_LS_SGIX = 33161
GL_PIXEL_TEX_GEN_ALPHA_MS_SGIX = 33162

# --- EXT_bgra ---
GL_BGR_EXT = 32992
GL_BGRA_EXT = 32993

# --- SGIX_async ---
GL_ASYNC_MARKER_SGIX = 33577

# --- SGIX_async_pixel ---
GL_ASYNC_TEX_IMAGE_SGIX = 33628
GL_ASYNC_DRAW_PIXELS_SGIX = 33629
GL_ASYNC_READ_PIXELS_SGIX = 33630
GL_MAX_ASYNC_TEX_IMAGE_SGIX = 33631
GL_MAX_ASYNC_DRAW_PIXELS_SGIX = 33632
GL_MAX_ASYNC_READ_PIXELS_SGIX = 33633

# --- SGIX_async_histogram ---
GL_ASYNC_HISTOGRAM_SGIX = 33580
GL_MAX_ASYNC_HISTOGRAM_SGIX = 33581

# --- INTEL_texture_scissor (no tokens) ---

# --- INTEL_parallel_arrays ---
GL_PARALLEL_ARRAYS_INTEL = 33780
GL_VERTEX_ARRAY_PARALLEL_POINTERS_INTEL = 33781
GL_NORMAL_ARRAY_PARALLEL_POINTERS_INTEL = 33782
GL_COLOR_ARRAY_PARALLEL_POINTERS_INTEL = 33783
GL_TEXTURE_COORD_ARRAY_PARALLEL_POINTERS_INTEL = 33784

# --- HP_occlusion_test ---
GL_OCCLUSION_TEST_HP = 33125
GL_OCCLUSION_TEST_RESULT_HP = 33126

# --- EXT_pixel_transform ---
GL_PIXEL_TRANSFORM_2D_EXT = 33584
GL_PIXEL_MAG_FILTER_EXT = 33585
GL_PIXEL_MIN_FILTER_EXT = 33586
GL_PIXEL_CUBIC_WEIGHT_EXT = 33587
GL_CUBIC_EXT = 33588
GL_AVERAGE_EXT = 33589
GL_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT = 33590
GL_MAX_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT = 33591
GL_PIXEL_TRANSFORM_2D_MATRIX_EXT = 33592

# --- EXT_pixel_transform_color_table (no tokens) ---

# --- EXT_shared_texture_palette ---
GL_SHARED_TEXTURE_PALETTE_EXT = 33275

# --- EXT_separate_specular_color ---
GL_LIGHT_MODEL_COLOR_CONTROL_EXT = 33272
GL_SINGLE_COLOR_EXT = 33273
GL_SEPARATE_SPECULAR_COLOR_EXT = 33274

# --- EXT_secondary_color ---
GL_COLOR_SUM_EXT = 33880
GL_CURRENT_SECONDARY_COLOR_EXT = 33881
GL_SECONDARY_COLOR_ARRAY_SIZE_EXT = 33882
GL_SECONDARY_COLOR_ARRAY_TYPE_EXT = 33883
GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT = 33884
GL_SECONDARY_COLOR_ARRAY_POINTER_EXT = 33885
GL_SECONDARY_COLOR_ARRAY_EXT = 33886

# --- EXT_texture_perturb_normal ---
GL_PERTURB_EXT = 34222
GL_TEXTURE_NORMAL_EXT = 34223

# --- EXT_multi_draw_arrays (no tokens) ---

# --- EXT_fog_coord ---
GL_FOG_COORDINATE_SOURCE_EXT = 33872
GL_FOG_COORDINATE_EXT = 33873
GL_FRAGMENT_DEPTH_EXT = 33874
GL_CURRENT_FOG_COORDINATE_EXT = 33875
GL_FOG_COORDINATE_ARRAY_TYPE_EXT = 33876
GL_FOG_COORDINATE_ARRAY_STRIDE_EXT = 33877
GL_FOG_COORDINATE_ARRAY_POINTER_EXT = 33878
GL_FOG_COORDINATE_ARRAY_EXT = 33879

# --- REND_screen_coordinates ---
GL_SCREEN_COORDINATES_REND = 33936
GL_INVERTED_SCREEN_W_REND = 33937

# --- EXT_coordinate_frame ---
GL_TANGENT_ARRAY_EXT = 33849
GL_BINORMAL_ARRAY_EXT = 33850
GL_CURRENT_TANGENT_EXT = 33851
GL_CURRENT_BINORMAL_EXT = 33852
GL_TANGENT_ARRAY_TYPE_EXT = 33854
GL_TANGENT_ARRAY_STRIDE_EXT = 33855
GL_BINORMAL_ARRAY_TYPE_EXT = 33856
GL_BINORMAL_ARRAY_STRIDE_EXT = 33857
GL_TANGENT_ARRAY_POINTER_EXT = 33858
GL_BINORMAL_ARRAY_POINTER_EXT = 33859
GL_MAP1_TANGENT_EXT = 33860
GL_MAP2_TANGENT_EXT = 33861
GL_MAP1_BINORMAL_EXT = 33862
GL_MAP2_BINORMAL_EXT = 33863

# --- EXT_texture_env_combine ---
GL_COMBINE_EXT = 34160
GL_COMBINE_RGB_EXT = 34161
GL_COMBINE_ALPHA_EXT = 34162
GL_RGB_SCALE_EXT = 34163
GL_ADD_SIGNED_EXT = 34164
GL_INTERPOLATE_EXT = 34165
GL_CONSTANT_EXT = 34166
GL_PRIMARY_COLOR_EXT = 34167
GL_PREVIOUS_EXT = 34168
GL_SOURCE0_RGB_EXT = 34176
GL_SOURCE1_RGB_EXT = 34177
GL_SOURCE2_RGB_EXT = 34178
GL_SOURCE0_ALPHA_EXT = 34184
GL_SOURCE1_ALPHA_EXT = 34185
GL_SOURCE2_ALPHA_EXT = 34186
GL_OPERAND0_RGB_EXT = 34192
GL_OPERAND1_RGB_EXT = 34193
GL_OPERAND2_RGB_EXT = 34194
GL_OPERAND0_ALPHA_EXT = 34200
GL_OPERAND1_ALPHA_EXT = 34201
GL_OPERAND2_ALPHA_EXT = 34202

# --- APPLE_specular_vector ---
GL_LIGHT_MODEL_SPECULAR_VECTOR_APPLE = 34224

# --- APPLE_transform_hint ---
GL_TRANSFORM_HINT_APPLE = 34225

# --- SGIX_fog_scale ---
GL_FOG_SCALE_SGIX = 33276
GL_FOG_SCALE_VALUE_SGIX = 33277

# --- SUNX_constant_data ---
GL_UNPACK_CONSTANT_DATA_SUNX = 33237
GL_TEXTURE_CONSTANT_DATA_SUNX = 33238

# --- SUN_global_alpha ---
GL_GLOBAL_ALPHA_SUN = 33241
GL_GLOBAL_ALPHA_FACTOR_SUN = 33242

# --- SUN_triangle_list ---
GL_RESTART_SUN = 1
GL_REPLACE_MIDDLE_SUN = 2
GL_REPLACE_OLDEST_SUN = 3
GL_TRIANGLE_LIST_SUN = 33239
GL_REPLACEMENT_CODE_SUN = 33240
GL_REPLACEMENT_CODE_ARRAY_SUN = 34240
GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN = 34241
GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN = 34242
GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN = 34243
GL_R1UI_V3F_SUN = 34244
GL_R1UI_C4UB_V3F_SUN = 34245
GL_R1UI_C3F_V3F_SUN = 34246
GL_R1UI_N3F_V3F_SUN = 34247
GL_R1UI_C4F_N3F_V3F_SUN = 34248
GL_R1UI_T2F_V3F_SUN = 34249
GL_R1UI_T2F_N3F_V3F_SUN = 34250
GL_R1UI_T2F_C4F_N3F_V3F_SUN = 34251

# --- SUN_vertex (no tokens) ---

# --- EXT_blend_func_separate ---
GL_BLEND_DST_RGB_EXT = 32968
GL_BLEND_SRC_RGB_EXT = 32969
GL_BLEND_DST_ALPHA_EXT = 32970
GL_BLEND_SRC_ALPHA_EXT = 32971

# --- INGR_color_clamp ---
GL_RED_MIN_CLAMP_INGR = 34144
GL_GREEN_MIN_CLAMP_INGR = 34145
GL_BLUE_MIN_CLAMP_INGR = 34146
GL_ALPHA_MIN_CLAMP_INGR = 34147
GL_RED_MAX_CLAMP_INGR = 34148
GL_GREEN_MAX_CLAMP_INGR = 34149
GL_BLUE_MAX_CLAMP_INGR = 34150
GL_ALPHA_MAX_CLAMP_INGR = 34151

# --- INGR_interlace_read ---
GL_INTERLACE_READ_INGR = 34152

# --- EXT_stencil_wrap ---
GL_INCR_WRAP_EXT = 34055
GL_DECR_WRAP_EXT = 34056

# --- EXT_422_pixels ---
GL_422_EXT = 32972
GL_422_REV_EXT = 32973
GL_422_AVERAGE_EXT = 32974
GL_422_REV_AVERAGE_EXT = 32975

# --- NV_texgen_reflection ---
GL_NORMAL_MAP_NV = 34065
GL_REFLECTION_MAP_NV = 34066

# --- EXT_texture_cube_map (NORMAL/REFLECTION share the NV values) ---
GL_NORMAL_MAP_EXT = 34065
GL_REFLECTION_MAP_EXT = 34066
GL_TEXTURE_CUBE_MAP_EXT = 34067
GL_TEXTURE_BINDING_CUBE_MAP_EXT = 34068
GL_TEXTURE_CUBE_MAP_POSITIVE_X_EXT = 34069
GL_TEXTURE_CUBE_MAP_NEGATIVE_X_EXT = 34070
GL_TEXTURE_CUBE_MAP_POSITIVE_Y_EXT = 34071
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_EXT = 34072
GL_TEXTURE_CUBE_MAP_POSITIVE_Z_EXT = 34073
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_EXT = 34074
GL_PROXY_TEXTURE_CUBE_MAP_EXT = 34075
GL_MAX_CUBE_MAP_TEXTURE_SIZE_EXT = 34076

# --- SUN_convolution_border_modes ---
GL_WRAP_BORDER_SUN = 33236

# --- EXT_texture_env_add (no tokens) ---

# --- EXT_texture_lod_bias ---
GL_MAX_TEXTURE_LOD_BIAS_EXT = 34045
GL_TEXTURE_FILTER_CONTROL_EXT = 34048
GL_TEXTURE_LOD_BIAS_EXT = 34049

# --- EXT_texture_filter_anisotropic ---
GL_TEXTURE_MAX_ANISOTROPY_EXT = 34046
GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT = 34047
# EXT_vertex_weighting (GL/glext.h:1925)
# NOTE: in glext.h the GL_MODELVIEW0_* tokens are #define aliases of the
# core GL_MODELVIEW* tokens.  The binding generator could not resolve the
# aliases and emitted 0, which is not a valid value for these enums
# (e.g. glMatrixMode(GL_MODELVIEW0_EXT) would receive 0 and fail).  The
# values below are the core equivalents from the Khronos OpenGL registry.
GL_MODELVIEW0_STACK_DEPTH_EXT = 2979  # alias of GL_MODELVIEW_STACK_DEPTH (0x0BA3)
GL_MODELVIEW1_STACK_DEPTH_EXT = 34050 # GL/glext.h:1927
GL_MODELVIEW0_MATRIX_EXT = 2982       # alias of GL_MODELVIEW_MATRIX (0x0BA6)
GL_MODELVIEW1_MATRIX_EXT = 34054      # GL/glext.h:1929
GL_VERTEX_WEIGHTING_EXT = 34057       # GL/glext.h:1930
GL_MODELVIEW0_EXT = 5888              # alias of GL_MODELVIEW (0x1700)
GL_MODELVIEW1_EXT = 34058             # GL/glext.h:1932
GL_CURRENT_VERTEX_WEIGHT_EXT = 34059  # GL/glext.h:1933
GL_VERTEX_WEIGHT_ARRAY_EXT = 34060    # GL/glext.h:1934
GL_VERTEX_WEIGHT_ARRAY_SIZE_EXT = 34061   # GL/glext.h:1935
GL_VERTEX_WEIGHT_ARRAY_TYPE_EXT = 34062   # GL/glext.h:1936
GL_VERTEX_WEIGHT_ARRAY_STRIDE_EXT = 34063 # GL/glext.h:1937
GL_VERTEX_WEIGHT_ARRAY_POINTER_EXT = 34064 # GL/glext.h:1938
# --- NV_light_max_exponent ---
GL_MAX_SHININESS_NV = 34052
GL_MAX_SPOT_EXPONENT_NV = 34053

# --- NV_vertex_array_range ---
GL_VERTEX_ARRAY_RANGE_NV = 34077
GL_VERTEX_ARRAY_RANGE_LENGTH_NV = 34078
GL_VERTEX_ARRAY_RANGE_VALID_NV = 34079
GL_MAX_VERTEX_ARRAY_RANGE_ELEMENT_NV = 34080
GL_VERTEX_ARRAY_RANGE_POINTER_NV = 34081

# --- NV_register_combiners (continued past this section) ---
GL_REGISTER_COMBINERS_NV = 34082
GL_VARIABLE_A_NV = 34083
GL_VARIABLE_B_NV = 34084
GL_VARIABLE_C_NV = 34085
GL_VARIABLE_D_NV = 34086
GL_VARIABLE_E_NV = 34087
GL_VARIABLE_F_NV = 34088
GL_VARIABLE_G_NV = 34089
GL_CONSTANT_COLOR0_NV = 34090
GL_CONSTANT_COLOR1_NV = 34091
GL_PRIMARY_COLOR_NV = 34092
GL_SECONDARY_COLOR_NV = 34093
GL_SPARE0_NV = 34094
GL_SPARE1_NV = 34095
GL_DISCARD_NV = 34096
GL_E_TIMES_F_NV = 34097
GL_SPARE0_PLUS_SECONDARY_COLOR_NV = 34098
GL_UNSIGNED_IDENTITY_NV = 34102
GL_UNSIGNED_INVERT_NV = 34103
GL_EXPAND_NORMAL_NV = 34104
GL_EXPAND_NEGATE_NV = 34105
GL_HALF_BIAS_NORMAL_NV = 34106
GL_HALF_BIAS_NEGATE_NV = 34107
GL_SIGNED_IDENTITY_NV = 34108
GL_SIGNED_NEGATE_NV = 34109
GL_SCALE_BY_TWO_NV = 34110
GL_SCALE_BY_FOUR_NV = 34111
GL_SCALE_BY_ONE_HALF_NV = 34112
GL_BIAS_BY_NEGATIVE_ONE_HALF_NV = 34113
GL_COMBINER_INPUT_NV = 34114
GL_COMBINER_MAPPING_NV = 34115
GL_COMBINER_COMPONENT_USAGE_NV = 34116
GL_COMBINER_AB_DOT_PRODUCT_NV = 34117
GL_COMBINER_CD_DOT_PRODUCT_NV = 34118
GL_COMBINER_MUX_SUM_NV = 34119 # GL/glext.h:1989
GL_COMBINER_SCALE_NV = 34120 # GL/glext.h:1990
GL_COMBINER_BIAS_NV = 34121 # GL/glext.h:1991
GL_COMBINER_AB_OUTPUT_NV = 34122 # GL/glext.h:1992
GL_COMBINER_CD_OUTPUT_NV = 34123 # GL/glext.h:1993
GL_COMBINER_SUM_OUTPUT_NV = 34124 # GL/glext.h:1994
GL_MAX_GENERAL_COMBINERS_NV = 34125 # GL/glext.h:1995
GL_NUM_GENERAL_COMBINERS_NV = 34126 # GL/glext.h:1996
GL_COLOR_SUM_CLAMP_NV = 34127 # GL/glext.h:1997
GL_COMBINER0_NV = 34128 # GL/glext.h:1998
GL_COMBINER1_NV = 34129 # GL/glext.h:1999
GL_COMBINER2_NV = 34130 # GL/glext.h:2000
GL_COMBINER3_NV = 34131 # GL/glext.h:2001
GL_COMBINER4_NV = 34132 # GL/glext.h:2002
GL_COMBINER5_NV = 34133 # GL/glext.h:2003
GL_COMBINER6_NV = 34134 # GL/glext.h:2004
GL_COMBINER7_NV = 34135 # GL/glext.h:2005
# NV_fog_distance (GL/glext.h:2013)
GL_FOG_DISTANCE_MODE_NV = 34138 # GL/glext.h:2014
GL_EYE_RADIAL_NV = 34139 # GL/glext.h:2015
GL_EYE_PLANE_ABSOLUTE_NV = 34140 # GL/glext.h:2016
# NV_texgen_emboss (GL/glext.h:2020)
GL_EMBOSS_LIGHT_NV = 34141 # GL/glext.h:2021
GL_EMBOSS_CONSTANT_NV = 34142 # GL/glext.h:2022
GL_EMBOSS_MAP_NV = 34143 # GL/glext.h:2023
# NV_blend_square (GL/glext.h:2026)
# NV_texture_env_combine4 (GL/glext.h:2029)
GL_COMBINE4_NV = 34051 # GL/glext.h:2030
GL_SOURCE3_RGB_NV = 34179 # GL/glext.h:2031
GL_SOURCE3_ALPHA_NV = 34187 # GL/glext.h:2032
GL_OPERAND3_RGB_NV = 34195 # GL/glext.h:2033
GL_OPERAND3_ALPHA_NV = 34203 # GL/glext.h:2034
# MESA_resize_buffers (GL/glext.h:2037)
# MESA_window_pos (GL/glext.h:2040)
# EXT_texture_compression_s3tc (GL/glext.h:2043)
GL_COMPRESSED_RGB_S3TC_DXT1_EXT = 33776 # GL/glext.h:2044
GL_COMPRESSED_RGBA_S3TC_DXT1_EXT = 33777 # GL/glext.h:2045
GL_COMPRESSED_RGBA_S3TC_DXT3_EXT = 33778 # GL/glext.h:2046
GL_COMPRESSED_RGBA_S3TC_DXT5_EXT = 33779 # GL/glext.h:2047
# IBM_cull_vertex (GL/glext.h:2050)
GL_CULL_VERTEX_IBM = 103050 # GL/glext.h:2051
# IBM_multimode_draw_arrays (GL/glext.h:2054)
# IBM_vertex_array_lists (GL/glext.h:2057)
GL_VERTEX_ARRAY_LIST_IBM = 103070 # GL/glext.h:2058
GL_NORMAL_ARRAY_LIST_IBM = 103071 # GL/glext.h:2059
GL_COLOR_ARRAY_LIST_IBM = 103072 # GL/glext.h:2060
GL_INDEX_ARRAY_LIST_IBM = 103073 # GL/glext.h:2061
GL_TEXTURE_COORD_ARRAY_LIST_IBM = 103074 # GL/glext.h:2062
GL_EDGE_FLAG_ARRAY_LIST_IBM = 103075 # GL/glext.h:2063
GL_FOG_COORDINATE_ARRAY_LIST_IBM = 103076 # GL/glext.h:2064
GL_SECONDARY_COLOR_ARRAY_LIST_IBM = 103077 # GL/glext.h:2065
GL_VERTEX_ARRAY_LIST_STRIDE_IBM = 103080 # GL/glext.h:2066
GL_NORMAL_ARRAY_LIST_STRIDE_IBM = 103081 # GL/glext.h:2067
GL_COLOR_ARRAY_LIST_STRIDE_IBM = 103082 # GL/glext.h:2068
GL_INDEX_ARRAY_LIST_STRIDE_IBM = 103083 # GL/glext.h:2069
GL_TEXTURE_COORD_ARRAY_LIST_STRIDE_IBM = 103084 # GL/glext.h:2070
GL_EDGE_FLAG_ARRAY_LIST_STRIDE_IBM = 103085 # GL/glext.h:2071
GL_FOG_COORDINATE_ARRAY_LIST_STRIDE_IBM = 103086 # GL/glext.h:2072
GL_SECONDARY_COLOR_ARRAY_LIST_STRIDE_IBM = 103087 # GL/glext.h:2073
# SGIX_subsample (GL/glext.h:2076)
GL_PACK_SUBSAMPLE_RATE_SGIX = 34208 # GL/glext.h:2077
GL_UNPACK_SUBSAMPLE_RATE_SGIX = 34209 # GL/glext.h:2078
GL_PIXEL_SUBSAMPLE_4444_SGIX = 34210 # GL/glext.h:2079
GL_PIXEL_SUBSAMPLE_2424_SGIX = 34211 # GL/glext.h:2080
GL_PIXEL_SUBSAMPLE_4242_SGIX = 34212 # GL/glext.h:2081
# SGIX_ycrcb_subsample (GL/glext.h:2084)
# SGIX_ycrcba (GL/glext.h:2087)
GL_YCRCB_SGIX = 33560 # GL/glext.h:2088
GL_YCRCBA_SGIX = 33561 # GL/glext.h:2089
# SGI_depth_pass_instrument (GL/glext.h:2092)
GL_DEPTH_PASS_INSTRUMENT_SGIX = 33552 # GL/glext.h:2093
GL_DEPTH_PASS_INSTRUMENT_COUNTERS_SGIX = 33553 # GL/glext.h:2094
GL_DEPTH_PASS_INSTRUMENT_MAX_SGIX = 33554 # GL/glext.h:2095
# 3DFX_texture_compression_FXT1 (GL/glext.h:2098)
GL_COMPRESSED_RGB_FXT1_3DFX = 34480 # GL/glext.h:2099
GL_COMPRESSED_RGBA_FXT1_3DFX = 34481 # GL/glext.h:2100
# 3DFX_multisample (GL/glext.h:2103)
GL_MULTISAMPLE_3DFX = 34482 # GL/glext.h:2104
GL_SAMPLE_BUFFERS_3DFX = 34483 # GL/glext.h:2105
GL_SAMPLES_3DFX = 34484 # GL/glext.h:2106
GL_MULTISAMPLE_BIT_3DFX = 536870912 # GL/glext.h:2107
# 3DFX_tbuffer (GL/glext.h:2110)
# EXT_multisample (GL/glext.h:2113)
GL_MULTISAMPLE_EXT = 32925 # GL/glext.h:2114
GL_SAMPLE_ALPHA_TO_MASK_EXT = 32926 # GL/glext.h:2115
GL_SAMPLE_ALPHA_TO_ONE_EXT = 32927 # GL/glext.h:2116
GL_SAMPLE_MASK_EXT = 32928 # GL/glext.h:2117
GL_1PASS_EXT = 32929 # GL/glext.h:2118
GL_2PASS_0_EXT = 32930 # GL/glext.h:2119
GL_2PASS_1_EXT = 32931 # GL/glext.h:2120
GL_4PASS_0_EXT = 32932 # GL/glext.h:2121
GL_4PASS_1_EXT = 32933 # GL/glext.h:2122
GL_4PASS_2_EXT = 32934 # GL/glext.h:2123
GL_4PASS_3_EXT = 32935 # GL/glext.h:2124
GL_SAMPLE_BUFFERS_EXT = 32936 # GL/glext.h:2125
GL_SAMPLES_EXT = 32937 # GL/glext.h:2126
GL_SAMPLE_MASK_VALUE_EXT = 32938 # GL/glext.h:2127
GL_SAMPLE_MASK_INVERT_EXT = 32939 # GL/glext.h:2128
GL_SAMPLE_PATTERN_EXT = 32940 # GL/glext.h:2129
GL_MULTISAMPLE_BIT_EXT = 536870912 # GL/glext.h:2130
# SGIX_vertex_preclip (GL/glext.h:2133)
GL_VERTEX_PRECLIP_SGIX = 33774 # GL/glext.h:2134
GL_VERTEX_PRECLIP_HINT_SGIX = 33775 # GL/glext.h:2135
# SGIX_convolution_accuracy (GL/glext.h:2138)
GL_CONVOLUTION_HINT_SGIX = 33558 # GL/glext.h:2139
# SGIX_resample (GL/glext.h:2142)
GL_PACK_RESAMPLE_SGIX = 33836 # GL/glext.h:2143
GL_UNPACK_RESAMPLE_SGIX = 33837 # GL/glext.h:2144
GL_RESAMPLE_REPLICATE_SGIX = 33838 # GL/glext.h:2145
GL_RESAMPLE_ZERO_FILL_SGIX = 33839 # GL/glext.h:2146
GL_RESAMPLE_DECIMATE_SGIX = 33840 # GL/glext.h:2147
# SGIS_point_line_texgen (GL/glext.h:2150)
GL_EYE_DISTANCE_TO_POINT_SGIS = 33264 # GL/glext.h:2151
GL_OBJECT_DISTANCE_TO_POINT_SGIS = 33265 # GL/glext.h:2152
GL_EYE_DISTANCE_TO_LINE_SGIS = 33266 # GL/glext.h:2153
GL_OBJECT_DISTANCE_TO_LINE_SGIS = 33267 # GL/glext.h:2154
GL_EYE_POINT_SGIS = 33268 # GL/glext.h:2155
GL_OBJECT_POINT_SGIS = 33269 # GL/glext.h:2156
GL_EYE_LINE_SGIS = 33270 # GL/glext.h:2157
GL_OBJECT_LINE_SGIS = 33271 # GL/glext.h:2158
# SGIS_texture_color_mask (GL/glext.h:2161)
GL_TEXTURE_COLOR_WRITEMASK_SGIS = 33263 # GL/glext.h:2162
# EXT_texture_env_dot3 (GL/glext.h:2165)
GL_DOT3_RGB_EXT = 34624 # GL/glext.h:2166
GL_DOT3_RGBA_EXT = 34625 # GL/glext.h:2167
# ATI_texture_mirror_once (GL/glext.h:2170)
GL_MIRROR_CLAMP_ATI = 34626 # GL/glext.h:2171
GL_MIRROR_CLAMP_TO_EDGE_ATI = 34627 # GL/glext.h:2172
# NV_fence (GL/glext.h:2175)
GL_ALL_COMPLETED_NV = 34034 # GL/glext.h:2176
GL_FENCE_STATUS_NV = 34035 # GL/glext.h:2177
GL_FENCE_CONDITION_NV = 34036 # GL/glext.h:2178
# IBM_texture_mirrored_repeat (GL/glext.h:2181)
GL_MIRRORED_REPEAT_IBM = 33648 # GL/glext.h:2182
# NV_evaluators (GL/glext.h:2185)
GL_EVAL_2D_NV = 34496 # GL/glext.h:2186
GL_EVAL_TRIANGULAR_2D_NV = 34497 # GL/glext.h:2187
GL_MAP_TESSELLATION_NV = 34498 # GL/glext.h:2188
GL_MAP_ATTRIB_U_ORDER_NV = 34499 # GL/glext.h:2189
GL_MAP_ATTRIB_V_ORDER_NV = 34500 # GL/glext.h:2190
GL_EVAL_FRACTIONAL_TESSELLATION_NV = 34501 # GL/glext.h:2191
GL_EVAL_VERTEX_ATTRIB0_NV = 34502 # GL/glext.h:2192
GL_EVAL_VERTEX_ATTRIB1_NV = 34503 # GL/glext.h:2193
GL_EVAL_VERTEX_ATTRIB2_NV = 34504 # GL/glext.h:2194
GL_EVAL_VERTEX_ATTRIB3_NV = 34505 # GL/glext.h:2195
GL_EVAL_VERTEX_ATTRIB4_NV = 34506 # GL/glext.h:2196
GL_EVAL_VERTEX_ATTRIB5_NV = 34507 # GL/glext.h:2197
GL_EVAL_VERTEX_ATTRIB6_NV = 34508 # GL/glext.h:2198
GL_EVAL_VERTEX_ATTRIB7_NV = 34509 # GL/glext.h:2199
GL_EVAL_VERTEX_ATTRIB8_NV = 34510 # GL/glext.h:2200
GL_EVAL_VERTEX_ATTRIB9_NV = 34511 # GL/glext.h:2201
GL_EVAL_VERTEX_ATTRIB10_NV = 34512 # GL/glext.h:2202
GL_EVAL_VERTEX_ATTRIB11_NV = 34513 # GL/glext.h:2203
GL_EVAL_VERTEX_ATTRIB12_NV = 34514 # GL/glext.h:2204
GL_EVAL_VERTEX_ATTRIB13_NV = 34515 # GL/glext.h:2205
GL_EVAL_VERTEX_ATTRIB14_NV = 34516 # GL/glext.h:2206
GL_EVAL_VERTEX_ATTRIB15_NV = 34517 # GL/glext.h:2207
GL_MAX_MAP_TESSELLATION_NV = 34518 # GL/glext.h:2208
GL_MAX_RATIONAL_EVAL_ORDER_NV = 34519 # GL/glext.h:2209
# NV_packed_depth_stencil (GL/glext.h:2212)
GL_DEPTH_STENCIL_NV = 34041 # GL/glext.h:2213
GL_UNSIGNED_INT_24_8_NV = 34042 # GL/glext.h:2214
# NV_register_combiners2 (GL/glext.h:2217)
GL_PER_STAGE_CONSTANTS_NV = 34101 # GL/glext.h:2218
# NV_texture_compression_vtc (GL/glext.h:2221)
# NV_texture_rectangle (GL/glext.h:2224)
GL_TEXTURE_RECTANGLE_NV = 34037 # GL/glext.h:2225
GL_TEXTURE_BINDING_RECTANGLE_NV = 34038 # GL/glext.h:2226
GL_PROXY_TEXTURE_RECTANGLE_NV = 34039 # GL/glext.h:2227
GL_MAX_RECTANGLE_TEXTURE_SIZE_NV = 34040 # GL/glext.h:2228
# NV_texture_shader (GL/glext.h:2231)
GL_OFFSET_TEXTURE_RECTANGLE_NV = 34380 # GL/glext.h:2232
GL_OFFSET_TEXTURE_RECTANGLE_SCALE_NV = 34381 # GL/glext.h:2233
GL_DOT_PRODUCT_TEXTURE_RECTANGLE_NV = 34382 # GL/glext.h:2234
GL_RGBA_UNSIGNED_DOT_PRODUCT_MAPPING_NV = 34521 # GL/glext.h:2235
GL_UNSIGNED_INT_S8_S8_8_8_NV = 34522 # GL/glext.h:2236
GL_UNSIGNED_INT_8_8_S8_S8_REV_NV = 34523 # GL/glext.h:2237
GL_DSDT_MAG_INTENSITY_NV = 34524 # GL/glext.h:2238
GL_SHADER_CONSISTENT_NV = 34525 # GL/glext.h:2239
GL_TEXTURE_SHADER_NV = 34526 # GL/glext.h:2240
GL_SHADER_OPERATION_NV = 34527 # GL/glext.h:2241
GL_CULL_MODES_NV = 34528 # GL/glext.h:2242
GL_OFFSET_TEXTURE_MATRIX_NV = 34529 # GL/glext.h:2243
GL_OFFSET_TEXTURE_SCALE_NV = 34530 # GL/glext.h:2244
GL_OFFSET_TEXTURE_BIAS_NV = 34531 # GL/glext.h:2245
GL_OFFSET_TEXTURE_2D_MATRIX_NV = 34529 # GL/glext.h:2246
GL_OFFSET_TEXTURE_2D_SCALE_NV = 34530 # GL/glext.h:2247
GL_OFFSET_TEXTURE_2D_BIAS_NV = 34531 # GL/glext.h:2248
GL_PREVIOUS_TEXTURE_INPUT_NV = 34532 # GL/glext.h:2249
GL_CONST_EYE_NV = 34533 # GL/glext.h:2250
GL_PASS_THROUGH_NV = 34534 # GL/glext.h:2251
GL_CULL_FRAGMENT_NV = 34535 # GL/glext.h:2252
GL_OFFSET_TEXTURE_2D_NV = 34536 # GL/glext.h:2253
GL_DEPENDENT_AR_TEXTURE_2D_NV = 34537 # GL/glext.h:2254
GL_DEPENDENT_GB_TEXTURE_2D_NV = 34538 # GL/glext.h:2255
GL_DOT_PRODUCT_NV = 34540 # GL/glext.h:2256
GL_DOT_PRODUCT_DEPTH_REPLACE_NV = 34541 # GL/glext.h:2257
GL_DOT_PRODUCT_TEXTURE_2D_NV = 34542 # GL/glext.h:2258
GL_DOT_PRODUCT_TEXTURE_CUBE_MAP_NV = 34544 # GL/glext.h:2259
GL_DOT_PRODUCT_DIFFUSE_CUBE_MAP_NV = 34545 # GL/glext.h:2260
GL_DOT_PRODUCT_REFLECT_CUBE_MAP_NV = 34546 # GL/glext.h:2261
GL_DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV = 34547 # GL/glext.h:2262
GL_HILO_NV = 34548 # GL/glext.h:2263
GL_DSDT_NV = 34549 # GL/glext.h:2264
GL_DSDT_MAG_NV = 34550 # GL/glext.h:2265
GL_DSDT_MAG_VIB_NV = 34551 # GL/glext.h:2266
GL_HILO16_NV = 34552 # GL/glext.h:2267
GL_SIGNED_HILO_NV = 34553 # GL/glext.h:2268
GL_SIGNED_HILO16_NV = 34554 # GL/glext.h:2269
GL_SIGNED_RGBA_NV = 34555 # GL/glext.h:2270
GL_SIGNED_RGBA8_NV = 34556 # GL/glext.h:2271
GL_SIGNED_RGB_NV = 34558 # GL/glext.h:2272
GL_SIGNED_RGB8_NV = 34559 # GL/glext.h:2273
GL_SIGNED_LUMINANCE_NV = 34561 # GL/glext.h:2274
GL_SIGNED_LUMINANCE8_NV = 34562 # GL/glext.h:2275
GL_SIGNED_LUMINANCE_ALPHA_NV = 34563 # GL/glext.h:2276
GL_SIGNED_LUMINANCE8_ALPHA8_NV = 34564 # GL/glext.h:2277
GL_SIGNED_ALPHA_NV = 34565 # GL/glext.h:2278
GL_SIGNED_ALPHA8_NV = 34566 # GL/glext.h:2279
GL_SIGNED_INTENSITY_NV = 34567 # GL/glext.h:2280
GL_SIGNED_INTENSITY8_NV = 34568 # GL/glext.h:2281
GL_DSDT8_NV = 34569 # GL/glext.h:2282
GL_DSDT8_MAG8_NV = 34570 # GL/glext.h:2283
GL_DSDT8_MAG8_INTENSITY8_NV = 34571 # GL/glext.h:2284
GL_SIGNED_RGB_UNSIGNED_ALPHA_NV = 34572 # GL/glext.h:2285
GL_SIGNED_RGB8_UNSIGNED_ALPHA8_NV = 34573 # GL/glext.h:2286
GL_HI_SCALE_NV = 34574 # GL/glext.h:2287
GL_LO_SCALE_NV = 34575 # GL/glext.h:2288
GL_DS_SCALE_NV = 34576 # GL/glext.h:2289
GL_DT_SCALE_NV = 34577 # GL/glext.h:2290
GL_MAGNITUDE_SCALE_NV = 34578 # GL/glext.h:2291
GL_VIBRANCE_SCALE_NV = 34579 # GL/glext.h:2292
GL_HI_BIAS_NV = 34580 # GL/glext.h:2293
GL_LO_BIAS_NV = 34581 # GL/glext.h:2294
GL_DS_BIAS_NV = 34582 # GL/glext.h:2295
GL_DT_BIAS_NV = 34583 # GL/glext.h:2296
GL_MAGNITUDE_BIAS_NV = 34584 # GL/glext.h:2297
GL_VIBRANCE_BIAS_NV = 34585 # GL/glext.h:2298
GL_TEXTURE_BORDER_VALUES_NV = 34586 # GL/glext.h:2299
GL_TEXTURE_HI_SIZE_NV = 34587 # GL/glext.h:2300
GL_TEXTURE_LO_SIZE_NV = 34588 # GL/glext.h:2301
GL_TEXTURE_DS_SIZE_NV = 34589 # GL/glext.h:2302
GL_TEXTURE_DT_SIZE_NV = 34590 # GL/glext.h:2303
GL_TEXTURE_MAG_SIZE_NV = 34591 # GL/glext.h:2304
# NV_texture_shader2 (GL/glext.h:2307)
GL_DOT_PRODUCT_TEXTURE_3D_NV = 34543 # GL/glext.h:2308
# NV_vertex_array_range2 (GL/glext.h:2311)
GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV = 34099 # GL/glext.h:2312
# NV_vertex_program (GL/glext.h:2315)
GL_VERTEX_PROGRAM_NV = 34336 # GL/glext.h:2316
GL_VERTEX_STATE_PROGRAM_NV = 34337 # GL/glext.h:2317
GL_ATTRIB_ARRAY_SIZE_NV = 34339 # GL/glext.h:2318
GL_ATTRIB_ARRAY_STRIDE_NV = 34340 # GL/glext.h:2319
GL_ATTRIB_ARRAY_TYPE_NV = 34341 # GL/glext.h:2320
GL_CURRENT_ATTRIB_NV = 34342 # GL/glext.h:2321
GL_PROGRAM_LENGTH_NV = 34343 # GL/glext.h:2322
GL_PROGRAM_STRING_NV = 34344 # GL/glext.h:2323
GL_MODELVIEW_PROJECTION_NV = 34345 # GL/glext.h:2324
GL_IDENTITY_NV = 34346 # GL/glext.h:2325
GL_INVERSE_NV = 34347 # GL/glext.h:2326
GL_TRANSPOSE_NV = 34348 # GL/glext.h:2327
GL_INVERSE_TRANSPOSE_NV = 34349 # GL/glext.h:2328
GL_MAX_TRACK_MATRIX_STACK_DEPTH_NV = 34350 # GL/glext.h:2329
GL_MAX_TRACK_MATRICES_NV = 34351 # GL/glext.h:2330
GL_MATRIX0_NV = 34352 # GL/glext.h:2331
GL_MATRIX1_NV = 34353 # GL/glext.h:2332
GL_MATRIX2_NV = 34354 # GL/glext.h:2333
GL_MATRIX3_NV = 34355 # GL/glext.h:2334
GL_MATRIX4_NV = 34356 # GL/glext.h:2335
GL_MATRIX5_NV = 34357 # GL/glext.h:2336
GL_MATRIX6_NV = 34358 # GL/glext.h:2337
GL_MATRIX7_NV = 34359 # GL/glext.h:2338
GL_CURRENT_MATRIX_STACK_DEPTH_NV = 34368 # GL/glext.h:2339
GL_CURRENT_MATRIX_NV = 34369 # GL/glext.h:2340
GL_VERTEX_PROGRAM_POINT_SIZE_NV = 34370 # GL/glext.h:2341
GL_VERTEX_PROGRAM_TWO_SIDE_NV = 34371 # GL/glext.h:2342
GL_PROGRAM_PARAMETER_NV = 34372 # GL/glext.h:2343
GL_ATTRIB_ARRAY_POINTER_NV = 34373 # GL/glext.h:2344
GL_PROGRAM_TARGET_NV = 34374 # GL/glext.h:2345
GL_PROGRAM_RESIDENT_NV = 34375 # GL/glext.h:2346
GL_TRACK_MATRIX_NV = 34376 # GL/glext.h:2347
GL_TRACK_MATRIX_TRANSFORM_NV = 34377 # GL/glext.h:2348
GL_VERTEX_PROGRAM_BINDING_NV = 34378 # GL/glext.h:2349
GL_PROGRAM_ERROR_POSITION_NV = 34379 # GL/glext.h:2350
GL_VERTEX_ATTRIB_ARRAY0_NV = 34384 # GL/glext.h:2351
GL_VERTEX_ATTRIB_ARRAY1_NV = 34385 # GL/glext.h:2352
GL_VERTEX_ATTRIB_ARRAY2_NV = 34386 # GL/glext.h:2353
GL_VERTEX_ATTRIB_ARRAY3_NV = 34387 # GL/glext.h:2354
GL_VERTEX_ATTRIB_ARRAY4_NV = 34388 # GL/glext.h:2355
GL_VERTEX_ATTRIB_ARRAY5_NV = 34389 # GL/glext.h:2356
GL_VERTEX_ATTRIB_ARRAY6_NV = 34390 # GL/glext.h:2357
GL_VERTEX_ATTRIB_ARRAY7_NV = 34391 # GL/glext.h:2358
GL_VERTEX_ATTRIB_ARRAY8_NV = 34392 # GL/glext.h:2359
GL_VERTEX_ATTRIB_ARRAY9_NV = 34393 # GL/glext.h:2360
GL_VERTEX_ATTRIB_ARRAY10_NV = 34394 # GL/glext.h:2361
GL_VERTEX_ATTRIB_ARRAY11_NV = 34395 # GL/glext.h:2362
GL_VERTEX_ATTRIB_ARRAY12_NV = 34396 # GL/glext.h:2363
GL_VERTEX_ATTRIB_ARRAY13_NV = 34397 # GL/glext.h:2364
GL_VERTEX_ATTRIB_ARRAY14_NV = 34398 # GL/glext.h:2365
GL_VERTEX_ATTRIB_ARRAY15_NV = 34399 # GL/glext.h:2366
GL_MAP1_VERTEX_ATTRIB0_4_NV = 34400 # GL/glext.h:2367
GL_MAP1_VERTEX_ATTRIB1_4_NV = 34401 # GL/glext.h:2368
GL_MAP1_VERTEX_ATTRIB2_4_NV = 34402 # GL/glext.h:2369
GL_MAP1_VERTEX_ATTRIB3_4_NV = 34403 # GL/glext.h:2370
GL_MAP1_VERTEX_ATTRIB4_4_NV = 34404 # GL/glext.h:2371
GL_MAP1_VERTEX_ATTRIB5_4_NV = 34405 # GL/glext.h:2372
GL_MAP1_VERTEX_ATTRIB6_4_NV = 34406 # GL/glext.h:2373
GL_MAP1_VERTEX_ATTRIB7_4_NV = 34407 # GL/glext.h:2374
GL_MAP1_VERTEX_ATTRIB8_4_NV = 34408 # GL/glext.h:2375
GL_MAP1_VERTEX_ATTRIB9_4_NV = 34409 # GL/glext.h:2376
GL_MAP1_VERTEX_ATTRIB10_4_NV = 34410 # GL/glext.h:2377
GL_MAP1_VERTEX_ATTRIB11_4_NV = 34411 # GL/glext.h:2378
GL_MAP1_VERTEX_ATTRIB12_4_NV = 34412 # GL/glext.h:2379
GL_MAP1_VERTEX_ATTRIB13_4_NV = 34413 # GL/glext.h:2380
GL_MAP1_VERTEX_ATTRIB14_4_NV = 34414 # GL/glext.h:2381
GL_MAP1_VERTEX_ATTRIB15_4_NV = 34415 # GL/glext.h:2382
GL_MAP2_VERTEX_ATTRIB0_4_NV = 34416 # GL/glext.h:2383
GL_MAP2_VERTEX_ATTRIB1_4_NV = 34417 # GL/glext.h:2384
GL_MAP2_VERTEX_ATTRIB2_4_NV = 34418 # GL/glext.h:2385
GL_MAP2_VERTEX_ATTRIB3_4_NV = 34419 # GL/glext.h:2386
GL_MAP2_VERTEX_ATTRIB4_4_NV = 34420 # GL/glext.h:2387
GL_MAP2_VERTEX_ATTRIB5_4_NV = 34421 # GL/glext.h:2388
GL_MAP2_VERTEX_ATTRIB6_4_NV = 34422 # GL/glext.h:2389
GL_MAP2_VERTEX_ATTRIB7_4_NV = 34423 # GL/glext.h:2390
GL_MAP2_VERTEX_ATTRIB8_4_NV = 34424 # GL/glext.h:2391
GL_MAP2_VERTEX_ATTRIB9_4_NV = 34425 # GL/glext.h:2392
GL_MAP2_VERTEX_ATTRIB10_4_NV = 34426 # GL/glext.h:2393
GL_MAP2_VERTEX_ATTRIB11_4_NV = 34427 # GL/glext.h:2394
GL_MAP2_VERTEX_ATTRIB12_4_NV = 34428 # GL/glext.h:2395
GL_MAP2_VERTEX_ATTRIB13_4_NV = 34429 # GL/glext.h:2396
GL_MAP2_VERTEX_ATTRIB14_4_NV = 34430 # GL/glext.h:2397
GL_MAP2_VERTEX_ATTRIB15_4_NV = 34431 # GL/glext.h:2398
# SGIX_texture_coordinate_clamp (GL/glext.h:2401)
GL_TEXTURE_MAX_CLAMP_S_SGIX = 33641 # GL/glext.h:2402
GL_TEXTURE_MAX_CLAMP_T_SGIX = 33642 # GL/glext.h:2403
GL_TEXTURE_MAX_CLAMP_R_SGIX = 33643 # GL/glext.h:2404
# SGIX_scalebias_hint (GL/glext.h:2407)
GL_SCALEBIAS_HINT_SGIX = 33570 # GL/glext.h:2408
# OML_interlace (GL/glext.h:2411)
GL_INTERLACE_OML = 35200 # GL/glext.h:2412
GL_INTERLACE_READ_OML = 35201 # GL/glext.h:2413
# OML_subsample (GL/glext.h:2416)
GL_FORMAT_SUBSAMPLE_24_24_OML = 35202 # GL/glext.h:2417
GL_FORMAT_SUBSAMPLE_244_244_OML = 35203 # GL/glext.h:2418
# OML_resample (GL/glext.h:2421)
GL_PACK_RESAMPLE_OML = 35204 # GL/glext.h:2422
GL_UNPACK_RESAMPLE_OML = 35205 # GL/glext.h:2423
GL_RESAMPLE_REPLICATE_OML = 35206 # GL/glext.h:2424
GL_RESAMPLE_ZERO_FILL_OML = 35207 # GL/glext.h:2425
GL_RESAMPLE_AVERAGE_OML = 35208 # GL/glext.h:2426
GL_RESAMPLE_DECIMATE_OML = 35209 # GL/glext.h:2427
# NV_copy_depth_to_color (GL/glext.h:2430)
GL_DEPTH_STENCIL_TO_RGBA_NV = 34926 # GL/glext.h:2431
GL_DEPTH_STENCIL_TO_BGRA_NV = 34927 # GL/glext.h:2432
# ATI_envmap_bumpmap (GL/glext.h:2435)
GL_BUMP_ROT_MATRIX_ATI = 34677 # GL/glext.h:2436
GL_BUMP_ROT_MATRIX_SIZE_ATI = 34678 # GL/glext.h:2437
GL_BUMP_NUM_TEX_UNITS_ATI = 34679 # GL/glext.h:2438
GL_BUMP_TEX_UNITS_ATI = 34680 # GL/glext.h:2439
GL_DUDV_ATI = 34681 # GL/glext.h:2440
GL_DU8DV8_ATI = 34682 # GL/glext.h:2441
GL_BUMP_ENVMAP_ATI = 34683 # GL/glext.h:2442
GL_BUMP_TARGET_ATI = 34684 # GL/glext.h:2443
# ATI_fragment_shader (GL/glext.h:2446)
GL_FRAGMENT_SHADER_ATI = 35104 # GL/glext.h:2447
GL_REG_0_ATI = 35105 # GL/glext.h:2448
GL_REG_1_ATI = 35106 # GL/glext.h:2449
GL_REG_2_ATI = 35107 # GL/glext.h:2450
GL_REG_3_ATI = 35108 # GL/glext.h:2451
GL_REG_4_ATI = 35109 # GL/glext.h:2452
GL_REG_5_ATI = 35110 # GL/glext.h:2453
GL_REG_6_ATI = 35111 # GL/glext.h:2454
GL_REG_7_ATI = 35112 # GL/glext.h:2455
GL_REG_8_ATI = 35113 # GL/glext.h:2456
GL_REG_9_ATI = 35114 # GL/glext.h:2457
GL_REG_10_ATI = 35115 # GL/glext.h:2458
GL_REG_11_ATI = 35116 # GL/glext.h:2459
GL_REG_12_ATI = 35117 # GL/glext.h:2460
GL_REG_13_ATI = 35118 # GL/glext.h:2461
GL_REG_14_ATI = 35119 # GL/glext.h:2462
GL_REG_15_ATI = 35120 # GL/glext.h:2463
GL_REG_16_ATI = 35121 # GL/glext.h:2464
GL_REG_17_ATI = 35122 # GL/glext.h:2465
GL_REG_18_ATI = 35123 # GL/glext.h:2466
GL_REG_19_ATI = 35124 # GL/glext.h:2467
GL_REG_20_ATI = 35125 # GL/glext.h:2468
GL_REG_21_ATI = 35126 # GL/glext.h:2469
GL_REG_22_ATI = 35127 # GL/glext.h:2470
GL_REG_23_ATI = 35128 # GL/glext.h:2471
GL_REG_24_ATI = 35129 # GL/glext.h:2472
GL_REG_25_ATI = 35130 # GL/glext.h:2473
GL_REG_26_ATI = 35131 # GL/glext.h:2474
GL_REG_27_ATI = 35132 # GL/glext.h:2475
GL_REG_28_ATI = 35133 # GL/glext.h:2476
GL_REG_29_ATI = 35134 # GL/glext.h:2477
GL_REG_30_ATI = 35135 # GL/glext.h:2478
GL_REG_31_ATI = 35136 # GL/glext.h:2479
GL_CON_0_ATI = 35137 # GL/glext.h:2480
GL_CON_1_ATI = 35138 # GL/glext.h:2481
GL_CON_2_ATI = 35139 # GL/glext.h:2482
GL_CON_3_ATI = 35140 # GL/glext.h:2483
GL_CON_4_ATI = 35141 # GL/glext.h:2484
GL_CON_5_ATI = 35142 # GL/glext.h:2485
GL_CON_6_ATI = 35143 # GL/glext.h:2486
GL_CON_7_ATI = 35144 # GL/glext.h:2487
GL_CON_8_ATI = 35145 # GL/glext.h:2488
GL_CON_9_ATI = 35146 # GL/glext.h:2489
GL_CON_10_ATI = 35147 # GL/glext.h:2490
GL_CON_11_ATI = 35148 # GL/glext.h:2491
GL_CON_12_ATI = 35149 # GL/glext.h:2492
GL_CON_13_ATI = 35150 # GL/glext.h:2493
GL_CON_14_ATI = 35151 # GL/glext.h:2494
GL_CON_15_ATI = 35152 # GL/glext.h:2495
GL_CON_16_ATI = 35153 # GL/glext.h:2496
GL_CON_17_ATI = 35154 # GL/glext.h:2497
GL_CON_18_ATI = 35155 # GL/glext.h:2498
GL_CON_19_ATI = 35156 # GL/glext.h:2499
GL_CON_20_ATI = 35157 # GL/glext.h:2500
GL_CON_21_ATI = 35158 # GL/glext.h:2501
GL_CON_22_ATI = 35159 # GL/glext.h:2502
GL_CON_23_ATI = 35160 # GL/glext.h:2503
GL_CON_24_ATI = 35161 # GL/glext.h:2504
GL_CON_25_ATI = 35162 # GL/glext.h:2505
GL_CON_26_ATI = 35163 # GL/glext.h:2506
GL_CON_27_ATI = 35164 # GL/glext.h:2507
GL_CON_28_ATI = 35165 # GL/glext.h:2508
GL_CON_29_ATI = 35166 # GL/glext.h:2509
GL_CON_30_ATI = 35167 # GL/glext.h:2510
GL_CON_31_ATI = 35168 # GL/glext.h:2511
GL_MOV_ATI = 35169 # GL/glext.h:2512
GL_ADD_ATI = 35171 # GL/glext.h:2513
GL_MUL_ATI = 35172 # GL/glext.h:2514
GL_SUB_ATI = 35173 # GL/glext.h:2515
GL_DOT3_ATI = 35174 # GL/glext.h:2516
GL_DOT4_ATI = 35175 # GL/glext.h:2517
GL_MAD_ATI = 35176 # GL/glext.h:2518
GL_LERP_ATI = 35177 # GL/glext.h:2519
GL_CND_ATI = 35178 # GL/glext.h:2520
GL_CND0_ATI = 35179 # GL/glext.h:2521
GL_DOT2_ADD_ATI = 35180 # GL/glext.h:2522
GL_SECONDARY_INTERPOLATOR_ATI = 35181 # GL/glext.h:2523
GL_NUM_FRAGMENT_REGISTERS_ATI = 35182 # GL/glext.h:2524
GL_NUM_FRAGMENT_CONSTANTS_ATI = 35183 # GL/glext.h:2525
GL_NUM_PASSES_ATI = 35184 # GL/glext.h:2526
GL_NUM_INSTRUCTIONS_PER_PASS_ATI = 35185 # GL/glext.h:2527
GL_NUM_INSTRUCTIONS_TOTAL_ATI = 35186 # GL/glext.h:2528
GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI = 35187 # GL/glext.h:2529
GL_NUM_LOOPBACK_COMPONENTS_ATI = 35188 # GL/glext.h:2530
GL_COLOR_ALPHA_PAIRING_ATI = 35189 # GL/glext.h:2531
GL_SWIZZLE_STR_ATI = 35190 # GL/glext.h:2532
GL_SWIZZLE_STQ_ATI = 35191 # GL/glext.h:2533
GL_SWIZZLE_STR_DR_ATI = 35192 # GL/glext.h:2534
GL_SWIZZLE_STQ_DQ_ATI = 35193 # GL/glext.h:2535
GL_SWIZZLE_STRQ_ATI = 35194 # GL/glext.h:2536
GL_SWIZZLE_STRQ_DQ_ATI = 35195 # GL/glext.h:2537
GL_RED_BIT_ATI = 1 # GL/glext.h:2538
GL_GREEN_BIT_ATI = 2 # GL/glext.h:2539
GL_BLUE_BIT_ATI = 4 # GL/glext.h:2540
GL_2X_BIT_ATI = 1 # GL/glext.h:2541
GL_4X_BIT_ATI = 2 # GL/glext.h:2542
GL_8X_BIT_ATI = 4 # GL/glext.h:2543
GL_HALF_BIT_ATI = 8 # GL/glext.h:2544
GL_QUARTER_BIT_ATI = 16 # GL/glext.h:2545
GL_EIGHTH_BIT_ATI = 32 # GL/glext.h:2546
GL_SATURATE_BIT_ATI = 64 # GL/glext.h:2547
GL_COMP_BIT_ATI = 2 # GL/glext.h:2548
GL_NEGATE_BIT_ATI = 4 # GL/glext.h:2549
GL_BIAS_BIT_ATI = 8 # GL/glext.h:2550
# ATI_pn_triangles (GL/glext.h:2553)
GL_PN_TRIANGLES_ATI = 34800 # GL/glext.h:2554
GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI = 34801 # GL/glext.h:2555
GL_PN_TRIANGLES_POINT_MODE_ATI = 34802 # GL/glext.h:2556
GL_PN_TRIANGLES_NORMAL_MODE_ATI = 34803 # GL/glext.h:2557
GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI = 34804 # GL/glext.h:2558
GL_PN_TRIANGLES_POINT_MODE_LINEAR_ATI = 34805 # GL/glext.h:2559
GL_PN_TRIANGLES_POINT_MODE_CUBIC_ATI = 34806 # GL/glext.h:2560
GL_PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI = 34807 # GL/glext.h:2561
GL_PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI = 34808 # GL/glext.h:2562
# ATI_vertex_array_object (GL/glext.h:2565)
GL_STATIC_ATI = 34656 # GL/glext.h:2566
GL_DYNAMIC_ATI = 34657 # GL/glext.h:2567
GL_PRESERVE_ATI = 34658 # GL/glext.h:2568
GL_DISCARD_ATI = 34659 # GL/glext.h:2569
GL_OBJECT_BUFFER_SIZE_ATI = 34660 # GL/glext.h:2570
GL_OBJECT_BUFFER_USAGE_ATI = 34661 # GL/glext.h:2571
GL_ARRAY_OBJECT_BUFFER_ATI = 34662 # GL/glext.h:2572
GL_ARRAY_OBJECT_OFFSET_ATI = 34663 # GL/glext.h:2573
# EXT_vertex_shader (GL/glext.h:2576)
GL_VERTEX_SHADER_EXT = 34688 # GL/glext.h:2577
GL_VERTEX_SHADER_BINDING_EXT = 34689 # GL/glext.h:2578
GL_OP_INDEX_EXT = 34690 # GL/glext.h:2579
GL_OP_NEGATE_EXT = 34691 # GL/glext.h:2580
GL_OP_DOT3_EXT = 34692 # GL/glext.h:2581
GL_OP_DOT4_EXT = 34693 # GL/glext.h:2582
GL_OP_MUL_EXT = 34694 # GL/glext.h:2583
GL_OP_ADD_EXT = 34695 # GL/glext.h:2584
GL_OP_MADD_EXT = 34696 # GL/glext.h:2585
GL_OP_FRAC_EXT = 34697 # GL/glext.h:2586
GL_OP_MAX_EXT = 34698 # GL/glext.h:2587
GL_OP_MIN_EXT = 34699 # GL/glext.h:2588
GL_OP_SET_GE_EXT = 34700 # GL/glext.h:2589
GL_OP_SET_LT_EXT = 34701 # GL/glext.h:2590
GL_OP_CLAMP_EXT = 34702 # GL/glext.h:2591
GL_OP_FLOOR_EXT = 34703 # GL/glext.h:2592
GL_OP_ROUND_EXT = 34704 # GL/glext.h:2593
GL_OP_EXP_BASE_2_EXT = 34705 # GL/glext.h:2594
GL_OP_LOG_BASE_2_EXT = 34706 # GL/glext.h:2595
GL_OP_POWER_EXT = 34707 # GL/glext.h:2596
GL_OP_RECIP_EXT = 34708 # GL/glext.h:2597
GL_OP_RECIP_SQRT_EXT = 34709 # GL/glext.h:2598
GL_OP_SUB_EXT = 34710 # GL/glext.h:2599
GL_OP_CROSS_PRODUCT_EXT = 34711 # GL/glext.h:2600
GL_OP_MULTIPLY_MATRIX_EXT = 34712 # GL/glext.h:2601
GL_OP_MOV_EXT = 34713 # GL/glext.h:2602
GL_OUTPUT_VERTEX_EXT = 34714 # GL/glext.h:2603
GL_OUTPUT_COLOR0_EXT = 34715 # GL/glext.h:2604
GL_OUTPUT_COLOR1_EXT = 34716 # GL/glext.h:2605
GL_OUTPUT_TEXTURE_COORD0_EXT = 34717 # GL/glext.h:2606
GL_OUTPUT_TEXTURE_COORD1_EXT = 34718 # GL/glext.h:2607
GL_OUTPUT_TEXTURE_COORD2_EXT = 34719 # GL/glext.h:2608
GL_OUTPUT_TEXTURE_COORD3_EXT = 34720 # GL/glext.h:2609
GL_OUTPUT_TEXTURE_COORD4_EXT = 34721 # GL/glext.h:2610
GL_OUTPUT_TEXTURE_COORD5_EXT = 34722 # GL/glext.h:2611
GL_OUTPUT_TEXTURE_COORD6_EXT = 34723 # GL/glext.h:2612
GL_OUTPUT_TEXTURE_COORD7_EXT = 34724 # GL/glext.h:2613
GL_OUTPUT_TEXTURE_COORD8_EXT = 34725 # GL/glext.h:2614
GL_OUTPUT_TEXTURE_COORD9_EXT = 34726 # GL/glext.h:2615
GL_OUTPUT_TEXTURE_COORD10_EXT = 34727 # GL/glext.h:2616
GL_OUTPUT_TEXTURE_COORD11_EXT = 34728 # GL/glext.h:2617
GL_OUTPUT_TEXTURE_COORD12_EXT = 34729 # GL/glext.h:2618
GL_OUTPUT_TEXTURE_COORD13_EXT = 34730 # GL/glext.h:2619
GL_OUTPUT_TEXTURE_COORD14_EXT = 34731 # GL/glext.h:2620
GL_OUTPUT_TEXTURE_COORD15_EXT = 34732 # GL/glext.h:2621
GL_OUTPUT_TEXTURE_COORD16_EXT = 34733 # GL/glext.h:2622
GL_OUTPUT_TEXTURE_COORD17_EXT = 34734 # GL/glext.h:2623
GL_OUTPUT_TEXTURE_COORD18_EXT = 34735 # GL/glext.h:2624
GL_OUTPUT_TEXTURE_COORD19_EXT = 34736 # GL/glext.h:2625
GL_OUTPUT_TEXTURE_COORD20_EXT = 34737 # GL/glext.h:2626
GL_OUTPUT_TEXTURE_COORD21_EXT = 34738 # GL/glext.h:2627
GL_OUTPUT_TEXTURE_COORD22_EXT = 34739 # GL/glext.h:2628
GL_OUTPUT_TEXTURE_COORD23_EXT = 34740 # GL/glext.h:2629
GL_OUTPUT_TEXTURE_COORD24_EXT = 34741 # GL/glext.h:2630
GL_OUTPUT_TEXTURE_COORD25_EXT = 34742 # GL/glext.h:2631
GL_OUTPUT_TEXTURE_COORD26_EXT = 34743 # GL/glext.h:2632
GL_OUTPUT_TEXTURE_COORD27_EXT = 34744 # GL/glext.h:2633
GL_OUTPUT_TEXTURE_COORD28_EXT = 34745 # GL/glext.h:2634
GL_OUTPUT_TEXTURE_COORD29_EXT = 34746 # GL/glext.h:2635
GL_OUTPUT_TEXTURE_COORD30_EXT = 34747 # GL/glext.h:2636
GL_OUTPUT_TEXTURE_COORD31_EXT = 34748 # GL/glext.h:2637
GL_OUTPUT_FOG_EXT = 34749 # GL/glext.h:2638
GL_SCALAR_EXT = 34750 # GL/glext.h:2639
GL_VECTOR_EXT = 34751 # GL/glext.h:2640
GL_MATRIX_EXT = 34752 # GL/glext.h:2641
GL_VARIANT_EXT = 34753 # GL/glext.h:2642
GL_INVARIANT_EXT = 34754 # GL/glext.h:2643
GL_LOCAL_CONSTANT_EXT = 34755 # GL/glext.h:2644
GL_LOCAL_EXT = 34756 # GL/glext.h:2645
GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT = 34757 # GL/glext.h:2646
GL_MAX_VERTEX_SHADER_VARIANTS_EXT = 34758 # GL/glext.h:2647
GL_MAX_VERTEX_SHADER_INVARIANTS_EXT = 34759 # GL/glext.h:2648
GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT = 34760 # GL/glext.h:2649
GL_MAX_VERTEX_SHADER_LOCALS_EXT = 34761 # GL/glext.h:2650
GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT = 34762 # GL/glext.h:2651
GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT = 34763 # GL/glext.h:2652
GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT = 34764 # GL/glext.h:2653
GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT = 34765 # GL/glext.h:2654
GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT = 34766 # GL/glext.h:2655
GL_VERTEX_SHADER_INSTRUCTIONS_EXT = 34767 # GL/glext.h:2656
GL_VERTEX_SHADER_VARIANTS_EXT = 34768 # GL/glext.h:2657
GL_VERTEX_SHADER_INVARIANTS_EXT = 34769 # GL/glext.h:2658
GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT = 34770 # GL/glext.h:2659
GL_VERTEX_SHADER_LOCALS_EXT = 34771 # GL/glext.h:2660
GL_VERTEX_SHADER_OPTIMIZED_EXT = 34772 # GL/glext.h:2661
GL_X_EXT = 34773 # GL/glext.h:2662
GL_Y_EXT = 34774 # GL/glext.h:2663
GL_Z_EXT = 34775 # GL/glext.h:2664
GL_W_EXT = 34776 # GL/glext.h:2665
GL_NEGATIVE_X_EXT = 34777 # GL/glext.h:2666
GL_NEGATIVE_Y_EXT = 34778 # GL/glext.h:2667
GL_NEGATIVE_Z_EXT = 34779 # GL/glext.h:2668
# --- OpenGL extension enumerants (autogenerated from GL/glext.h) ---
# Each constant is a GLenum value in decimal; the trailing comment records the
# header line it was generated from.  Do not edit by hand — regenerate instead.
# The constants up to GL_LOCAL_CONSTANT_DATATYPE_EXT belong to an extension
# group whose section comment appears earlier in the file (EXT_vertex_shader,
# presumably — confirm against the full file).
GL_NEGATIVE_W_EXT = 34780 # GL/glext.h:2669
GL_ZERO_EXT = 34781 # GL/glext.h:2670
GL_ONE_EXT = 34782 # GL/glext.h:2671
GL_NEGATIVE_ONE_EXT = 34783 # GL/glext.h:2672
GL_NORMALIZED_RANGE_EXT = 34784 # GL/glext.h:2673
GL_FULL_RANGE_EXT = 34785 # GL/glext.h:2674
GL_CURRENT_VERTEX_EXT = 34786 # GL/glext.h:2675
GL_MVP_MATRIX_EXT = 34787 # GL/glext.h:2676
GL_VARIANT_VALUE_EXT = 34788 # GL/glext.h:2677
GL_VARIANT_DATATYPE_EXT = 34789 # GL/glext.h:2678
GL_VARIANT_ARRAY_STRIDE_EXT = 34790 # GL/glext.h:2679
GL_VARIANT_ARRAY_TYPE_EXT = 34791 # GL/glext.h:2680
GL_VARIANT_ARRAY_EXT = 34792 # GL/glext.h:2681
GL_VARIANT_ARRAY_POINTER_EXT = 34793 # GL/glext.h:2682
GL_INVARIANT_VALUE_EXT = 34794 # GL/glext.h:2683
GL_INVARIANT_DATATYPE_EXT = 34795 # GL/glext.h:2684
GL_LOCAL_CONSTANT_VALUE_EXT = 34796 # GL/glext.h:2685
GL_LOCAL_CONSTANT_DATATYPE_EXT = 34797 # GL/glext.h:2686
# ATI_vertex_streams (GL/glext.h:2689)
GL_MAX_VERTEX_STREAMS_ATI = 34667 # GL/glext.h:2690
GL_VERTEX_STREAM0_ATI = 34668 # GL/glext.h:2691
GL_VERTEX_STREAM1_ATI = 34669 # GL/glext.h:2692
GL_VERTEX_STREAM2_ATI = 34670 # GL/glext.h:2693
GL_VERTEX_STREAM3_ATI = 34671 # GL/glext.h:2694
GL_VERTEX_STREAM4_ATI = 34672 # GL/glext.h:2695
GL_VERTEX_STREAM5_ATI = 34673 # GL/glext.h:2696
GL_VERTEX_STREAM6_ATI = 34674 # GL/glext.h:2697
GL_VERTEX_STREAM7_ATI = 34675 # GL/glext.h:2698
GL_VERTEX_SOURCE_ATI = 34676 # GL/glext.h:2699
# ATI_element_array (GL/glext.h:2702)
GL_ELEMENT_ARRAY_ATI = 34664 # GL/glext.h:2703
GL_ELEMENT_ARRAY_TYPE_ATI = 34665 # GL/glext.h:2704
GL_ELEMENT_ARRAY_POINTER_ATI = 34666 # GL/glext.h:2705
# SUN_mesh_array (GL/glext.h:2708)
GL_QUAD_MESH_SUN = 34324 # GL/glext.h:2709
GL_TRIANGLE_MESH_SUN = 34325 # GL/glext.h:2710
# SUN_slice_accum (GL/glext.h:2713)
GL_SLICE_ACCUM_SUN = 34252 # GL/glext.h:2714
# NV_multisample_filter_hint (GL/glext.h:2717)
GL_MULTISAMPLE_FILTER_HINT_NV = 34100 # GL/glext.h:2718
# NV_depth_clamp (GL/glext.h:2721)
GL_DEPTH_CLAMP_NV = 34383 # GL/glext.h:2722
# NV_occlusion_query (GL/glext.h:2725)
GL_PIXEL_COUNTER_BITS_NV = 34916 # GL/glext.h:2726
GL_CURRENT_OCCLUSION_QUERY_ID_NV = 34917 # GL/glext.h:2727
GL_PIXEL_COUNT_NV = 34918 # GL/glext.h:2728
GL_PIXEL_COUNT_AVAILABLE_NV = 34919 # GL/glext.h:2729
# NV_point_sprite (GL/glext.h:2732)
GL_POINT_SPRITE_NV = 34913 # GL/glext.h:2733
GL_COORD_REPLACE_NV = 34914 # GL/glext.h:2734
GL_POINT_SPRITE_R_MODE_NV = 34915 # GL/glext.h:2735
# NV_texture_shader3 (GL/glext.h:2738)
GL_OFFSET_PROJECTIVE_TEXTURE_2D_NV = 34896 # GL/glext.h:2739
GL_OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV = 34897 # GL/glext.h:2740
GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV = 34898 # GL/glext.h:2741
GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_SCALE_NV = 34899 # GL/glext.h:2742
GL_OFFSET_HILO_TEXTURE_2D_NV = 34900 # GL/glext.h:2743
GL_OFFSET_HILO_TEXTURE_RECTANGLE_NV = 34901 # GL/glext.h:2744
GL_OFFSET_HILO_PROJECTIVE_TEXTURE_2D_NV = 34902 # GL/glext.h:2745
GL_OFFSET_HILO_PROJECTIVE_TEXTURE_RECTANGLE_NV = 34903 # GL/glext.h:2746
GL_DEPENDENT_HILO_TEXTURE_2D_NV = 34904 # GL/glext.h:2747
GL_DEPENDENT_RGB_TEXTURE_3D_NV = 34905 # GL/glext.h:2748
GL_DEPENDENT_RGB_TEXTURE_CUBE_MAP_NV = 34906 # GL/glext.h:2749
GL_DOT_PRODUCT_PASS_THROUGH_NV = 34907 # GL/glext.h:2750
GL_DOT_PRODUCT_TEXTURE_1D_NV = 34908 # GL/glext.h:2751
GL_DOT_PRODUCT_AFFINE_DEPTH_REPLACE_NV = 34909 # GL/glext.h:2752
GL_HILO8_NV = 34910 # GL/glext.h:2753
GL_SIGNED_HILO8_NV = 34911 # GL/glext.h:2754
GL_FORCE_BLUE_TO_ONE_NV = 34912 # GL/glext.h:2755
# NV_vertex_program1_1 (GL/glext.h:2758)
# EXT_shadow_funcs (GL/glext.h:2761)
# EXT_stencil_two_side (GL/glext.h:2764)
GL_STENCIL_TEST_TWO_SIDE_EXT = 35088 # GL/glext.h:2765
GL_ACTIVE_STENCIL_FACE_EXT = 35089 # GL/glext.h:2766
# ATI_text_fragment_shader (GL/glext.h:2769)
GL_TEXT_FRAGMENT_SHADER_ATI = 33280 # GL/glext.h:2770
# APPLE_client_storage (GL/glext.h:2773)
GL_UNPACK_CLIENT_STORAGE_APPLE = 34226 # GL/glext.h:2774
# APPLE_element_array (GL/glext.h:2777)
# NOTE: the APPLE and ATI element-array extensions deliberately share the
# same enumerant values (34664-34666) — this aliasing comes from glext.h.
GL_ELEMENT_ARRAY_APPLE = 34664 # GL/glext.h:2778
GL_ELEMENT_ARRAY_TYPE_APPLE = 34665 # GL/glext.h:2779
GL_ELEMENT_ARRAY_POINTER_APPLE = 34666 # GL/glext.h:2780
# APPLE_fence (GL/glext.h:2783)
GL_DRAW_PIXELS_APPLE = 35338 # GL/glext.h:2784
GL_FENCE_APPLE = 35339 # GL/glext.h:2785
# APPLE_vertex_array_object (GL/glext.h:2788)
GL_VERTEX_ARRAY_BINDING_APPLE = 34229 # GL/glext.h:2789
# APPLE_vertex_array_range (GL/glext.h:2792)
GL_VERTEX_ARRAY_RANGE_APPLE = 34077 # GL/glext.h:2793
GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE = 34078 # GL/glext.h:2794
GL_VERTEX_ARRAY_STORAGE_HINT_APPLE = 34079 # GL/glext.h:2795
GL_VERTEX_ARRAY_RANGE_POINTER_APPLE = 34081 # GL/glext.h:2796
GL_STORAGE_CACHED_APPLE = 34238 # GL/glext.h:2797
GL_STORAGE_SHARED_APPLE = 34239 # GL/glext.h:2798
# APPLE_ycbcr_422 (GL/glext.h:2801)
GL_YCBCR_422_APPLE = 34233 # GL/glext.h:2802
GL_UNSIGNED_SHORT_8_8_APPLE = 34234 # GL/glext.h:2803
GL_UNSIGNED_SHORT_8_8_REV_APPLE = 34235 # GL/glext.h:2804
# S3_s3tc (GL/glext.h:2807)
GL_RGB_S3TC = 33696 # GL/glext.h:2808
GL_RGB4_S3TC = 33697 # GL/glext.h:2809
GL_RGBA_S3TC = 33698 # GL/glext.h:2810
GL_RGBA4_S3TC = 33699 # GL/glext.h:2811
# ATI_draw_buffers (GL/glext.h:2814)
GL_MAX_DRAW_BUFFERS_ATI = 34852 # GL/glext.h:2815
GL_DRAW_BUFFER0_ATI = 34853 # GL/glext.h:2816
GL_DRAW_BUFFER1_ATI = 34854 # GL/glext.h:2817
GL_DRAW_BUFFER2_ATI = 34855 # GL/glext.h:2818
GL_DRAW_BUFFER3_ATI = 34856 # GL/glext.h:2819
GL_DRAW_BUFFER4_ATI = 34857 # GL/glext.h:2820
GL_DRAW_BUFFER5_ATI = 34858 # GL/glext.h:2821
GL_DRAW_BUFFER6_ATI = 34859 # GL/glext.h:2822
GL_DRAW_BUFFER7_ATI = 34860 # GL/glext.h:2823
GL_DRAW_BUFFER8_ATI = 34861 # GL/glext.h:2824
GL_DRAW_BUFFER9_ATI = 34862 # GL/glext.h:2825
GL_DRAW_BUFFER10_ATI = 34863 # GL/glext.h:2826
GL_DRAW_BUFFER11_ATI = 34864 # GL/glext.h:2827
GL_DRAW_BUFFER12_ATI = 34865 # GL/glext.h:2828
GL_DRAW_BUFFER13_ATI = 34866 # GL/glext.h:2829
GL_DRAW_BUFFER14_ATI = 34867 # GL/glext.h:2830
GL_DRAW_BUFFER15_ATI = 34868 # GL/glext.h:2831
# ATI_pixel_format_float (GL/glext.h:2834)
GL_TYPE_RGBA_FLOAT_ATI = 34848 # GL/glext.h:2835
GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI = 34869 # GL/glext.h:2836
# ATI_texture_env_combine3 (GL/glext.h:2839)
GL_MODULATE_ADD_ATI = 34628 # GL/glext.h:2840
GL_MODULATE_SIGNED_ADD_ATI = 34629 # GL/glext.h:2841
GL_MODULATE_SUBTRACT_ATI = 34630 # GL/glext.h:2842
# ATI_texture_float (GL/glext.h:2845)
GL_RGBA_FLOAT32_ATI = 34836 # GL/glext.h:2846
GL_RGB_FLOAT32_ATI = 34837 # GL/glext.h:2847
GL_ALPHA_FLOAT32_ATI = 34838 # GL/glext.h:2848
GL_INTENSITY_FLOAT32_ATI = 34839 # GL/glext.h:2849
GL_LUMINANCE_FLOAT32_ATI = 34840 # GL/glext.h:2850
GL_LUMINANCE_ALPHA_FLOAT32_ATI = 34841 # GL/glext.h:2851
GL_RGBA_FLOAT16_ATI = 34842 # GL/glext.h:2852
GL_RGB_FLOAT16_ATI = 34843 # GL/glext.h:2853
GL_ALPHA_FLOAT16_ATI = 34844 # GL/glext.h:2854
GL_INTENSITY_FLOAT16_ATI = 34845 # GL/glext.h:2855
GL_LUMINANCE_FLOAT16_ATI = 34846 # GL/glext.h:2856
GL_LUMINANCE_ALPHA_FLOAT16_ATI = 34847 # GL/glext.h:2857
# NV_float_buffer (GL/glext.h:2860)
GL_FLOAT_R_NV = 34944 # GL/glext.h:2861
GL_FLOAT_RG_NV = 34945 # GL/glext.h:2862
GL_FLOAT_RGB_NV = 34946 # GL/glext.h:2863
GL_FLOAT_RGBA_NV = 34947 # GL/glext.h:2864
GL_FLOAT_R16_NV = 34948 # GL/glext.h:2865
GL_FLOAT_R32_NV = 34949 # GL/glext.h:2866
GL_FLOAT_RG16_NV = 34950 # GL/glext.h:2867
GL_FLOAT_RG32_NV = 34951 # GL/glext.h:2868
GL_FLOAT_RGB16_NV = 34952 # GL/glext.h:2869
GL_FLOAT_RGB32_NV = 34953 # GL/glext.h:2870
GL_FLOAT_RGBA16_NV = 34954 # GL/glext.h:2871
GL_FLOAT_RGBA32_NV = 34955 # GL/glext.h:2872
GL_TEXTURE_FLOAT_COMPONENTS_NV = 34956 # GL/glext.h:2873
GL_FLOAT_CLEAR_COLOR_VALUE_NV = 34957 # GL/glext.h:2874
GL_FLOAT_RGBA_MODE_NV = 34958 # GL/glext.h:2875
# NV_fragment_program (GL/glext.h:2878)
GL_MAX_FRAGMENT_PROGRAM_LOCAL_PARAMETERS_NV = 34920 # GL/glext.h:2879
GL_FRAGMENT_PROGRAM_NV = 34928 # GL/glext.h:2880
GL_MAX_TEXTURE_COORDS_NV = 34929 # GL/glext.h:2881
GL_MAX_TEXTURE_IMAGE_UNITS_NV = 34930 # GL/glext.h:2882
GL_FRAGMENT_PROGRAM_BINDING_NV = 34931 # GL/glext.h:2883
GL_PROGRAM_ERROR_STRING_NV = 34932 # GL/glext.h:2884
# NV_half_float (GL/glext.h:2887)
GL_HALF_FLOAT_NV = 5131 # GL/glext.h:2888
# NV_pixel_data_range (GL/glext.h:2891)
GL_WRITE_PIXEL_DATA_RANGE_NV = 34936 # GL/glext.h:2892
GL_READ_PIXEL_DATA_RANGE_NV = 34937 # GL/glext.h:2893
GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV = 34938 # GL/glext.h:2894
GL_READ_PIXEL_DATA_RANGE_LENGTH_NV = 34939 # GL/glext.h:2895
GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV = 34940 # GL/glext.h:2896
GL_READ_PIXEL_DATA_RANGE_POINTER_NV = 34941 # GL/glext.h:2897
# NV_primitive_restart (GL/glext.h:2900)
GL_PRIMITIVE_RESTART_NV = 34136 # GL/glext.h:2901
GL_PRIMITIVE_RESTART_INDEX_NV = 34137 # GL/glext.h:2902
# NV_texture_expand_normal (GL/glext.h:2905)
GL_TEXTURE_UNSIGNED_REMAP_MODE_NV = 34959 # GL/glext.h:2906
# NV_vertex_program2 (GL/glext.h:2909)
# ATI_map_object_buffer (GL/glext.h:2912)
# ATI_separate_stencil (GL/glext.h:2915)
GL_STENCIL_BACK_FUNC_ATI = 34816 # GL/glext.h:2916
GL_STENCIL_BACK_FAIL_ATI = 34817 # GL/glext.h:2917
GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI = 34818 # GL/glext.h:2918
GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI = 34819 # GL/glext.h:2919
# ATI_vertex_attrib_array_object (GL/glext.h:2922)
# OES_read_format (GL/glext.h:2925)
GL_IMPLEMENTATION_COLOR_READ_TYPE_OES = 35738 # GL/glext.h:2926
GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES = 35739 # GL/glext.h:2927
# EXT_depth_bounds_test (GL/glext.h:2930)
GL_DEPTH_BOUNDS_TEST_EXT = 34960 # GL/glext.h:2931
GL_DEPTH_BOUNDS_EXT = 34961 # GL/glext.h:2932
# EXT_texture_mirror_clamp (GL/glext.h:2935)
GL_MIRROR_CLAMP_EXT = 34626 # GL/glext.h:2936
GL_MIRROR_CLAMP_TO_EDGE_EXT = 34627 # GL/glext.h:2937
GL_MIRROR_CLAMP_TO_BORDER_EXT = 35090 # GL/glext.h:2938
# EXT_blend_equation_separate (GL/glext.h:2941)
GL_BLEND_EQUATION_RGB_EXT = 32777 # GL/glext.h:2942
GL_BLEND_EQUATION_ALPHA_EXT = 34877 # GL/glext.h:2943
# MESA_pack_invert (GL/glext.h:2946)
GL_PACK_INVERT_MESA = 34648 # GL/glext.h:2947
# MESA_ycbcr_texture (GL/glext.h:2950)
GL_UNSIGNED_SHORT_8_8_MESA = 34234 # GL/glext.h:2951
GL_UNSIGNED_SHORT_8_8_REV_MESA = 34235 # GL/glext.h:2952
GL_YCBCR_MESA = 34647 # GL/glext.h:2953
# EXT_pixel_buffer_object (GL/glext.h:2956)
GL_PIXEL_PACK_BUFFER_EXT = 35051 # GL/glext.h:2957
GL_PIXEL_UNPACK_BUFFER_EXT = 35052 # GL/glext.h:2958
GL_PIXEL_PACK_BUFFER_BINDING_EXT = 35053 # GL/glext.h:2959
GL_PIXEL_UNPACK_BUFFER_BINDING_EXT = 35055 # GL/glext.h:2960
# NV_fragment_program_option (GL/glext.h:2963)
# NV_fragment_program2 (GL/glext.h:2966)
GL_MAX_PROGRAM_EXEC_INSTRUCTIONS_NV = 35060 # GL/glext.h:2967
GL_MAX_PROGRAM_CALL_DEPTH_NV = 35061 # GL/glext.h:2968
GL_MAX_PROGRAM_IF_DEPTH_NV = 35062 # GL/glext.h:2969
GL_MAX_PROGRAM_LOOP_DEPTH_NV = 35063 # GL/glext.h:2970
GL_MAX_PROGRAM_LOOP_COUNT_NV = 35064 # GL/glext.h:2971
# NV_vertex_program2_option (GL/glext.h:2974)
# NV_vertex_program3 (GL/glext.h:2979)
# EXT_framebuffer_object (GL/glext.h:2983)
GL_INVALID_FRAMEBUFFER_OPERATION_EXT = 1286 # GL/glext.h:2984
GL_MAX_RENDERBUFFER_SIZE_EXT = 34024 # GL/glext.h:2985
GL_FRAMEBUFFER_BINDING_EXT = 36006 # GL/glext.h:2986
GL_RENDERBUFFER_BINDING_EXT = 36007 # GL/glext.h:2987
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT = 36048 # GL/glext.h:2988
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT = 36049 # GL/glext.h:2989
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT = 36050 # GL/glext.h:2990
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT = 36051 # GL/glext.h:2991
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT = 36052 # GL/glext.h:2992
GL_FRAMEBUFFER_COMPLETE_EXT = 36053 # GL/glext.h:2993
GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT = 36054 # GL/glext.h:2994
GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT = 36055 # GL/glext.h:2995
GL_FRAMEBUFFER_INCOMPLETE_DUPLICATE_ATTACHMENT_EXT = 36056 # GL/glext.h:2996
GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT = 36057 # GL/glext.h:2997
GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT = 36058 # GL/glext.h:2998
GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT = 36059 # GL/glext.h:2999
GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT = 36060 # GL/glext.h:3000
GL_FRAMEBUFFER_UNSUPPORTED_EXT = 36061 # GL/glext.h:3001
GL_MAX_COLOR_ATTACHMENTS_EXT = 36063 # GL/glext.h:3002
GL_COLOR_ATTACHMENT0_EXT = 36064 # GL/glext.h:3003
GL_COLOR_ATTACHMENT1_EXT = 36065 # GL/glext.h:3004
GL_COLOR_ATTACHMENT2_EXT = 36066 # GL/glext.h:3005
GL_COLOR_ATTACHMENT3_EXT = 36067 # GL/glext.h:3006
GL_COLOR_ATTACHMENT4_EXT = 36068 # GL/glext.h:3007
GL_COLOR_ATTACHMENT5_EXT = 36069 # GL/glext.h:3008
GL_COLOR_ATTACHMENT6_EXT = 36070 # GL/glext.h:3009
GL_COLOR_ATTACHMENT7_EXT = 36071 # GL/glext.h:3010
GL_COLOR_ATTACHMENT8_EXT = 36072 # GL/glext.h:3011
GL_COLOR_ATTACHMENT9_EXT = 36073 # GL/glext.h:3012
GL_COLOR_ATTACHMENT10_EXT = 36074 # GL/glext.h:3013
GL_COLOR_ATTACHMENT11_EXT = 36075 # GL/glext.h:3014
GL_COLOR_ATTACHMENT12_EXT = 36076 # GL/glext.h:3015
GL_COLOR_ATTACHMENT13_EXT = 36077 # GL/glext.h:3016
GL_COLOR_ATTACHMENT14_EXT = 36078 # GL/glext.h:3017
GL_COLOR_ATTACHMENT15_EXT = 36079 # GL/glext.h:3018
GL_DEPTH_ATTACHMENT_EXT = 36096 # GL/glext.h:3019
GL_STENCIL_ATTACHMENT_EXT = 36128 # GL/glext.h:3020
GL_FRAMEBUFFER_EXT = 36160 # GL/glext.h:3021
GL_RENDERBUFFER_EXT = 36161 # GL/glext.h:3022
GL_RENDERBUFFER_WIDTH_EXT = 36162 # GL/glext.h:3023
GL_RENDERBUFFER_HEIGHT_EXT = 36163 # GL/glext.h:3024
GL_RENDERBUFFER_INTERNAL_FORMAT_EXT = 36164 # GL/glext.h:3025
GL_STENCIL_INDEX1_EXT = 36166 # GL/glext.h:3026
GL_STENCIL_INDEX4_EXT = 36167 # GL/glext.h:3027
GL_STENCIL_INDEX8_EXT = 36168 # GL/glext.h:3028
GL_STENCIL_INDEX16_EXT = 36169 # GL/glext.h:3029
GL_RENDERBUFFER_RED_SIZE_EXT = 36176 # GL/glext.h:3030
GL_RENDERBUFFER_GREEN_SIZE_EXT = 36177 # GL/glext.h:3031
GL_RENDERBUFFER_BLUE_SIZE_EXT = 36178 # GL/glext.h:3032
GL_RENDERBUFFER_ALPHA_SIZE_EXT = 36179 # GL/glext.h:3033
GL_RENDERBUFFER_DEPTH_SIZE_EXT = 36180 # GL/glext.h:3034
GL_RENDERBUFFER_STENCIL_SIZE_EXT = 36181 # GL/glext.h:3035
# GREMEDY_string_marker (GL/glext.h:3038)
# VERSION_2_0 (GL/glext.h:3045)
# --- GL scalar type aliases (autogenerated) ---
# Each GL typedef is mapped to the ctypes type of matching size.  The ctypes
# names (c_char, c_uint, c_ushort) and c_ptrdiff_t are presumably brought into
# scope earlier in this file — c_ptrdiff_t is not a standard ctypes name, so it
# is likely defined locally; confirm against the top of the file.
GLchar = c_char # GL/glext.h:3047
# VERSION_1_5 (GL/glext.h:3050)
GLintptr = c_ptrdiff_t # GL/glext.h:3052
GLsizeiptr = c_ptrdiff_t # GL/glext.h:3053
# ARB_vertex_buffer_object (GL/glext.h:3056)
GLintptrARB = c_ptrdiff_t # GL/glext.h:3058
GLsizeiptrARB = c_ptrdiff_t # GL/glext.h:3059
# ARB_shader_objects (GL/glext.h:3062)
GLcharARB = c_char # GL/glext.h:3064
GLhandleARB = c_uint # GL/glext.h:3065
# ARB_half_float_pixel (GL/glext.h:3069)
GLhalfARB = c_ushort # GL/glext.h:3070
# NV_half_float (GL/glext.h:3073)
# GLhalf* are 16-bit half-float values carried as unsigned shorts (no native
# half type in ctypes).
GLhalfNV = c_ushort # GL/glext.h:3074
# VERSION_1_2 (GL/glext.h:3077)
# --- OpenGL 1.2 entry points (autogenerated) ---
# Each glXxx name is bound via _link_function(name, restype, argtypes, group);
# _link_function is defined earlier in this file and presumably resolves the
# entry point in the GL library at call/link time — confirm against its
# definition.  restype None means the C function returns void.  The GLenum /
# GLuint / ... aliases interleaved below map GL typedefs onto ctypes types of
# the size recorded in the provenance comment.  GLvoid = None makes
# POINTER(GLvoid) a ctypes void pointer.  The PFN...PROC names at the end are
# CFUNCTYPE function-pointer prototypes mirroring the C typedefs in glext.h.
GL_VERSION_1_2 = 1 # GL/glext.h:3078
GLclampf = c_float # /usr/include/GL/gl.h:64
# GL/glext.h:3080
glBlendColor = _link_function('glBlendColor', None, [GLclampf, GLclampf, GLclampf, GLclampf], 'VERSION_1_2')
GLenum = c_uint # /usr/include/GL/gl.h:53
# GL/glext.h:3081
glBlendEquation = _link_function('glBlendEquation', None, [GLenum], 'VERSION_1_2')
GLuint = c_uint # /usr/include/GL/gl.h:62
GLsizei = c_int # /usr/include/GL/gl.h:59
GLvoid = None # /usr/include/GL/gl.h:67
# GL/glext.h:3082
glDrawRangeElements = _link_function('glDrawRangeElements', None, [GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3083
glColorTable = _link_function('glColorTable', None, [GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
GLfloat = c_float # /usr/include/GL/gl.h:63
# GL/glext.h:3084
glColorTableParameterfv = _link_function('glColorTableParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
GLint = c_int # /usr/include/GL/gl.h:58
# GL/glext.h:3085
glColorTableParameteriv = _link_function('glColorTableParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3086
glCopyColorTable = _link_function('glCopyColorTable', None, [GLenum, GLenum, GLint, GLint, GLsizei], 'VERSION_1_2')
# GL/glext.h:3087
glGetColorTable = _link_function('glGetColorTable', None, [GLenum, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3088
glGetColorTableParameterfv = _link_function('glGetColorTableParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
# GL/glext.h:3089
glGetColorTableParameteriv = _link_function('glGetColorTableParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3090
glColorSubTable = _link_function('glColorSubTable', None, [GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3091
glCopyColorSubTable = _link_function('glCopyColorSubTable', None, [GLenum, GLsizei, GLint, GLint, GLsizei], 'VERSION_1_2')
# GL/glext.h:3092
glConvolutionFilter1D = _link_function('glConvolutionFilter1D', None, [GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3093
glConvolutionFilter2D = _link_function('glConvolutionFilter2D', None, [GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3094
glConvolutionParameterf = _link_function('glConvolutionParameterf', None, [GLenum, GLenum, GLfloat], 'VERSION_1_2')
# GL/glext.h:3095
glConvolutionParameterfv = _link_function('glConvolutionParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
# GL/glext.h:3096
glConvolutionParameteri = _link_function('glConvolutionParameteri', None, [GLenum, GLenum, GLint], 'VERSION_1_2')
# GL/glext.h:3097
glConvolutionParameteriv = _link_function('glConvolutionParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3098
glCopyConvolutionFilter1D = _link_function('glCopyConvolutionFilter1D', None, [GLenum, GLenum, GLint, GLint, GLsizei], 'VERSION_1_2')
# GL/glext.h:3099
glCopyConvolutionFilter2D = _link_function('glCopyConvolutionFilter2D', None, [GLenum, GLenum, GLint, GLint, GLsizei, GLsizei], 'VERSION_1_2')
# GL/glext.h:3100
glGetConvolutionFilter = _link_function('glGetConvolutionFilter', None, [GLenum, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3101
glGetConvolutionParameterfv = _link_function('glGetConvolutionParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
# GL/glext.h:3102
glGetConvolutionParameteriv = _link_function('glGetConvolutionParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3103
glGetSeparableFilter = _link_function('glGetSeparableFilter', None, [GLenum, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid), POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3104
glSeparableFilter2D = _link_function('glSeparableFilter2D', None, [GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid)], 'VERSION_1_2')
GLboolean = c_ubyte # /usr/include/GL/gl.h:54
# GL/glext.h:3105
glGetHistogram = _link_function('glGetHistogram', None, [GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3106
glGetHistogramParameterfv = _link_function('glGetHistogramParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
# GL/glext.h:3107
glGetHistogramParameteriv = _link_function('glGetHistogramParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3108
glGetMinmax = _link_function('glGetMinmax', None, [GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3109
glGetMinmaxParameterfv = _link_function('glGetMinmaxParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], 'VERSION_1_2')
# GL/glext.h:3110
glGetMinmaxParameteriv = _link_function('glGetMinmaxParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_2')
# GL/glext.h:3111
glHistogram = _link_function('glHistogram', None, [GLenum, GLsizei, GLenum, GLboolean], 'VERSION_1_2')
# GL/glext.h:3112
glMinmax = _link_function('glMinmax', None, [GLenum, GLenum, GLboolean], 'VERSION_1_2')
# GL/glext.h:3113
glResetHistogram = _link_function('glResetHistogram', None, [GLenum], 'VERSION_1_2')
# GL/glext.h:3114
glResetMinmax = _link_function('glResetMinmax', None, [GLenum], 'VERSION_1_2')
# GL/glext.h:3115
glTexImage3D = _link_function('glTexImage3D', None, [GLenum, GLint, GLint, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3116
glTexSubImage3D = _link_function('glTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'VERSION_1_2')
# GL/glext.h:3117
glCopyTexSubImage3D = _link_function('glCopyTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], 'VERSION_1_2')
# Function-pointer prototypes (CFUNCTYPE) matching the PFNGL...PROC typedefs.
PFNGLBLENDCOLORPROC = CFUNCTYPE(None, GLclampf, GLclampf, GLclampf, GLclampf) # GL/glext.h:3119
PFNGLBLENDEQUATIONPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3120
PFNGLDRAWRANGEELEMENTSPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid)) # GL/glext.h:3121
PFNGLCOLORTABLEPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3122
PFNGLCOLORTABLEPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3123
PFNGLCOLORTABLEPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3124
PFNGLCOPYCOLORTABLEPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLint, GLsizei) # GL/glext.h:3125
PFNGLGETCOLORTABLEPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3126
PFNGLGETCOLORTABLEPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3127
PFNGLGETCOLORTABLEPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3128
PFNGLCOLORSUBTABLEPROC = CFUNCTYPE(None, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3129
PFNGLCOPYCOLORSUBTABLEPROC = CFUNCTYPE(None, GLenum, GLsizei, GLint, GLint, GLsizei) # GL/glext.h:3130
PFNGLCONVOLUTIONFILTER1DPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3131
PFNGLCONVOLUTIONFILTER2DPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3132
PFNGLCONVOLUTIONPARAMETERFPROC = CFUNCTYPE(None, GLenum, GLenum, GLfloat) # GL/glext.h:3133
PFNGLCONVOLUTIONPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3134
PFNGLCONVOLUTIONPARAMETERIPROC = CFUNCTYPE(None, GLenum, GLenum, GLint) # GL/glext.h:3135
PFNGLCONVOLUTIONPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3136
PFNGLCOPYCONVOLUTIONFILTER1DPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLint, GLsizei) # GL/glext.h:3137
PFNGLCOPYCONVOLUTIONFILTER2DPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLint, GLsizei, GLsizei) # GL/glext.h:3138
PFNGLGETCONVOLUTIONFILTERPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3139
PFNGLGETCONVOLUTIONPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3140
PFNGLGETCONVOLUTIONPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3141
PFNGLGETSEPARABLEFILTERPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid), POINTER(GLvoid)) # GL/glext.h:3142
PFNGLSEPARABLEFILTER2DPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid)) # GL/glext.h:3143
PFNGLGETHISTOGRAMPROC = CFUNCTYPE(None, GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3144
PFNGLGETHISTOGRAMPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3145
PFNGLGETHISTOGRAMPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3146
PFNGLGETMINMAXPROC = CFUNCTYPE(None, GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3147
PFNGLGETMINMAXPARAMETERFVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:3148
PFNGLGETMINMAXPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3149
PFNGLHISTOGRAMPROC = CFUNCTYPE(None, GLenum, GLsizei, GLenum, GLboolean) # GL/glext.h:3150
PFNGLMINMAXPROC = CFUNCTYPE(None, GLenum, GLenum, GLboolean) # GL/glext.h:3151
PFNGLRESETHISTOGRAMPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3152
PFNGLRESETMINMAXPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3153
PFNGLTEXIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3154
PFNGLTEXSUBIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3155
PFNGLCOPYTEXSUBIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei) # GL/glext.h:3156
# VERSION_1_3 (GL/glext.h:3159)
# --- OpenGL 1.3 entry points (autogenerated): multitexture, transpose
# matrices, multisample coverage and compressed textures.  Same binding
# pattern as the VERSION_1_2 section: _link_function(name, restype, argtypes,
# group) plus CFUNCTYPE prototypes for the PFNGL...PROC typedefs.
GL_VERSION_1_3 = 1 # GL/glext.h:3160
# GL/glext.h:3162
glActiveTexture = _link_function('glActiveTexture', None, [GLenum], 'VERSION_1_3')
# GL/glext.h:3163
glClientActiveTexture = _link_function('glClientActiveTexture', None, [GLenum], 'VERSION_1_3')
GLdouble = c_double # /usr/include/GL/gl.h:65
# GL/glext.h:3164
glMultiTexCoord1d = _link_function('glMultiTexCoord1d', None, [GLenum, GLdouble], 'VERSION_1_3')
# GL/glext.h:3165
glMultiTexCoord1dv = _link_function('glMultiTexCoord1dv', None, [GLenum, POINTER(GLdouble)], 'VERSION_1_3')
# GL/glext.h:3166
glMultiTexCoord1f = _link_function('glMultiTexCoord1f', None, [GLenum, GLfloat], 'VERSION_1_3')
# GL/glext.h:3167
glMultiTexCoord1fv = _link_function('glMultiTexCoord1fv', None, [GLenum, POINTER(GLfloat)], 'VERSION_1_3')
# GL/glext.h:3168
glMultiTexCoord1i = _link_function('glMultiTexCoord1i', None, [GLenum, GLint], 'VERSION_1_3')
# GL/glext.h:3169
glMultiTexCoord1iv = _link_function('glMultiTexCoord1iv', None, [GLenum, POINTER(GLint)], 'VERSION_1_3')
GLshort = c_short # /usr/include/GL/gl.h:57
# GL/glext.h:3170
glMultiTexCoord1s = _link_function('glMultiTexCoord1s', None, [GLenum, GLshort], 'VERSION_1_3')
# GL/glext.h:3171
glMultiTexCoord1sv = _link_function('glMultiTexCoord1sv', None, [GLenum, POINTER(GLshort)], 'VERSION_1_3')
# GL/glext.h:3172
glMultiTexCoord2d = _link_function('glMultiTexCoord2d', None, [GLenum, GLdouble, GLdouble], 'VERSION_1_3')
# GL/glext.h:3173
glMultiTexCoord2dv = _link_function('glMultiTexCoord2dv', None, [GLenum, POINTER(GLdouble)], 'VERSION_1_3')
# GL/glext.h:3174
glMultiTexCoord2f = _link_function('glMultiTexCoord2f', None, [GLenum, GLfloat, GLfloat], 'VERSION_1_3')
# GL/glext.h:3175
glMultiTexCoord2fv = _link_function('glMultiTexCoord2fv', None, [GLenum, POINTER(GLfloat)], 'VERSION_1_3')
# GL/glext.h:3176
glMultiTexCoord2i = _link_function('glMultiTexCoord2i', None, [GLenum, GLint, GLint], 'VERSION_1_3')
# GL/glext.h:3177
glMultiTexCoord2iv = _link_function('glMultiTexCoord2iv', None, [GLenum, POINTER(GLint)], 'VERSION_1_3')
# GL/glext.h:3178
glMultiTexCoord2s = _link_function('glMultiTexCoord2s', None, [GLenum, GLshort, GLshort], 'VERSION_1_3')
# GL/glext.h:3179
glMultiTexCoord2sv = _link_function('glMultiTexCoord2sv', None, [GLenum, POINTER(GLshort)], 'VERSION_1_3')
# GL/glext.h:3180
glMultiTexCoord3d = _link_function('glMultiTexCoord3d', None, [GLenum, GLdouble, GLdouble, GLdouble], 'VERSION_1_3')
# GL/glext.h:3181
glMultiTexCoord3dv = _link_function('glMultiTexCoord3dv', None, [GLenum, POINTER(GLdouble)], 'VERSION_1_3')
# GL/glext.h:3182
glMultiTexCoord3f = _link_function('glMultiTexCoord3f', None, [GLenum, GLfloat, GLfloat, GLfloat], 'VERSION_1_3')
# GL/glext.h:3183
glMultiTexCoord3fv = _link_function('glMultiTexCoord3fv', None, [GLenum, POINTER(GLfloat)], 'VERSION_1_3')
# GL/glext.h:3184
glMultiTexCoord3i = _link_function('glMultiTexCoord3i', None, [GLenum, GLint, GLint, GLint], 'VERSION_1_3')
# GL/glext.h:3185
glMultiTexCoord3iv = _link_function('glMultiTexCoord3iv', None, [GLenum, POINTER(GLint)], 'VERSION_1_3')
# GL/glext.h:3186
glMultiTexCoord3s = _link_function('glMultiTexCoord3s', None, [GLenum, GLshort, GLshort, GLshort], 'VERSION_1_3')
# GL/glext.h:3187
glMultiTexCoord3sv = _link_function('glMultiTexCoord3sv', None, [GLenum, POINTER(GLshort)], 'VERSION_1_3')
# GL/glext.h:3188
glMultiTexCoord4d = _link_function('glMultiTexCoord4d', None, [GLenum, GLdouble, GLdouble, GLdouble, GLdouble], 'VERSION_1_3')
# GL/glext.h:3189
glMultiTexCoord4dv = _link_function('glMultiTexCoord4dv', None, [GLenum, POINTER(GLdouble)], 'VERSION_1_3')
# GL/glext.h:3190
glMultiTexCoord4f = _link_function('glMultiTexCoord4f', None, [GLenum, GLfloat, GLfloat, GLfloat, GLfloat], 'VERSION_1_3')
# GL/glext.h:3191
glMultiTexCoord4fv = _link_function('glMultiTexCoord4fv', None, [GLenum, POINTER(GLfloat)], 'VERSION_1_3')
# GL/glext.h:3192
glMultiTexCoord4i = _link_function('glMultiTexCoord4i', None, [GLenum, GLint, GLint, GLint, GLint], 'VERSION_1_3')
# GL/glext.h:3193
glMultiTexCoord4iv = _link_function('glMultiTexCoord4iv', None, [GLenum, POINTER(GLint)], 'VERSION_1_3')
# GL/glext.h:3194
glMultiTexCoord4s = _link_function('glMultiTexCoord4s', None, [GLenum, GLshort, GLshort, GLshort, GLshort], 'VERSION_1_3')
# GL/glext.h:3195
glMultiTexCoord4sv = _link_function('glMultiTexCoord4sv', None, [GLenum, POINTER(GLshort)], 'VERSION_1_3')
# GL/glext.h:3196
glLoadTransposeMatrixf = _link_function('glLoadTransposeMatrixf', None, [POINTER(GLfloat)], 'VERSION_1_3')
# GL/glext.h:3197
glLoadTransposeMatrixd = _link_function('glLoadTransposeMatrixd', None, [POINTER(GLdouble)], 'VERSION_1_3')
# GL/glext.h:3198
glMultTransposeMatrixf = _link_function('glMultTransposeMatrixf', None, [POINTER(GLfloat)], 'VERSION_1_3')
# GL/glext.h:3199
glMultTransposeMatrixd = _link_function('glMultTransposeMatrixd', None, [POINTER(GLdouble)], 'VERSION_1_3')
# GL/glext.h:3200
glSampleCoverage = _link_function('glSampleCoverage', None, [GLclampf, GLboolean], 'VERSION_1_3')
# GL/glext.h:3201
glCompressedTexImage3D = _link_function('glCompressedTexImage3D', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)], 'VERSION_1_3')
# GL/glext.h:3202
glCompressedTexImage2D = _link_function('glCompressedTexImage2D', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)], 'VERSION_1_3')
# GL/glext.h:3203
glCompressedTexImage1D = _link_function('glCompressedTexImage1D', None, [GLenum, GLint, GLenum, GLsizei, GLint, GLsizei, POINTER(GLvoid)], 'VERSION_1_3')
# GL/glext.h:3204
glCompressedTexSubImage3D = _link_function('glCompressedTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], 'VERSION_1_3')
# GL/glext.h:3205
glCompressedTexSubImage2D = _link_function('glCompressedTexSubImage2D', None, [GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], 'VERSION_1_3')
# GL/glext.h:3206
glCompressedTexSubImage1D = _link_function('glCompressedTexSubImage1D', None, [GLenum, GLint, GLint, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], 'VERSION_1_3')
# GL/glext.h:3207
glGetCompressedTexImage = _link_function('glGetCompressedTexImage', None, [GLenum, GLint, POINTER(GLvoid)], 'VERSION_1_3')
# Function-pointer prototypes (CFUNCTYPE) matching the PFNGL...PROC typedefs.
PFNGLACTIVETEXTUREPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3209
PFNGLCLIENTACTIVETEXTUREPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3210
PFNGLMULTITEXCOORD1DPROC = CFUNCTYPE(None, GLenum, GLdouble) # GL/glext.h:3211
PFNGLMULTITEXCOORD1DVPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:3212
PFNGLMULTITEXCOORD1FPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:3213
PFNGLMULTITEXCOORD1FVPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:3214
PFNGLMULTITEXCOORD1IPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:3215
PFNGLMULTITEXCOORD1IVPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:3216
PFNGLMULTITEXCOORD1SPROC = CFUNCTYPE(None, GLenum, GLshort) # GL/glext.h:3217
PFNGLMULTITEXCOORD1SVPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:3218
PFNGLMULTITEXCOORD2DPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble) # GL/glext.h:3219
PFNGLMULTITEXCOORD2DVPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:3220
PFNGLMULTITEXCOORD2FPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat) # GL/glext.h:3221
PFNGLMULTITEXCOORD2FVPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:3222
PFNGLMULTITEXCOORD2IPROC = CFUNCTYPE(None, GLenum, GLint, GLint) # GL/glext.h:3223
PFNGLMULTITEXCOORD2IVPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:3224
PFNGLMULTITEXCOORD2SPROC = CFUNCTYPE(None, GLenum, GLshort, GLshort) # GL/glext.h:3225
PFNGLMULTITEXCOORD2SVPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:3226
PFNGLMULTITEXCOORD3DPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble, GLdouble) # GL/glext.h:3227
PFNGLMULTITEXCOORD3DVPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:3228
PFNGLMULTITEXCOORD3FPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat, GLfloat) # GL/glext.h:3229
PFNGLMULTITEXCOORD3FVPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:3230
PFNGLMULTITEXCOORD3IPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint) # GL/glext.h:3231
PFNGLMULTITEXCOORD3IVPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:3232
PFNGLMULTITEXCOORD3SPROC = CFUNCTYPE(None, GLenum, GLshort, GLshort, GLshort) # GL/glext.h:3233
PFNGLMULTITEXCOORD3SVPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:3234
PFNGLMULTITEXCOORD4DPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:3235
PFNGLMULTITEXCOORD4DVPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:3236
PFNGLMULTITEXCOORD4FPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:3237
PFNGLMULTITEXCOORD4FVPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:3238
PFNGLMULTITEXCOORD4IPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint) # GL/glext.h:3239
PFNGLMULTITEXCOORD4IVPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:3240
PFNGLMULTITEXCOORD4SPROC = CFUNCTYPE(None, GLenum, GLshort, GLshort, GLshort, GLshort) # GL/glext.h:3241
PFNGLMULTITEXCOORD4SVPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:3242
PFNGLLOADTRANSPOSEMATRIXFPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:3243
PFNGLLOADTRANSPOSEMATRIXDPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:3244
PFNGLMULTTRANSPOSEMATRIXFPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:3245
PFNGLMULTTRANSPOSEMATRIXDPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:3246
PFNGLSAMPLECOVERAGEPROC = CFUNCTYPE(None, GLclampf, GLboolean) # GL/glext.h:3247
PFNGLCOMPRESSEDTEXIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)) # GL/glext.h:3248
PFNGLCOMPRESSEDTEXIMAGE2DPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)) # GL/glext.h:3249
PFNGLCOMPRESSEDTEXIMAGE1DPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLsizei, GLint, GLsizei, POINTER(GLvoid)) # GL/glext.h:3250
PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3251
PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3252
PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLsizei, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3253
PFNGLGETCOMPRESSEDTEXIMAGEPROC = CFUNCTYPE(None, GLenum, GLint, POINTER(GLvoid)) # GL/glext.h:3254
# OpenGL 1.4 bindings. Each `_link_function(name, restype, argtypes,
# requires)` call produces a ctypes binding for the named GL entry point;
# the PFNGL* assignments that follow are the matching ctypes prototype
# types mirrored from GL/glext.h. Generated code — header line cited per
# line.
# VERSION_1_4 (GL/glext.h:3257)
GL_VERSION_1_4 = 1 # GL/glext.h:3258
# GL/glext.h:3260
glBlendFuncSeparate = _link_function('glBlendFuncSeparate', None, [GLenum, GLenum, GLenum, GLenum], 'VERSION_1_4')
# GL/glext.h:3261
glFogCoordf = _link_function('glFogCoordf', None, [GLfloat], 'VERSION_1_4')
# GL/glext.h:3262
glFogCoordfv = _link_function('glFogCoordfv', None, [POINTER(GLfloat)], 'VERSION_1_4')
# GL/glext.h:3263
glFogCoordd = _link_function('glFogCoordd', None, [GLdouble], 'VERSION_1_4')
# GL/glext.h:3264
glFogCoorddv = _link_function('glFogCoorddv', None, [POINTER(GLdouble)], 'VERSION_1_4')
# GL/glext.h:3265
glFogCoordPointer = _link_function('glFogCoordPointer', None, [GLenum, GLsizei, POINTER(GLvoid)], 'VERSION_1_4')
# GL/glext.h:3266
glMultiDrawArrays = _link_function('glMultiDrawArrays', None, [GLenum, POINTER(GLint), POINTER(GLsizei), GLsizei], 'VERSION_1_4')
# GL/glext.h:3267
glMultiDrawElements = _link_function('glMultiDrawElements', None, [GLenum, POINTER(GLsizei), GLenum, POINTER(POINTER(GLvoid)), GLsizei], 'VERSION_1_4')
# GL/glext.h:3268
glPointParameterf = _link_function('glPointParameterf', None, [GLenum, GLfloat], 'VERSION_1_4')
# GL/glext.h:3269
glPointParameterfv = _link_function('glPointParameterfv', None, [GLenum, POINTER(GLfloat)], 'VERSION_1_4')
# GL/glext.h:3270
glPointParameteri = _link_function('glPointParameteri', None, [GLenum, GLint], 'VERSION_1_4')
# GL/glext.h:3271
glPointParameteriv = _link_function('glPointParameteriv', None, [GLenum, POINTER(GLint)], 'VERSION_1_4')
# GLbyte (signed 8-bit) is bound to ctypes c_char, which is bytes-valued,
# rather than the integer-valued c_byte. NOTE(review): this mirrors the
# generator's mapping of /usr/include/GL/gl.h:56 — confirm the signedness
# handling is intentional before changing it, as callers depend on it.
GLbyte = c_char # /usr/include/GL/gl.h:56
# GL/glext.h:3272
glSecondaryColor3b = _link_function('glSecondaryColor3b', None, [GLbyte, GLbyte, GLbyte], 'VERSION_1_4')
# GL/glext.h:3273
glSecondaryColor3bv = _link_function('glSecondaryColor3bv', None, [POINTER(GLbyte)], 'VERSION_1_4')
# GL/glext.h:3274
glSecondaryColor3d = _link_function('glSecondaryColor3d', None, [GLdouble, GLdouble, GLdouble], 'VERSION_1_4')
# GL/glext.h:3275
glSecondaryColor3dv = _link_function('glSecondaryColor3dv', None, [POINTER(GLdouble)], 'VERSION_1_4')
# GL/glext.h:3276
glSecondaryColor3f = _link_function('glSecondaryColor3f', None, [GLfloat, GLfloat, GLfloat], 'VERSION_1_4')
# GL/glext.h:3277
glSecondaryColor3fv = _link_function('glSecondaryColor3fv', None, [POINTER(GLfloat)], 'VERSION_1_4')
# GL/glext.h:3278
glSecondaryColor3i = _link_function('glSecondaryColor3i', None, [GLint, GLint, GLint], 'VERSION_1_4')
# GL/glext.h:3279
glSecondaryColor3iv = _link_function('glSecondaryColor3iv', None, [POINTER(GLint)], 'VERSION_1_4')
# GL/glext.h:3280
glSecondaryColor3s = _link_function('glSecondaryColor3s', None, [GLshort, GLshort, GLshort], 'VERSION_1_4')
# GL/glext.h:3281
glSecondaryColor3sv = _link_function('glSecondaryColor3sv', None, [POINTER(GLshort)], 'VERSION_1_4')
# GLubyte: unsigned 8-bit, integer-valued (c_ubyte), per gl.h.
GLubyte = c_ubyte # /usr/include/GL/gl.h:60
# GL/glext.h:3282
glSecondaryColor3ub = _link_function('glSecondaryColor3ub', None, [GLubyte, GLubyte, GLubyte], 'VERSION_1_4')
# GL/glext.h:3283
glSecondaryColor3ubv = _link_function('glSecondaryColor3ubv', None, [POINTER(GLubyte)], 'VERSION_1_4')
# GL/glext.h:3284
glSecondaryColor3ui = _link_function('glSecondaryColor3ui', None, [GLuint, GLuint, GLuint], 'VERSION_1_4')
# GL/glext.h:3285
glSecondaryColor3uiv = _link_function('glSecondaryColor3uiv', None, [POINTER(GLuint)], 'VERSION_1_4')
# GLushort: unsigned 16-bit (c_ushort), per gl.h.
GLushort = c_ushort # /usr/include/GL/gl.h:61
# GL/glext.h:3286
glSecondaryColor3us = _link_function('glSecondaryColor3us', None, [GLushort, GLushort, GLushort], 'VERSION_1_4')
# GL/glext.h:3287
glSecondaryColor3usv = _link_function('glSecondaryColor3usv', None, [POINTER(GLushort)], 'VERSION_1_4')
# GL/glext.h:3288
glSecondaryColorPointer = _link_function('glSecondaryColorPointer', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], 'VERSION_1_4')
# GL/glext.h:3289
glWindowPos2d = _link_function('glWindowPos2d', None, [GLdouble, GLdouble], 'VERSION_1_4')
# GL/glext.h:3290
glWindowPos2dv = _link_function('glWindowPos2dv', None, [POINTER(GLdouble)], 'VERSION_1_4')
# GL/glext.h:3291
glWindowPos2f = _link_function('glWindowPos2f', None, [GLfloat, GLfloat], 'VERSION_1_4')
# GL/glext.h:3292
glWindowPos2fv = _link_function('glWindowPos2fv', None, [POINTER(GLfloat)], 'VERSION_1_4')
# GL/glext.h:3293
glWindowPos2i = _link_function('glWindowPos2i', None, [GLint, GLint], 'VERSION_1_4')
# GL/glext.h:3294
glWindowPos2iv = _link_function('glWindowPos2iv', None, [POINTER(GLint)], 'VERSION_1_4')
# GL/glext.h:3295
glWindowPos2s = _link_function('glWindowPos2s', None, [GLshort, GLshort], 'VERSION_1_4')
# GL/glext.h:3296
glWindowPos2sv = _link_function('glWindowPos2sv', None, [POINTER(GLshort)], 'VERSION_1_4')
# GL/glext.h:3297
glWindowPos3d = _link_function('glWindowPos3d', None, [GLdouble, GLdouble, GLdouble], 'VERSION_1_4')
# GL/glext.h:3298
glWindowPos3dv = _link_function('glWindowPos3dv', None, [POINTER(GLdouble)], 'VERSION_1_4')
# GL/glext.h:3299
glWindowPos3f = _link_function('glWindowPos3f', None, [GLfloat, GLfloat, GLfloat], 'VERSION_1_4')
# GL/glext.h:3300
glWindowPos3fv = _link_function('glWindowPos3fv', None, [POINTER(GLfloat)], 'VERSION_1_4')
# GL/glext.h:3301
glWindowPos3i = _link_function('glWindowPos3i', None, [GLint, GLint, GLint], 'VERSION_1_4')
# GL/glext.h:3302
glWindowPos3iv = _link_function('glWindowPos3iv', None, [POINTER(GLint)], 'VERSION_1_4')
# GL/glext.h:3303
glWindowPos3s = _link_function('glWindowPos3s', None, [GLshort, GLshort, GLshort], 'VERSION_1_4')
# GL/glext.h:3304
glWindowPos3sv = _link_function('glWindowPos3sv', None, [POINTER(GLshort)], 'VERSION_1_4')
# ctypes prototype types (PFNGL*) for the VERSION_1_4 entry points above.
PFNGLBLENDFUNCSEPARATEPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum) # GL/glext.h:3306
PFNGLFOGCOORDFPROC = CFUNCTYPE(None, GLfloat) # GL/glext.h:3307
PFNGLFOGCOORDFVPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:3308
PFNGLFOGCOORDDPROC = CFUNCTYPE(None, GLdouble) # GL/glext.h:3309
PFNGLFOGCOORDDVPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:3310
PFNGLFOGCOORDPOINTERPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3311
PFNGLMULTIDRAWARRAYSPROC = CFUNCTYPE(None, GLenum, POINTER(GLint), POINTER(GLsizei), GLsizei) # GL/glext.h:3312
PFNGLMULTIDRAWELEMENTSPROC = CFUNCTYPE(None, GLenum, POINTER(GLsizei), GLenum, POINTER(POINTER(GLvoid)), GLsizei) # GL/glext.h:3313
PFNGLPOINTPARAMETERFPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:3314
PFNGLPOINTPARAMETERFVPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:3315
PFNGLPOINTPARAMETERIPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:3316
PFNGLPOINTPARAMETERIVPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:3317
PFNGLSECONDARYCOLOR3BPROC = CFUNCTYPE(None, GLbyte, GLbyte, GLbyte) # GL/glext.h:3318
PFNGLSECONDARYCOLOR3BVPROC = CFUNCTYPE(None, POINTER(GLbyte)) # GL/glext.h:3319
PFNGLSECONDARYCOLOR3DPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble) # GL/glext.h:3320
PFNGLSECONDARYCOLOR3DVPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:3321
PFNGLSECONDARYCOLOR3FPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat) # GL/glext.h:3322
PFNGLSECONDARYCOLOR3FVPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:3323
PFNGLSECONDARYCOLOR3IPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:3324
PFNGLSECONDARYCOLOR3IVPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:3325
PFNGLSECONDARYCOLOR3SPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort) # GL/glext.h:3326
PFNGLSECONDARYCOLOR3SVPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:3327
PFNGLSECONDARYCOLOR3UBPROC = CFUNCTYPE(None, GLubyte, GLubyte, GLubyte) # GL/glext.h:3328
PFNGLSECONDARYCOLOR3UBVPROC = CFUNCTYPE(None, POINTER(GLubyte)) # GL/glext.h:3329
PFNGLSECONDARYCOLOR3UIPROC = CFUNCTYPE(None, GLuint, GLuint, GLuint) # GL/glext.h:3330
PFNGLSECONDARYCOLOR3UIVPROC = CFUNCTYPE(None, POINTER(GLuint)) # GL/glext.h:3331
PFNGLSECONDARYCOLOR3USPROC = CFUNCTYPE(None, GLushort, GLushort, GLushort) # GL/glext.h:3332
PFNGLSECONDARYCOLOR3USVPROC = CFUNCTYPE(None, POINTER(GLushort)) # GL/glext.h:3333
PFNGLSECONDARYCOLORPOINTERPROC = CFUNCTYPE(None, GLint, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3334
PFNGLWINDOWPOS2DPROC = CFUNCTYPE(None, GLdouble, GLdouble) # GL/glext.h:3335
PFNGLWINDOWPOS2DVPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:3336
PFNGLWINDOWPOS2FPROC = CFUNCTYPE(None, GLfloat, GLfloat) # GL/glext.h:3337
PFNGLWINDOWPOS2FVPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:3338
PFNGLWINDOWPOS2IPROC = CFUNCTYPE(None, GLint, GLint) # GL/glext.h:3339
PFNGLWINDOWPOS2IVPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:3340
PFNGLWINDOWPOS2SPROC = CFUNCTYPE(None, GLshort, GLshort) # GL/glext.h:3341
PFNGLWINDOWPOS2SVPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:3342
PFNGLWINDOWPOS3DPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble) # GL/glext.h:3343
PFNGLWINDOWPOS3DVPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:3344
PFNGLWINDOWPOS3FPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat) # GL/glext.h:3345
PFNGLWINDOWPOS3FVPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:3346
PFNGLWINDOWPOS3IPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:3347
PFNGLWINDOWPOS3IVPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:3348
PFNGLWINDOWPOS3SPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort) # GL/glext.h:3349
PFNGLWINDOWPOS3SVPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:3350
# OpenGL 1.5 bindings: occlusion queries (glGenQueries/glBeginQuery/...)
# and buffer objects (glBindBuffer/glBufferData/glMapBuffer/...), followed
# by the matching PFNGL* ctypes prototype types. Generated code — header
# line cited per line.
# VERSION_1_5 (GL/glext.h:3353)
GL_VERSION_1_5 = 1 # GL/glext.h:3354
# GL/glext.h:3356
glGenQueries = _link_function('glGenQueries', None, [GLsizei, POINTER(GLuint)], 'VERSION_1_5')
# GL/glext.h:3357
glDeleteQueries = _link_function('glDeleteQueries', None, [GLsizei, POINTER(GLuint)], 'VERSION_1_5')
# GL/glext.h:3358
glIsQuery = _link_function('glIsQuery', GLboolean, [GLuint], 'VERSION_1_5')
# GL/glext.h:3359
glBeginQuery = _link_function('glBeginQuery', None, [GLenum, GLuint], 'VERSION_1_5')
# GL/glext.h:3360
glEndQuery = _link_function('glEndQuery', None, [GLenum], 'VERSION_1_5')
# GL/glext.h:3361
glGetQueryiv = _link_function('glGetQueryiv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_5')
# GL/glext.h:3362
glGetQueryObjectiv = _link_function('glGetQueryObjectiv', None, [GLuint, GLenum, POINTER(GLint)], 'VERSION_1_5')
# GL/glext.h:3363
glGetQueryObjectuiv = _link_function('glGetQueryObjectuiv', None, [GLuint, GLenum, POINTER(GLuint)], 'VERSION_1_5')
# GL/glext.h:3364
glBindBuffer = _link_function('glBindBuffer', None, [GLenum, GLuint], 'VERSION_1_5')
# GL/glext.h:3365
glDeleteBuffers = _link_function('glDeleteBuffers', None, [GLsizei, POINTER(GLuint)], 'VERSION_1_5')
# GL/glext.h:3366
glGenBuffers = _link_function('glGenBuffers', None, [GLsizei, POINTER(GLuint)], 'VERSION_1_5')
# GL/glext.h:3367
glIsBuffer = _link_function('glIsBuffer', GLboolean, [GLuint], 'VERSION_1_5')
# GL/glext.h:3368
glBufferData = _link_function('glBufferData', None, [GLenum, GLsizeiptr, POINTER(GLvoid), GLenum], 'VERSION_1_5')
# GL/glext.h:3369
glBufferSubData = _link_function('glBufferSubData', None, [GLenum, GLintptr, GLsizeiptr, POINTER(GLvoid)], 'VERSION_1_5')
# GL/glext.h:3370
glGetBufferSubData = _link_function('glGetBufferSubData', None, [GLenum, GLintptr, GLsizeiptr, POINTER(GLvoid)], 'VERSION_1_5')
# glMapBuffer is the one VERSION_1_5 binding here with a pointer restype.
# GL/glext.h:3371
glMapBuffer = _link_function('glMapBuffer', POINTER(GLvoid), [GLenum, GLenum], 'VERSION_1_5')
# GL/glext.h:3372
glUnmapBuffer = _link_function('glUnmapBuffer', GLboolean, [GLenum], 'VERSION_1_5')
# GL/glext.h:3373
glGetBufferParameteriv = _link_function('glGetBufferParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 'VERSION_1_5')
# GL/glext.h:3374
glGetBufferPointerv = _link_function('glGetBufferPointerv', None, [GLenum, GLenum, POINTER(POINTER(GLvoid))], 'VERSION_1_5')
# ctypes prototype types (PFNGL*) for the VERSION_1_5 entry points above.
PFNGLGENQUERIESPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:3376
PFNGLDELETEQUERIESPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:3377
PFNGLISQUERYPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:3378
PFNGLBEGINQUERYPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:3379
PFNGLENDQUERYPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3380
PFNGLGETQUERYIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3381
PFNGLGETQUERYOBJECTIVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:3382
PFNGLGETQUERYOBJECTUIVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLuint)) # GL/glext.h:3383
PFNGLBINDBUFFERPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:3384
PFNGLDELETEBUFFERSPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:3385
PFNGLGENBUFFERSPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:3386
PFNGLISBUFFERPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:3387
PFNGLBUFFERDATAPROC = CFUNCTYPE(None, GLenum, GLsizeiptr, POINTER(GLvoid), GLenum) # GL/glext.h:3388
PFNGLBUFFERSUBDATAPROC = CFUNCTYPE(None, GLenum, GLintptr, GLsizeiptr, POINTER(GLvoid)) # GL/glext.h:3389
PFNGLGETBUFFERSUBDATAPROC = CFUNCTYPE(None, GLenum, GLintptr, GLsizeiptr, POINTER(GLvoid)) # GL/glext.h:3390
PFNGLMAPBUFFERPROC = CFUNCTYPE(POINTER(GLvoid), GLenum, GLenum) # GL/glext.h:3391
PFNGLUNMAPBUFFERPROC = CFUNCTYPE(GLboolean, GLenum) # GL/glext.h:3392
PFNGLGETBUFFERPARAMETERIVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3393
PFNGLGETBUFFERPOINTERVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:3394
# OpenGL 2.0 bindings: shader and program objects, uniforms, multiple
# draw buffers, separate stencil state and generic vertex attributes,
# followed by the matching PFNGL* ctypes prototype types. Generated code —
# header line cited per line.
# VERSION_2_0 (GL/glext.h:3397)
GL_VERSION_2_0 = 1 # GL/glext.h:3398
# GL/glext.h:3400
glBlendEquationSeparate = _link_function('glBlendEquationSeparate', None, [GLenum, GLenum], 'VERSION_2_0')
# GL/glext.h:3401
glDrawBuffers = _link_function('glDrawBuffers', None, [GLsizei, POINTER(GLenum)], 'VERSION_2_0')
# GL/glext.h:3402
glStencilOpSeparate = _link_function('glStencilOpSeparate', None, [GLenum, GLenum, GLenum, GLenum], 'VERSION_2_0')
# GL/glext.h:3403
glStencilFuncSeparate = _link_function('glStencilFuncSeparate', None, [GLenum, GLenum, GLint, GLuint], 'VERSION_2_0')
# GL/glext.h:3404
glStencilMaskSeparate = _link_function('glStencilMaskSeparate', None, [GLenum, GLuint], 'VERSION_2_0')
# GL/glext.h:3405
glAttachShader = _link_function('glAttachShader', None, [GLuint, GLuint], 'VERSION_2_0')
# GL/glext.h:3406
glBindAttribLocation = _link_function('glBindAttribLocation', None, [GLuint, GLuint, POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3407
glCompileShader = _link_function('glCompileShader', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3408
glCreateProgram = _link_function('glCreateProgram', GLuint, [], 'VERSION_2_0')
# GL/glext.h:3409
glCreateShader = _link_function('glCreateShader', GLuint, [GLenum], 'VERSION_2_0')
# GL/glext.h:3410
glDeleteProgram = _link_function('glDeleteProgram', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3411
glDeleteShader = _link_function('glDeleteShader', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3412
glDetachShader = _link_function('glDetachShader', None, [GLuint, GLuint], 'VERSION_2_0')
# GL/glext.h:3413
glDisableVertexAttribArray = _link_function('glDisableVertexAttribArray', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3414
glEnableVertexAttribArray = _link_function('glEnableVertexAttribArray', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3415
glGetActiveAttrib = _link_function('glGetActiveAttrib', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3416
glGetActiveUniform = _link_function('glGetActiveUniform', None, [GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3417
glGetAttachedShaders = _link_function('glGetAttachedShaders', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLuint)], 'VERSION_2_0')
# GL/glext.h:3418
glGetAttribLocation = _link_function('glGetAttribLocation', GLint, [GLuint, POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3419
glGetProgramiv = _link_function('glGetProgramiv', None, [GLuint, GLenum, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3420
glGetProgramInfoLog = _link_function('glGetProgramInfoLog', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3421
glGetShaderiv = _link_function('glGetShaderiv', None, [GLuint, GLenum, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3422
glGetShaderInfoLog = _link_function('glGetShaderInfoLog', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3423
glGetShaderSource = _link_function('glGetShaderSource', None, [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3424
glGetUniformLocation = _link_function('glGetUniformLocation', GLint, [GLuint, POINTER(GLchar)], 'VERSION_2_0')
# GL/glext.h:3425
glGetUniformfv = _link_function('glGetUniformfv', None, [GLuint, GLint, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3426
glGetUniformiv = _link_function('glGetUniformiv', None, [GLuint, GLint, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3427
glGetVertexAttribdv = _link_function('glGetVertexAttribdv', None, [GLuint, GLenum, POINTER(GLdouble)], 'VERSION_2_0')
# GL/glext.h:3428
glGetVertexAttribfv = _link_function('glGetVertexAttribfv', None, [GLuint, GLenum, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3429
glGetVertexAttribiv = _link_function('glGetVertexAttribiv', None, [GLuint, GLenum, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3430
glGetVertexAttribPointerv = _link_function('glGetVertexAttribPointerv', None, [GLuint, GLenum, POINTER(POINTER(GLvoid))], 'VERSION_2_0')
# GL/glext.h:3431
glIsProgram = _link_function('glIsProgram', GLboolean, [GLuint], 'VERSION_2_0')
# GL/glext.h:3432
glIsShader = _link_function('glIsShader', GLboolean, [GLuint], 'VERSION_2_0')
# GL/glext.h:3433
glLinkProgram = _link_function('glLinkProgram', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3434
glShaderSource = _link_function('glShaderSource', None, [GLuint, GLsizei, POINTER(POINTER(GLchar)), POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3435
glUseProgram = _link_function('glUseProgram', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3436
glUniform1f = _link_function('glUniform1f', None, [GLint, GLfloat], 'VERSION_2_0')
# GL/glext.h:3437
glUniform2f = _link_function('glUniform2f', None, [GLint, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3438
glUniform3f = _link_function('glUniform3f', None, [GLint, GLfloat, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3439
glUniform4f = _link_function('glUniform4f', None, [GLint, GLfloat, GLfloat, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3440
glUniform1i = _link_function('glUniform1i', None, [GLint, GLint], 'VERSION_2_0')
# GL/glext.h:3441
glUniform2i = _link_function('glUniform2i', None, [GLint, GLint, GLint], 'VERSION_2_0')
# GL/glext.h:3442
glUniform3i = _link_function('glUniform3i', None, [GLint, GLint, GLint, GLint], 'VERSION_2_0')
# GL/glext.h:3443
glUniform4i = _link_function('glUniform4i', None, [GLint, GLint, GLint, GLint, GLint], 'VERSION_2_0')
# GL/glext.h:3444
glUniform1fv = _link_function('glUniform1fv', None, [GLint, GLsizei, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3445
glUniform2fv = _link_function('glUniform2fv', None, [GLint, GLsizei, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3446
glUniform3fv = _link_function('glUniform3fv', None, [GLint, GLsizei, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3447
glUniform4fv = _link_function('glUniform4fv', None, [GLint, GLsizei, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3448
glUniform1iv = _link_function('glUniform1iv', None, [GLint, GLsizei, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3449
glUniform2iv = _link_function('glUniform2iv', None, [GLint, GLsizei, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3450
glUniform3iv = _link_function('glUniform3iv', None, [GLint, GLsizei, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3451
glUniform4iv = _link_function('glUniform4iv', None, [GLint, GLsizei, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3452
glUniformMatrix2fv = _link_function('glUniformMatrix2fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3453
glUniformMatrix3fv = _link_function('glUniformMatrix3fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3454
glUniformMatrix4fv = _link_function('glUniformMatrix4fv', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3455
glValidateProgram = _link_function('glValidateProgram', None, [GLuint], 'VERSION_2_0')
# GL/glext.h:3456
glVertexAttrib1d = _link_function('glVertexAttrib1d', None, [GLuint, GLdouble], 'VERSION_2_0')
# GL/glext.h:3457
glVertexAttrib1dv = _link_function('glVertexAttrib1dv', None, [GLuint, POINTER(GLdouble)], 'VERSION_2_0')
# GL/glext.h:3458
glVertexAttrib1f = _link_function('glVertexAttrib1f', None, [GLuint, GLfloat], 'VERSION_2_0')
# GL/glext.h:3459
glVertexAttrib1fv = _link_function('glVertexAttrib1fv', None, [GLuint, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3460
glVertexAttrib1s = _link_function('glVertexAttrib1s', None, [GLuint, GLshort], 'VERSION_2_0')
# GL/glext.h:3461
glVertexAttrib1sv = _link_function('glVertexAttrib1sv', None, [GLuint, POINTER(GLshort)], 'VERSION_2_0')
# GL/glext.h:3462
glVertexAttrib2d = _link_function('glVertexAttrib2d', None, [GLuint, GLdouble, GLdouble], 'VERSION_2_0')
# GL/glext.h:3463
glVertexAttrib2dv = _link_function('glVertexAttrib2dv', None, [GLuint, POINTER(GLdouble)], 'VERSION_2_0')
# GL/glext.h:3464
glVertexAttrib2f = _link_function('glVertexAttrib2f', None, [GLuint, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3465
glVertexAttrib2fv = _link_function('glVertexAttrib2fv', None, [GLuint, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3466
glVertexAttrib2s = _link_function('glVertexAttrib2s', None, [GLuint, GLshort, GLshort], 'VERSION_2_0')
# GL/glext.h:3467
glVertexAttrib2sv = _link_function('glVertexAttrib2sv', None, [GLuint, POINTER(GLshort)], 'VERSION_2_0')
# GL/glext.h:3468
glVertexAttrib3d = _link_function('glVertexAttrib3d', None, [GLuint, GLdouble, GLdouble, GLdouble], 'VERSION_2_0')
# GL/glext.h:3469
glVertexAttrib3dv = _link_function('glVertexAttrib3dv', None, [GLuint, POINTER(GLdouble)], 'VERSION_2_0')
# GL/glext.h:3470
glVertexAttrib3f = _link_function('glVertexAttrib3f', None, [GLuint, GLfloat, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3471
glVertexAttrib3fv = _link_function('glVertexAttrib3fv', None, [GLuint, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3472
glVertexAttrib3s = _link_function('glVertexAttrib3s', None, [GLuint, GLshort, GLshort, GLshort], 'VERSION_2_0')
# GL/glext.h:3473
glVertexAttrib3sv = _link_function('glVertexAttrib3sv', None, [GLuint, POINTER(GLshort)], 'VERSION_2_0')
# The 4N* variants take normalized (fixed-point mapped to [0,1]/[-1,1])
# values, per the glVertexAttrib4N* naming in glext.h.
# GL/glext.h:3474
glVertexAttrib4Nbv = _link_function('glVertexAttrib4Nbv', None, [GLuint, POINTER(GLbyte)], 'VERSION_2_0')
# GL/glext.h:3475
glVertexAttrib4Niv = _link_function('glVertexAttrib4Niv', None, [GLuint, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3476
glVertexAttrib4Nsv = _link_function('glVertexAttrib4Nsv', None, [GLuint, POINTER(GLshort)], 'VERSION_2_0')
# GL/glext.h:3477
glVertexAttrib4Nub = _link_function('glVertexAttrib4Nub', None, [GLuint, GLubyte, GLubyte, GLubyte, GLubyte], 'VERSION_2_0')
# GL/glext.h:3478
glVertexAttrib4Nubv = _link_function('glVertexAttrib4Nubv', None, [GLuint, POINTER(GLubyte)], 'VERSION_2_0')
# GL/glext.h:3479
glVertexAttrib4Nuiv = _link_function('glVertexAttrib4Nuiv', None, [GLuint, POINTER(GLuint)], 'VERSION_2_0')
# GL/glext.h:3480
glVertexAttrib4Nusv = _link_function('glVertexAttrib4Nusv', None, [GLuint, POINTER(GLushort)], 'VERSION_2_0')
# GL/glext.h:3481
glVertexAttrib4bv = _link_function('glVertexAttrib4bv', None, [GLuint, POINTER(GLbyte)], 'VERSION_2_0')
# GL/glext.h:3482
glVertexAttrib4d = _link_function('glVertexAttrib4d', None, [GLuint, GLdouble, GLdouble, GLdouble, GLdouble], 'VERSION_2_0')
# GL/glext.h:3483
glVertexAttrib4dv = _link_function('glVertexAttrib4dv', None, [GLuint, POINTER(GLdouble)], 'VERSION_2_0')
# GL/glext.h:3484
glVertexAttrib4f = _link_function('glVertexAttrib4f', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat], 'VERSION_2_0')
# GL/glext.h:3485
glVertexAttrib4fv = _link_function('glVertexAttrib4fv', None, [GLuint, POINTER(GLfloat)], 'VERSION_2_0')
# GL/glext.h:3486
glVertexAttrib4iv = _link_function('glVertexAttrib4iv', None, [GLuint, POINTER(GLint)], 'VERSION_2_0')
# GL/glext.h:3487
glVertexAttrib4s = _link_function('glVertexAttrib4s', None, [GLuint, GLshort, GLshort, GLshort, GLshort], 'VERSION_2_0')
# GL/glext.h:3488
glVertexAttrib4sv = _link_function('glVertexAttrib4sv', None, [GLuint, POINTER(GLshort)], 'VERSION_2_0')
# GL/glext.h:3489
glVertexAttrib4ubv = _link_function('glVertexAttrib4ubv', None, [GLuint, POINTER(GLubyte)], 'VERSION_2_0')
# GL/glext.h:3490
glVertexAttrib4uiv = _link_function('glVertexAttrib4uiv', None, [GLuint, POINTER(GLuint)], 'VERSION_2_0')
# GL/glext.h:3491
glVertexAttrib4usv = _link_function('glVertexAttrib4usv', None, [GLuint, POINTER(GLushort)], 'VERSION_2_0')
# GL/glext.h:3492
glVertexAttribPointer = _link_function('glVertexAttribPointer', None, [GLuint, GLint, GLenum, GLboolean, GLsizei, POINTER(GLvoid)], 'VERSION_2_0')
# ctypes prototype types (PFNGL*) for the VERSION_2_0 entry points above.
PFNGLBLENDEQUATIONSEPARATEPROC = CFUNCTYPE(None, GLenum, GLenum) # GL/glext.h:3494
PFNGLDRAWBUFFERSPROC = CFUNCTYPE(None, GLsizei, POINTER(GLenum)) # GL/glext.h:3495
PFNGLSTENCILOPSEPARATEPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum) # GL/glext.h:3496
PFNGLSTENCILFUNCSEPARATEPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLuint) # GL/glext.h:3497
PFNGLSTENCILMASKSEPARATEPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:3498
PFNGLATTACHSHADERPROC = CFUNCTYPE(None, GLuint, GLuint) # GL/glext.h:3499
PFNGLBINDATTRIBLOCATIONPROC = CFUNCTYPE(None, GLuint, GLuint, POINTER(GLchar)) # GL/glext.h:3500
PFNGLCOMPILESHADERPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3501
PFNGLCREATEPROGRAMPROC = CFUNCTYPE(GLuint) # GL/glext.h:3502
PFNGLCREATESHADERPROC = CFUNCTYPE(GLuint, GLenum) # GL/glext.h:3503
PFNGLDELETEPROGRAMPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3504
PFNGLDELETESHADERPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3505
PFNGLDETACHSHADERPROC = CFUNCTYPE(None, GLuint, GLuint) # GL/glext.h:3506
PFNGLDISABLEVERTEXATTRIBARRAYPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3507
PFNGLENABLEVERTEXATTRIBARRAYPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3508
PFNGLGETACTIVEATTRIBPROC = CFUNCTYPE(None, GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)) # GL/glext.h:3509
PFNGLGETACTIVEUNIFORMPROC = CFUNCTYPE(None, GLuint, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLchar)) # GL/glext.h:3510
PFNGLGETATTACHEDSHADERSPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLuint)) # GL/glext.h:3511
PFNGLGETATTRIBLOCATIONPROC = CFUNCTYPE(GLint, GLuint, POINTER(GLchar)) # GL/glext.h:3512
PFNGLGETPROGRAMIVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:3513
PFNGLGETPROGRAMINFOLOGPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)) # GL/glext.h:3514
PFNGLGETSHADERIVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:3515
PFNGLGETSHADERINFOLOGPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)) # GL/glext.h:3516
PFNGLGETSHADERSOURCEPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)) # GL/glext.h:3517
PFNGLGETUNIFORMLOCATIONPROC = CFUNCTYPE(GLint, GLuint, POINTER(GLchar)) # GL/glext.h:3518
PFNGLGETUNIFORMFVPROC = CFUNCTYPE(None, GLuint, GLint, POINTER(GLfloat)) # GL/glext.h:3519
PFNGLGETUNIFORMIVPROC = CFUNCTYPE(None, GLuint, GLint, POINTER(GLint)) # GL/glext.h:3520
PFNGLGETVERTEXATTRIBDVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLdouble)) # GL/glext.h:3521
PFNGLGETVERTEXATTRIBFVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:3522
PFNGLGETVERTEXATTRIBIVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:3523
PFNGLGETVERTEXATTRIBPOINTERVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:3524
PFNGLISPROGRAMPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:3525
PFNGLISSHADERPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:3526
PFNGLLINKPROGRAMPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3527
PFNGLSHADERSOURCEPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(POINTER(GLchar)), POINTER(GLint)) # GL/glext.h:3528
PFNGLUSEPROGRAMPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3529
PFNGLUNIFORM1FPROC = CFUNCTYPE(None, GLint, GLfloat) # GL/glext.h:3530
PFNGLUNIFORM2FPROC = CFUNCTYPE(None, GLint, GLfloat, GLfloat) # GL/glext.h:3531
PFNGLUNIFORM3FPROC = CFUNCTYPE(None, GLint, GLfloat, GLfloat, GLfloat) # GL/glext.h:3532
PFNGLUNIFORM4FPROC = CFUNCTYPE(None, GLint, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:3533
PFNGLUNIFORM1IPROC = CFUNCTYPE(None, GLint, GLint) # GL/glext.h:3534
PFNGLUNIFORM2IPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:3535
PFNGLUNIFORM3IPROC = CFUNCTYPE(None, GLint, GLint, GLint, GLint) # GL/glext.h:3536
PFNGLUNIFORM4IPROC = CFUNCTYPE(None, GLint, GLint, GLint, GLint, GLint) # GL/glext.h:3537
PFNGLUNIFORM1FVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:3538
PFNGLUNIFORM2FVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:3539
PFNGLUNIFORM3FVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:3540
PFNGLUNIFORM4FVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:3541
PFNGLUNIFORM1IVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:3542
PFNGLUNIFORM2IVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:3543
PFNGLUNIFORM3IVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:3544
PFNGLUNIFORM4IVPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:3545
PFNGLUNIFORMMATRIX2FVPROC = CFUNCTYPE(None, GLint, GLsizei, GLboolean, POINTER(GLfloat)) # GL/glext.h:3546
PFNGLUNIFORMMATRIX3FVPROC = CFUNCTYPE(None, GLint, GLsizei, GLboolean, POINTER(GLfloat)) # GL/glext.h:3547
PFNGLUNIFORMMATRIX4FVPROC = CFUNCTYPE(None, GLint, GLsizei, GLboolean, POINTER(GLfloat)) # GL/glext.h:3548
PFNGLVALIDATEPROGRAMPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3549
PFNGLVERTEXATTRIB1DPROC = CFUNCTYPE(None, GLuint, GLdouble) # GL/glext.h:3550
PFNGLVERTEXATTRIB1DVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3551
PFNGLVERTEXATTRIB1FPROC = CFUNCTYPE(None, GLuint, GLfloat) # GL/glext.h:3552
PFNGLVERTEXATTRIB1FVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3553
PFNGLVERTEXATTRIB1SPROC = CFUNCTYPE(None, GLuint, GLshort) # GL/glext.h:3554
PFNGLVERTEXATTRIB1SVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3555
PFNGLVERTEXATTRIB2DPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble) # GL/glext.h:3556
PFNGLVERTEXATTRIB2DVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3557
PFNGLVERTEXATTRIB2FPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat) # GL/glext.h:3558
PFNGLVERTEXATTRIB2FVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3559
PFNGLVERTEXATTRIB2SPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort) # GL/glext.h:3560
PFNGLVERTEXATTRIB2SVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3561
PFNGLVERTEXATTRIB3DPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble, GLdouble) # GL/glext.h:3562
PFNGLVERTEXATTRIB3DVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3563
PFNGLVERTEXATTRIB3FPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat) # GL/glext.h:3564
PFNGLVERTEXATTRIB3FVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3565
PFNGLVERTEXATTRIB3SPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort, GLshort) # GL/glext.h:3566
PFNGLVERTEXATTRIB3SVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3567
PFNGLVERTEXATTRIB4NBVPROC = CFUNCTYPE(None, GLuint, POINTER(GLbyte)) # GL/glext.h:3568
PFNGLVERTEXATTRIB4NIVPROC = CFUNCTYPE(None, GLuint, POINTER(GLint)) # GL/glext.h:3569
PFNGLVERTEXATTRIB4NSVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3570
PFNGLVERTEXATTRIB4NUBPROC = CFUNCTYPE(None, GLuint, GLubyte, GLubyte, GLubyte, GLubyte) # GL/glext.h:3571
PFNGLVERTEXATTRIB4NUBVPROC = CFUNCTYPE(None, GLuint, POINTER(GLubyte)) # GL/glext.h:3572
PFNGLVERTEXATTRIB4NUIVPROC = CFUNCTYPE(None, GLuint, POINTER(GLuint)) # GL/glext.h:3573
PFNGLVERTEXATTRIB4NUSVPROC = CFUNCTYPE(None, GLuint, POINTER(GLushort)) # GL/glext.h:3574
PFNGLVERTEXATTRIB4BVPROC = CFUNCTYPE(None, GLuint, POINTER(GLbyte)) # GL/glext.h:3575
PFNGLVERTEXATTRIB4DPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:3576
PFNGLVERTEXATTRIB4DVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3577
PFNGLVERTEXATTRIB4FPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:3578
PFNGLVERTEXATTRIB4FVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3579
PFNGLVERTEXATTRIB4IVPROC = CFUNCTYPE(None, GLuint, POINTER(GLint)) # GL/glext.h:3580
PFNGLVERTEXATTRIB4SPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort, GLshort, GLshort) # GL/glext.h:3581
PFNGLVERTEXATTRIB4SVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3582
PFNGLVERTEXATTRIB4UBVPROC = CFUNCTYPE(None, GLuint, POINTER(GLubyte)) # GL/glext.h:3583
PFNGLVERTEXATTRIB4UIVPROC = CFUNCTYPE(None, GLuint, POINTER(GLuint)) # GL/glext.h:3584
PFNGLVERTEXATTRIB4USVPROC = CFUNCTYPE(None, GLuint, POINTER(GLushort)) # GL/glext.h:3585
PFNGLVERTEXATTRIBPOINTERPROC = CFUNCTYPE(None, GLuint, GLint, GLenum, GLboolean, GLsizei, POINTER(GLvoid)) # GL/glext.h:3586
# ARB_multitexture (GL/glext.h:3589)
# Auto-generated bindings for the ARB_multitexture extension: each
# _link_function call resolves the named GL entry point lazily, with the
# extension name supplied for error reporting when it is unavailable.
GL_ARB_multitexture = 1 # GL/glext.h:3590
# GL/glext.h:3592
glActiveTextureARB = _link_function('glActiveTextureARB', None, [GLenum], 'ARB_multitexture')
# GL/glext.h:3593
glClientActiveTextureARB = _link_function('glClientActiveTextureARB', None, [GLenum], 'ARB_multitexture')
# GL/glext.h:3594
glMultiTexCoord1dARB = _link_function('glMultiTexCoord1dARB', None, [GLenum, GLdouble], 'ARB_multitexture')
# GL/glext.h:3595
glMultiTexCoord1dvARB = _link_function('glMultiTexCoord1dvARB', None, [GLenum, POINTER(GLdouble)], 'ARB_multitexture')
# GL/glext.h:3596
glMultiTexCoord1fARB = _link_function('glMultiTexCoord1fARB', None, [GLenum, GLfloat], 'ARB_multitexture')
# GL/glext.h:3597
glMultiTexCoord1fvARB = _link_function('glMultiTexCoord1fvARB', None, [GLenum, POINTER(GLfloat)], 'ARB_multitexture')
# GL/glext.h:3598
glMultiTexCoord1iARB = _link_function('glMultiTexCoord1iARB', None, [GLenum, GLint], 'ARB_multitexture')
# GL/glext.h:3599
glMultiTexCoord1ivARB = _link_function('glMultiTexCoord1ivARB', None, [GLenum, POINTER(GLint)], 'ARB_multitexture')
# GL/glext.h:3600
glMultiTexCoord1sARB = _link_function('glMultiTexCoord1sARB', None, [GLenum, GLshort], 'ARB_multitexture')
# GL/glext.h:3601
glMultiTexCoord1svARB = _link_function('glMultiTexCoord1svARB', None, [GLenum, POINTER(GLshort)], 'ARB_multitexture')
# GL/glext.h:3602
glMultiTexCoord2dARB = _link_function('glMultiTexCoord2dARB', None, [GLenum, GLdouble, GLdouble], 'ARB_multitexture')
# GL/glext.h:3603
glMultiTexCoord2dvARB = _link_function('glMultiTexCoord2dvARB', None, [GLenum, POINTER(GLdouble)], 'ARB_multitexture')
# GL/glext.h:3604
glMultiTexCoord2fARB = _link_function('glMultiTexCoord2fARB', None, [GLenum, GLfloat, GLfloat], 'ARB_multitexture')
# GL/glext.h:3605
glMultiTexCoord2fvARB = _link_function('glMultiTexCoord2fvARB', None, [GLenum, POINTER(GLfloat)], 'ARB_multitexture')
# GL/glext.h:3606
glMultiTexCoord2iARB = _link_function('glMultiTexCoord2iARB', None, [GLenum, GLint, GLint], 'ARB_multitexture')
# GL/glext.h:3607
glMultiTexCoord2ivARB = _link_function('glMultiTexCoord2ivARB', None, [GLenum, POINTER(GLint)], 'ARB_multitexture')
# GL/glext.h:3608
glMultiTexCoord2sARB = _link_function('glMultiTexCoord2sARB', None, [GLenum, GLshort, GLshort], 'ARB_multitexture')
# GL/glext.h:3609
glMultiTexCoord2svARB = _link_function('glMultiTexCoord2svARB', None, [GLenum, POINTER(GLshort)], 'ARB_multitexture')
# GL/glext.h:3610
glMultiTexCoord3dARB = _link_function('glMultiTexCoord3dARB', None, [GLenum, GLdouble, GLdouble, GLdouble], 'ARB_multitexture')
# GL/glext.h:3611
glMultiTexCoord3dvARB = _link_function('glMultiTexCoord3dvARB', None, [GLenum, POINTER(GLdouble)], 'ARB_multitexture')
# GL/glext.h:3612
glMultiTexCoord3fARB = _link_function('glMultiTexCoord3fARB', None, [GLenum, GLfloat, GLfloat, GLfloat], 'ARB_multitexture')
# GL/glext.h:3613
glMultiTexCoord3fvARB = _link_function('glMultiTexCoord3fvARB', None, [GLenum, POINTER(GLfloat)], 'ARB_multitexture')
# GL/glext.h:3614
glMultiTexCoord3iARB = _link_function('glMultiTexCoord3iARB', None, [GLenum, GLint, GLint, GLint], 'ARB_multitexture')
# GL/glext.h:3615
glMultiTexCoord3ivARB = _link_function('glMultiTexCoord3ivARB', None, [GLenum, POINTER(GLint)], 'ARB_multitexture')
# GL/glext.h:3616
glMultiTexCoord3sARB = _link_function('glMultiTexCoord3sARB', None, [GLenum, GLshort, GLshort, GLshort], 'ARB_multitexture')
# GL/glext.h:3617
glMultiTexCoord3svARB = _link_function('glMultiTexCoord3svARB', None, [GLenum, POINTER(GLshort)], 'ARB_multitexture')
# GL/glext.h:3618
glMultiTexCoord4dARB = _link_function('glMultiTexCoord4dARB', None, [GLenum, GLdouble, GLdouble, GLdouble, GLdouble], 'ARB_multitexture')
# GL/glext.h:3619
glMultiTexCoord4dvARB = _link_function('glMultiTexCoord4dvARB', None, [GLenum, POINTER(GLdouble)], 'ARB_multitexture')
# GL/glext.h:3620
glMultiTexCoord4fARB = _link_function('glMultiTexCoord4fARB', None, [GLenum, GLfloat, GLfloat, GLfloat, GLfloat], 'ARB_multitexture')
# GL/glext.h:3621
glMultiTexCoord4fvARB = _link_function('glMultiTexCoord4fvARB', None, [GLenum, POINTER(GLfloat)], 'ARB_multitexture')
# GL/glext.h:3622
glMultiTexCoord4iARB = _link_function('glMultiTexCoord4iARB', None, [GLenum, GLint, GLint, GLint, GLint], 'ARB_multitexture')
# GL/glext.h:3623
glMultiTexCoord4ivARB = _link_function('glMultiTexCoord4ivARB', None, [GLenum, POINTER(GLint)], 'ARB_multitexture')
# GL/glext.h:3624
glMultiTexCoord4sARB = _link_function('glMultiTexCoord4sARB', None, [GLenum, GLshort, GLshort, GLshort, GLshort], 'ARB_multitexture')
# GL/glext.h:3625
glMultiTexCoord4svARB = _link_function('glMultiTexCoord4svARB', None, [GLenum, POINTER(GLshort)], 'ARB_multitexture')
# ctypes prototype aliases mirroring the PFN*ARBPROC typedefs in glext.h.
PFNGLACTIVETEXTUREARBPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3627
PFNGLCLIENTACTIVETEXTUREARBPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:3628
PFNGLMULTITEXCOORD1DARBPROC = CFUNCTYPE(None, GLenum, GLdouble) # GL/glext.h:3629
PFNGLMULTITEXCOORD1DVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:3630
PFNGLMULTITEXCOORD1FARBPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:3631
PFNGLMULTITEXCOORD1FVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:3632
PFNGLMULTITEXCOORD1IARBPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:3633
PFNGLMULTITEXCOORD1IVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:3634
PFNGLMULTITEXCOORD1SARBPROC = CFUNCTYPE(None, GLenum, GLshort) # GL/glext.h:3635
PFNGLMULTITEXCOORD1SVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:3636
PFNGLMULTITEXCOORD2DARBPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble) # GL/glext.h:3637
PFNGLMULTITEXCOORD2DVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:3638
PFNGLMULTITEXCOORD2FARBPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat) # GL/glext.h:3639
PFNGLMULTITEXCOORD2FVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:3640
PFNGLMULTITEXCOORD2IARBPROC = CFUNCTYPE(None, GLenum, GLint, GLint) # GL/glext.h:3641
PFNGLMULTITEXCOORD2IVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:3642
PFNGLMULTITEXCOORD2SARBPROC = CFUNCTYPE(None, GLenum, GLshort, GLshort) # GL/glext.h:3643
PFNGLMULTITEXCOORD2SVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:3644
PFNGLMULTITEXCOORD3DARBPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble, GLdouble) # GL/glext.h:3645
PFNGLMULTITEXCOORD3DVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:3646
PFNGLMULTITEXCOORD3FARBPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat, GLfloat) # GL/glext.h:3647
PFNGLMULTITEXCOORD3FVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:3648
PFNGLMULTITEXCOORD3IARBPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint) # GL/glext.h:3649
PFNGLMULTITEXCOORD3IVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:3650
PFNGLMULTITEXCOORD3SARBPROC = CFUNCTYPE(None, GLenum, GLshort, GLshort, GLshort) # GL/glext.h:3651
PFNGLMULTITEXCOORD3SVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:3652
PFNGLMULTITEXCOORD4DARBPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:3653
PFNGLMULTITEXCOORD4DVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:3654
PFNGLMULTITEXCOORD4FARBPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:3655
PFNGLMULTITEXCOORD4FVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:3656
PFNGLMULTITEXCOORD4IARBPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint) # GL/glext.h:3657
PFNGLMULTITEXCOORD4IVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:3658
PFNGLMULTITEXCOORD4SARBPROC = CFUNCTYPE(None, GLenum, GLshort, GLshort, GLshort, GLshort) # GL/glext.h:3659
PFNGLMULTITEXCOORD4SVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:3660
# ARB_transpose_matrix (GL/glext.h:3663)
# Bindings for loading/multiplying row-major (transposed) matrices.
GL_ARB_transpose_matrix = 1 # GL/glext.h:3664
# GL/glext.h:3666
glLoadTransposeMatrixfARB = _link_function('glLoadTransposeMatrixfARB', None, [POINTER(GLfloat)], 'ARB_transpose_matrix')
# GL/glext.h:3667
glLoadTransposeMatrixdARB = _link_function('glLoadTransposeMatrixdARB', None, [POINTER(GLdouble)], 'ARB_transpose_matrix')
# GL/glext.h:3668
glMultTransposeMatrixfARB = _link_function('glMultTransposeMatrixfARB', None, [POINTER(GLfloat)], 'ARB_transpose_matrix')
# GL/glext.h:3669
glMultTransposeMatrixdARB = _link_function('glMultTransposeMatrixdARB', None, [POINTER(GLdouble)], 'ARB_transpose_matrix')
PFNGLLOADTRANSPOSEMATRIXFARBPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:3671
PFNGLLOADTRANSPOSEMATRIXDARBPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:3672
PFNGLMULTTRANSPOSEMATRIXFARBPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:3673
PFNGLMULTTRANSPOSEMATRIXDARBPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:3674
# ARB_multisample (GL/glext.h:3677)
GL_ARB_multisample = 1 # GL/glext.h:3678
# GL/glext.h:3680
glSampleCoverageARB = _link_function('glSampleCoverageARB', None, [GLclampf, GLboolean], 'ARB_multisample')
PFNGLSAMPLECOVERAGEARBPROC = CFUNCTYPE(None, GLclampf, GLboolean) # GL/glext.h:3682
# ARB_texture_env_add (GL/glext.h:3685)
# Flag-only extension marker (no entry points of its own).
GL_ARB_texture_env_add = 1 # GL/glext.h:3686
# ARB_texture_cube_map (GL/glext.h:3689)
# Flag-only extension marker (no entry points of its own).
GL_ARB_texture_cube_map = 1 # GL/glext.h:3690
# ARB_texture_compression (GL/glext.h:3693)
# Bindings for compressed texture image upload/download entry points.
GL_ARB_texture_compression = 1 # GL/glext.h:3694
# GL/glext.h:3696
glCompressedTexImage3DARB = _link_function('glCompressedTexImage3DARB', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)], 'ARB_texture_compression')
# GL/glext.h:3697
glCompressedTexImage2DARB = _link_function('glCompressedTexImage2DARB', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)], 'ARB_texture_compression')
# GL/glext.h:3698
glCompressedTexImage1DARB = _link_function('glCompressedTexImage1DARB', None, [GLenum, GLint, GLenum, GLsizei, GLint, GLsizei, POINTER(GLvoid)], 'ARB_texture_compression')
# GL/glext.h:3699
glCompressedTexSubImage3DARB = _link_function('glCompressedTexSubImage3DARB', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], 'ARB_texture_compression')
# GL/glext.h:3700
glCompressedTexSubImage2DARB = _link_function('glCompressedTexSubImage2DARB', None, [GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], 'ARB_texture_compression')
# GL/glext.h:3701
glCompressedTexSubImage1DARB = _link_function('glCompressedTexSubImage1DARB', None, [GLenum, GLint, GLint, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], 'ARB_texture_compression')
# GL/glext.h:3702
glGetCompressedTexImageARB = _link_function('glGetCompressedTexImageARB', None, [GLenum, GLint, POINTER(GLvoid)], 'ARB_texture_compression')
PFNGLCOMPRESSEDTEXIMAGE3DARBPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)) # GL/glext.h:3704
PFNGLCOMPRESSEDTEXIMAGE2DARBPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)) # GL/glext.h:3705
PFNGLCOMPRESSEDTEXIMAGE1DARBPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLsizei, GLint, GLsizei, POINTER(GLvoid)) # GL/glext.h:3706
PFNGLCOMPRESSEDTEXSUBIMAGE3DARBPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3707
PFNGLCOMPRESSEDTEXSUBIMAGE2DARBPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3708
PFNGLCOMPRESSEDTEXSUBIMAGE1DARBPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLsizei, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3709
PFNGLGETCOMPRESSEDTEXIMAGEARBPROC = CFUNCTYPE(None, GLenum, GLint, POINTER(GLvoid)) # GL/glext.h:3710
# ARB_texture_border_clamp (GL/glext.h:3713)
# Flag-only extension marker (no entry points of its own).
GL_ARB_texture_border_clamp = 1 # GL/glext.h:3714
# ARB_point_parameters (GL/glext.h:3717)
GL_ARB_point_parameters = 1 # GL/glext.h:3718
# GL/glext.h:3720
glPointParameterfARB = _link_function('glPointParameterfARB', None, [GLenum, GLfloat], 'ARB_point_parameters')
# GL/glext.h:3721
glPointParameterfvARB = _link_function('glPointParameterfvARB', None, [GLenum, POINTER(GLfloat)], 'ARB_point_parameters')
PFNGLPOINTPARAMETERFARBPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:3723
PFNGLPOINTPARAMETERFVARBPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:3724
# ARB_vertex_blend (GL/glext.h:3727)
# Bindings for vertex-blend weight entry points (one variant per C type
# suffix: b/s/i/f/d plus unsigned ub/us/ui).
GL_ARB_vertex_blend = 1 # GL/glext.h:3728
# GL/glext.h:3730
glWeightbvARB = _link_function('glWeightbvARB', None, [GLint, POINTER(GLbyte)], 'ARB_vertex_blend')
# GL/glext.h:3731
glWeightsvARB = _link_function('glWeightsvARB', None, [GLint, POINTER(GLshort)], 'ARB_vertex_blend')
# GL/glext.h:3732
glWeightivARB = _link_function('glWeightivARB', None, [GLint, POINTER(GLint)], 'ARB_vertex_blend')
# GL/glext.h:3733
glWeightfvARB = _link_function('glWeightfvARB', None, [GLint, POINTER(GLfloat)], 'ARB_vertex_blend')
# GL/glext.h:3734
glWeightdvARB = _link_function('glWeightdvARB', None, [GLint, POINTER(GLdouble)], 'ARB_vertex_blend')
# GL/glext.h:3735
glWeightubvARB = _link_function('glWeightubvARB', None, [GLint, POINTER(GLubyte)], 'ARB_vertex_blend')
# GL/glext.h:3736
glWeightusvARB = _link_function('glWeightusvARB', None, [GLint, POINTER(GLushort)], 'ARB_vertex_blend')
# GL/glext.h:3737
glWeightuivARB = _link_function('glWeightuivARB', None, [GLint, POINTER(GLuint)], 'ARB_vertex_blend')
# GL/glext.h:3738
glWeightPointerARB = _link_function('glWeightPointerARB', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], 'ARB_vertex_blend')
# GL/glext.h:3739
glVertexBlendARB = _link_function('glVertexBlendARB', None, [GLint], 'ARB_vertex_blend')
PFNGLWEIGHTBVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLbyte)) # GL/glext.h:3741
PFNGLWEIGHTSVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLshort)) # GL/glext.h:3742
PFNGLWEIGHTIVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLint)) # GL/glext.h:3743
PFNGLWEIGHTFVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLfloat)) # GL/glext.h:3744
PFNGLWEIGHTDVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLdouble)) # GL/glext.h:3745
PFNGLWEIGHTUBVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLubyte)) # GL/glext.h:3746
PFNGLWEIGHTUSVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLushort)) # GL/glext.h:3747
PFNGLWEIGHTUIVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLuint)) # GL/glext.h:3748
PFNGLWEIGHTPOINTERARBPROC = CFUNCTYPE(None, GLint, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3749
PFNGLVERTEXBLENDARBPROC = CFUNCTYPE(None, GLint) # GL/glext.h:3750
# ARB_matrix_palette (GL/glext.h:3753)
GL_ARB_matrix_palette = 1 # GL/glext.h:3754
# GL/glext.h:3756
glCurrentPaletteMatrixARB = _link_function('glCurrentPaletteMatrixARB', None, [GLint], 'ARB_matrix_palette')
# GL/glext.h:3757
glMatrixIndexubvARB = _link_function('glMatrixIndexubvARB', None, [GLint, POINTER(GLubyte)], 'ARB_matrix_palette')
# GL/glext.h:3758
glMatrixIndexusvARB = _link_function('glMatrixIndexusvARB', None, [GLint, POINTER(GLushort)], 'ARB_matrix_palette')
# GL/glext.h:3759
glMatrixIndexuivARB = _link_function('glMatrixIndexuivARB', None, [GLint, POINTER(GLuint)], 'ARB_matrix_palette')
# GL/glext.h:3760
glMatrixIndexPointerARB = _link_function('glMatrixIndexPointerARB', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], 'ARB_matrix_palette')
PFNGLCURRENTPALETTEMATRIXARBPROC = CFUNCTYPE(None, GLint) # GL/glext.h:3762
PFNGLMATRIXINDEXUBVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLubyte)) # GL/glext.h:3763
PFNGLMATRIXINDEXUSVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLushort)) # GL/glext.h:3764
PFNGLMATRIXINDEXUIVARBPROC = CFUNCTYPE(None, GLint, POINTER(GLuint)) # GL/glext.h:3765
PFNGLMATRIXINDEXPOINTERARBPROC = CFUNCTYPE(None, GLint, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3766
# Flag-only extension markers: these ARB extensions define enums/behavior
# only, so the generated bindings consist solely of the presence constant.
# ARB_texture_env_combine (GL/glext.h:3769)
GL_ARB_texture_env_combine = 1 # GL/glext.h:3770
# ARB_texture_env_crossbar (GL/glext.h:3773)
GL_ARB_texture_env_crossbar = 1 # GL/glext.h:3774
# ARB_texture_env_dot3 (GL/glext.h:3777)
GL_ARB_texture_env_dot3 = 1 # GL/glext.h:3778
# ARB_texture_mirrored_repeat (GL/glext.h:3781)
GL_ARB_texture_mirrored_repeat = 1 # GL/glext.h:3782
# ARB_depth_texture (GL/glext.h:3785)
GL_ARB_depth_texture = 1 # GL/glext.h:3786
# ARB_shadow (GL/glext.h:3789)
GL_ARB_shadow = 1 # GL/glext.h:3790
# ARB_shadow_ambient (GL/glext.h:3793)
GL_ARB_shadow_ambient = 1 # GL/glext.h:3794
# ARB_window_pos (GL/glext.h:3797)
# Bindings for the glWindowPos{2,3}{d,f,i,s}[v]ARB entry points.
GL_ARB_window_pos = 1 # GL/glext.h:3798
# GL/glext.h:3800
glWindowPos2dARB = _link_function('glWindowPos2dARB', None, [GLdouble, GLdouble], 'ARB_window_pos')
# GL/glext.h:3801
glWindowPos2dvARB = _link_function('glWindowPos2dvARB', None, [POINTER(GLdouble)], 'ARB_window_pos')
# GL/glext.h:3802
glWindowPos2fARB = _link_function('glWindowPos2fARB', None, [GLfloat, GLfloat], 'ARB_window_pos')
# GL/glext.h:3803
glWindowPos2fvARB = _link_function('glWindowPos2fvARB', None, [POINTER(GLfloat)], 'ARB_window_pos')
# GL/glext.h:3804
glWindowPos2iARB = _link_function('glWindowPos2iARB', None, [GLint, GLint], 'ARB_window_pos')
# GL/glext.h:3805
glWindowPos2ivARB = _link_function('glWindowPos2ivARB', None, [POINTER(GLint)], 'ARB_window_pos')
# GL/glext.h:3806
glWindowPos2sARB = _link_function('glWindowPos2sARB', None, [GLshort, GLshort], 'ARB_window_pos')
# GL/glext.h:3807
glWindowPos2svARB = _link_function('glWindowPos2svARB', None, [POINTER(GLshort)], 'ARB_window_pos')
# GL/glext.h:3808
glWindowPos3dARB = _link_function('glWindowPos3dARB', None, [GLdouble, GLdouble, GLdouble], 'ARB_window_pos')
# GL/glext.h:3809
glWindowPos3dvARB = _link_function('glWindowPos3dvARB', None, [POINTER(GLdouble)], 'ARB_window_pos')
# GL/glext.h:3810
glWindowPos3fARB = _link_function('glWindowPos3fARB', None, [GLfloat, GLfloat, GLfloat], 'ARB_window_pos')
# GL/glext.h:3811
glWindowPos3fvARB = _link_function('glWindowPos3fvARB', None, [POINTER(GLfloat)], 'ARB_window_pos')
# GL/glext.h:3812
glWindowPos3iARB = _link_function('glWindowPos3iARB', None, [GLint, GLint, GLint], 'ARB_window_pos')
# GL/glext.h:3813
glWindowPos3ivARB = _link_function('glWindowPos3ivARB', None, [POINTER(GLint)], 'ARB_window_pos')
# GL/glext.h:3814
glWindowPos3sARB = _link_function('glWindowPos3sARB', None, [GLshort, GLshort, GLshort], 'ARB_window_pos')
# GL/glext.h:3815
glWindowPos3svARB = _link_function('glWindowPos3svARB', None, [POINTER(GLshort)], 'ARB_window_pos')
PFNGLWINDOWPOS2DARBPROC = CFUNCTYPE(None, GLdouble, GLdouble) # GL/glext.h:3817
PFNGLWINDOWPOS2DVARBPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:3818
PFNGLWINDOWPOS2FARBPROC = CFUNCTYPE(None, GLfloat, GLfloat) # GL/glext.h:3819
PFNGLWINDOWPOS2FVARBPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:3820
PFNGLWINDOWPOS2IARBPROC = CFUNCTYPE(None, GLint, GLint) # GL/glext.h:3821
PFNGLWINDOWPOS2IVARBPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:3822
PFNGLWINDOWPOS2SARBPROC = CFUNCTYPE(None, GLshort, GLshort) # GL/glext.h:3823
PFNGLWINDOWPOS2SVARBPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:3824
PFNGLWINDOWPOS3DARBPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble) # GL/glext.h:3825
PFNGLWINDOWPOS3DVARBPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:3826
PFNGLWINDOWPOS3FARBPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat) # GL/glext.h:3827
PFNGLWINDOWPOS3FVARBPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:3828
PFNGLWINDOWPOS3IARBPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:3829
PFNGLWINDOWPOS3IVARBPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:3830
PFNGLWINDOWPOS3SARBPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort) # GL/glext.h:3831
PFNGLWINDOWPOS3SVARBPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:3832
# ARB_vertex_program (GL/glext.h:3835)
# Bindings for vertex-attribute setters, program object management and
# env/local program parameter entry points.  Note glIsProgramARB is the
# only function here with a non-void return type (GLboolean).
GL_ARB_vertex_program = 1 # GL/glext.h:3836
# GL/glext.h:3838
glVertexAttrib1dARB = _link_function('glVertexAttrib1dARB', None, [GLuint, GLdouble], 'ARB_vertex_program')
# GL/glext.h:3839
glVertexAttrib1dvARB = _link_function('glVertexAttrib1dvARB', None, [GLuint, POINTER(GLdouble)], 'ARB_vertex_program')
# GL/glext.h:3840
glVertexAttrib1fARB = _link_function('glVertexAttrib1fARB', None, [GLuint, GLfloat], 'ARB_vertex_program')
# GL/glext.h:3841
glVertexAttrib1fvARB = _link_function('glVertexAttrib1fvARB', None, [GLuint, POINTER(GLfloat)], 'ARB_vertex_program')
# GL/glext.h:3842
glVertexAttrib1sARB = _link_function('glVertexAttrib1sARB', None, [GLuint, GLshort], 'ARB_vertex_program')
# GL/glext.h:3843
glVertexAttrib1svARB = _link_function('glVertexAttrib1svARB', None, [GLuint, POINTER(GLshort)], 'ARB_vertex_program')
# GL/glext.h:3844
glVertexAttrib2dARB = _link_function('glVertexAttrib2dARB', None, [GLuint, GLdouble, GLdouble], 'ARB_vertex_program')
# GL/glext.h:3845
glVertexAttrib2dvARB = _link_function('glVertexAttrib2dvARB', None, [GLuint, POINTER(GLdouble)], 'ARB_vertex_program')
# GL/glext.h:3846
glVertexAttrib2fARB = _link_function('glVertexAttrib2fARB', None, [GLuint, GLfloat, GLfloat], 'ARB_vertex_program')
# GL/glext.h:3847
glVertexAttrib2fvARB = _link_function('glVertexAttrib2fvARB', None, [GLuint, POINTER(GLfloat)], 'ARB_vertex_program')
# GL/glext.h:3848
glVertexAttrib2sARB = _link_function('glVertexAttrib2sARB', None, [GLuint, GLshort, GLshort], 'ARB_vertex_program')
# GL/glext.h:3849
glVertexAttrib2svARB = _link_function('glVertexAttrib2svARB', None, [GLuint, POINTER(GLshort)], 'ARB_vertex_program')
# GL/glext.h:3850
glVertexAttrib3dARB = _link_function('glVertexAttrib3dARB', None, [GLuint, GLdouble, GLdouble, GLdouble], 'ARB_vertex_program')
# GL/glext.h:3851
glVertexAttrib3dvARB = _link_function('glVertexAttrib3dvARB', None, [GLuint, POINTER(GLdouble)], 'ARB_vertex_program')
# GL/glext.h:3852
glVertexAttrib3fARB = _link_function('glVertexAttrib3fARB', None, [GLuint, GLfloat, GLfloat, GLfloat], 'ARB_vertex_program')
# GL/glext.h:3853
glVertexAttrib3fvARB = _link_function('glVertexAttrib3fvARB', None, [GLuint, POINTER(GLfloat)], 'ARB_vertex_program')
# GL/glext.h:3854
glVertexAttrib3sARB = _link_function('glVertexAttrib3sARB', None, [GLuint, GLshort, GLshort, GLshort], 'ARB_vertex_program')
# GL/glext.h:3855
glVertexAttrib3svARB = _link_function('glVertexAttrib3svARB', None, [GLuint, POINTER(GLshort)], 'ARB_vertex_program')
# GL/glext.h:3856
glVertexAttrib4NbvARB = _link_function('glVertexAttrib4NbvARB', None, [GLuint, POINTER(GLbyte)], 'ARB_vertex_program')
# GL/glext.h:3857
glVertexAttrib4NivARB = _link_function('glVertexAttrib4NivARB', None, [GLuint, POINTER(GLint)], 'ARB_vertex_program')
# GL/glext.h:3858
glVertexAttrib4NsvARB = _link_function('glVertexAttrib4NsvARB', None, [GLuint, POINTER(GLshort)], 'ARB_vertex_program')
# GL/glext.h:3859
glVertexAttrib4NubARB = _link_function('glVertexAttrib4NubARB', None, [GLuint, GLubyte, GLubyte, GLubyte, GLubyte], 'ARB_vertex_program')
# GL/glext.h:3860
glVertexAttrib4NubvARB = _link_function('glVertexAttrib4NubvARB', None, [GLuint, POINTER(GLubyte)], 'ARB_vertex_program')
# GL/glext.h:3861
glVertexAttrib4NuivARB = _link_function('glVertexAttrib4NuivARB', None, [GLuint, POINTER(GLuint)], 'ARB_vertex_program')
# GL/glext.h:3862
glVertexAttrib4NusvARB = _link_function('glVertexAttrib4NusvARB', None, [GLuint, POINTER(GLushort)], 'ARB_vertex_program')
# GL/glext.h:3863
glVertexAttrib4bvARB = _link_function('glVertexAttrib4bvARB', None, [GLuint, POINTER(GLbyte)], 'ARB_vertex_program')
# GL/glext.h:3864
glVertexAttrib4dARB = _link_function('glVertexAttrib4dARB', None, [GLuint, GLdouble, GLdouble, GLdouble, GLdouble], 'ARB_vertex_program')
# GL/glext.h:3865
glVertexAttrib4dvARB = _link_function('glVertexAttrib4dvARB', None, [GLuint, POINTER(GLdouble)], 'ARB_vertex_program')
# GL/glext.h:3866
glVertexAttrib4fARB = _link_function('glVertexAttrib4fARB', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat], 'ARB_vertex_program')
# GL/glext.h:3867
glVertexAttrib4fvARB = _link_function('glVertexAttrib4fvARB', None, [GLuint, POINTER(GLfloat)], 'ARB_vertex_program')
# GL/glext.h:3868
glVertexAttrib4ivARB = _link_function('glVertexAttrib4ivARB', None, [GLuint, POINTER(GLint)], 'ARB_vertex_program')
# GL/glext.h:3869
glVertexAttrib4sARB = _link_function('glVertexAttrib4sARB', None, [GLuint, GLshort, GLshort, GLshort, GLshort], 'ARB_vertex_program')
# GL/glext.h:3870
glVertexAttrib4svARB = _link_function('glVertexAttrib4svARB', None, [GLuint, POINTER(GLshort)], 'ARB_vertex_program')
# GL/glext.h:3871
glVertexAttrib4ubvARB = _link_function('glVertexAttrib4ubvARB', None, [GLuint, POINTER(GLubyte)], 'ARB_vertex_program')
# GL/glext.h:3872
glVertexAttrib4uivARB = _link_function('glVertexAttrib4uivARB', None, [GLuint, POINTER(GLuint)], 'ARB_vertex_program')
# GL/glext.h:3873
glVertexAttrib4usvARB = _link_function('glVertexAttrib4usvARB', None, [GLuint, POINTER(GLushort)], 'ARB_vertex_program')
# GL/glext.h:3874
glVertexAttribPointerARB = _link_function('glVertexAttribPointerARB', None, [GLuint, GLint, GLenum, GLboolean, GLsizei, POINTER(GLvoid)], 'ARB_vertex_program')
# GL/glext.h:3875
glEnableVertexAttribArrayARB = _link_function('glEnableVertexAttribArrayARB', None, [GLuint], 'ARB_vertex_program')
# GL/glext.h:3876
glDisableVertexAttribArrayARB = _link_function('glDisableVertexAttribArrayARB', None, [GLuint], 'ARB_vertex_program')
# GL/glext.h:3877
glProgramStringARB = _link_function('glProgramStringARB', None, [GLenum, GLenum, GLsizei, POINTER(GLvoid)], 'ARB_vertex_program')
# GL/glext.h:3878
glBindProgramARB = _link_function('glBindProgramARB', None, [GLenum, GLuint], 'ARB_vertex_program')
# GL/glext.h:3879
glDeleteProgramsARB = _link_function('glDeleteProgramsARB', None, [GLsizei, POINTER(GLuint)], 'ARB_vertex_program')
# GL/glext.h:3880
glGenProgramsARB = _link_function('glGenProgramsARB', None, [GLsizei, POINTER(GLuint)], 'ARB_vertex_program')
# GL/glext.h:3881
glProgramEnvParameter4dARB = _link_function('glProgramEnvParameter4dARB', None, [GLenum, GLuint, GLdouble, GLdouble, GLdouble, GLdouble], 'ARB_vertex_program')
# GL/glext.h:3882
glProgramEnvParameter4dvARB = _link_function('glProgramEnvParameter4dvARB', None, [GLenum, GLuint, POINTER(GLdouble)], 'ARB_vertex_program')
# GL/glext.h:3883
glProgramEnvParameter4fARB = _link_function('glProgramEnvParameter4fARB', None, [GLenum, GLuint, GLfloat, GLfloat, GLfloat, GLfloat], 'ARB_vertex_program')
# GL/glext.h:3884
glProgramEnvParameter4fvARB = _link_function('glProgramEnvParameter4fvARB', None, [GLenum, GLuint, POINTER(GLfloat)], 'ARB_vertex_program')
# GL/glext.h:3885
glProgramLocalParameter4dARB = _link_function('glProgramLocalParameter4dARB', None, [GLenum, GLuint, GLdouble, GLdouble, GLdouble, GLdouble], 'ARB_vertex_program')
# GL/glext.h:3886
glProgramLocalParameter4dvARB = _link_function('glProgramLocalParameter4dvARB', None, [GLenum, GLuint, POINTER(GLdouble)], 'ARB_vertex_program')
# GL/glext.h:3887
glProgramLocalParameter4fARB = _link_function('glProgramLocalParameter4fARB', None, [GLenum, GLuint, GLfloat, GLfloat, GLfloat, GLfloat], 'ARB_vertex_program')
# GL/glext.h:3888
glProgramLocalParameter4fvARB = _link_function('glProgramLocalParameter4fvARB', None, [GLenum, GLuint, POINTER(GLfloat)], 'ARB_vertex_program')
# GL/glext.h:3889
glGetProgramEnvParameterdvARB = _link_function('glGetProgramEnvParameterdvARB', None, [GLenum, GLuint, POINTER(GLdouble)], 'ARB_vertex_program')
# GL/glext.h:3890
glGetProgramEnvParameterfvARB = _link_function('glGetProgramEnvParameterfvARB', None, [GLenum, GLuint, POINTER(GLfloat)], 'ARB_vertex_program')
# GL/glext.h:3891
glGetProgramLocalParameterdvARB = _link_function('glGetProgramLocalParameterdvARB', None, [GLenum, GLuint, POINTER(GLdouble)], 'ARB_vertex_program')
# GL/glext.h:3892
glGetProgramLocalParameterfvARB = _link_function('glGetProgramLocalParameterfvARB', None, [GLenum, GLuint, POINTER(GLfloat)], 'ARB_vertex_program')
# GL/glext.h:3893
glGetProgramivARB = _link_function('glGetProgramivARB', None, [GLenum, GLenum, POINTER(GLint)], 'ARB_vertex_program')
# GL/glext.h:3894
glGetProgramStringARB = _link_function('glGetProgramStringARB', None, [GLenum, GLenum, POINTER(GLvoid)], 'ARB_vertex_program')
# GL/glext.h:3895
glGetVertexAttribdvARB = _link_function('glGetVertexAttribdvARB', None, [GLuint, GLenum, POINTER(GLdouble)], 'ARB_vertex_program')
# GL/glext.h:3896
glGetVertexAttribfvARB = _link_function('glGetVertexAttribfvARB', None, [GLuint, GLenum, POINTER(GLfloat)], 'ARB_vertex_program')
# GL/glext.h:3897
glGetVertexAttribivARB = _link_function('glGetVertexAttribivARB', None, [GLuint, GLenum, POINTER(GLint)], 'ARB_vertex_program')
# GL/glext.h:3898
glGetVertexAttribPointervARB = _link_function('glGetVertexAttribPointervARB', None, [GLuint, GLenum, POINTER(POINTER(GLvoid))], 'ARB_vertex_program')
# GL/glext.h:3899
glIsProgramARB = _link_function('glIsProgramARB', GLboolean, [GLuint], 'ARB_vertex_program')
# ctypes prototype aliases mirroring the PFN*ARBPROC typedefs in glext.h.
PFNGLVERTEXATTRIB1DARBPROC = CFUNCTYPE(None, GLuint, GLdouble) # GL/glext.h:3901
PFNGLVERTEXATTRIB1DVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3902
PFNGLVERTEXATTRIB1FARBPROC = CFUNCTYPE(None, GLuint, GLfloat) # GL/glext.h:3903
PFNGLVERTEXATTRIB1FVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3904
PFNGLVERTEXATTRIB1SARBPROC = CFUNCTYPE(None, GLuint, GLshort) # GL/glext.h:3905
PFNGLVERTEXATTRIB1SVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3906
PFNGLVERTEXATTRIB2DARBPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble) # GL/glext.h:3907
PFNGLVERTEXATTRIB2DVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3908
PFNGLVERTEXATTRIB2FARBPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat) # GL/glext.h:3909
PFNGLVERTEXATTRIB2FVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3910
PFNGLVERTEXATTRIB2SARBPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort) # GL/glext.h:3911
PFNGLVERTEXATTRIB2SVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3912
PFNGLVERTEXATTRIB3DARBPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble, GLdouble) # GL/glext.h:3913
PFNGLVERTEXATTRIB3DVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3914
PFNGLVERTEXATTRIB3FARBPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat) # GL/glext.h:3915
PFNGLVERTEXATTRIB3FVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3916
PFNGLVERTEXATTRIB3SARBPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort, GLshort) # GL/glext.h:3917
PFNGLVERTEXATTRIB3SVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3918
PFNGLVERTEXATTRIB4NBVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLbyte)) # GL/glext.h:3919
PFNGLVERTEXATTRIB4NIVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLint)) # GL/glext.h:3920
PFNGLVERTEXATTRIB4NSVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3921
PFNGLVERTEXATTRIB4NUBARBPROC = CFUNCTYPE(None, GLuint, GLubyte, GLubyte, GLubyte, GLubyte) # GL/glext.h:3922
PFNGLVERTEXATTRIB4NUBVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLubyte)) # GL/glext.h:3923
PFNGLVERTEXATTRIB4NUIVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLuint)) # GL/glext.h:3924
PFNGLVERTEXATTRIB4NUSVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLushort)) # GL/glext.h:3925
PFNGLVERTEXATTRIB4BVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLbyte)) # GL/glext.h:3926
PFNGLVERTEXATTRIB4DARBPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:3927
PFNGLVERTEXATTRIB4DVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:3928
PFNGLVERTEXATTRIB4FARBPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:3929
PFNGLVERTEXATTRIB4FVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:3930
PFNGLVERTEXATTRIB4IVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLint)) # GL/glext.h:3931
PFNGLVERTEXATTRIB4SARBPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort, GLshort, GLshort) # GL/glext.h:3932
PFNGLVERTEXATTRIB4SVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:3933
PFNGLVERTEXATTRIB4UBVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLubyte)) # GL/glext.h:3934
PFNGLVERTEXATTRIB4UIVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLuint)) # GL/glext.h:3935
PFNGLVERTEXATTRIB4USVARBPROC = CFUNCTYPE(None, GLuint, POINTER(GLushort)) # GL/glext.h:3936
PFNGLVERTEXATTRIBPOINTERARBPROC = CFUNCTYPE(None, GLuint, GLint, GLenum, GLboolean, GLsizei, POINTER(GLvoid)) # GL/glext.h:3937
PFNGLENABLEVERTEXATTRIBARRAYARBPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3938
PFNGLDISABLEVERTEXATTRIBARRAYARBPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:3939
PFNGLPROGRAMSTRINGARBPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:3940
PFNGLBINDPROGRAMARBPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:3941
PFNGLDELETEPROGRAMSARBPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:3942
PFNGLGENPROGRAMSARBPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:3943
PFNGLPROGRAMENVPARAMETER4DARBPROC = CFUNCTYPE(None, GLenum, GLuint, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:3944
PFNGLPROGRAMENVPARAMETER4DVARBPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLdouble)) # GL/glext.h:3945
PFNGLPROGRAMENVPARAMETER4FARBPROC = CFUNCTYPE(None, GLenum, GLuint, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:3946
PFNGLPROGRAMENVPARAMETER4FVARBPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLfloat)) # GL/glext.h:3947
PFNGLPROGRAMLOCALPARAMETER4DARBPROC = CFUNCTYPE(None, GLenum, GLuint, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:3948
PFNGLPROGRAMLOCALPARAMETER4DVARBPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLdouble)) # GL/glext.h:3949
PFNGLPROGRAMLOCALPARAMETER4FARBPROC = CFUNCTYPE(None, GLenum, GLuint, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:3950
PFNGLPROGRAMLOCALPARAMETER4FVARBPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLfloat)) # GL/glext.h:3951
PFNGLGETPROGRAMENVPARAMETERDVARBPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLdouble)) # GL/glext.h:3952
PFNGLGETPROGRAMENVPARAMETERFVARBPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLfloat)) # GL/glext.h:3953
PFNGLGETPROGRAMLOCALPARAMETERDVARBPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLdouble)) # GL/glext.h:3954
PFNGLGETPROGRAMLOCALPARAMETERFVARBPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLfloat)) # GL/glext.h:3955
PFNGLGETPROGRAMIVARBPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3956
PFNGLGETPROGRAMSTRINGARBPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:3957
PFNGLGETVERTEXATTRIBDVARBPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLdouble)) # GL/glext.h:3958
PFNGLGETVERTEXATTRIBFVARBPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:3959
PFNGLGETVERTEXATTRIBIVARBPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:3960
PFNGLGETVERTEXATTRIBPOINTERVARBPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:3961
PFNGLISPROGRAMARBPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:3962
# ARB_fragment_program (GL/glext.h:3965)
GL_ARB_fragment_program = 1 # GL/glext.h:3966
# ARB_vertex_buffer_object (GL/glext.h:3970)
GL_ARB_vertex_buffer_object = 1 # GL/glext.h:3971
# GL/glext.h:3973
glBindBufferARB = _link_function('glBindBufferARB', None, [GLenum, GLuint], 'ARB_vertex_buffer_object')
# GL/glext.h:3974
glDeleteBuffersARB = _link_function('glDeleteBuffersARB', None, [GLsizei, POINTER(GLuint)], 'ARB_vertex_buffer_object')
# GL/glext.h:3975
glGenBuffersARB = _link_function('glGenBuffersARB', None, [GLsizei, POINTER(GLuint)], 'ARB_vertex_buffer_object')
# GL/glext.h:3976
glIsBufferARB = _link_function('glIsBufferARB', GLboolean, [GLuint], 'ARB_vertex_buffer_object')
# GL/glext.h:3977
glBufferDataARB = _link_function('glBufferDataARB', None, [GLenum, GLsizeiptrARB, POINTER(GLvoid), GLenum], 'ARB_vertex_buffer_object')
# GL/glext.h:3978
glBufferSubDataARB = _link_function('glBufferSubDataARB', None, [GLenum, GLintptrARB, GLsizeiptrARB, POINTER(GLvoid)], 'ARB_vertex_buffer_object')
# GL/glext.h:3979
glGetBufferSubDataARB = _link_function('glGetBufferSubDataARB', None, [GLenum, GLintptrARB, GLsizeiptrARB, POINTER(GLvoid)], 'ARB_vertex_buffer_object')
# GL/glext.h:3980
glMapBufferARB = _link_function('glMapBufferARB', POINTER(GLvoid), [GLenum, GLenum], 'ARB_vertex_buffer_object')
# GL/glext.h:3981
glUnmapBufferARB = _link_function('glUnmapBufferARB', GLboolean, [GLenum], 'ARB_vertex_buffer_object')
# GL/glext.h:3982
glGetBufferParameterivARB = _link_function('glGetBufferParameterivARB', None, [GLenum, GLenum, POINTER(GLint)], 'ARB_vertex_buffer_object')
# GL/glext.h:3983
glGetBufferPointervARB = _link_function('glGetBufferPointervARB', None, [GLenum, GLenum, POINTER(POINTER(GLvoid))], 'ARB_vertex_buffer_object')
PFNGLBINDBUFFERARBPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:3985
PFNGLDELETEBUFFERSARBPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:3986
PFNGLGENBUFFERSARBPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:3987
PFNGLISBUFFERARBPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:3988
PFNGLBUFFERDATAARBPROC = CFUNCTYPE(None, GLenum, GLsizeiptrARB, POINTER(GLvoid), GLenum) # GL/glext.h:3989
PFNGLBUFFERSUBDATAARBPROC = CFUNCTYPE(None, GLenum, GLintptrARB, GLsizeiptrARB, POINTER(GLvoid)) # GL/glext.h:3990
PFNGLGETBUFFERSUBDATAARBPROC = CFUNCTYPE(None, GLenum, GLintptrARB, GLsizeiptrARB, POINTER(GLvoid)) # GL/glext.h:3991
PFNGLMAPBUFFERARBPROC = CFUNCTYPE(POINTER(GLvoid), GLenum, GLenum) # GL/glext.h:3992
PFNGLUNMAPBUFFERARBPROC = CFUNCTYPE(GLboolean, GLenum) # GL/glext.h:3993
PFNGLGETBUFFERPARAMETERIVARBPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:3994
PFNGLGETBUFFERPOINTERVARBPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:3995
# ARB_occlusion_query (GL/glext.h:3998)
GL_ARB_occlusion_query = 1 # GL/glext.h:3999
# GL/glext.h:4001
glGenQueriesARB = _link_function('glGenQueriesARB', None, [GLsizei, POINTER(GLuint)], 'ARB_occlusion_query')
# GL/glext.h:4002
glDeleteQueriesARB = _link_function('glDeleteQueriesARB', None, [GLsizei, POINTER(GLuint)], 'ARB_occlusion_query')
# GL/glext.h:4003
glIsQueryARB = _link_function('glIsQueryARB', GLboolean, [GLuint], 'ARB_occlusion_query')
# GL/glext.h:4004
glBeginQueryARB = _link_function('glBeginQueryARB', None, [GLenum, GLuint], 'ARB_occlusion_query')
# GL/glext.h:4005
glEndQueryARB = _link_function('glEndQueryARB', None, [GLenum], 'ARB_occlusion_query')
# GL/glext.h:4006
glGetQueryivARB = _link_function('glGetQueryivARB', None, [GLenum, GLenum, POINTER(GLint)], 'ARB_occlusion_query')
# GL/glext.h:4007
glGetQueryObjectivARB = _link_function('glGetQueryObjectivARB', None, [GLuint, GLenum, POINTER(GLint)], 'ARB_occlusion_query')
# GL/glext.h:4008
glGetQueryObjectuivARB = _link_function('glGetQueryObjectuivARB', None, [GLuint, GLenum, POINTER(GLuint)], 'ARB_occlusion_query')
PFNGLGENQUERIESARBPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:4010
PFNGLDELETEQUERIESARBPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:4011
PFNGLISQUERYARBPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:4012
PFNGLBEGINQUERYARBPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:4013
PFNGLENDQUERYARBPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:4014
PFNGLGETQUERYIVARBPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4015
PFNGLGETQUERYOBJECTIVARBPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:4016
PFNGLGETQUERYOBJECTUIVARBPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLuint)) # GL/glext.h:4017
# ARB_shader_objects (GL/glext.h:4020)
GL_ARB_shader_objects = 1 # GL/glext.h:4021
# GL/glext.h:4023
glDeleteObjectARB = _link_function('glDeleteObjectARB', None, [GLhandleARB], 'ARB_shader_objects')
# GL/glext.h:4024
glGetHandleARB = _link_function('glGetHandleARB', GLhandleARB, [GLenum], 'ARB_shader_objects')
# GL/glext.h:4025
glDetachObjectARB = _link_function('glDetachObjectARB', None, [GLhandleARB, GLhandleARB], 'ARB_shader_objects')
# GL/glext.h:4026
glCreateShaderObjectARB = _link_function('glCreateShaderObjectARB', GLhandleARB, [GLenum], 'ARB_shader_objects')
# GL/glext.h:4027
glShaderSourceARB = _link_function('glShaderSourceARB', None, [GLhandleARB, GLsizei, POINTER(POINTER(GLcharARB)), POINTER(GLint)], 'ARB_shader_objects')
# GL/glext.h:4028
glCompileShaderARB = _link_function('glCompileShaderARB', None, [GLhandleARB], 'ARB_shader_objects')
# GL/glext.h:4029
glCreateProgramObjectARB = _link_function('glCreateProgramObjectARB', GLhandleARB, [], 'ARB_shader_objects')
# GL/glext.h:4030
glAttachObjectARB = _link_function('glAttachObjectARB', None, [GLhandleARB, GLhandleARB], 'ARB_shader_objects')
# GL/glext.h:4031
glLinkProgramARB = _link_function('glLinkProgramARB', None, [GLhandleARB], 'ARB_shader_objects')
# GL/glext.h:4032
glUseProgramObjectARB = _link_function('glUseProgramObjectARB', None, [GLhandleARB], 'ARB_shader_objects')
# GL/glext.h:4033
glValidateProgramARB = _link_function('glValidateProgramARB', None, [GLhandleARB], 'ARB_shader_objects')
# GL/glext.h:4034
glUniform1fARB = _link_function('glUniform1fARB', None, [GLint, GLfloat], 'ARB_shader_objects')
# GL/glext.h:4035
glUniform2fARB = _link_function('glUniform2fARB', None, [GLint, GLfloat, GLfloat], 'ARB_shader_objects')
# GL/glext.h:4036
glUniform3fARB = _link_function('glUniform3fARB', None, [GLint, GLfloat, GLfloat, GLfloat], 'ARB_shader_objects')
# GL/glext.h:4037
glUniform4fARB = _link_function('glUniform4fARB', None, [GLint, GLfloat, GLfloat, GLfloat, GLfloat], 'ARB_shader_objects')
# GL/glext.h:4038
glUniform1iARB = _link_function('glUniform1iARB', None, [GLint, GLint], 'ARB_shader_objects')
# GL/glext.h:4039
glUniform2iARB = _link_function('glUniform2iARB', None, [GLint, GLint, GLint], 'ARB_shader_objects')
# GL/glext.h:4040
glUniform3iARB = _link_function('glUniform3iARB', None, [GLint, GLint, GLint, GLint], 'ARB_shader_objects')
# GL/glext.h:4041
glUniform4iARB = _link_function('glUniform4iARB', None, [GLint, GLint, GLint, GLint, GLint], 'ARB_shader_objects')
# GL/glext.h:4042
glUniform1fvARB = _link_function('glUniform1fvARB', None, [GLint, GLsizei, POINTER(GLfloat)], 'ARB_shader_objects')
# GL/glext.h:4043
glUniform2fvARB = _link_function('glUniform2fvARB', None, [GLint, GLsizei, POINTER(GLfloat)], 'ARB_shader_objects')
# GL/glext.h:4044
glUniform3fvARB = _link_function('glUniform3fvARB', None, [GLint, GLsizei, POINTER(GLfloat)], 'ARB_shader_objects')
# GL/glext.h:4045
glUniform4fvARB = _link_function('glUniform4fvARB', None, [GLint, GLsizei, POINTER(GLfloat)], 'ARB_shader_objects')
# GL/glext.h:4046
glUniform1ivARB = _link_function('glUniform1ivARB', None, [GLint, GLsizei, POINTER(GLint)], 'ARB_shader_objects')
# GL/glext.h:4047
glUniform2ivARB = _link_function('glUniform2ivARB', None, [GLint, GLsizei, POINTER(GLint)], 'ARB_shader_objects')
# GL/glext.h:4048
glUniform3ivARB = _link_function('glUniform3ivARB', None, [GLint, GLsizei, POINTER(GLint)], 'ARB_shader_objects')
# GL/glext.h:4049
glUniform4ivARB = _link_function('glUniform4ivARB', None, [GLint, GLsizei, POINTER(GLint)], 'ARB_shader_objects')
# GL/glext.h:4050
glUniformMatrix2fvARB = _link_function('glUniformMatrix2fvARB', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], 'ARB_shader_objects')
# GL/glext.h:4051
glUniformMatrix3fvARB = _link_function('glUniformMatrix3fvARB', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], 'ARB_shader_objects')
# GL/glext.h:4052
glUniformMatrix4fvARB = _link_function('glUniformMatrix4fvARB', None, [GLint, GLsizei, GLboolean, POINTER(GLfloat)], 'ARB_shader_objects')
# GL/glext.h:4053
glGetObjectParameterfvARB = _link_function('glGetObjectParameterfvARB', None, [GLhandleARB, GLenum, POINTER(GLfloat)], 'ARB_shader_objects')
# GL/glext.h:4054
glGetObjectParameterivARB = _link_function('glGetObjectParameterivARB', None, [GLhandleARB, GLenum, POINTER(GLint)], 'ARB_shader_objects')
# GL/glext.h:4055
glGetInfoLogARB = _link_function('glGetInfoLogARB', None, [GLhandleARB, GLsizei, POINTER(GLsizei), POINTER(GLcharARB)], 'ARB_shader_objects')
# GL/glext.h:4056
glGetAttachedObjectsARB = _link_function('glGetAttachedObjectsARB', None, [GLhandleARB, GLsizei, POINTER(GLsizei), POINTER(GLhandleARB)], 'ARB_shader_objects')
# GL/glext.h:4057
glGetUniformLocationARB = _link_function('glGetUniformLocationARB', GLint, [GLhandleARB, POINTER(GLcharARB)], 'ARB_shader_objects')
# GL/glext.h:4058
glGetActiveUniformARB = _link_function('glGetActiveUniformARB', None, [GLhandleARB, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLcharARB)], 'ARB_shader_objects')
# GL/glext.h:4059
glGetUniformfvARB = _link_function('glGetUniformfvARB', None, [GLhandleARB, GLint, POINTER(GLfloat)], 'ARB_shader_objects')
# GL/glext.h:4060
glGetUniformivARB = _link_function('glGetUniformivARB', None, [GLhandleARB, GLint, POINTER(GLint)], 'ARB_shader_objects')
# GL/glext.h:4061
glGetShaderSourceARB = _link_function('glGetShaderSourceARB', None, [GLhandleARB, GLsizei, POINTER(GLsizei), POINTER(GLcharARB)], 'ARB_shader_objects')
PFNGLDELETEOBJECTARBPROC = CFUNCTYPE(None, GLhandleARB) # GL/glext.h:4063
PFNGLGETHANDLEARBPROC = CFUNCTYPE(GLhandleARB, GLenum) # GL/glext.h:4064
PFNGLDETACHOBJECTARBPROC = CFUNCTYPE(None, GLhandleARB, GLhandleARB) # GL/glext.h:4065
PFNGLCREATESHADEROBJECTARBPROC = CFUNCTYPE(GLhandleARB, GLenum) # GL/glext.h:4066
PFNGLSHADERSOURCEARBPROC = CFUNCTYPE(None, GLhandleARB, GLsizei, POINTER(POINTER(GLcharARB)), POINTER(GLint)) # GL/glext.h:4067
PFNGLCOMPILESHADERARBPROC = CFUNCTYPE(None, GLhandleARB) # GL/glext.h:4068
PFNGLCREATEPROGRAMOBJECTARBPROC = CFUNCTYPE(GLhandleARB) # GL/glext.h:4069
PFNGLATTACHOBJECTARBPROC = CFUNCTYPE(None, GLhandleARB, GLhandleARB) # GL/glext.h:4070
PFNGLLINKPROGRAMARBPROC = CFUNCTYPE(None, GLhandleARB) # GL/glext.h:4071
PFNGLUSEPROGRAMOBJECTARBPROC = CFUNCTYPE(None, GLhandleARB) # GL/glext.h:4072
PFNGLVALIDATEPROGRAMARBPROC = CFUNCTYPE(None, GLhandleARB) # GL/glext.h:4073
PFNGLUNIFORM1FARBPROC = CFUNCTYPE(None, GLint, GLfloat) # GL/glext.h:4074
PFNGLUNIFORM2FARBPROC = CFUNCTYPE(None, GLint, GLfloat, GLfloat) # GL/glext.h:4075
PFNGLUNIFORM3FARBPROC = CFUNCTYPE(None, GLint, GLfloat, GLfloat, GLfloat) # GL/glext.h:4076
PFNGLUNIFORM4FARBPROC = CFUNCTYPE(None, GLint, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:4077
PFNGLUNIFORM1IARBPROC = CFUNCTYPE(None, GLint, GLint) # GL/glext.h:4078
PFNGLUNIFORM2IARBPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:4079
PFNGLUNIFORM3IARBPROC = CFUNCTYPE(None, GLint, GLint, GLint, GLint) # GL/glext.h:4080
PFNGLUNIFORM4IARBPROC = CFUNCTYPE(None, GLint, GLint, GLint, GLint, GLint) # GL/glext.h:4081
PFNGLUNIFORM1FVARBPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:4082
PFNGLUNIFORM2FVARBPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:4083
PFNGLUNIFORM3FVARBPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:4084
PFNGLUNIFORM4FVARBPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLfloat)) # GL/glext.h:4085
PFNGLUNIFORM1IVARBPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:4086
PFNGLUNIFORM2IVARBPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:4087
PFNGLUNIFORM3IVARBPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:4088
PFNGLUNIFORM4IVARBPROC = CFUNCTYPE(None, GLint, GLsizei, POINTER(GLint)) # GL/glext.h:4089
PFNGLUNIFORMMATRIX2FVARBPROC = CFUNCTYPE(None, GLint, GLsizei, GLboolean, POINTER(GLfloat)) # GL/glext.h:4090
PFNGLUNIFORMMATRIX3FVARBPROC = CFUNCTYPE(None, GLint, GLsizei, GLboolean, POINTER(GLfloat)) # GL/glext.h:4091
PFNGLUNIFORMMATRIX4FVARBPROC = CFUNCTYPE(None, GLint, GLsizei, GLboolean, POINTER(GLfloat)) # GL/glext.h:4092
PFNGLGETOBJECTPARAMETERFVARBPROC = CFUNCTYPE(None, GLhandleARB, GLenum, POINTER(GLfloat)) # GL/glext.h:4093
PFNGLGETOBJECTPARAMETERIVARBPROC = CFUNCTYPE(None, GLhandleARB, GLenum, POINTER(GLint)) # GL/glext.h:4094
PFNGLGETINFOLOGARBPROC = CFUNCTYPE(None, GLhandleARB, GLsizei, POINTER(GLsizei), POINTER(GLcharARB)) # GL/glext.h:4095
PFNGLGETATTACHEDOBJECTSARBPROC = CFUNCTYPE(None, GLhandleARB, GLsizei, POINTER(GLsizei), POINTER(GLhandleARB)) # GL/glext.h:4096
PFNGLGETUNIFORMLOCATIONARBPROC = CFUNCTYPE(GLint, GLhandleARB, POINTER(GLcharARB)) # GL/glext.h:4097
PFNGLGETACTIVEUNIFORMARBPROC = CFUNCTYPE(None, GLhandleARB, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLcharARB)) # GL/glext.h:4098
PFNGLGETUNIFORMFVARBPROC = CFUNCTYPE(None, GLhandleARB, GLint, POINTER(GLfloat)) # GL/glext.h:4099
PFNGLGETUNIFORMIVARBPROC = CFUNCTYPE(None, GLhandleARB, GLint, POINTER(GLint)) # GL/glext.h:4100
PFNGLGETSHADERSOURCEARBPROC = CFUNCTYPE(None, GLhandleARB, GLsizei, POINTER(GLsizei), POINTER(GLcharARB)) # GL/glext.h:4101
# ARB_vertex_shader (GL/glext.h:4104)
GL_ARB_vertex_shader = 1 # GL/glext.h:4105
# GL/glext.h:4107
glBindAttribLocationARB = _link_function('glBindAttribLocationARB', None, [GLhandleARB, GLuint, POINTER(GLcharARB)], 'ARB_vertex_shader')
# GL/glext.h:4108
glGetActiveAttribARB = _link_function('glGetActiveAttribARB', None, [GLhandleARB, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLcharARB)], 'ARB_vertex_shader')
# GL/glext.h:4109
glGetAttribLocationARB = _link_function('glGetAttribLocationARB', GLint, [GLhandleARB, POINTER(GLcharARB)], 'ARB_vertex_shader')
PFNGLBINDATTRIBLOCATIONARBPROC = CFUNCTYPE(None, GLhandleARB, GLuint, POINTER(GLcharARB)) # GL/glext.h:4111
PFNGLGETACTIVEATTRIBARBPROC = CFUNCTYPE(None, GLhandleARB, GLuint, GLsizei, POINTER(GLsizei), POINTER(GLint), POINTER(GLenum), POINTER(GLcharARB)) # GL/glext.h:4112
PFNGLGETATTRIBLOCATIONARBPROC = CFUNCTYPE(GLint, GLhandleARB, POINTER(GLcharARB)) # GL/glext.h:4113
# ARB_fragment_shader (GL/glext.h:4116)
GL_ARB_fragment_shader = 1 # GL/glext.h:4117
# ARB_shading_language_100 (GL/glext.h:4120)
GL_ARB_shading_language_100 = 1 # GL/glext.h:4121
# ARB_texture_non_power_of_two (GL/glext.h:4124)
GL_ARB_texture_non_power_of_two = 1 # GL/glext.h:4125
# ARB_point_sprite (GL/glext.h:4128)
GL_ARB_point_sprite = 1 # GL/glext.h:4129
# ARB_fragment_program_shadow (GL/glext.h:4132)
GL_ARB_fragment_program_shadow = 1 # GL/glext.h:4133
# ARB_draw_buffers (GL/glext.h:4136)
GL_ARB_draw_buffers = 1 # GL/glext.h:4137
# GL/glext.h:4139
glDrawBuffersARB = _link_function('glDrawBuffersARB', None, [GLsizei, POINTER(GLenum)], 'ARB_draw_buffers')
PFNGLDRAWBUFFERSARBPROC = CFUNCTYPE(None, GLsizei, POINTER(GLenum)) # GL/glext.h:4141
# ARB_texture_rectangle (GL/glext.h:4144)
GL_ARB_texture_rectangle = 1 # GL/glext.h:4145
# ARB_color_buffer_float (GL/glext.h:4148)
GL_ARB_color_buffer_float = 1 # GL/glext.h:4149
# GL/glext.h:4151
glClampColorARB = _link_function('glClampColorARB', None, [GLenum, GLenum], 'ARB_color_buffer_float')
PFNGLCLAMPCOLORARBPROC = CFUNCTYPE(None, GLenum, GLenum) # GL/glext.h:4153
# ARB_half_float_pixel (GL/glext.h:4156)
GL_ARB_half_float_pixel = 1 # GL/glext.h:4157
# ARB_texture_float (GL/glext.h:4160)
GL_ARB_texture_float = 1 # GL/glext.h:4161
# ARB_pixel_buffer_object (GL/glext.h:4164)
GL_ARB_pixel_buffer_object = 1 # GL/glext.h:4165
# EXT_abgr (GL/glext.h:4168)
GL_EXT_abgr = 1 # GL/glext.h:4169
# EXT_blend_color (GL/glext.h:4172)
GL_EXT_blend_color = 1 # GL/glext.h:4173
# GL/glext.h:4175
glBlendColorEXT = _link_function('glBlendColorEXT', None, [GLclampf, GLclampf, GLclampf, GLclampf], 'EXT_blend_color')
PFNGLBLENDCOLOREXTPROC = CFUNCTYPE(None, GLclampf, GLclampf, GLclampf, GLclampf) # GL/glext.h:4177
# EXT_polygon_offset (GL/glext.h:4180)
GL_EXT_polygon_offset = 1 # GL/glext.h:4181
# GL/glext.h:4183
glPolygonOffsetEXT = _link_function('glPolygonOffsetEXT', None, [GLfloat, GLfloat], 'EXT_polygon_offset')
PFNGLPOLYGONOFFSETEXTPROC = CFUNCTYPE(None, GLfloat, GLfloat) # GL/glext.h:4185
# EXT_texture (GL/glext.h:4188)
GL_EXT_texture = 1 # GL/glext.h:4189
# EXT_texture3D (GL/glext.h:4192)
GL_EXT_texture3D = 1 # GL/glext.h:4193
# GL/glext.h:4195
glTexImage3DEXT = _link_function('glTexImage3DEXT', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], 'EXT_texture3D')
# GL/glext.h:4196
glTexSubImage3DEXT = _link_function('glTexSubImage3DEXT', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'EXT_texture3D')
PFNGLTEXIMAGE3DEXTPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4198
PFNGLTEXSUBIMAGE3DEXTPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4199
# SGIS_texture_filter4 (GL/glext.h:4202)
GL_SGIS_texture_filter4 = 1 # GL/glext.h:4203
# GL/glext.h:4205
glGetTexFilterFuncSGIS = _link_function('glGetTexFilterFuncSGIS', None, [GLenum, GLenum, POINTER(GLfloat)], 'SGIS_texture_filter4')
# GL/glext.h:4206
glTexFilterFuncSGIS = _link_function('glTexFilterFuncSGIS', None, [GLenum, GLenum, GLsizei, POINTER(GLfloat)], 'SGIS_texture_filter4')
PFNGLGETTEXFILTERFUNCSGISPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4208
PFNGLTEXFILTERFUNCSGISPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, POINTER(GLfloat)) # GL/glext.h:4209
# EXT_subtexture (GL/glext.h:4212)
GL_EXT_subtexture = 1 # GL/glext.h:4213
# GL/glext.h:4215
glTexSubImage1DEXT = _link_function('glTexSubImage1DEXT', None, [GLenum, GLint, GLint, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'EXT_subtexture')
# GL/glext.h:4216
glTexSubImage2DEXT = _link_function('glTexSubImage2DEXT', None, [GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'EXT_subtexture')
PFNGLTEXSUBIMAGE1DEXTPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4218
PFNGLTEXSUBIMAGE2DEXTPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4219
# EXT_copy_texture (GL/glext.h:4222)
GL_EXT_copy_texture = 1 # GL/glext.h:4223
# GL/glext.h:4225
glCopyTexImage1DEXT = _link_function('glCopyTexImage1DEXT', None, [GLenum, GLint, GLenum, GLint, GLint, GLsizei, GLint], 'EXT_copy_texture')
# GL/glext.h:4226
glCopyTexImage2DEXT = _link_function('glCopyTexImage2DEXT', None, [GLenum, GLint, GLenum, GLint, GLint, GLsizei, GLsizei, GLint], 'EXT_copy_texture')
# GL/glext.h:4227
glCopyTexSubImage1DEXT = _link_function('glCopyTexSubImage1DEXT', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei], 'EXT_copy_texture')
# GL/glext.h:4228
glCopyTexSubImage2DEXT = _link_function('glCopyTexSubImage2DEXT', None, [GLenum, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], 'EXT_copy_texture')
# GL/glext.h:4229
glCopyTexSubImage3DEXT = _link_function('glCopyTexSubImage3DEXT', None, [GLenum, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], 'EXT_copy_texture')
PFNGLCOPYTEXIMAGE1DEXTPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLint, GLint, GLsizei, GLint) # GL/glext.h:4231
PFNGLCOPYTEXIMAGE2DEXTPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLint, GLint, GLsizei, GLsizei, GLint) # GL/glext.h:4232
PFNGLCOPYTEXSUBIMAGE1DEXTPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLsizei) # GL/glext.h:4233
PFNGLCOPYTEXSUBIMAGE2DEXTPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei) # GL/glext.h:4234
PFNGLCOPYTEXSUBIMAGE3DEXTPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei) # GL/glext.h:4235
# EXT_histogram (GL/glext.h:4238)
GL_EXT_histogram = 1 # GL/glext.h:4239
# GL/glext.h:4241
glGetHistogramEXT = _link_function('glGetHistogramEXT', None, [GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)], 'EXT_histogram')
# GL/glext.h:4242
glGetHistogramParameterfvEXT = _link_function('glGetHistogramParameterfvEXT', None, [GLenum, GLenum, POINTER(GLfloat)], 'EXT_histogram')
# GL/glext.h:4243
glGetHistogramParameterivEXT = _link_function('glGetHistogramParameterivEXT', None, [GLenum, GLenum, POINTER(GLint)], 'EXT_histogram')
# GL/glext.h:4244
glGetMinmaxEXT = _link_function('glGetMinmaxEXT', None, [GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)], 'EXT_histogram')
# GL/glext.h:4245
glGetMinmaxParameterfvEXT = _link_function('glGetMinmaxParameterfvEXT', None, [GLenum, GLenum, POINTER(GLfloat)], 'EXT_histogram')
# GL/glext.h:4246
glGetMinmaxParameterivEXT = _link_function('glGetMinmaxParameterivEXT', None, [GLenum, GLenum, POINTER(GLint)], 'EXT_histogram')
# GL/glext.h:4247
glHistogramEXT = _link_function('glHistogramEXT', None, [GLenum, GLsizei, GLenum, GLboolean], 'EXT_histogram')
# GL/glext.h:4248
glMinmaxEXT = _link_function('glMinmaxEXT', None, [GLenum, GLenum, GLboolean], 'EXT_histogram')
# GL/glext.h:4249
glResetHistogramEXT = _link_function('glResetHistogramEXT', None, [GLenum], 'EXT_histogram')
# GL/glext.h:4250
glResetMinmaxEXT = _link_function('glResetMinmaxEXT', None, [GLenum], 'EXT_histogram')
PFNGLGETHISTOGRAMEXTPROC = CFUNCTYPE(None, GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4252
PFNGLGETHISTOGRAMPARAMETERFVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4253
PFNGLGETHISTOGRAMPARAMETERIVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4254
PFNGLGETMINMAXEXTPROC = CFUNCTYPE(None, GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4255
PFNGLGETMINMAXPARAMETERFVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4256
PFNGLGETMINMAXPARAMETERIVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4257
PFNGLHISTOGRAMEXTPROC = CFUNCTYPE(None, GLenum, GLsizei, GLenum, GLboolean) # GL/glext.h:4258
PFNGLMINMAXEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLboolean) # GL/glext.h:4259
PFNGLRESETHISTOGRAMEXTPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:4260
PFNGLRESETMINMAXEXTPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:4261
# EXT_convolution (GL/glext.h:4264)
GL_EXT_convolution = 1 # GL/glext.h:4265
# GL/glext.h:4267
glConvolutionFilter1DEXT = _link_function('glConvolutionFilter1DEXT', None, [GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'EXT_convolution')
# GL/glext.h:4268
glConvolutionFilter2DEXT = _link_function('glConvolutionFilter2DEXT', None, [GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'EXT_convolution')
# GL/glext.h:4269
glConvolutionParameterfEXT = _link_function('glConvolutionParameterfEXT', None, [GLenum, GLenum, GLfloat], 'EXT_convolution')
# GL/glext.h:4270
glConvolutionParameterfvEXT = _link_function('glConvolutionParameterfvEXT', None, [GLenum, GLenum, POINTER(GLfloat)], 'EXT_convolution')
# GL/glext.h:4271
glConvolutionParameteriEXT = _link_function('glConvolutionParameteriEXT', None, [GLenum, GLenum, GLint], 'EXT_convolution')
# GL/glext.h:4272
glConvolutionParameterivEXT = _link_function('glConvolutionParameterivEXT', None, [GLenum, GLenum, POINTER(GLint)], 'EXT_convolution')
# GL/glext.h:4273
glCopyConvolutionFilter1DEXT = _link_function('glCopyConvolutionFilter1DEXT', None, [GLenum, GLenum, GLint, GLint, GLsizei], 'EXT_convolution')
# GL/glext.h:4274
glCopyConvolutionFilter2DEXT = _link_function('glCopyConvolutionFilter2DEXT', None, [GLenum, GLenum, GLint, GLint, GLsizei, GLsizei], 'EXT_convolution')
# GL/glext.h:4275
glGetConvolutionFilterEXT = _link_function('glGetConvolutionFilterEXT', None, [GLenum, GLenum, GLenum, POINTER(GLvoid)], 'EXT_convolution')
# GL/glext.h:4276
glGetConvolutionParameterfvEXT = _link_function('glGetConvolutionParameterfvEXT', None, [GLenum, GLenum, POINTER(GLfloat)], 'EXT_convolution')
# GL/glext.h:4277
glGetConvolutionParameterivEXT = _link_function('glGetConvolutionParameterivEXT', None, [GLenum, GLenum, POINTER(GLint)], 'EXT_convolution')
# GL/glext.h:4278
glGetSeparableFilterEXT = _link_function('glGetSeparableFilterEXT', None, [GLenum, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid), POINTER(GLvoid)], 'EXT_convolution')
# GL/glext.h:4279
glSeparableFilter2DEXT = _link_function('glSeparableFilter2DEXT', None, [GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid)], 'EXT_convolution')
PFNGLCONVOLUTIONFILTER1DEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4281
PFNGLCONVOLUTIONFILTER2DEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4282
PFNGLCONVOLUTIONPARAMETERFEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLfloat) # GL/glext.h:4283
# ---------------------------------------------------------------------------
# OpenGL extension bindings (EXT_convolution tail .. EXT_pixel_transform).
#
# NOTE(review): this section appears to be machine-generated from GL/glext.h —
# every line cites its originating header line (e.g. "# GL/glext.h:4284").
# Prefer regenerating from the header over hand-editing.
#
# Each extension follows the same three-part pattern emitted by the generator:
#   1. GL_<vendor>_<extension> = 1            -- presence flag mirroring the
#                                                header's #define guard
#   2. gl<Name><Suffix> = _link_function(...) -- runtime-resolved entry point:
#                                                (name, restype, argtypes,
#                                                 required extension string)
#   3. PFN...PROC = CFUNCTYPE(...)            -- ctypes function-pointer type
#                                                mirroring the header typedef
# ---------------------------------------------------------------------------
PFNGLCONVOLUTIONPARAMETERFVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4284
PFNGLCONVOLUTIONPARAMETERIEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLint) # GL/glext.h:4285
PFNGLCONVOLUTIONPARAMETERIVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4286
PFNGLCOPYCONVOLUTIONFILTER1DEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLint, GLsizei) # GL/glext.h:4287
PFNGLCOPYCONVOLUTIONFILTER2DEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLint, GLsizei, GLsizei) # GL/glext.h:4288
PFNGLGETCONVOLUTIONFILTEREXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4289
PFNGLGETCONVOLUTIONPARAMETERFVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4290
PFNGLGETCONVOLUTIONPARAMETERIVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4291
PFNGLGETSEPARABLEFILTEREXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid), POINTER(GLvoid)) # GL/glext.h:4292
PFNGLSEPARABLEFILTER2DEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid)) # GL/glext.h:4293
# EXT_color_matrix (GL/glext.h:4296)
GL_EXT_color_matrix = 1 # GL/glext.h:4297
# SGI_color_table (GL/glext.h:4300)
GL_SGI_color_table = 1 # GL/glext.h:4301
# GL/glext.h:4303
glColorTableSGI = _link_function('glColorTableSGI', None, [GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'SGI_color_table')
# GL/glext.h:4304
glColorTableParameterfvSGI = _link_function('glColorTableParameterfvSGI', None, [GLenum, GLenum, POINTER(GLfloat)], 'SGI_color_table')
# GL/glext.h:4305
glColorTableParameterivSGI = _link_function('glColorTableParameterivSGI', None, [GLenum, GLenum, POINTER(GLint)], 'SGI_color_table')
# GL/glext.h:4306
glCopyColorTableSGI = _link_function('glCopyColorTableSGI', None, [GLenum, GLenum, GLint, GLint, GLsizei], 'SGI_color_table')
# GL/glext.h:4307
glGetColorTableSGI = _link_function('glGetColorTableSGI', None, [GLenum, GLenum, GLenum, POINTER(GLvoid)], 'SGI_color_table')
# GL/glext.h:4308
glGetColorTableParameterfvSGI = _link_function('glGetColorTableParameterfvSGI', None, [GLenum, GLenum, POINTER(GLfloat)], 'SGI_color_table')
# GL/glext.h:4309
glGetColorTableParameterivSGI = _link_function('glGetColorTableParameterivSGI', None, [GLenum, GLenum, POINTER(GLint)], 'SGI_color_table')
PFNGLCOLORTABLESGIPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4311
PFNGLCOLORTABLEPARAMETERFVSGIPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4312
PFNGLCOLORTABLEPARAMETERIVSGIPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4313
PFNGLCOPYCOLORTABLESGIPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLint, GLsizei) # GL/glext.h:4314
PFNGLGETCOLORTABLESGIPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4315
PFNGLGETCOLORTABLEPARAMETERFVSGIPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4316
PFNGLGETCOLORTABLEPARAMETERIVSGIPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4317
# SGIX_pixel_texture (GL/glext.h:4320)
GL_SGIX_pixel_texture = 1 # GL/glext.h:4321
# GL/glext.h:4323
glPixelTexGenSGIX = _link_function('glPixelTexGenSGIX', None, [GLenum], 'SGIX_pixel_texture')
PFNGLPIXELTEXGENSGIXPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:4325
# SGIS_pixel_texture (GL/glext.h:4328)
GL_SGIS_pixel_texture = 1 # GL/glext.h:4329
# GL/glext.h:4331
glPixelTexGenParameteriSGIS = _link_function('glPixelTexGenParameteriSGIS', None, [GLenum, GLint], 'SGIS_pixel_texture')
# GL/glext.h:4332
glPixelTexGenParameterivSGIS = _link_function('glPixelTexGenParameterivSGIS', None, [GLenum, POINTER(GLint)], 'SGIS_pixel_texture')
# GL/glext.h:4333
glPixelTexGenParameterfSGIS = _link_function('glPixelTexGenParameterfSGIS', None, [GLenum, GLfloat], 'SGIS_pixel_texture')
# GL/glext.h:4334
glPixelTexGenParameterfvSGIS = _link_function('glPixelTexGenParameterfvSGIS', None, [GLenum, POINTER(GLfloat)], 'SGIS_pixel_texture')
# GL/glext.h:4335
glGetPixelTexGenParameterivSGIS = _link_function('glGetPixelTexGenParameterivSGIS', None, [GLenum, POINTER(GLint)], 'SGIS_pixel_texture')
# GL/glext.h:4336
glGetPixelTexGenParameterfvSGIS = _link_function('glGetPixelTexGenParameterfvSGIS', None, [GLenum, POINTER(GLfloat)], 'SGIS_pixel_texture')
PFNGLPIXELTEXGENPARAMETERISGISPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:4338
PFNGLPIXELTEXGENPARAMETERIVSGISPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:4339
PFNGLPIXELTEXGENPARAMETERFSGISPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:4340
PFNGLPIXELTEXGENPARAMETERFVSGISPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:4341
PFNGLGETPIXELTEXGENPARAMETERIVSGISPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:4342
PFNGLGETPIXELTEXGENPARAMETERFVSGISPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:4343
# SGIS_texture4D (GL/glext.h:4346)
GL_SGIS_texture4D = 1 # GL/glext.h:4347
# GL/glext.h:4349
glTexImage4DSGIS = _link_function('glTexImage4DSGIS', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], 'SGIS_texture4D')
# GL/glext.h:4350
glTexSubImage4DSGIS = _link_function('glTexSubImage4DSGIS', None, [GLenum, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'SGIS_texture4D')
PFNGLTEXIMAGE4DSGISPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4352
PFNGLTEXSUBIMAGE4DSGISPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4353
# SGI_texture_color_table (GL/glext.h:4356)
GL_SGI_texture_color_table = 1 # GL/glext.h:4357
# EXT_cmyka (GL/glext.h:4360)
GL_EXT_cmyka = 1 # GL/glext.h:4361
# EXT_texture_object (GL/glext.h:4364)
GL_EXT_texture_object = 1 # GL/glext.h:4365
# GL/glext.h:4367
glAreTexturesResidentEXT = _link_function('glAreTexturesResidentEXT', GLboolean, [GLsizei, POINTER(GLuint), POINTER(GLboolean)], 'EXT_texture_object')
# GL/glext.h:4368
glBindTextureEXT = _link_function('glBindTextureEXT', None, [GLenum, GLuint], 'EXT_texture_object')
# GL/glext.h:4369
glDeleteTexturesEXT = _link_function('glDeleteTexturesEXT', None, [GLsizei, POINTER(GLuint)], 'EXT_texture_object')
# GL/glext.h:4370
glGenTexturesEXT = _link_function('glGenTexturesEXT', None, [GLsizei, POINTER(GLuint)], 'EXT_texture_object')
# GL/glext.h:4371
glIsTextureEXT = _link_function('glIsTextureEXT', GLboolean, [GLuint], 'EXT_texture_object')
# GL/glext.h:4372
glPrioritizeTexturesEXT = _link_function('glPrioritizeTexturesEXT', None, [GLsizei, POINTER(GLuint), POINTER(GLclampf)], 'EXT_texture_object')
PFNGLARETEXTURESRESIDENTEXTPROC = CFUNCTYPE(GLboolean, GLsizei, POINTER(GLuint), POINTER(GLboolean)) # GL/glext.h:4374
PFNGLBINDTEXTUREEXTPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:4375
PFNGLDELETETEXTURESEXTPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:4376
PFNGLGENTEXTURESEXTPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:4377
PFNGLISTEXTUREEXTPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:4378
PFNGLPRIORITIZETEXTURESEXTPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint), POINTER(GLclampf)) # GL/glext.h:4379
# SGIS_detail_texture (GL/glext.h:4382)
GL_SGIS_detail_texture = 1 # GL/glext.h:4383
# GL/glext.h:4385
glDetailTexFuncSGIS = _link_function('glDetailTexFuncSGIS', None, [GLenum, GLsizei, POINTER(GLfloat)], 'SGIS_detail_texture')
# GL/glext.h:4386
glGetDetailTexFuncSGIS = _link_function('glGetDetailTexFuncSGIS', None, [GLenum, POINTER(GLfloat)], 'SGIS_detail_texture')
PFNGLDETAILTEXFUNCSGISPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(GLfloat)) # GL/glext.h:4388
PFNGLGETDETAILTEXFUNCSGISPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:4389
# SGIS_sharpen_texture (GL/glext.h:4392)
GL_SGIS_sharpen_texture = 1 # GL/glext.h:4393
# GL/glext.h:4395
glSharpenTexFuncSGIS = _link_function('glSharpenTexFuncSGIS', None, [GLenum, GLsizei, POINTER(GLfloat)], 'SGIS_sharpen_texture')
# GL/glext.h:4396
glGetSharpenTexFuncSGIS = _link_function('glGetSharpenTexFuncSGIS', None, [GLenum, POINTER(GLfloat)], 'SGIS_sharpen_texture')
PFNGLSHARPENTEXFUNCSGISPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(GLfloat)) # GL/glext.h:4398
PFNGLGETSHARPENTEXFUNCSGISPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:4399
# EXT_packed_pixels (GL/glext.h:4402)
GL_EXT_packed_pixels = 1 # GL/glext.h:4403
# SGIS_texture_lod (GL/glext.h:4406)
GL_SGIS_texture_lod = 1 # GL/glext.h:4407
# SGIS_multisample (GL/glext.h:4410)
GL_SGIS_multisample = 1 # GL/glext.h:4411
# GL/glext.h:4413
glSampleMaskSGIS = _link_function('glSampleMaskSGIS', None, [GLclampf, GLboolean], 'SGIS_multisample')
# GL/glext.h:4414
glSamplePatternSGIS = _link_function('glSamplePatternSGIS', None, [GLenum], 'SGIS_multisample')
PFNGLSAMPLEMASKSGISPROC = CFUNCTYPE(None, GLclampf, GLboolean) # GL/glext.h:4416
PFNGLSAMPLEPATTERNSGISPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:4417
# EXT_rescale_normal (GL/glext.h:4420)
GL_EXT_rescale_normal = 1 # GL/glext.h:4421
# EXT_vertex_array (GL/glext.h:4424)
GL_EXT_vertex_array = 1 # GL/glext.h:4425
# GL/glext.h:4427
glArrayElementEXT = _link_function('glArrayElementEXT', None, [GLint], 'EXT_vertex_array')
# GL/glext.h:4428
glColorPointerEXT = _link_function('glColorPointerEXT', None, [GLint, GLenum, GLsizei, GLsizei, POINTER(GLvoid)], 'EXT_vertex_array')
# GL/glext.h:4429
glDrawArraysEXT = _link_function('glDrawArraysEXT', None, [GLenum, GLint, GLsizei], 'EXT_vertex_array')
# GL/glext.h:4430
glEdgeFlagPointerEXT = _link_function('glEdgeFlagPointerEXT', None, [GLsizei, GLsizei, POINTER(GLboolean)], 'EXT_vertex_array')
# GL/glext.h:4431
glGetPointervEXT = _link_function('glGetPointervEXT', None, [GLenum, POINTER(POINTER(GLvoid))], 'EXT_vertex_array')
# GL/glext.h:4432
glIndexPointerEXT = _link_function('glIndexPointerEXT', None, [GLenum, GLsizei, GLsizei, POINTER(GLvoid)], 'EXT_vertex_array')
# GL/glext.h:4433
glNormalPointerEXT = _link_function('glNormalPointerEXT', None, [GLenum, GLsizei, GLsizei, POINTER(GLvoid)], 'EXT_vertex_array')
# GL/glext.h:4434
glTexCoordPointerEXT = _link_function('glTexCoordPointerEXT', None, [GLint, GLenum, GLsizei, GLsizei, POINTER(GLvoid)], 'EXT_vertex_array')
# GL/glext.h:4435
glVertexPointerEXT = _link_function('glVertexPointerEXT', None, [GLint, GLenum, GLsizei, GLsizei, POINTER(GLvoid)], 'EXT_vertex_array')
PFNGLARRAYELEMENTEXTPROC = CFUNCTYPE(None, GLint) # GL/glext.h:4437
PFNGLCOLORPOINTEREXTPROC = CFUNCTYPE(None, GLint, GLenum, GLsizei, GLsizei, POINTER(GLvoid)) # GL/glext.h:4438
PFNGLDRAWARRAYSEXTPROC = CFUNCTYPE(None, GLenum, GLint, GLsizei) # GL/glext.h:4439
PFNGLEDGEFLAGPOINTEREXTPROC = CFUNCTYPE(None, GLsizei, GLsizei, POINTER(GLboolean)) # GL/glext.h:4440
PFNGLGETPOINTERVEXTPROC = CFUNCTYPE(None, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:4441
PFNGLINDEXPOINTEREXTPROC = CFUNCTYPE(None, GLenum, GLsizei, GLsizei, POINTER(GLvoid)) # GL/glext.h:4442
PFNGLNORMALPOINTEREXTPROC = CFUNCTYPE(None, GLenum, GLsizei, GLsizei, POINTER(GLvoid)) # GL/glext.h:4443
PFNGLTEXCOORDPOINTEREXTPROC = CFUNCTYPE(None, GLint, GLenum, GLsizei, GLsizei, POINTER(GLvoid)) # GL/glext.h:4444
PFNGLVERTEXPOINTEREXTPROC = CFUNCTYPE(None, GLint, GLenum, GLsizei, GLsizei, POINTER(GLvoid)) # GL/glext.h:4445
# EXT_misc_attribute (GL/glext.h:4448)
GL_EXT_misc_attribute = 1 # GL/glext.h:4449
# SGIS_generate_mipmap (GL/glext.h:4452)
GL_SGIS_generate_mipmap = 1 # GL/glext.h:4453
# SGIX_clipmap (GL/glext.h:4456)
GL_SGIX_clipmap = 1 # GL/glext.h:4457
# SGIX_shadow (GL/glext.h:4460)
GL_SGIX_shadow = 1 # GL/glext.h:4461
# SGIS_texture_edge_clamp (GL/glext.h:4464)
GL_SGIS_texture_edge_clamp = 1 # GL/glext.h:4465
# SGIS_texture_border_clamp (GL/glext.h:4468)
GL_SGIS_texture_border_clamp = 1 # GL/glext.h:4469
# EXT_blend_minmax (GL/glext.h:4472)
GL_EXT_blend_minmax = 1 # GL/glext.h:4473
# GL/glext.h:4475
glBlendEquationEXT = _link_function('glBlendEquationEXT', None, [GLenum], 'EXT_blend_minmax')
PFNGLBLENDEQUATIONEXTPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:4477
# EXT_blend_subtract (GL/glext.h:4480)
GL_EXT_blend_subtract = 1 # GL/glext.h:4481
# EXT_blend_logic_op (GL/glext.h:4484)
GL_EXT_blend_logic_op = 1 # GL/glext.h:4485
# SGIX_interlace (GL/glext.h:4488)
GL_SGIX_interlace = 1 # GL/glext.h:4489
# SGIX_pixel_tiles (GL/glext.h:4492)
GL_SGIX_pixel_tiles = 1 # GL/glext.h:4493
# SGIX_texture_select (GL/glext.h:4496)
GL_SGIX_texture_select = 1 # GL/glext.h:4497
# SGIX_sprite (GL/glext.h:4500)
GL_SGIX_sprite = 1 # GL/glext.h:4501
# GL/glext.h:4503
glSpriteParameterfSGIX = _link_function('glSpriteParameterfSGIX', None, [GLenum, GLfloat], 'SGIX_sprite')
# GL/glext.h:4504
glSpriteParameterfvSGIX = _link_function('glSpriteParameterfvSGIX', None, [GLenum, POINTER(GLfloat)], 'SGIX_sprite')
# GL/glext.h:4505
glSpriteParameteriSGIX = _link_function('glSpriteParameteriSGIX', None, [GLenum, GLint], 'SGIX_sprite')
# GL/glext.h:4506
glSpriteParameterivSGIX = _link_function('glSpriteParameterivSGIX', None, [GLenum, POINTER(GLint)], 'SGIX_sprite')
PFNGLSPRITEPARAMETERFSGIXPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:4508
PFNGLSPRITEPARAMETERFVSGIXPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:4509
PFNGLSPRITEPARAMETERISGIXPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:4510
PFNGLSPRITEPARAMETERIVSGIXPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:4511
# SGIX_texture_multi_buffer (GL/glext.h:4514)
GL_SGIX_texture_multi_buffer = 1 # GL/glext.h:4515
# EXT_point_parameters (GL/glext.h:4518)
GL_EXT_point_parameters = 1 # GL/glext.h:4519
# GL/glext.h:4521
glPointParameterfEXT = _link_function('glPointParameterfEXT', None, [GLenum, GLfloat], 'EXT_point_parameters')
# GL/glext.h:4522
glPointParameterfvEXT = _link_function('glPointParameterfvEXT', None, [GLenum, POINTER(GLfloat)], 'EXT_point_parameters')
PFNGLPOINTPARAMETERFEXTPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:4524
PFNGLPOINTPARAMETERFVEXTPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:4525
# SGIS_point_parameters (GL/glext.h:4528)
GL_SGIS_point_parameters = 1 # GL/glext.h:4529
# GL/glext.h:4531
glPointParameterfSGIS = _link_function('glPointParameterfSGIS', None, [GLenum, GLfloat], 'SGIS_point_parameters')
# GL/glext.h:4532
glPointParameterfvSGIS = _link_function('glPointParameterfvSGIS', None, [GLenum, POINTER(GLfloat)], 'SGIS_point_parameters')
PFNGLPOINTPARAMETERFSGISPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:4534
PFNGLPOINTPARAMETERFVSGISPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:4535
# SGIX_instruments (GL/glext.h:4538)
GL_SGIX_instruments = 1 # GL/glext.h:4539
# GL/glext.h:4541
glGetInstrumentsSGIX = _link_function('glGetInstrumentsSGIX', GLint, [], 'SGIX_instruments')
# GL/glext.h:4542
glInstrumentsBufferSGIX = _link_function('glInstrumentsBufferSGIX', None, [GLsizei, POINTER(GLint)], 'SGIX_instruments')
# GL/glext.h:4543
glPollInstrumentsSGIX = _link_function('glPollInstrumentsSGIX', GLint, [POINTER(GLint)], 'SGIX_instruments')
# GL/glext.h:4544
glReadInstrumentsSGIX = _link_function('glReadInstrumentsSGIX', None, [GLint], 'SGIX_instruments')
# GL/glext.h:4545
glStartInstrumentsSGIX = _link_function('glStartInstrumentsSGIX', None, [], 'SGIX_instruments')
# GL/glext.h:4546
glStopInstrumentsSGIX = _link_function('glStopInstrumentsSGIX', None, [GLint], 'SGIX_instruments')
PFNGLGETINSTRUMENTSSGIXPROC = CFUNCTYPE(GLint) # GL/glext.h:4548
PFNGLINSTRUMENTSBUFFERSGIXPROC = CFUNCTYPE(None, GLsizei, POINTER(GLint)) # GL/glext.h:4549
PFNGLPOLLINSTRUMENTSSGIXPROC = CFUNCTYPE(GLint, POINTER(GLint)) # GL/glext.h:4550
PFNGLREADINSTRUMENTSSGIXPROC = CFUNCTYPE(None, GLint) # GL/glext.h:4551
PFNGLSTARTINSTRUMENTSSGIXPROC = CFUNCTYPE(None) # GL/glext.h:4552
PFNGLSTOPINSTRUMENTSSGIXPROC = CFUNCTYPE(None, GLint) # GL/glext.h:4553
# SGIX_texture_scale_bias (GL/glext.h:4556)
GL_SGIX_texture_scale_bias = 1 # GL/glext.h:4557
# SGIX_framezoom (GL/glext.h:4560)
GL_SGIX_framezoom = 1 # GL/glext.h:4561
# GL/glext.h:4563
glFrameZoomSGIX = _link_function('glFrameZoomSGIX', None, [GLint], 'SGIX_framezoom')
PFNGLFRAMEZOOMSGIXPROC = CFUNCTYPE(None, GLint) # GL/glext.h:4565
# SGIX_tag_sample_buffer (GL/glext.h:4568)
GL_SGIX_tag_sample_buffer = 1 # GL/glext.h:4569
# GL/glext.h:4571
glTagSampleBufferSGIX = _link_function('glTagSampleBufferSGIX', None, [], 'SGIX_tag_sample_buffer')
PFNGLTAGSAMPLEBUFFERSGIXPROC = CFUNCTYPE(None) # GL/glext.h:4573
# SGIX_polynomial_ffd (GL/glext.h:4576)
GL_SGIX_polynomial_ffd = 1 # GL/glext.h:4577
# GL/glext.h:4579
glDeformationMap3dSGIX = _link_function('glDeformationMap3dSGIX', None, [GLenum, GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble, GLint, GLint, POINTER(GLdouble)], 'SGIX_polynomial_ffd')
# GL/glext.h:4580
glDeformationMap3fSGIX = _link_function('glDeformationMap3fSGIX', None, [GLenum, GLfloat, GLfloat, GLint, GLint, GLfloat, GLfloat, GLint, GLint, GLfloat, GLfloat, GLint, GLint, POINTER(GLfloat)], 'SGIX_polynomial_ffd')
# NOTE(review): the generator emits base GL types lazily, at the point of
# first use — GLbitfield (from GL/gl.h) is defined here because the next
# binding, glDeformSGIX, is the first in this stretch to take one.
GLbitfield = c_uint # /usr/include/GL/gl.h:55
# GL/glext.h:4581
glDeformSGIX = _link_function('glDeformSGIX', None, [GLbitfield], 'SGIX_polynomial_ffd')
# GL/glext.h:4582
glLoadIdentityDeformationMapSGIX = _link_function('glLoadIdentityDeformationMapSGIX', None, [GLbitfield], 'SGIX_polynomial_ffd')
PFNGLDEFORMATIONMAP3DSGIXPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble, GLint, GLint, POINTER(GLdouble)) # GL/glext.h:4584
PFNGLDEFORMATIONMAP3FSGIXPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat, GLint, GLint, GLfloat, GLfloat, GLint, GLint, GLfloat, GLfloat, GLint, GLint, POINTER(GLfloat)) # GL/glext.h:4585
PFNGLDEFORMSGIXPROC = CFUNCTYPE(None, GLbitfield) # GL/glext.h:4586
PFNGLLOADIDENTITYDEFORMATIONMAPSGIXPROC = CFUNCTYPE(None, GLbitfield) # GL/glext.h:4587
# SGIX_reference_plane (GL/glext.h:4590)
GL_SGIX_reference_plane = 1 # GL/glext.h:4591
# GL/glext.h:4593
glReferencePlaneSGIX = _link_function('glReferencePlaneSGIX', None, [POINTER(GLdouble)], 'SGIX_reference_plane')
PFNGLREFERENCEPLANESGIXPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:4595
# SGIX_flush_raster (GL/glext.h:4598)
GL_SGIX_flush_raster = 1 # GL/glext.h:4599
# GL/glext.h:4601
glFlushRasterSGIX = _link_function('glFlushRasterSGIX', None, [], 'SGIX_flush_raster')
PFNGLFLUSHRASTERSGIXPROC = CFUNCTYPE(None) # GL/glext.h:4603
# SGIX_depth_texture (GL/glext.h:4606)
GL_SGIX_depth_texture = 1 # GL/glext.h:4607
# SGIS_fog_function (GL/glext.h:4610)
GL_SGIS_fog_function = 1 # GL/glext.h:4611
# GL/glext.h:4613
glFogFuncSGIS = _link_function('glFogFuncSGIS', None, [GLsizei, POINTER(GLfloat)], 'SGIS_fog_function')
# GL/glext.h:4614
glGetFogFuncSGIS = _link_function('glGetFogFuncSGIS', None, [POINTER(GLfloat)], 'SGIS_fog_function')
PFNGLFOGFUNCSGISPROC = CFUNCTYPE(None, GLsizei, POINTER(GLfloat)) # GL/glext.h:4616
PFNGLGETFOGFUNCSGISPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:4617
# SGIX_fog_offset (GL/glext.h:4620)
GL_SGIX_fog_offset = 1 # GL/glext.h:4621
# HP_image_transform (GL/glext.h:4624)
GL_HP_image_transform = 1 # GL/glext.h:4625
# GL/glext.h:4627
glImageTransformParameteriHP = _link_function('glImageTransformParameteriHP', None, [GLenum, GLenum, GLint], 'HP_image_transform')
# GL/glext.h:4628
glImageTransformParameterfHP = _link_function('glImageTransformParameterfHP', None, [GLenum, GLenum, GLfloat], 'HP_image_transform')
# GL/glext.h:4629
glImageTransformParameterivHP = _link_function('glImageTransformParameterivHP', None, [GLenum, GLenum, POINTER(GLint)], 'HP_image_transform')
# GL/glext.h:4630
glImageTransformParameterfvHP = _link_function('glImageTransformParameterfvHP', None, [GLenum, GLenum, POINTER(GLfloat)], 'HP_image_transform')
# GL/glext.h:4631
glGetImageTransformParameterivHP = _link_function('glGetImageTransformParameterivHP', None, [GLenum, GLenum, POINTER(GLint)], 'HP_image_transform')
# GL/glext.h:4632
glGetImageTransformParameterfvHP = _link_function('glGetImageTransformParameterfvHP', None, [GLenum, GLenum, POINTER(GLfloat)], 'HP_image_transform')
PFNGLIMAGETRANSFORMPARAMETERIHPPROC = CFUNCTYPE(None, GLenum, GLenum, GLint) # GL/glext.h:4634
PFNGLIMAGETRANSFORMPARAMETERFHPPROC = CFUNCTYPE(None, GLenum, GLenum, GLfloat) # GL/glext.h:4635
PFNGLIMAGETRANSFORMPARAMETERIVHPPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4636
PFNGLIMAGETRANSFORMPARAMETERFVHPPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4637
PFNGLGETIMAGETRANSFORMPARAMETERIVHPPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4638
PFNGLGETIMAGETRANSFORMPARAMETERFVHPPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4639
# HP_convolution_border_modes (GL/glext.h:4642)
GL_HP_convolution_border_modes = 1 # GL/glext.h:4643
# SGIX_texture_add_env (GL/glext.h:4646)
GL_SGIX_texture_add_env = 1 # GL/glext.h:4647
# EXT_color_subtable (GL/glext.h:4650)
GL_EXT_color_subtable = 1 # GL/glext.h:4651
# GL/glext.h:4653
glColorSubTableEXT = _link_function('glColorSubTableEXT', None, [GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'EXT_color_subtable')
# GL/glext.h:4654
glCopyColorSubTableEXT = _link_function('glCopyColorSubTableEXT', None, [GLenum, GLsizei, GLint, GLint, GLsizei], 'EXT_color_subtable')
PFNGLCOLORSUBTABLEEXTPROC = CFUNCTYPE(None, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4656
PFNGLCOPYCOLORSUBTABLEEXTPROC = CFUNCTYPE(None, GLenum, GLsizei, GLint, GLint, GLsizei) # GL/glext.h:4657
# PGI_vertex_hints (GL/glext.h:4660)
GL_PGI_vertex_hints = 1 # GL/glext.h:4661
# PGI_misc_hints (GL/glext.h:4664)
GL_PGI_misc_hints = 1 # GL/glext.h:4665
# GL/glext.h:4667
glHintPGI = _link_function('glHintPGI', None, [GLenum, GLint], 'PGI_misc_hints')
PFNGLHINTPGIPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:4669
# EXT_paletted_texture (GL/glext.h:4672)
GL_EXT_paletted_texture = 1 # GL/glext.h:4673
# GL/glext.h:4675
glColorTableEXT = _link_function('glColorTableEXT', None, [GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)], 'EXT_paletted_texture')
# GL/glext.h:4676
glGetColorTableEXT = _link_function('glGetColorTableEXT', None, [GLenum, GLenum, GLenum, POINTER(GLvoid)], 'EXT_paletted_texture')
# GL/glext.h:4677
glGetColorTableParameterivEXT = _link_function('glGetColorTableParameterivEXT', None, [GLenum, GLenum, POINTER(GLint)], 'EXT_paletted_texture')
# GL/glext.h:4678
glGetColorTableParameterfvEXT = _link_function('glGetColorTableParameterfvEXT', None, [GLenum, GLenum, POINTER(GLfloat)], 'EXT_paletted_texture')
PFNGLCOLORTABLEEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4680
PFNGLGETCOLORTABLEEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLvoid)) # GL/glext.h:4681
PFNGLGETCOLORTABLEPARAMETERIVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4682
PFNGLGETCOLORTABLEPARAMETERFVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4683
# EXT_clip_volume_hint (GL/glext.h:4686)
GL_EXT_clip_volume_hint = 1 # GL/glext.h:4687
# SGIX_list_priority (GL/glext.h:4690)
GL_SGIX_list_priority = 1 # GL/glext.h:4691
# GL/glext.h:4693
glGetListParameterfvSGIX = _link_function('glGetListParameterfvSGIX', None, [GLuint, GLenum, POINTER(GLfloat)], 'SGIX_list_priority')
# GL/glext.h:4694
glGetListParameterivSGIX = _link_function('glGetListParameterivSGIX', None, [GLuint, GLenum, POINTER(GLint)], 'SGIX_list_priority')
# GL/glext.h:4695
glListParameterfSGIX = _link_function('glListParameterfSGIX', None, [GLuint, GLenum, GLfloat], 'SGIX_list_priority')
# GL/glext.h:4696
glListParameterfvSGIX = _link_function('glListParameterfvSGIX', None, [GLuint, GLenum, POINTER(GLfloat)], 'SGIX_list_priority')
# GL/glext.h:4697
glListParameteriSGIX = _link_function('glListParameteriSGIX', None, [GLuint, GLenum, GLint], 'SGIX_list_priority')
# GL/glext.h:4698
glListParameterivSGIX = _link_function('glListParameterivSGIX', None, [GLuint, GLenum, POINTER(GLint)], 'SGIX_list_priority')
PFNGLGETLISTPARAMETERFVSGIXPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:4700
PFNGLGETLISTPARAMETERIVSGIXPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:4701
PFNGLLISTPARAMETERFSGIXPROC = CFUNCTYPE(None, GLuint, GLenum, GLfloat) # GL/glext.h:4702
PFNGLLISTPARAMETERFVSGIXPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:4703
PFNGLLISTPARAMETERISGIXPROC = CFUNCTYPE(None, GLuint, GLenum, GLint) # GL/glext.h:4704
PFNGLLISTPARAMETERIVSGIXPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:4705
# SGIX_ir_instrument1 (GL/glext.h:4708)
GL_SGIX_ir_instrument1 = 1 # GL/glext.h:4709
# SGIX_calligraphic_fragment (GL/glext.h:4712)
GL_SGIX_calligraphic_fragment = 1 # GL/glext.h:4713
# SGIX_texture_lod_bias (GL/glext.h:4716)
GL_SGIX_texture_lod_bias = 1 # GL/glext.h:4717
# SGIX_shadow_ambient (GL/glext.h:4720)
GL_SGIX_shadow_ambient = 1 # GL/glext.h:4721
# EXT_index_texture (GL/glext.h:4724)
GL_EXT_index_texture = 1 # GL/glext.h:4725
# EXT_index_material (GL/glext.h:4728)
GL_EXT_index_material = 1 # GL/glext.h:4729
# GL/glext.h:4731
glIndexMaterialEXT = _link_function('glIndexMaterialEXT', None, [GLenum, GLenum], 'EXT_index_material')
PFNGLINDEXMATERIALEXTPROC = CFUNCTYPE(None, GLenum, GLenum) # GL/glext.h:4733
# EXT_index_func (GL/glext.h:4736)
GL_EXT_index_func = 1 # GL/glext.h:4737
# GL/glext.h:4739
glIndexFuncEXT = _link_function('glIndexFuncEXT', None, [GLenum, GLclampf], 'EXT_index_func')
PFNGLINDEXFUNCEXTPROC = CFUNCTYPE(None, GLenum, GLclampf) # GL/glext.h:4741
# EXT_index_array_formats (GL/glext.h:4744)
GL_EXT_index_array_formats = 1 # GL/glext.h:4745
# EXT_compiled_vertex_array (GL/glext.h:4748)
GL_EXT_compiled_vertex_array = 1 # GL/glext.h:4749
# GL/glext.h:4751
glLockArraysEXT = _link_function('glLockArraysEXT', None, [GLint, GLsizei], 'EXT_compiled_vertex_array')
# GL/glext.h:4752
glUnlockArraysEXT = _link_function('glUnlockArraysEXT', None, [], 'EXT_compiled_vertex_array')
PFNGLLOCKARRAYSEXTPROC = CFUNCTYPE(None, GLint, GLsizei) # GL/glext.h:4754
PFNGLUNLOCKARRAYSEXTPROC = CFUNCTYPE(None) # GL/glext.h:4755
# EXT_cull_vertex (GL/glext.h:4758)
GL_EXT_cull_vertex = 1 # GL/glext.h:4759
# GL/glext.h:4761
glCullParameterdvEXT = _link_function('glCullParameterdvEXT', None, [GLenum, POINTER(GLdouble)], 'EXT_cull_vertex')
# GL/glext.h:4762
glCullParameterfvEXT = _link_function('glCullParameterfvEXT', None, [GLenum, POINTER(GLfloat)], 'EXT_cull_vertex')
PFNGLCULLPARAMETERDVEXTPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:4764
PFNGLCULLPARAMETERFVEXTPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:4765
# SGIX_ycrcb (GL/glext.h:4768)
GL_SGIX_ycrcb = 1 # GL/glext.h:4769
# SGIX_fragment_lighting (GL/glext.h:4772)
GL_SGIX_fragment_lighting = 1 # GL/glext.h:4773
# GL/glext.h:4775
glFragmentColorMaterialSGIX = _link_function('glFragmentColorMaterialSGIX', None, [GLenum, GLenum], 'SGIX_fragment_lighting')
# GL/glext.h:4776
glFragmentLightfSGIX = _link_function('glFragmentLightfSGIX', None, [GLenum, GLenum, GLfloat], 'SGIX_fragment_lighting')
# GL/glext.h:4777
glFragmentLightfvSGIX = _link_function('glFragmentLightfvSGIX', None, [GLenum, GLenum, POINTER(GLfloat)], 'SGIX_fragment_lighting')
# GL/glext.h:4778
glFragmentLightiSGIX = _link_function('glFragmentLightiSGIX', None, [GLenum, GLenum, GLint], 'SGIX_fragment_lighting')
# GL/glext.h:4779
glFragmentLightivSGIX = _link_function('glFragmentLightivSGIX', None, [GLenum, GLenum, POINTER(GLint)], 'SGIX_fragment_lighting')
# GL/glext.h:4780
glFragmentLightModelfSGIX = _link_function('glFragmentLightModelfSGIX', None, [GLenum, GLfloat], 'SGIX_fragment_lighting')
# GL/glext.h:4781
glFragmentLightModelfvSGIX = _link_function('glFragmentLightModelfvSGIX', None, [GLenum, POINTER(GLfloat)], 'SGIX_fragment_lighting')
# GL/glext.h:4782
glFragmentLightModeliSGIX = _link_function('glFragmentLightModeliSGIX', None, [GLenum, GLint], 'SGIX_fragment_lighting')
# GL/glext.h:4783
glFragmentLightModelivSGIX = _link_function('glFragmentLightModelivSGIX', None, [GLenum, POINTER(GLint)], 'SGIX_fragment_lighting')
# GL/glext.h:4784
glFragmentMaterialfSGIX = _link_function('glFragmentMaterialfSGIX', None, [GLenum, GLenum, GLfloat], 'SGIX_fragment_lighting')
# GL/glext.h:4785
glFragmentMaterialfvSGIX = _link_function('glFragmentMaterialfvSGIX', None, [GLenum, GLenum, POINTER(GLfloat)], 'SGIX_fragment_lighting')
# GL/glext.h:4786
glFragmentMaterialiSGIX = _link_function('glFragmentMaterialiSGIX', None, [GLenum, GLenum, GLint], 'SGIX_fragment_lighting')
# GL/glext.h:4787
glFragmentMaterialivSGIX = _link_function('glFragmentMaterialivSGIX', None, [GLenum, GLenum, POINTER(GLint)], 'SGIX_fragment_lighting')
# GL/glext.h:4788
glGetFragmentLightfvSGIX = _link_function('glGetFragmentLightfvSGIX', None, [GLenum, GLenum, POINTER(GLfloat)], 'SGIX_fragment_lighting')
# GL/glext.h:4789
glGetFragmentLightivSGIX = _link_function('glGetFragmentLightivSGIX', None, [GLenum, GLenum, POINTER(GLint)], 'SGIX_fragment_lighting')
# GL/glext.h:4790
glGetFragmentMaterialfvSGIX = _link_function('glGetFragmentMaterialfvSGIX', None, [GLenum, GLenum, POINTER(GLfloat)], 'SGIX_fragment_lighting')
# GL/glext.h:4791
glGetFragmentMaterialivSGIX = _link_function('glGetFragmentMaterialivSGIX', None, [GLenum, GLenum, POINTER(GLint)], 'SGIX_fragment_lighting')
# GL/glext.h:4792
glLightEnviSGIX = _link_function('glLightEnviSGIX', None, [GLenum, GLint], 'SGIX_fragment_lighting')
PFNGLFRAGMENTCOLORMATERIALSGIXPROC = CFUNCTYPE(None, GLenum, GLenum) # GL/glext.h:4794
PFNGLFRAGMENTLIGHTFSGIXPROC = CFUNCTYPE(None, GLenum, GLenum, GLfloat) # GL/glext.h:4795
PFNGLFRAGMENTLIGHTFVSGIXPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4796
PFNGLFRAGMENTLIGHTISGIXPROC = CFUNCTYPE(None, GLenum, GLenum, GLint) # GL/glext.h:4797
PFNGLFRAGMENTLIGHTIVSGIXPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4798
PFNGLFRAGMENTLIGHTMODELFSGIXPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:4799
PFNGLFRAGMENTLIGHTMODELFVSGIXPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:4800
PFNGLFRAGMENTLIGHTMODELISGIXPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:4801
PFNGLFRAGMENTLIGHTMODELIVSGIXPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:4802
PFNGLFRAGMENTMATERIALFSGIXPROC = CFUNCTYPE(None, GLenum, GLenum, GLfloat) # GL/glext.h:4803
PFNGLFRAGMENTMATERIALFVSGIXPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4804
PFNGLFRAGMENTMATERIALISGIXPROC = CFUNCTYPE(None, GLenum, GLenum, GLint) # GL/glext.h:4805
PFNGLFRAGMENTMATERIALIVSGIXPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4806
PFNGLGETFRAGMENTLIGHTFVSGIXPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4807
PFNGLGETFRAGMENTLIGHTIVSGIXPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4808
PFNGLGETFRAGMENTMATERIALFVSGIXPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4809
PFNGLGETFRAGMENTMATERIALIVSGIXPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4810
PFNGLLIGHTENVISGIXPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:4811
# IBM_rasterpos_clip (GL/glext.h:4814)
GL_IBM_rasterpos_clip = 1 # GL/glext.h:4815
# HP_texture_lighting (GL/glext.h:4818)
GL_HP_texture_lighting = 1 # GL/glext.h:4819
# EXT_draw_range_elements (GL/glext.h:4822)
GL_EXT_draw_range_elements = 1 # GL/glext.h:4823
# GL/glext.h:4825
glDrawRangeElementsEXT = _link_function('glDrawRangeElementsEXT', None, [GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid)], 'EXT_draw_range_elements')
PFNGLDRAWRANGEELEMENTSEXTPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid)) # GL/glext.h:4827
# WIN_phong_shading (GL/glext.h:4830)
GL_WIN_phong_shading = 1 # GL/glext.h:4831
# WIN_specular_fog (GL/glext.h:4834)
GL_WIN_specular_fog = 1 # GL/glext.h:4835
# EXT_light_texture (GL/glext.h:4838)
GL_EXT_light_texture = 1 # GL/glext.h:4839
# GL/glext.h:4841
glApplyTextureEXT = _link_function('glApplyTextureEXT', None, [GLenum], 'EXT_light_texture')
# GL/glext.h:4842
glTextureLightEXT = _link_function('glTextureLightEXT', None, [GLenum], 'EXT_light_texture')
# GL/glext.h:4843
glTextureMaterialEXT = _link_function('glTextureMaterialEXT', None, [GLenum, GLenum], 'EXT_light_texture')
PFNGLAPPLYTEXTUREEXTPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:4845
PFNGLTEXTURELIGHTEXTPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:4846
PFNGLTEXTUREMATERIALEXTPROC = CFUNCTYPE(None, GLenum, GLenum) # GL/glext.h:4847
# SGIX_blend_alpha_minmax (GL/glext.h:4850)
GL_SGIX_blend_alpha_minmax = 1 # GL/glext.h:4851
# EXT_bgra (GL/glext.h:4854)
GL_EXT_bgra = 1 # GL/glext.h:4855
# SGIX_async (GL/glext.h:4858)
GL_SGIX_async = 1 # GL/glext.h:4859
# GL/glext.h:4861
glAsyncMarkerSGIX = _link_function('glAsyncMarkerSGIX', None, [GLuint], 'SGIX_async')
# GL/glext.h:4862
glFinishAsyncSGIX = _link_function('glFinishAsyncSGIX', GLint, [POINTER(GLuint)], 'SGIX_async')
# GL/glext.h:4863
glPollAsyncSGIX = _link_function('glPollAsyncSGIX', GLint, [POINTER(GLuint)], 'SGIX_async')
# GL/glext.h:4864
glGenAsyncMarkersSGIX = _link_function('glGenAsyncMarkersSGIX', GLuint, [GLsizei], 'SGIX_async')
# GL/glext.h:4865
glDeleteAsyncMarkersSGIX = _link_function('glDeleteAsyncMarkersSGIX', None, [GLuint, GLsizei], 'SGIX_async')
# GL/glext.h:4866
glIsAsyncMarkerSGIX = _link_function('glIsAsyncMarkerSGIX', GLboolean, [GLuint], 'SGIX_async')
PFNGLASYNCMARKERSGIXPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:4868
PFNGLFINISHASYNCSGIXPROC = CFUNCTYPE(GLint, POINTER(GLuint)) # GL/glext.h:4869
PFNGLPOLLASYNCSGIXPROC = CFUNCTYPE(GLint, POINTER(GLuint)) # GL/glext.h:4870
PFNGLGENASYNCMARKERSSGIXPROC = CFUNCTYPE(GLuint, GLsizei) # GL/glext.h:4871
PFNGLDELETEASYNCMARKERSSGIXPROC = CFUNCTYPE(None, GLuint, GLsizei) # GL/glext.h:4872
PFNGLISASYNCMARKERSGIXPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:4873
# SGIX_async_pixel (GL/glext.h:4876)
GL_SGIX_async_pixel = 1 # GL/glext.h:4877
# SGIX_async_histogram (GL/glext.h:4880)
GL_SGIX_async_histogram = 1 # GL/glext.h:4881
# INTEL_parallel_arrays (GL/glext.h:4884)
GL_INTEL_parallel_arrays = 1 # GL/glext.h:4885
# GL/glext.h:4887
glVertexPointervINTEL = _link_function('glVertexPointervINTEL', None, [GLint, GLenum, POINTER(POINTER(GLvoid))], 'INTEL_parallel_arrays')
# GL/glext.h:4888
glNormalPointervINTEL = _link_function('glNormalPointervINTEL', None, [GLenum, POINTER(POINTER(GLvoid))], 'INTEL_parallel_arrays')
# GL/glext.h:4889
glColorPointervINTEL = _link_function('glColorPointervINTEL', None, [GLint, GLenum, POINTER(POINTER(GLvoid))], 'INTEL_parallel_arrays')
# GL/glext.h:4890
glTexCoordPointervINTEL = _link_function('glTexCoordPointervINTEL', None, [GLint, GLenum, POINTER(POINTER(GLvoid))], 'INTEL_parallel_arrays')
PFNGLVERTEXPOINTERVINTELPROC = CFUNCTYPE(None, GLint, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:4892
PFNGLNORMALPOINTERVINTELPROC = CFUNCTYPE(None, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:4893
PFNGLCOLORPOINTERVINTELPROC = CFUNCTYPE(None, GLint, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:4894
PFNGLTEXCOORDPOINTERVINTELPROC = CFUNCTYPE(None, GLint, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:4895
# HP_occlusion_test (GL/glext.h:4898)
GL_HP_occlusion_test = 1 # GL/glext.h:4899
# EXT_pixel_transform (GL/glext.h:4902)
GL_EXT_pixel_transform = 1 # GL/glext.h:4903
# GL/glext.h:4905
glPixelTransformParameteriEXT = _link_function('glPixelTransformParameteriEXT', None, [GLenum, GLenum, GLint], 'EXT_pixel_transform')
# GL/glext.h:4906
glPixelTransformParameterfEXT = _link_function('glPixelTransformParameterfEXT', None, [GLenum, GLenum, GLfloat], 'EXT_pixel_transform')
# GL/glext.h:4907
glPixelTransformParameterivEXT = _link_function('glPixelTransformParameterivEXT', None, [GLenum, GLenum, POINTER(GLint)], 'EXT_pixel_transform')
# GL/glext.h:4908
glPixelTransformParameterfvEXT = _link_function('glPixelTransformParameterfvEXT', None, [GLenum, GLenum, POINTER(GLfloat)], 'EXT_pixel_transform')
PFNGLPIXELTRANSFORMPARAMETERIEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLint) # GL/glext.h:4910
PFNGLPIXELTRANSFORMPARAMETERFEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLfloat) # GL/glext.h:4911
PFNGLPIXELTRANSFORMPARAMETERIVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:4912
PFNGLPIXELTRANSFORMPARAMETERFVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:4913
# EXT_pixel_transform_color_table (GL/glext.h:4916)
GL_EXT_pixel_transform_color_table = 1 # GL/glext.h:4917
# EXT_shared_texture_palette (GL/glext.h:4920)
GL_EXT_shared_texture_palette = 1 # GL/glext.h:4921
# EXT_separate_specular_color (GL/glext.h:4924)
GL_EXT_separate_specular_color = 1 # GL/glext.h:4925
# EXT_secondary_color (GL/glext.h:4928)
GL_EXT_secondary_color = 1 # GL/glext.h:4929
# GL/glext.h:4931
glSecondaryColor3bEXT = _link_function('glSecondaryColor3bEXT', None, [GLbyte, GLbyte, GLbyte], 'EXT_secondary_color')
# GL/glext.h:4932
glSecondaryColor3bvEXT = _link_function('glSecondaryColor3bvEXT', None, [POINTER(GLbyte)], 'EXT_secondary_color')
# GL/glext.h:4933
glSecondaryColor3dEXT = _link_function('glSecondaryColor3dEXT', None, [GLdouble, GLdouble, GLdouble], 'EXT_secondary_color')
# GL/glext.h:4934
glSecondaryColor3dvEXT = _link_function('glSecondaryColor3dvEXT', None, [POINTER(GLdouble)], 'EXT_secondary_color')
# GL/glext.h:4935
glSecondaryColor3fEXT = _link_function('glSecondaryColor3fEXT', None, [GLfloat, GLfloat, GLfloat], 'EXT_secondary_color')
# GL/glext.h:4936
glSecondaryColor3fvEXT = _link_function('glSecondaryColor3fvEXT', None, [POINTER(GLfloat)], 'EXT_secondary_color')
# GL/glext.h:4937
glSecondaryColor3iEXT = _link_function('glSecondaryColor3iEXT', None, [GLint, GLint, GLint], 'EXT_secondary_color')
# GL/glext.h:4938
glSecondaryColor3ivEXT = _link_function('glSecondaryColor3ivEXT', None, [POINTER(GLint)], 'EXT_secondary_color')
# GL/glext.h:4939
glSecondaryColor3sEXT = _link_function('glSecondaryColor3sEXT', None, [GLshort, GLshort, GLshort], 'EXT_secondary_color')
# GL/glext.h:4940
glSecondaryColor3svEXT = _link_function('glSecondaryColor3svEXT', None, [POINTER(GLshort)], 'EXT_secondary_color')
# GL/glext.h:4941
glSecondaryColor3ubEXT = _link_function('glSecondaryColor3ubEXT', None, [GLubyte, GLubyte, GLubyte], 'EXT_secondary_color')
# GL/glext.h:4942
glSecondaryColor3ubvEXT = _link_function('glSecondaryColor3ubvEXT', None, [POINTER(GLubyte)], 'EXT_secondary_color')
# GL/glext.h:4943
glSecondaryColor3uiEXT = _link_function('glSecondaryColor3uiEXT', None, [GLuint, GLuint, GLuint], 'EXT_secondary_color')
# GL/glext.h:4944
glSecondaryColor3uivEXT = _link_function('glSecondaryColor3uivEXT', None, [POINTER(GLuint)], 'EXT_secondary_color')
# GL/glext.h:4945
glSecondaryColor3usEXT = _link_function('glSecondaryColor3usEXT', None, [GLushort, GLushort, GLushort], 'EXT_secondary_color')
# GL/glext.h:4946
glSecondaryColor3usvEXT = _link_function('glSecondaryColor3usvEXT', None, [POINTER(GLushort)], 'EXT_secondary_color')
# GL/glext.h:4947
glSecondaryColorPointerEXT = _link_function('glSecondaryColorPointerEXT', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], 'EXT_secondary_color')
PFNGLSECONDARYCOLOR3BEXTPROC = CFUNCTYPE(None, GLbyte, GLbyte, GLbyte) # GL/glext.h:4949
PFNGLSECONDARYCOLOR3BVEXTPROC = CFUNCTYPE(None, POINTER(GLbyte)) # GL/glext.h:4950
PFNGLSECONDARYCOLOR3DEXTPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble) # GL/glext.h:4951
PFNGLSECONDARYCOLOR3DVEXTPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:4952
PFNGLSECONDARYCOLOR3FEXTPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat) # GL/glext.h:4953
PFNGLSECONDARYCOLOR3FVEXTPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:4954
PFNGLSECONDARYCOLOR3IEXTPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:4955
PFNGLSECONDARYCOLOR3IVEXTPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:4956
PFNGLSECONDARYCOLOR3SEXTPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort) # GL/glext.h:4957
PFNGLSECONDARYCOLOR3SVEXTPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:4958
PFNGLSECONDARYCOLOR3UBEXTPROC = CFUNCTYPE(None, GLubyte, GLubyte, GLubyte) # GL/glext.h:4959
PFNGLSECONDARYCOLOR3UBVEXTPROC = CFUNCTYPE(None, POINTER(GLubyte)) # GL/glext.h:4960
PFNGLSECONDARYCOLOR3UIEXTPROC = CFUNCTYPE(None, GLuint, GLuint, GLuint) # GL/glext.h:4961
PFNGLSECONDARYCOLOR3UIVEXTPROC = CFUNCTYPE(None, POINTER(GLuint)) # GL/glext.h:4962
PFNGLSECONDARYCOLOR3USEXTPROC = CFUNCTYPE(None, GLushort, GLushort, GLushort) # GL/glext.h:4963
PFNGLSECONDARYCOLOR3USVEXTPROC = CFUNCTYPE(None, POINTER(GLushort)) # GL/glext.h:4964
PFNGLSECONDARYCOLORPOINTEREXTPROC = CFUNCTYPE(None, GLint, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:4965
# EXT_texture_perturb_normal (GL/glext.h:4968)
GL_EXT_texture_perturb_normal = 1 # GL/glext.h:4969
# GL/glext.h:4971
glTextureNormalEXT = _link_function('glTextureNormalEXT', None, [GLenum], 'EXT_texture_perturb_normal')
PFNGLTEXTURENORMALEXTPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:4973
# EXT_multi_draw_arrays (GL/glext.h:4976)
GL_EXT_multi_draw_arrays = 1 # GL/glext.h:4977
# GL/glext.h:4979
glMultiDrawArraysEXT = _link_function('glMultiDrawArraysEXT', None, [GLenum, POINTER(GLint), POINTER(GLsizei), GLsizei], 'EXT_multi_draw_arrays')
# GL/glext.h:4980
glMultiDrawElementsEXT = _link_function('glMultiDrawElementsEXT', None, [GLenum, POINTER(GLsizei), GLenum, POINTER(POINTER(GLvoid)), GLsizei], 'EXT_multi_draw_arrays')
PFNGLMULTIDRAWARRAYSEXTPROC = CFUNCTYPE(None, GLenum, POINTER(GLint), POINTER(GLsizei), GLsizei) # GL/glext.h:4982
PFNGLMULTIDRAWELEMENTSEXTPROC = CFUNCTYPE(None, GLenum, POINTER(GLsizei), GLenum, POINTER(POINTER(GLvoid)), GLsizei) # GL/glext.h:4983
# EXT_fog_coord (GL/glext.h:4986)
GL_EXT_fog_coord = 1 # GL/glext.h:4987
# GL/glext.h:4989
glFogCoordfEXT = _link_function('glFogCoordfEXT', None, [GLfloat], 'EXT_fog_coord')
# GL/glext.h:4990
glFogCoordfvEXT = _link_function('glFogCoordfvEXT', None, [POINTER(GLfloat)], 'EXT_fog_coord')
# GL/glext.h:4991
glFogCoorddEXT = _link_function('glFogCoorddEXT', None, [GLdouble], 'EXT_fog_coord')
# GL/glext.h:4992
glFogCoorddvEXT = _link_function('glFogCoorddvEXT', None, [POINTER(GLdouble)], 'EXT_fog_coord')
# GL/glext.h:4993
glFogCoordPointerEXT = _link_function('glFogCoordPointerEXT', None, [GLenum, GLsizei, POINTER(GLvoid)], 'EXT_fog_coord')
PFNGLFOGCOORDFEXTPROC = CFUNCTYPE(None, GLfloat) # GL/glext.h:4995
PFNGLFOGCOORDFVEXTPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:4996
PFNGLFOGCOORDDEXTPROC = CFUNCTYPE(None, GLdouble) # GL/glext.h:4997
PFNGLFOGCOORDDVEXTPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:4998
PFNGLFOGCOORDPOINTEREXTPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:4999
# REND_screen_coordinates (GL/glext.h:5002)
GL_REND_screen_coordinates = 1 # GL/glext.h:5003
# EXT_coordinate_frame (GL/glext.h:5006)
GL_EXT_coordinate_frame = 1 # GL/glext.h:5007
# GL/glext.h:5009
glTangent3bEXT = _link_function('glTangent3bEXT', None, [GLbyte, GLbyte, GLbyte], 'EXT_coordinate_frame')
# GL/glext.h:5010
glTangent3bvEXT = _link_function('glTangent3bvEXT', None, [POINTER(GLbyte)], 'EXT_coordinate_frame')
# GL/glext.h:5011
glTangent3dEXT = _link_function('glTangent3dEXT', None, [GLdouble, GLdouble, GLdouble], 'EXT_coordinate_frame')
# GL/glext.h:5012
glTangent3dvEXT = _link_function('glTangent3dvEXT', None, [POINTER(GLdouble)], 'EXT_coordinate_frame')
# GL/glext.h:5013
glTangent3fEXT = _link_function('glTangent3fEXT', None, [GLfloat, GLfloat, GLfloat], 'EXT_coordinate_frame')
# GL/glext.h:5014
glTangent3fvEXT = _link_function('glTangent3fvEXT', None, [POINTER(GLfloat)], 'EXT_coordinate_frame')
# GL/glext.h:5015
glTangent3iEXT = _link_function('glTangent3iEXT', None, [GLint, GLint, GLint], 'EXT_coordinate_frame')
# GL/glext.h:5016
glTangent3ivEXT = _link_function('glTangent3ivEXT', None, [POINTER(GLint)], 'EXT_coordinate_frame')
# GL/glext.h:5017
glTangent3sEXT = _link_function('glTangent3sEXT', None, [GLshort, GLshort, GLshort], 'EXT_coordinate_frame')
# GL/glext.h:5018
glTangent3svEXT = _link_function('glTangent3svEXT', None, [POINTER(GLshort)], 'EXT_coordinate_frame')
# GL/glext.h:5019
glBinormal3bEXT = _link_function('glBinormal3bEXT', None, [GLbyte, GLbyte, GLbyte], 'EXT_coordinate_frame')
# GL/glext.h:5020
glBinormal3bvEXT = _link_function('glBinormal3bvEXT', None, [POINTER(GLbyte)], 'EXT_coordinate_frame')
# GL/glext.h:5021
glBinormal3dEXT = _link_function('glBinormal3dEXT', None, [GLdouble, GLdouble, GLdouble], 'EXT_coordinate_frame')
# GL/glext.h:5022
glBinormal3dvEXT = _link_function('glBinormal3dvEXT', None, [POINTER(GLdouble)], 'EXT_coordinate_frame')
# GL/glext.h:5023
glBinormal3fEXT = _link_function('glBinormal3fEXT', None, [GLfloat, GLfloat, GLfloat], 'EXT_coordinate_frame')
# GL/glext.h:5024
glBinormal3fvEXT = _link_function('glBinormal3fvEXT', None, [POINTER(GLfloat)], 'EXT_coordinate_frame')
# GL/glext.h:5025
glBinormal3iEXT = _link_function('glBinormal3iEXT', None, [GLint, GLint, GLint], 'EXT_coordinate_frame')
# GL/glext.h:5026
glBinormal3ivEXT = _link_function('glBinormal3ivEXT', None, [POINTER(GLint)], 'EXT_coordinate_frame')
# GL/glext.h:5027
glBinormal3sEXT = _link_function('glBinormal3sEXT', None, [GLshort, GLshort, GLshort], 'EXT_coordinate_frame')
# GL/glext.h:5028
glBinormal3svEXT = _link_function('glBinormal3svEXT', None, [POINTER(GLshort)], 'EXT_coordinate_frame')
# GL/glext.h:5029
glTangentPointerEXT = _link_function('glTangentPointerEXT', None, [GLenum, GLsizei, POINTER(GLvoid)], 'EXT_coordinate_frame')
# GL/glext.h:5030
glBinormalPointerEXT = _link_function('glBinormalPointerEXT', None, [GLenum, GLsizei, POINTER(GLvoid)], 'EXT_coordinate_frame')
PFNGLTANGENT3BEXTPROC = CFUNCTYPE(None, GLbyte, GLbyte, GLbyte) # GL/glext.h:5032
PFNGLTANGENT3BVEXTPROC = CFUNCTYPE(None, POINTER(GLbyte)) # GL/glext.h:5033
PFNGLTANGENT3DEXTPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble) # GL/glext.h:5034
PFNGLTANGENT3DVEXTPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:5035
PFNGLTANGENT3FEXTPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat) # GL/glext.h:5036
PFNGLTANGENT3FVEXTPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:5037
PFNGLTANGENT3IEXTPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:5038
PFNGLTANGENT3IVEXTPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:5039
PFNGLTANGENT3SEXTPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort) # GL/glext.h:5040
PFNGLTANGENT3SVEXTPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:5041
PFNGLBINORMAL3BEXTPROC = CFUNCTYPE(None, GLbyte, GLbyte, GLbyte) # GL/glext.h:5042
PFNGLBINORMAL3BVEXTPROC = CFUNCTYPE(None, POINTER(GLbyte)) # GL/glext.h:5043
PFNGLBINORMAL3DEXTPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble) # GL/glext.h:5044
PFNGLBINORMAL3DVEXTPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:5045
PFNGLBINORMAL3FEXTPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat) # GL/glext.h:5046
PFNGLBINORMAL3FVEXTPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:5047
PFNGLBINORMAL3IEXTPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:5048
PFNGLBINORMAL3IVEXTPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:5049
PFNGLBINORMAL3SEXTPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort) # GL/glext.h:5050
PFNGLBINORMAL3SVEXTPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:5051
PFNGLTANGENTPOINTEREXTPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:5052
PFNGLBINORMALPOINTEREXTPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:5053
# EXT_texture_env_combine (GL/glext.h:5056)
GL_EXT_texture_env_combine = 1 # GL/glext.h:5057
# APPLE_specular_vector (GL/glext.h:5060)
GL_APPLE_specular_vector = 1 # GL/glext.h:5061
# APPLE_transform_hint (GL/glext.h:5064)
GL_APPLE_transform_hint = 1 # GL/glext.h:5065
# SGIX_fog_scale (GL/glext.h:5068)
GL_SGIX_fog_scale = 1 # GL/glext.h:5069
# SUNX_constant_data (GL/glext.h:5072)
GL_SUNX_constant_data = 1 # GL/glext.h:5073
# GL/glext.h:5075
glFinishTextureSUNX = _link_function('glFinishTextureSUNX', None, [], 'SUNX_constant_data')
PFNGLFINISHTEXTURESUNXPROC = CFUNCTYPE(None) # GL/glext.h:5077
# SUN_global_alpha (GL/glext.h:5080)
GL_SUN_global_alpha = 1 # GL/glext.h:5081
# GL/glext.h:5083
glGlobalAlphaFactorbSUN = _link_function('glGlobalAlphaFactorbSUN', None, [GLbyte], 'SUN_global_alpha')
# GL/glext.h:5084
glGlobalAlphaFactorsSUN = _link_function('glGlobalAlphaFactorsSUN', None, [GLshort], 'SUN_global_alpha')
# GL/glext.h:5085
glGlobalAlphaFactoriSUN = _link_function('glGlobalAlphaFactoriSUN', None, [GLint], 'SUN_global_alpha')
# GL/glext.h:5086
glGlobalAlphaFactorfSUN = _link_function('glGlobalAlphaFactorfSUN', None, [GLfloat], 'SUN_global_alpha')
# GL/glext.h:5087
glGlobalAlphaFactordSUN = _link_function('glGlobalAlphaFactordSUN', None, [GLdouble], 'SUN_global_alpha')
# GL/glext.h:5088
glGlobalAlphaFactorubSUN = _link_function('glGlobalAlphaFactorubSUN', None, [GLubyte], 'SUN_global_alpha')
# GL/glext.h:5089
glGlobalAlphaFactorusSUN = _link_function('glGlobalAlphaFactorusSUN', None, [GLushort], 'SUN_global_alpha')
# GL/glext.h:5090
glGlobalAlphaFactoruiSUN = _link_function('glGlobalAlphaFactoruiSUN', None, [GLuint], 'SUN_global_alpha')
PFNGLGLOBALALPHAFACTORBSUNPROC = CFUNCTYPE(None, GLbyte) # GL/glext.h:5092
PFNGLGLOBALALPHAFACTORSSUNPROC = CFUNCTYPE(None, GLshort) # GL/glext.h:5093
PFNGLGLOBALALPHAFACTORISUNPROC = CFUNCTYPE(None, GLint) # GL/glext.h:5094
PFNGLGLOBALALPHAFACTORFSUNPROC = CFUNCTYPE(None, GLfloat) # GL/glext.h:5095
PFNGLGLOBALALPHAFACTORDSUNPROC = CFUNCTYPE(None, GLdouble) # GL/glext.h:5096
PFNGLGLOBALALPHAFACTORUBSUNPROC = CFUNCTYPE(None, GLubyte) # GL/glext.h:5097
PFNGLGLOBALALPHAFACTORUSSUNPROC = CFUNCTYPE(None, GLushort) # GL/glext.h:5098
PFNGLGLOBALALPHAFACTORUISUNPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5099
# SUN_triangle_list (GL/glext.h:5102)
GL_SUN_triangle_list = 1 # GL/glext.h:5103
# GL/glext.h:5105
glReplacementCodeuiSUN = _link_function('glReplacementCodeuiSUN', None, [GLuint], 'SUN_triangle_list')
# GL/glext.h:5106
glReplacementCodeusSUN = _link_function('glReplacementCodeusSUN', None, [GLushort], 'SUN_triangle_list')
# GL/glext.h:5107
glReplacementCodeubSUN = _link_function('glReplacementCodeubSUN', None, [GLubyte], 'SUN_triangle_list')
# GL/glext.h:5108
glReplacementCodeuivSUN = _link_function('glReplacementCodeuivSUN', None, [POINTER(GLuint)], 'SUN_triangle_list')
# GL/glext.h:5109
glReplacementCodeusvSUN = _link_function('glReplacementCodeusvSUN', None, [POINTER(GLushort)], 'SUN_triangle_list')
# GL/glext.h:5110
glReplacementCodeubvSUN = _link_function('glReplacementCodeubvSUN', None, [POINTER(GLubyte)], 'SUN_triangle_list')
# GL/glext.h:5111
glReplacementCodePointerSUN = _link_function('glReplacementCodePointerSUN', None, [GLenum, GLsizei, POINTER(POINTER(GLvoid))], 'SUN_triangle_list')
PFNGLREPLACEMENTCODEUISUNPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5113
PFNGLREPLACEMENTCODEUSSUNPROC = CFUNCTYPE(None, GLushort) # GL/glext.h:5114
PFNGLREPLACEMENTCODEUBSUNPROC = CFUNCTYPE(None, GLubyte) # GL/glext.h:5115
PFNGLREPLACEMENTCODEUIVSUNPROC = CFUNCTYPE(None, POINTER(GLuint)) # GL/glext.h:5116
PFNGLREPLACEMENTCODEUSVSUNPROC = CFUNCTYPE(None, POINTER(GLushort)) # GL/glext.h:5117
PFNGLREPLACEMENTCODEUBVSUNPROC = CFUNCTYPE(None, POINTER(GLubyte)) # GL/glext.h:5118
PFNGLREPLACEMENTCODEPOINTERSUNPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(POINTER(GLvoid))) # GL/glext.h:5119
# SUN_vertex (GL/glext.h:5122)
GL_SUN_vertex = 1 # GL/glext.h:5123
# GL/glext.h:5125
glColor4ubVertex2fSUN = _link_function('glColor4ubVertex2fSUN', None, [GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5126
glColor4ubVertex2fvSUN = _link_function('glColor4ubVertex2fvSUN', None, [POINTER(GLubyte), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5127
glColor4ubVertex3fSUN = _link_function('glColor4ubVertex3fSUN', None, [GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5128
glColor4ubVertex3fvSUN = _link_function('glColor4ubVertex3fvSUN', None, [POINTER(GLubyte), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5129
glColor3fVertex3fSUN = _link_function('glColor3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5130
glColor3fVertex3fvSUN = _link_function('glColor3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5131
glNormal3fVertex3fSUN = _link_function('glNormal3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5132
glNormal3fVertex3fvSUN = _link_function('glNormal3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5133
glColor4fNormal3fVertex3fSUN = _link_function('glColor4fNormal3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5134
glColor4fNormal3fVertex3fvSUN = _link_function('glColor4fNormal3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5135
glTexCoord2fVertex3fSUN = _link_function('glTexCoord2fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5136
glTexCoord2fVertex3fvSUN = _link_function('glTexCoord2fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5137
glTexCoord4fVertex4fSUN = _link_function('glTexCoord4fVertex4fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5138
glTexCoord4fVertex4fvSUN = _link_function('glTexCoord4fVertex4fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5139
glTexCoord2fColor4ubVertex3fSUN = _link_function('glTexCoord2fColor4ubVertex3fSUN', None, [GLfloat, GLfloat, GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5140
glTexCoord2fColor4ubVertex3fvSUN = _link_function('glTexCoord2fColor4ubVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLubyte), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5141
glTexCoord2fColor3fVertex3fSUN = _link_function('glTexCoord2fColor3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5142
glTexCoord2fColor3fVertex3fvSUN = _link_function('glTexCoord2fColor3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5143
glTexCoord2fNormal3fVertex3fSUN = _link_function('glTexCoord2fNormal3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5144
glTexCoord2fNormal3fVertex3fvSUN = _link_function('glTexCoord2fNormal3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5145
glTexCoord2fColor4fNormal3fVertex3fSUN = _link_function('glTexCoord2fColor4fNormal3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5146
glTexCoord2fColor4fNormal3fVertex3fvSUN = _link_function('glTexCoord2fColor4fNormal3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5147
glTexCoord4fColor4fNormal3fVertex4fSUN = _link_function('glTexCoord4fColor4fNormal3fVertex4fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5148
glTexCoord4fColor4fNormal3fVertex4fvSUN = _link_function('glTexCoord4fColor4fNormal3fVertex4fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5149
glReplacementCodeuiVertex3fSUN = _link_function('glReplacementCodeuiVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5150
glReplacementCodeuiVertex3fvSUN = _link_function('glReplacementCodeuiVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5151
glReplacementCodeuiColor4ubVertex3fSUN = _link_function('glReplacementCodeuiColor4ubVertex3fSUN', None, [GLuint, GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5152
glReplacementCodeuiColor4ubVertex3fvSUN = _link_function('glReplacementCodeuiColor4ubVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLubyte), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5153
glReplacementCodeuiColor3fVertex3fSUN = _link_function('glReplacementCodeuiColor3fVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5154
glReplacementCodeuiColor3fVertex3fvSUN = _link_function('glReplacementCodeuiColor3fVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5155
glReplacementCodeuiNormal3fVertex3fSUN = _link_function('glReplacementCodeuiNormal3fVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5156
glReplacementCodeuiNormal3fVertex3fvSUN = _link_function('glReplacementCodeuiNormal3fVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5157
glReplacementCodeuiColor4fNormal3fVertex3fSUN = _link_function('glReplacementCodeuiColor4fNormal3fVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5158
glReplacementCodeuiColor4fNormal3fVertex3fvSUN = _link_function('glReplacementCodeuiColor4fNormal3fVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5159
glReplacementCodeuiTexCoord2fVertex3fSUN = _link_function('glReplacementCodeuiTexCoord2fVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5160
glReplacementCodeuiTexCoord2fVertex3fvSUN = _link_function('glReplacementCodeuiTexCoord2fVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5161
glReplacementCodeuiTexCoord2fNormal3fVertex3fSUN = _link_function('glReplacementCodeuiTexCoord2fNormal3fVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5162
glReplacementCodeuiTexCoord2fNormal3fVertex3fvSUN = _link_function('glReplacementCodeuiTexCoord2fNormal3fVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5163
glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fSUN = _link_function('glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5164
glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fvSUN = _link_function('glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
PFNGLCOLOR4UBVERTEX2FSUNPROC = CFUNCTYPE(None, GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat) # GL/glext.h:5166
PFNGLCOLOR4UBVERTEX2FVSUNPROC = CFUNCTYPE(None, POINTER(GLubyte), POINTER(GLfloat)) # GL/glext.h:5167
PFNGLCOLOR4UBVERTEX3FSUNPROC = CFUNCTYPE(None, GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat, GLfloat) # GL/glext.h:5168
PFNGLCOLOR4UBVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLubyte), POINTER(GLfloat)) # GL/glext.h:5169
PFNGLCOLOR3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5170
PFNGLCOLOR3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5171
PFNGLNORMAL3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5172
PFNGLNORMAL3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5173
PFNGLCOLOR4FNORMAL3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5174
PFNGLCOLOR4FNORMAL3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5175
PFNGLTEXCOORD2FVERTEX3FSUNPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5176
PFNGLTEXCOORD2FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5177
PFNGLTEXCOORD4FVERTEX4FSUNPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5178
PFNGLTEXCOORD4FVERTEX4FVSUNPROC = CFUNCTYPE(None, POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5179
PFNGLTEXCOORD2FCOLOR4UBVERTEX3FSUNPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat, GLfloat) # GL/glext.h:5180
PFNGLTEXCOORD2FCOLOR4UBVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLfloat), POINTER(GLubyte), POINTER(GLfloat)) # GL/glext.h:5181
PFNGLTEXCOORD2FCOLOR3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5182
PFNGLTEXCOORD2FCOLOR3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5183
PFNGLTEXCOORD2FNORMAL3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5184
PFNGLTEXCOORD2FNORMAL3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5185
PFNGLTEXCOORD2FCOLOR4FNORMAL3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5186
PFNGLTEXCOORD2FCOLOR4FNORMAL3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5187
PFNGLTEXCOORD4FCOLOR4FNORMAL3FVERTEX4FSUNPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5188
PFNGLTEXCOORD4FCOLOR4FNORMAL3FVERTEX4FVSUNPROC = CFUNCTYPE(None, POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5189
PFNGLREPLACEMENTCODEUIVERTEX3FSUNPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat) # GL/glext.h:5190
PFNGLREPLACEMENTCODEUIVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLuint), POINTER(GLfloat)) # GL/glext.h:5191
PFNGLREPLACEMENTCODEUICOLOR4UBVERTEX3FSUNPROC = CFUNCTYPE(None, GLuint, GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat, GLfloat) # GL/glext.h:5192
PFNGLREPLACEMENTCODEUICOLOR4UBVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLuint), POINTER(GLubyte), POINTER(GLfloat)) # GL/glext.h:5193
PFNGLREPLACEMENTCODEUICOLOR3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5194
PFNGLREPLACEMENTCODEUICOLOR3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5195
PFNGLREPLACEMENTCODEUINORMAL3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5196
PFNGLREPLACEMENTCODEUINORMAL3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5197
PFNGLREPLACEMENTCODEUICOLOR4FNORMAL3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5198
PFNGLREPLACEMENTCODEUICOLOR4FNORMAL3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5199
PFNGLREPLACEMENTCODEUITEXCOORD2FVERTEX3FSUNPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5200
PFNGLREPLACEMENTCODEUITEXCOORD2FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5201
PFNGLREPLACEMENTCODEUITEXCOORD2FNORMAL3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5202
PFNGLREPLACEMENTCODEUITEXCOORD2FNORMAL3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5203
PFNGLREPLACEMENTCODEUITEXCOORD2FCOLOR4FNORMAL3FVERTEX3FSUNPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5204
PFNGLREPLACEMENTCODEUITEXCOORD2FCOLOR4FNORMAL3FVERTEX3FVSUNPROC = CFUNCTYPE(None, POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)) # GL/glext.h:5205
# EXT_blend_func_separate (GL/glext.h:5208)
GL_EXT_blend_func_separate = 1 # GL/glext.h:5209
# GL/glext.h:5211
glBlendFuncSeparateEXT = _link_function('glBlendFuncSeparateEXT', None, [GLenum, GLenum, GLenum, GLenum], 'EXT_blend_func_separate')
PFNGLBLENDFUNCSEPARATEEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum) # GL/glext.h:5213
# INGR_blend_func_separate (GL/glext.h:5216)
GL_INGR_blend_func_separate = 1 # GL/glext.h:5217
# GL/glext.h:5219
glBlendFuncSeparateINGR = _link_function('glBlendFuncSeparateINGR', None, [GLenum, GLenum, GLenum, GLenum], 'INGR_blend_func_separate')
PFNGLBLENDFUNCSEPARATEINGRPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum) # GL/glext.h:5221
# INGR_color_clamp (GL/glext.h:5224)
GL_INGR_color_clamp = 1 # GL/glext.h:5225
# INGR_interlace_read (GL/glext.h:5228)
GL_INGR_interlace_read = 1 # GL/glext.h:5229
# EXT_stencil_wrap (GL/glext.h:5232)
GL_EXT_stencil_wrap = 1 # GL/glext.h:5233
# EXT_422_pixels (GL/glext.h:5236)
GL_EXT_422_pixels = 1 # GL/glext.h:5237
# NV_texgen_reflection (GL/glext.h:5240)
GL_NV_texgen_reflection = 1 # GL/glext.h:5241
# SUN_convolution_border_modes (GL/glext.h:5244)
GL_SUN_convolution_border_modes = 1 # GL/glext.h:5245
# EXT_texture_env_add (GL/glext.h:5248)
GL_EXT_texture_env_add = 1 # GL/glext.h:5249
# EXT_texture_lod_bias (GL/glext.h:5252)
GL_EXT_texture_lod_bias = 1 # GL/glext.h:5253
# EXT_texture_filter_anisotropic (GL/glext.h:5256)
GL_EXT_texture_filter_anisotropic = 1 # GL/glext.h:5257
# EXT_vertex_weighting (GL/glext.h:5260)
GL_EXT_vertex_weighting = 1 # GL/glext.h:5261
# GL/glext.h:5263
glVertexWeightfEXT = _link_function('glVertexWeightfEXT', None, [GLfloat], 'EXT_vertex_weighting')
# GL/glext.h:5264
glVertexWeightfvEXT = _link_function('glVertexWeightfvEXT', None, [POINTER(GLfloat)], 'EXT_vertex_weighting')
# GL/glext.h:5265
glVertexWeightPointerEXT = _link_function('glVertexWeightPointerEXT', None, [GLsizei, GLenum, GLsizei, POINTER(GLvoid)], 'EXT_vertex_weighting')
PFNGLVERTEXWEIGHTFEXTPROC = CFUNCTYPE(None, GLfloat) # GL/glext.h:5267
PFNGLVERTEXWEIGHTFVEXTPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:5268
PFNGLVERTEXWEIGHTPOINTEREXTPROC = CFUNCTYPE(None, GLsizei, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:5269
# Autogenerated ctypes bindings (regenerated from GL/glext.h); do not hand-edit.
# This section: NV_light_max_exponent / NV_vertex_array_range / NV_register_combiners
# and MESA_resize_buffers. Flag constants mirror the C #defines; gl* names are bound
# through _link_function (defined earlier in this module); PFN*PROC names are the
# matching CFUNCTYPE prototypes (return type first, None == void).
# NV_light_max_exponent (GL/glext.h:5272)
GL_NV_light_max_exponent = 1 # GL/glext.h:5273
# NV_vertex_array_range (GL/glext.h:5276)
GL_NV_vertex_array_range = 1 # GL/glext.h:5277
# GL/glext.h:5279
glFlushVertexArrayRangeNV = _link_function('glFlushVertexArrayRangeNV', None, [], 'NV_vertex_array_range')
# GL/glext.h:5280
glVertexArrayRangeNV = _link_function('glVertexArrayRangeNV', None, [GLsizei, POINTER(GLvoid)], 'NV_vertex_array_range')
PFNGLFLUSHVERTEXARRAYRANGENVPROC = CFUNCTYPE(None) # GL/glext.h:5282
PFNGLVERTEXARRAYRANGENVPROC = CFUNCTYPE(None, GLsizei, POINTER(GLvoid)) # GL/glext.h:5283
# NV_register_combiners (GL/glext.h:5286)
GL_NV_register_combiners = 1 # GL/glext.h:5287
# GL/glext.h:5289
glCombinerParameterfvNV = _link_function('glCombinerParameterfvNV', None, [GLenum, POINTER(GLfloat)], 'NV_register_combiners')
# GL/glext.h:5290
glCombinerParameterfNV = _link_function('glCombinerParameterfNV', None, [GLenum, GLfloat], 'NV_register_combiners')
# GL/glext.h:5291
glCombinerParameterivNV = _link_function('glCombinerParameterivNV', None, [GLenum, POINTER(GLint)], 'NV_register_combiners')
# GL/glext.h:5292
glCombinerParameteriNV = _link_function('glCombinerParameteriNV', None, [GLenum, GLint], 'NV_register_combiners')
# GL/glext.h:5293
glCombinerInputNV = _link_function('glCombinerInputNV', None, [GLenum, GLenum, GLenum, GLenum, GLenum, GLenum], 'NV_register_combiners')
# GL/glext.h:5294
glCombinerOutputNV = _link_function('glCombinerOutputNV', None, [GLenum, GLenum, GLenum, GLenum, GLenum, GLenum, GLenum, GLboolean, GLboolean, GLboolean], 'NV_register_combiners')
# GL/glext.h:5295
glFinalCombinerInputNV = _link_function('glFinalCombinerInputNV', None, [GLenum, GLenum, GLenum, GLenum], 'NV_register_combiners')
# GL/glext.h:5296
glGetCombinerInputParameterfvNV = _link_function('glGetCombinerInputParameterfvNV', None, [GLenum, GLenum, GLenum, GLenum, POINTER(GLfloat)], 'NV_register_combiners')
# GL/glext.h:5297
glGetCombinerInputParameterivNV = _link_function('glGetCombinerInputParameterivNV', None, [GLenum, GLenum, GLenum, GLenum, POINTER(GLint)], 'NV_register_combiners')
# GL/glext.h:5298
glGetCombinerOutputParameterfvNV = _link_function('glGetCombinerOutputParameterfvNV', None, [GLenum, GLenum, GLenum, POINTER(GLfloat)], 'NV_register_combiners')
# GL/glext.h:5299
glGetCombinerOutputParameterivNV = _link_function('glGetCombinerOutputParameterivNV', None, [GLenum, GLenum, GLenum, POINTER(GLint)], 'NV_register_combiners')
# GL/glext.h:5300
glGetFinalCombinerInputParameterfvNV = _link_function('glGetFinalCombinerInputParameterfvNV', None, [GLenum, GLenum, POINTER(GLfloat)], 'NV_register_combiners')
# GL/glext.h:5301
glGetFinalCombinerInputParameterivNV = _link_function('glGetFinalCombinerInputParameterivNV', None, [GLenum, GLenum, POINTER(GLint)], 'NV_register_combiners')
PFNGLCOMBINERPARAMETERFVNVPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:5303
PFNGLCOMBINERPARAMETERFNVPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:5304
PFNGLCOMBINERPARAMETERIVNVPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:5305
PFNGLCOMBINERPARAMETERINVPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:5306
PFNGLCOMBINERINPUTNVPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum, GLenum, GLenum) # GL/glext.h:5307
PFNGLCOMBINEROUTPUTNVPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum, GLenum, GLenum, GLenum, GLboolean, GLboolean, GLboolean) # GL/glext.h:5308
PFNGLFINALCOMBINERINPUTNVPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum) # GL/glext.h:5309
PFNGLGETCOMBINERINPUTPARAMETERFVNVPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:5310
PFNGLGETCOMBINERINPUTPARAMETERIVNVPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:5311
PFNGLGETCOMBINEROUTPUTPARAMETERFVNVPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:5312
PFNGLGETCOMBINEROUTPUTPARAMETERIVNVPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:5313
PFNGLGETFINALCOMBINERINPUTPARAMETERFVNVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:5314
PFNGLGETFINALCOMBINERINPUTPARAMETERIVNVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:5315
# NV_fog_distance (GL/glext.h:5318)
GL_NV_fog_distance = 1 # GL/glext.h:5319
# NV_texgen_emboss (GL/glext.h:5322)
GL_NV_texgen_emboss = 1 # GL/glext.h:5323
# NV_blend_square (GL/glext.h:5326)
GL_NV_blend_square = 1 # GL/glext.h:5327
# NV_texture_env_combine4 (GL/glext.h:5330)
GL_NV_texture_env_combine4 = 1 # GL/glext.h:5331
# MESA_resize_buffers (GL/glext.h:5334)
GL_MESA_resize_buffers = 1 # GL/glext.h:5335
# GL/glext.h:5337
glResizeBuffersMESA = _link_function('glResizeBuffersMESA', None, [], 'MESA_resize_buffers')
PFNGLRESIZEBUFFERSMESAPROC = CFUNCTYPE(None) # GL/glext.h:5339
# Autogenerated ctypes bindings (regenerated from GL/glext.h); do not hand-edit.
# This section: the full MESA_window_pos family -- glWindowPos{2,3,4}{d,f,i,s}[v]MESA,
# i.e. every coordinate count (2/3/4) crossed with every scalar type (double, float,
# int, short) plus the pointer ("v") variants, followed by their CFUNCTYPE prototypes.
# MESA_window_pos (GL/glext.h:5342)
GL_MESA_window_pos = 1 # GL/glext.h:5343
# GL/glext.h:5345
glWindowPos2dMESA = _link_function('glWindowPos2dMESA', None, [GLdouble, GLdouble], 'MESA_window_pos')
# GL/glext.h:5346
glWindowPos2dvMESA = _link_function('glWindowPos2dvMESA', None, [POINTER(GLdouble)], 'MESA_window_pos')
# GL/glext.h:5347
glWindowPos2fMESA = _link_function('glWindowPos2fMESA', None, [GLfloat, GLfloat], 'MESA_window_pos')
# GL/glext.h:5348
glWindowPos2fvMESA = _link_function('glWindowPos2fvMESA', None, [POINTER(GLfloat)], 'MESA_window_pos')
# GL/glext.h:5349
glWindowPos2iMESA = _link_function('glWindowPos2iMESA', None, [GLint, GLint], 'MESA_window_pos')
# GL/glext.h:5350
glWindowPos2ivMESA = _link_function('glWindowPos2ivMESA', None, [POINTER(GLint)], 'MESA_window_pos')
# GL/glext.h:5351
glWindowPos2sMESA = _link_function('glWindowPos2sMESA', None, [GLshort, GLshort], 'MESA_window_pos')
# GL/glext.h:5352
glWindowPos2svMESA = _link_function('glWindowPos2svMESA', None, [POINTER(GLshort)], 'MESA_window_pos')
# GL/glext.h:5353
glWindowPos3dMESA = _link_function('glWindowPos3dMESA', None, [GLdouble, GLdouble, GLdouble], 'MESA_window_pos')
# GL/glext.h:5354
glWindowPos3dvMESA = _link_function('glWindowPos3dvMESA', None, [POINTER(GLdouble)], 'MESA_window_pos')
# GL/glext.h:5355
glWindowPos3fMESA = _link_function('glWindowPos3fMESA', None, [GLfloat, GLfloat, GLfloat], 'MESA_window_pos')
# GL/glext.h:5356
glWindowPos3fvMESA = _link_function('glWindowPos3fvMESA', None, [POINTER(GLfloat)], 'MESA_window_pos')
# GL/glext.h:5357
glWindowPos3iMESA = _link_function('glWindowPos3iMESA', None, [GLint, GLint, GLint], 'MESA_window_pos')
# GL/glext.h:5358
glWindowPos3ivMESA = _link_function('glWindowPos3ivMESA', None, [POINTER(GLint)], 'MESA_window_pos')
# GL/glext.h:5359
glWindowPos3sMESA = _link_function('glWindowPos3sMESA', None, [GLshort, GLshort, GLshort], 'MESA_window_pos')
# GL/glext.h:5360
glWindowPos3svMESA = _link_function('glWindowPos3svMESA', None, [POINTER(GLshort)], 'MESA_window_pos')
# GL/glext.h:5361
glWindowPos4dMESA = _link_function('glWindowPos4dMESA', None, [GLdouble, GLdouble, GLdouble, GLdouble], 'MESA_window_pos')
# GL/glext.h:5362
glWindowPos4dvMESA = _link_function('glWindowPos4dvMESA', None, [POINTER(GLdouble)], 'MESA_window_pos')
# GL/glext.h:5363
glWindowPos4fMESA = _link_function('glWindowPos4fMESA', None, [GLfloat, GLfloat, GLfloat, GLfloat], 'MESA_window_pos')
# GL/glext.h:5364
glWindowPos4fvMESA = _link_function('glWindowPos4fvMESA', None, [POINTER(GLfloat)], 'MESA_window_pos')
# GL/glext.h:5365
glWindowPos4iMESA = _link_function('glWindowPos4iMESA', None, [GLint, GLint, GLint, GLint], 'MESA_window_pos')
# GL/glext.h:5366
glWindowPos4ivMESA = _link_function('glWindowPos4ivMESA', None, [POINTER(GLint)], 'MESA_window_pos')
# GL/glext.h:5367
glWindowPos4sMESA = _link_function('glWindowPos4sMESA', None, [GLshort, GLshort, GLshort, GLshort], 'MESA_window_pos')
# GL/glext.h:5368
glWindowPos4svMESA = _link_function('glWindowPos4svMESA', None, [POINTER(GLshort)], 'MESA_window_pos')
PFNGLWINDOWPOS2DMESAPROC = CFUNCTYPE(None, GLdouble, GLdouble) # GL/glext.h:5370
PFNGLWINDOWPOS2DVMESAPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:5371
PFNGLWINDOWPOS2FMESAPROC = CFUNCTYPE(None, GLfloat, GLfloat) # GL/glext.h:5372
PFNGLWINDOWPOS2FVMESAPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:5373
PFNGLWINDOWPOS2IMESAPROC = CFUNCTYPE(None, GLint, GLint) # GL/glext.h:5374
PFNGLWINDOWPOS2IVMESAPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:5375
PFNGLWINDOWPOS2SMESAPROC = CFUNCTYPE(None, GLshort, GLshort) # GL/glext.h:5376
PFNGLWINDOWPOS2SVMESAPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:5377
PFNGLWINDOWPOS3DMESAPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble) # GL/glext.h:5378
PFNGLWINDOWPOS3DVMESAPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:5379
PFNGLWINDOWPOS3FMESAPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat) # GL/glext.h:5380
PFNGLWINDOWPOS3FVMESAPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:5381
PFNGLWINDOWPOS3IMESAPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:5382
PFNGLWINDOWPOS3IVMESAPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:5383
PFNGLWINDOWPOS3SMESAPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort) # GL/glext.h:5384
PFNGLWINDOWPOS3SVMESAPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:5385
PFNGLWINDOWPOS4DMESAPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:5386
PFNGLWINDOWPOS4DVMESAPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:5387
PFNGLWINDOWPOS4FMESAPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5388
PFNGLWINDOWPOS4FVMESAPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:5389
PFNGLWINDOWPOS4IMESAPROC = CFUNCTYPE(None, GLint, GLint, GLint, GLint) # GL/glext.h:5390
PFNGLWINDOWPOS4IVMESAPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:5391
PFNGLWINDOWPOS4SMESAPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort, GLshort) # GL/glext.h:5392
PFNGLWINDOWPOS4SVMESAPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:5393
# Autogenerated ctypes bindings (regenerated from GL/glext.h); do not hand-edit.
# This section: IBM_multimode_draw_arrays, IBM_vertex_array_lists (note the
# POINTER(POINTER(GLvoid)) arguments -- arrays of array pointers in the C API),
# assorted SGIX flags, 3DFX_tbuffer, and EXT_multisample.
# IBM_cull_vertex (GL/glext.h:5396)
GL_IBM_cull_vertex = 1 # GL/glext.h:5397
# IBM_multimode_draw_arrays (GL/glext.h:5400)
GL_IBM_multimode_draw_arrays = 1 # GL/glext.h:5401
# GL/glext.h:5403
glMultiModeDrawArraysIBM = _link_function('glMultiModeDrawArraysIBM', None, [POINTER(GLenum), POINTER(GLint), POINTER(GLsizei), GLsizei, GLint], 'IBM_multimode_draw_arrays')
# GL/glext.h:5404
glMultiModeDrawElementsIBM = _link_function('glMultiModeDrawElementsIBM', None, [POINTER(GLenum), POINTER(GLsizei), GLenum, POINTER(POINTER(GLvoid)), GLsizei, GLint], 'IBM_multimode_draw_arrays')
PFNGLMULTIMODEDRAWARRAYSIBMPROC = CFUNCTYPE(None, POINTER(GLenum), POINTER(GLint), POINTER(GLsizei), GLsizei, GLint) # GL/glext.h:5406
PFNGLMULTIMODEDRAWELEMENTSIBMPROC = CFUNCTYPE(None, POINTER(GLenum), POINTER(GLsizei), GLenum, POINTER(POINTER(GLvoid)), GLsizei, GLint) # GL/glext.h:5407
# IBM_vertex_array_lists (GL/glext.h:5410)
GL_IBM_vertex_array_lists = 1 # GL/glext.h:5411
# GL/glext.h:5413
glColorPointerListIBM = _link_function('glColorPointerListIBM', None, [GLint, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint], 'IBM_vertex_array_lists')
# GL/glext.h:5414
glSecondaryColorPointerListIBM = _link_function('glSecondaryColorPointerListIBM', None, [GLint, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint], 'IBM_vertex_array_lists')
# GL/glext.h:5415
glEdgeFlagPointerListIBM = _link_function('glEdgeFlagPointerListIBM', None, [GLint, POINTER(POINTER(GLboolean)), GLint], 'IBM_vertex_array_lists')
# GL/glext.h:5416
glFogCoordPointerListIBM = _link_function('glFogCoordPointerListIBM', None, [GLenum, GLint, POINTER(POINTER(GLvoid)), GLint], 'IBM_vertex_array_lists')
# GL/glext.h:5417
glIndexPointerListIBM = _link_function('glIndexPointerListIBM', None, [GLenum, GLint, POINTER(POINTER(GLvoid)), GLint], 'IBM_vertex_array_lists')
# GL/glext.h:5418
glNormalPointerListIBM = _link_function('glNormalPointerListIBM', None, [GLenum, GLint, POINTER(POINTER(GLvoid)), GLint], 'IBM_vertex_array_lists')
# GL/glext.h:5419
glTexCoordPointerListIBM = _link_function('glTexCoordPointerListIBM', None, [GLint, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint], 'IBM_vertex_array_lists')
# GL/glext.h:5420
glVertexPointerListIBM = _link_function('glVertexPointerListIBM', None, [GLint, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint], 'IBM_vertex_array_lists')
PFNGLCOLORPOINTERLISTIBMPROC = CFUNCTYPE(None, GLint, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint) # GL/glext.h:5422
PFNGLSECONDARYCOLORPOINTERLISTIBMPROC = CFUNCTYPE(None, GLint, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint) # GL/glext.h:5423
PFNGLEDGEFLAGPOINTERLISTIBMPROC = CFUNCTYPE(None, GLint, POINTER(POINTER(GLboolean)), GLint) # GL/glext.h:5424
PFNGLFOGCOORDPOINTERLISTIBMPROC = CFUNCTYPE(None, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint) # GL/glext.h:5425
PFNGLINDEXPOINTERLISTIBMPROC = CFUNCTYPE(None, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint) # GL/glext.h:5426
PFNGLNORMALPOINTERLISTIBMPROC = CFUNCTYPE(None, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint) # GL/glext.h:5427
PFNGLTEXCOORDPOINTERLISTIBMPROC = CFUNCTYPE(None, GLint, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint) # GL/glext.h:5428
PFNGLVERTEXPOINTERLISTIBMPROC = CFUNCTYPE(None, GLint, GLenum, GLint, POINTER(POINTER(GLvoid)), GLint) # GL/glext.h:5429
# SGIX_subsample (GL/glext.h:5432)
GL_SGIX_subsample = 1 # GL/glext.h:5433
# SGIX_ycrcba (GL/glext.h:5436)
GL_SGIX_ycrcba = 1 # GL/glext.h:5437
# SGIX_ycrcb_subsample (GL/glext.h:5440)
GL_SGIX_ycrcb_subsample = 1 # GL/glext.h:5441
# SGIX_depth_pass_instrument (GL/glext.h:5444)
GL_SGIX_depth_pass_instrument = 1 # GL/glext.h:5445
# 3DFX_texture_compression_FXT1 (GL/glext.h:5448)
GL_3DFX_texture_compression_FXT1 = 1 # GL/glext.h:5449
# 3DFX_multisample (GL/glext.h:5452)
GL_3DFX_multisample = 1 # GL/glext.h:5453
# 3DFX_tbuffer (GL/glext.h:5456)
GL_3DFX_tbuffer = 1 # GL/glext.h:5457
# GL/glext.h:5459
glTbufferMask3DFX = _link_function('glTbufferMask3DFX', None, [GLuint], '3DFX_tbuffer')
PFNGLTBUFFERMASK3DFXPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5461
# EXT_multisample (GL/glext.h:5464)
GL_EXT_multisample = 1 # GL/glext.h:5465
# GL/glext.h:5467
glSampleMaskEXT = _link_function('glSampleMaskEXT', None, [GLclampf, GLboolean], 'EXT_multisample')
# GL/glext.h:5468
glSamplePatternEXT = _link_function('glSamplePatternEXT', None, [GLenum], 'EXT_multisample')
PFNGLSAMPLEMASKEXTPROC = CFUNCTYPE(None, GLclampf, GLboolean) # GL/glext.h:5470
PFNGLSAMPLEPATTERNEXTPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:5471
# Autogenerated ctypes bindings (regenerated from GL/glext.h); do not hand-edit.
# This section: misc SGIX/SGIS capability flags, SGIS_texture_color_mask,
# SGIX_igloo_interface, NV_fence (note glIsFenceNV / glTestFenceNV return
# GLboolean rather than void), and NV_evaluators.
# SGIX_vertex_preclip (GL/glext.h:5474)
GL_SGIX_vertex_preclip = 1 # GL/glext.h:5475
# SGIX_convolution_accuracy (GL/glext.h:5478)
GL_SGIX_convolution_accuracy = 1 # GL/glext.h:5479
# SGIX_resample (GL/glext.h:5482)
GL_SGIX_resample = 1 # GL/glext.h:5483
# SGIS_point_line_texgen (GL/glext.h:5486)
GL_SGIS_point_line_texgen = 1 # GL/glext.h:5487
# SGIS_texture_color_mask (GL/glext.h:5490)
GL_SGIS_texture_color_mask = 1 # GL/glext.h:5491
# GL/glext.h:5493
glTextureColorMaskSGIS = _link_function('glTextureColorMaskSGIS', None, [GLboolean, GLboolean, GLboolean, GLboolean], 'SGIS_texture_color_mask')
PFNGLTEXTURECOLORMASKSGISPROC = CFUNCTYPE(None, GLboolean, GLboolean, GLboolean, GLboolean) # GL/glext.h:5495
# SGIX_igloo_interface (GL/glext.h:5498)
GL_SGIX_igloo_interface = 1 # GL/glext.h:5499
# GL/glext.h:5501
glIglooInterfaceSGIX = _link_function('glIglooInterfaceSGIX', None, [GLenum, POINTER(GLvoid)], 'SGIX_igloo_interface')
PFNGLIGLOOINTERFACESGIXPROC = CFUNCTYPE(None, GLenum, POINTER(GLvoid)) # GL/glext.h:5503
# EXT_texture_env_dot3 (GL/glext.h:5506)
GL_EXT_texture_env_dot3 = 1 # GL/glext.h:5507
# ATI_texture_mirror_once (GL/glext.h:5510)
GL_ATI_texture_mirror_once = 1 # GL/glext.h:5511
# NV_fence (GL/glext.h:5514)
GL_NV_fence = 1 # GL/glext.h:5515
# GL/glext.h:5517
glDeleteFencesNV = _link_function('glDeleteFencesNV', None, [GLsizei, POINTER(GLuint)], 'NV_fence')
# GL/glext.h:5518
glGenFencesNV = _link_function('glGenFencesNV', None, [GLsizei, POINTER(GLuint)], 'NV_fence')
# GL/glext.h:5519
glIsFenceNV = _link_function('glIsFenceNV', GLboolean, [GLuint], 'NV_fence')
# GL/glext.h:5520
glTestFenceNV = _link_function('glTestFenceNV', GLboolean, [GLuint], 'NV_fence')
# GL/glext.h:5521
glGetFenceivNV = _link_function('glGetFenceivNV', None, [GLuint, GLenum, POINTER(GLint)], 'NV_fence')
# GL/glext.h:5522
glFinishFenceNV = _link_function('glFinishFenceNV', None, [GLuint], 'NV_fence')
# GL/glext.h:5523
glSetFenceNV = _link_function('glSetFenceNV', None, [GLuint, GLenum], 'NV_fence')
PFNGLDELETEFENCESNVPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:5525
PFNGLGENFENCESNVPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:5526
PFNGLISFENCENVPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:5527
PFNGLTESTFENCENVPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:5528
PFNGLGETFENCEIVNVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:5529
PFNGLFINISHFENCENVPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5530
PFNGLSETFENCENVPROC = CFUNCTYPE(None, GLuint, GLenum) # GL/glext.h:5531
# NV_evaluators (GL/glext.h:5534)
GL_NV_evaluators = 1 # GL/glext.h:5535
# GL/glext.h:5537
glMapControlPointsNV = _link_function('glMapControlPointsNV', None, [GLenum, GLuint, GLenum, GLsizei, GLsizei, GLint, GLint, GLboolean, POINTER(GLvoid)], 'NV_evaluators')
# GL/glext.h:5538
glMapParameterivNV = _link_function('glMapParameterivNV', None, [GLenum, GLenum, POINTER(GLint)], 'NV_evaluators')
# GL/glext.h:5539
glMapParameterfvNV = _link_function('glMapParameterfvNV', None, [GLenum, GLenum, POINTER(GLfloat)], 'NV_evaluators')
# GL/glext.h:5540
glGetMapControlPointsNV = _link_function('glGetMapControlPointsNV', None, [GLenum, GLuint, GLenum, GLsizei, GLsizei, GLboolean, POINTER(GLvoid)], 'NV_evaluators')
# GL/glext.h:5541
glGetMapParameterivNV = _link_function('glGetMapParameterivNV', None, [GLenum, GLenum, POINTER(GLint)], 'NV_evaluators')
# GL/glext.h:5542
glGetMapParameterfvNV = _link_function('glGetMapParameterfvNV', None, [GLenum, GLenum, POINTER(GLfloat)], 'NV_evaluators')
# GL/glext.h:5543
glGetMapAttribParameterivNV = _link_function('glGetMapAttribParameterivNV', None, [GLenum, GLuint, GLenum, POINTER(GLint)], 'NV_evaluators')
# GL/glext.h:5544
glGetMapAttribParameterfvNV = _link_function('glGetMapAttribParameterfvNV', None, [GLenum, GLuint, GLenum, POINTER(GLfloat)], 'NV_evaluators')
# GL/glext.h:5545
glEvalMapsNV = _link_function('glEvalMapsNV', None, [GLenum, GLenum], 'NV_evaluators')
PFNGLMAPCONTROLPOINTSNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLenum, GLsizei, GLsizei, GLint, GLint, GLboolean, POINTER(GLvoid)) # GL/glext.h:5547
PFNGLMAPPARAMETERIVNVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:5548
PFNGLMAPPARAMETERFVNVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:5549
PFNGLGETMAPCONTROLPOINTSNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLenum, GLsizei, GLsizei, GLboolean, POINTER(GLvoid)) # GL/glext.h:5550
PFNGLGETMAPPARAMETERIVNVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:5551
PFNGLGETMAPPARAMETERFVNVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:5552
PFNGLGETMAPATTRIBPARAMETERIVNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:5553
PFNGLGETMAPATTRIBPARAMETERFVNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:5554
PFNGLEVALMAPSNVPROC = CFUNCTYPE(None, GLenum, GLenum) # GL/glext.h:5555
# Autogenerated ctypes bindings (regenerated from GL/glext.h); do not hand-edit.
# This section: NV_register_combiners2 plus NV texture/shader capability flags
# and the GL_NV_vertex_program presence flag for the section that follows.
# NV_packed_depth_stencil (GL/glext.h:5558)
GL_NV_packed_depth_stencil = 1 # GL/glext.h:5559
# NV_register_combiners2 (GL/glext.h:5562)
GL_NV_register_combiners2 = 1 # GL/glext.h:5563
# GL/glext.h:5565
glCombinerStageParameterfvNV = _link_function('glCombinerStageParameterfvNV', None, [GLenum, GLenum, POINTER(GLfloat)], 'NV_register_combiners2')
# GL/glext.h:5566
glGetCombinerStageParameterfvNV = _link_function('glGetCombinerStageParameterfvNV', None, [GLenum, GLenum, POINTER(GLfloat)], 'NV_register_combiners2')
PFNGLCOMBINERSTAGEPARAMETERFVNVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:5568
PFNGLGETCOMBINERSTAGEPARAMETERFVNVPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:5569
# NV_texture_compression_vtc (GL/glext.h:5572)
GL_NV_texture_compression_vtc = 1 # GL/glext.h:5573
# NV_texture_rectangle (GL/glext.h:5576)
GL_NV_texture_rectangle = 1 # GL/glext.h:5577
# NV_texture_shader (GL/glext.h:5580)
GL_NV_texture_shader = 1 # GL/glext.h:5581
# NV_texture_shader2 (GL/glext.h:5584)
GL_NV_texture_shader2 = 1 # GL/glext.h:5585
# NV_vertex_array_range2 (GL/glext.h:5588)
GL_NV_vertex_array_range2 = 1 # GL/glext.h:5589
# NV_vertex_program (GL/glext.h:5592)
GL_NV_vertex_program = 1 # GL/glext.h:5593
# Autogenerated ctypes bindings (regenerated from GL/glext.h); do not hand-edit.
# This section: the NV_vertex_program entry points -- program object management,
# program parameters, matrix tracking, and the glVertexAttrib{1..4}{d,f,s,ub}[v]NV
# immediate-mode attribute setters -- followed by their CFUNCTYPE prototypes.
# (The prototype list continues beyond this chunk of the file.)
# GL/glext.h:5595
glAreProgramsResidentNV = _link_function('glAreProgramsResidentNV', GLboolean, [GLsizei, POINTER(GLuint), POINTER(GLboolean)], 'NV_vertex_program')
# GL/glext.h:5596
glBindProgramNV = _link_function('glBindProgramNV', None, [GLenum, GLuint], 'NV_vertex_program')
# GL/glext.h:5597
glDeleteProgramsNV = _link_function('glDeleteProgramsNV', None, [GLsizei, POINTER(GLuint)], 'NV_vertex_program')
# GL/glext.h:5598
glExecuteProgramNV = _link_function('glExecuteProgramNV', None, [GLenum, GLuint, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5599
glGenProgramsNV = _link_function('glGenProgramsNV', None, [GLsizei, POINTER(GLuint)], 'NV_vertex_program')
# GL/glext.h:5600
glGetProgramParameterdvNV = _link_function('glGetProgramParameterdvNV', None, [GLenum, GLuint, GLenum, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5601
glGetProgramParameterfvNV = _link_function('glGetProgramParameterfvNV', None, [GLenum, GLuint, GLenum, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5602
glGetProgramivNV = _link_function('glGetProgramivNV', None, [GLuint, GLenum, POINTER(GLint)], 'NV_vertex_program')
# GL/glext.h:5603
glGetProgramStringNV = _link_function('glGetProgramStringNV', None, [GLuint, GLenum, POINTER(GLubyte)], 'NV_vertex_program')
# GL/glext.h:5604
glGetTrackMatrixivNV = _link_function('glGetTrackMatrixivNV', None, [GLenum, GLuint, GLenum, POINTER(GLint)], 'NV_vertex_program')
# GL/glext.h:5605
glGetVertexAttribdvNV = _link_function('glGetVertexAttribdvNV', None, [GLuint, GLenum, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5606
glGetVertexAttribfvNV = _link_function('glGetVertexAttribfvNV', None, [GLuint, GLenum, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5607
glGetVertexAttribivNV = _link_function('glGetVertexAttribivNV', None, [GLuint, GLenum, POINTER(GLint)], 'NV_vertex_program')
# GL/glext.h:5608
glGetVertexAttribPointervNV = _link_function('glGetVertexAttribPointervNV', None, [GLuint, GLenum, POINTER(POINTER(GLvoid))], 'NV_vertex_program')
# GL/glext.h:5609
glIsProgramNV = _link_function('glIsProgramNV', GLboolean, [GLuint], 'NV_vertex_program')
# GL/glext.h:5610
glLoadProgramNV = _link_function('glLoadProgramNV', None, [GLenum, GLuint, GLsizei, POINTER(GLubyte)], 'NV_vertex_program')
# GL/glext.h:5611
glProgramParameter4dNV = _link_function('glProgramParameter4dNV', None, [GLenum, GLuint, GLdouble, GLdouble, GLdouble, GLdouble], 'NV_vertex_program')
# GL/glext.h:5612
glProgramParameter4dvNV = _link_function('glProgramParameter4dvNV', None, [GLenum, GLuint, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5613
glProgramParameter4fNV = _link_function('glProgramParameter4fNV', None, [GLenum, GLuint, GLfloat, GLfloat, GLfloat, GLfloat], 'NV_vertex_program')
# GL/glext.h:5614
glProgramParameter4fvNV = _link_function('glProgramParameter4fvNV', None, [GLenum, GLuint, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5615
glProgramParameters4dvNV = _link_function('glProgramParameters4dvNV', None, [GLenum, GLuint, GLuint, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5616
glProgramParameters4fvNV = _link_function('glProgramParameters4fvNV', None, [GLenum, GLuint, GLuint, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5617
glRequestResidentProgramsNV = _link_function('glRequestResidentProgramsNV', None, [GLsizei, POINTER(GLuint)], 'NV_vertex_program')
# GL/glext.h:5618
glTrackMatrixNV = _link_function('glTrackMatrixNV', None, [GLenum, GLuint, GLenum, GLenum], 'NV_vertex_program')
# GL/glext.h:5619
glVertexAttribPointerNV = _link_function('glVertexAttribPointerNV', None, [GLuint, GLint, GLenum, GLsizei, POINTER(GLvoid)], 'NV_vertex_program')
# GL/glext.h:5620
glVertexAttrib1dNV = _link_function('glVertexAttrib1dNV', None, [GLuint, GLdouble], 'NV_vertex_program')
# GL/glext.h:5621
glVertexAttrib1dvNV = _link_function('glVertexAttrib1dvNV', None, [GLuint, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5622
glVertexAttrib1fNV = _link_function('glVertexAttrib1fNV', None, [GLuint, GLfloat], 'NV_vertex_program')
# GL/glext.h:5623
glVertexAttrib1fvNV = _link_function('glVertexAttrib1fvNV', None, [GLuint, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5624
glVertexAttrib1sNV = _link_function('glVertexAttrib1sNV', None, [GLuint, GLshort], 'NV_vertex_program')
# GL/glext.h:5625
glVertexAttrib1svNV = _link_function('glVertexAttrib1svNV', None, [GLuint, POINTER(GLshort)], 'NV_vertex_program')
# GL/glext.h:5626
glVertexAttrib2dNV = _link_function('glVertexAttrib2dNV', None, [GLuint, GLdouble, GLdouble], 'NV_vertex_program')
# GL/glext.h:5627
glVertexAttrib2dvNV = _link_function('glVertexAttrib2dvNV', None, [GLuint, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5628
glVertexAttrib2fNV = _link_function('glVertexAttrib2fNV', None, [GLuint, GLfloat, GLfloat], 'NV_vertex_program')
# GL/glext.h:5629
glVertexAttrib2fvNV = _link_function('glVertexAttrib2fvNV', None, [GLuint, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5630
glVertexAttrib2sNV = _link_function('glVertexAttrib2sNV', None, [GLuint, GLshort, GLshort], 'NV_vertex_program')
# GL/glext.h:5631
glVertexAttrib2svNV = _link_function('glVertexAttrib2svNV', None, [GLuint, POINTER(GLshort)], 'NV_vertex_program')
# GL/glext.h:5632
glVertexAttrib3dNV = _link_function('glVertexAttrib3dNV', None, [GLuint, GLdouble, GLdouble, GLdouble], 'NV_vertex_program')
# GL/glext.h:5633
glVertexAttrib3dvNV = _link_function('glVertexAttrib3dvNV', None, [GLuint, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5634
glVertexAttrib3fNV = _link_function('glVertexAttrib3fNV', None, [GLuint, GLfloat, GLfloat, GLfloat], 'NV_vertex_program')
# GL/glext.h:5635
glVertexAttrib3fvNV = _link_function('glVertexAttrib3fvNV', None, [GLuint, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5636
glVertexAttrib3sNV = _link_function('glVertexAttrib3sNV', None, [GLuint, GLshort, GLshort, GLshort], 'NV_vertex_program')
# GL/glext.h:5637
glVertexAttrib3svNV = _link_function('glVertexAttrib3svNV', None, [GLuint, POINTER(GLshort)], 'NV_vertex_program')
# GL/glext.h:5638
glVertexAttrib4dNV = _link_function('glVertexAttrib4dNV', None, [GLuint, GLdouble, GLdouble, GLdouble, GLdouble], 'NV_vertex_program')
# GL/glext.h:5639
glVertexAttrib4dvNV = _link_function('glVertexAttrib4dvNV', None, [GLuint, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5640
glVertexAttrib4fNV = _link_function('glVertexAttrib4fNV', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat], 'NV_vertex_program')
# GL/glext.h:5641
glVertexAttrib4fvNV = _link_function('glVertexAttrib4fvNV', None, [GLuint, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5642
glVertexAttrib4sNV = _link_function('glVertexAttrib4sNV', None, [GLuint, GLshort, GLshort, GLshort, GLshort], 'NV_vertex_program')
# GL/glext.h:5643
glVertexAttrib4svNV = _link_function('glVertexAttrib4svNV', None, [GLuint, POINTER(GLshort)], 'NV_vertex_program')
# GL/glext.h:5644
glVertexAttrib4ubNV = _link_function('glVertexAttrib4ubNV', None, [GLuint, GLubyte, GLubyte, GLubyte, GLubyte], 'NV_vertex_program')
# GL/glext.h:5645
glVertexAttrib4ubvNV = _link_function('glVertexAttrib4ubvNV', None, [GLuint, POINTER(GLubyte)], 'NV_vertex_program')
# GL/glext.h:5646
glVertexAttribs1dvNV = _link_function('glVertexAttribs1dvNV', None, [GLuint, GLsizei, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5647
glVertexAttribs1fvNV = _link_function('glVertexAttribs1fvNV', None, [GLuint, GLsizei, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5648
glVertexAttribs1svNV = _link_function('glVertexAttribs1svNV', None, [GLuint, GLsizei, POINTER(GLshort)], 'NV_vertex_program')
# GL/glext.h:5649
glVertexAttribs2dvNV = _link_function('glVertexAttribs2dvNV', None, [GLuint, GLsizei, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5650
glVertexAttribs2fvNV = _link_function('glVertexAttribs2fvNV', None, [GLuint, GLsizei, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5651
glVertexAttribs2svNV = _link_function('glVertexAttribs2svNV', None, [GLuint, GLsizei, POINTER(GLshort)], 'NV_vertex_program')
# GL/glext.h:5652
glVertexAttribs3dvNV = _link_function('glVertexAttribs3dvNV', None, [GLuint, GLsizei, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5653
glVertexAttribs3fvNV = _link_function('glVertexAttribs3fvNV', None, [GLuint, GLsizei, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5654
glVertexAttribs3svNV = _link_function('glVertexAttribs3svNV', None, [GLuint, GLsizei, POINTER(GLshort)], 'NV_vertex_program')
# GL/glext.h:5655
glVertexAttribs4dvNV = _link_function('glVertexAttribs4dvNV', None, [GLuint, GLsizei, POINTER(GLdouble)], 'NV_vertex_program')
# GL/glext.h:5656
glVertexAttribs4fvNV = _link_function('glVertexAttribs4fvNV', None, [GLuint, GLsizei, POINTER(GLfloat)], 'NV_vertex_program')
# GL/glext.h:5657
glVertexAttribs4svNV = _link_function('glVertexAttribs4svNV', None, [GLuint, GLsizei, POINTER(GLshort)], 'NV_vertex_program')
# GL/glext.h:5658
glVertexAttribs4ubvNV = _link_function('glVertexAttribs4ubvNV', None, [GLuint, GLsizei, POINTER(GLubyte)], 'NV_vertex_program')
PFNGLAREPROGRAMSRESIDENTNVPROC = CFUNCTYPE(GLboolean, GLsizei, POINTER(GLuint), POINTER(GLboolean)) # GL/glext.h:5660
PFNGLBINDPROGRAMNVPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:5661
PFNGLDELETEPROGRAMSNVPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:5662
PFNGLEXECUTEPROGRAMNVPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLfloat)) # GL/glext.h:5663
PFNGLGENPROGRAMSNVPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:5664
PFNGLGETPROGRAMPARAMETERDVNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLenum, POINTER(GLdouble)) # GL/glext.h:5665
PFNGLGETPROGRAMPARAMETERFVNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:5666
PFNGLGETPROGRAMIVNVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:5667
PFNGLGETPROGRAMSTRINGNVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLubyte)) # GL/glext.h:5668
PFNGLGETTRACKMATRIXIVNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:5669
PFNGLGETVERTEXATTRIBDVNVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLdouble)) # GL/glext.h:5670
PFNGLGETVERTEXATTRIBFVNVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:5671
PFNGLGETVERTEXATTRIBIVNVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:5672
PFNGLGETVERTEXATTRIBPOINTERVNVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:5673
PFNGLISPROGRAMNVPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:5674
PFNGLLOADPROGRAMNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLsizei, POINTER(GLubyte)) # GL/glext.h:5675
PFNGLPROGRAMPARAMETER4DNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:5676
PFNGLPROGRAMPARAMETER4DVNVPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLdouble)) # GL/glext.h:5677
PFNGLPROGRAMPARAMETER4FNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5678
PFNGLPROGRAMPARAMETER4FVNVPROC = CFUNCTYPE(None, GLenum, GLuint, POINTER(GLfloat)) # GL/glext.h:5679
PFNGLPROGRAMPARAMETERS4DVNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, POINTER(GLdouble)) # GL/glext.h:5680
PFNGLPROGRAMPARAMETERS4FVNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, POINTER(GLfloat)) # GL/glext.h:5681
PFNGLREQUESTRESIDENTPROGRAMSNVPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:5682
PFNGLTRACKMATRIXNVPROC = CFUNCTYPE(None, GLenum, GLuint, GLenum, GLenum) # GL/glext.h:5683
PFNGLVERTEXATTRIBPOINTERNVPROC = CFUNCTYPE(None, GLuint, GLint, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:5684
PFNGLVERTEXATTRIB1DNVPROC = CFUNCTYPE(None, GLuint, GLdouble) # GL/glext.h:5685
PFNGLVERTEXATTRIB1DVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:5686
PFNGLVERTEXATTRIB1FNVPROC = CFUNCTYPE(None, GLuint, GLfloat) # GL/glext.h:5687
PFNGLVERTEXATTRIB1FVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:5688
PFNGLVERTEXATTRIB1SNVPROC = CFUNCTYPE(None, GLuint, GLshort) # GL/glext.h:5689
PFNGLVERTEXATTRIB1SVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:5690
PFNGLVERTEXATTRIB2DNVPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble) # GL/glext.h:5691
PFNGLVERTEXATTRIB2DVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:5692
PFNGLVERTEXATTRIB2FNVPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat) # GL/glext.h:5693
PFNGLVERTEXATTRIB2FVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:5694
PFNGLVERTEXATTRIB2SNVPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort) # GL/glext.h:5695
PFNGLVERTEXATTRIB2SVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:5696
PFNGLVERTEXATTRIB3DNVPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble, GLdouble) # GL/glext.h:5697
PFNGLVERTEXATTRIB3DVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:5698
PFNGLVERTEXATTRIB3FNVPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat) # GL/glext.h:5699
PFNGLVERTEXATTRIB3FVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:5700
PFNGLVERTEXATTRIB3SNVPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort, GLshort) # GL/glext.h:5701
PFNGLVERTEXATTRIB3SVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:5702
PFNGLVERTEXATTRIB4DNVPROC = CFUNCTYPE(None, GLuint, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:5703
PFNGLVERTEXATTRIB4DVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:5704
PFNGLVERTEXATTRIB4FNVPROC = CFUNCTYPE(None, GLuint, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:5705
PFNGLVERTEXATTRIB4FVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:5706
PFNGLVERTEXATTRIB4SNVPROC = CFUNCTYPE(None, GLuint, GLshort, GLshort, GLshort, GLshort) # GL/glext.h:5707
PFNGLVERTEXATTRIB4SVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:5708
PFNGLVERTEXATTRIB4UBNVPROC = CFUNCTYPE(None, GLuint, GLubyte, GLubyte, GLubyte, GLubyte) # GL/glext.h:5709
PFNGLVERTEXATTRIB4UBVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLubyte)) # GL/glext.h:5710
PFNGLVERTEXATTRIBS1DVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLdouble)) # GL/glext.h:5711
PFNGLVERTEXATTRIBS1FVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLfloat)) # GL/glext.h:5712
PFNGLVERTEXATTRIBS1SVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLshort)) # GL/glext.h:5713
PFNGLVERTEXATTRIBS2DVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLdouble)) # GL/glext.h:5714
PFNGLVERTEXATTRIBS2FVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLfloat)) # GL/glext.h:5715
PFNGLVERTEXATTRIBS2SVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLshort)) # GL/glext.h:5716
PFNGLVERTEXATTRIBS3DVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLdouble)) # GL/glext.h:5717
PFNGLVERTEXATTRIBS3FVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLfloat)) # GL/glext.h:5718
PFNGLVERTEXATTRIBS3SVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLshort)) # GL/glext.h:5719
PFNGLVERTEXATTRIBS4DVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLdouble)) # GL/glext.h:5720
PFNGLVERTEXATTRIBS4FVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLfloat)) # GL/glext.h:5721
PFNGLVERTEXATTRIBS4SVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLshort)) # GL/glext.h:5722
PFNGLVERTEXATTRIBS4UBVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLubyte)) # GL/glext.h:5723
# SGIX_texture_coordinate_clamp (GL/glext.h:5726)
GL_SGIX_texture_coordinate_clamp = 1 # GL/glext.h:5727
# SGIX_scalebias_hint (GL/glext.h:5730)
GL_SGIX_scalebias_hint = 1 # GL/glext.h:5731
# OML_interlace (GL/glext.h:5734)
GL_OML_interlace = 1 # GL/glext.h:5735
# OML_subsample (GL/glext.h:5738)
GL_OML_subsample = 1 # GL/glext.h:5739
# OML_resample (GL/glext.h:5742)
GL_OML_resample = 1 # GL/glext.h:5743
# NV_copy_depth_to_color (GL/glext.h:5746)
GL_NV_copy_depth_to_color = 1 # GL/glext.h:5747
# ATI_envmap_bumpmap (GL/glext.h:5750)
GL_ATI_envmap_bumpmap = 1 # GL/glext.h:5751
# GL/glext.h:5753
glTexBumpParameterivATI = _link_function('glTexBumpParameterivATI', None, [GLenum, POINTER(GLint)], 'ATI_envmap_bumpmap')
# GL/glext.h:5754
glTexBumpParameterfvATI = _link_function('glTexBumpParameterfvATI', None, [GLenum, POINTER(GLfloat)], 'ATI_envmap_bumpmap')
# GL/glext.h:5755
glGetTexBumpParameterivATI = _link_function('glGetTexBumpParameterivATI', None, [GLenum, POINTER(GLint)], 'ATI_envmap_bumpmap')
# GL/glext.h:5756
glGetTexBumpParameterfvATI = _link_function('glGetTexBumpParameterfvATI', None, [GLenum, POINTER(GLfloat)], 'ATI_envmap_bumpmap')
PFNGLTEXBUMPPARAMETERIVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:5758
PFNGLTEXBUMPPARAMETERFVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:5759
PFNGLGETTEXBUMPPARAMETERIVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:5760
PFNGLGETTEXBUMPPARAMETERFVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:5761
# ATI_fragment_shader (GL/glext.h:5764)
GL_ATI_fragment_shader = 1 # GL/glext.h:5765
# GL/glext.h:5767
glGenFragmentShadersATI = _link_function('glGenFragmentShadersATI', GLuint, [GLuint], 'ATI_fragment_shader')
# GL/glext.h:5768
glBindFragmentShaderATI = _link_function('glBindFragmentShaderATI', None, [GLuint], 'ATI_fragment_shader')
# GL/glext.h:5769
glDeleteFragmentShaderATI = _link_function('glDeleteFragmentShaderATI', None, [GLuint], 'ATI_fragment_shader')
# GL/glext.h:5770
glBeginFragmentShaderATI = _link_function('glBeginFragmentShaderATI', None, [], 'ATI_fragment_shader')
# GL/glext.h:5771
glEndFragmentShaderATI = _link_function('glEndFragmentShaderATI', None, [], 'ATI_fragment_shader')
# GL/glext.h:5772
glPassTexCoordATI = _link_function('glPassTexCoordATI', None, [GLuint, GLuint, GLenum], 'ATI_fragment_shader')
# GL/glext.h:5773
glSampleMapATI = _link_function('glSampleMapATI', None, [GLuint, GLuint, GLenum], 'ATI_fragment_shader')
# GL/glext.h:5774
glColorFragmentOp1ATI = _link_function('glColorFragmentOp1ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:5775
glColorFragmentOp2ATI = _link_function('glColorFragmentOp2ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:5776
glColorFragmentOp3ATI = _link_function('glColorFragmentOp3ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:5777
glAlphaFragmentOp1ATI = _link_function('glAlphaFragmentOp1ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:5778
glAlphaFragmentOp2ATI = _link_function('glAlphaFragmentOp2ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:5779
glAlphaFragmentOp3ATI = _link_function('glAlphaFragmentOp3ATI', None, [GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint], 'ATI_fragment_shader')
# GL/glext.h:5780
glSetFragmentShaderConstantATI = _link_function('glSetFragmentShaderConstantATI', None, [GLuint, POINTER(GLfloat)], 'ATI_fragment_shader')
PFNGLGENFRAGMENTSHADERSATIPROC = CFUNCTYPE(GLuint, GLuint) # GL/glext.h:5782
PFNGLBINDFRAGMENTSHADERATIPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5783
PFNGLDELETEFRAGMENTSHADERATIPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5784
PFNGLBEGINFRAGMENTSHADERATIPROC = CFUNCTYPE(None) # GL/glext.h:5785
PFNGLENDFRAGMENTSHADERATIPROC = CFUNCTYPE(None) # GL/glext.h:5786
PFNGLPASSTEXCOORDATIPROC = CFUNCTYPE(None, GLuint, GLuint, GLenum) # GL/glext.h:5787
PFNGLSAMPLEMAPATIPROC = CFUNCTYPE(None, GLuint, GLuint, GLenum) # GL/glext.h:5788
PFNGLCOLORFRAGMENTOP1ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:5789
PFNGLCOLORFRAGMENTOP2ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:5790
PFNGLCOLORFRAGMENTOP3ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:5791
PFNGLALPHAFRAGMENTOP1ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:5792
PFNGLALPHAFRAGMENTOP2ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:5793
PFNGLALPHAFRAGMENTOP3ATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:5794
PFNGLSETFRAGMENTSHADERCONSTANTATIPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:5795
# ATI_pn_triangles (GL/glext.h:5798)
GL_ATI_pn_triangles = 1 # GL/glext.h:5799
# GL/glext.h:5801
glPNTrianglesiATI = _link_function('glPNTrianglesiATI', None, [GLenum, GLint], 'ATI_pn_triangles')
# GL/glext.h:5802
glPNTrianglesfATI = _link_function('glPNTrianglesfATI', None, [GLenum, GLfloat], 'ATI_pn_triangles')
PFNGLPNTRIANGLESIATIPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:5804
PFNGLPNTRIANGLESFATIPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:5805
# ATI_vertex_array_object (GL/glext.h:5808)
GL_ATI_vertex_array_object = 1 # GL/glext.h:5809
# GL/glext.h:5811
glNewObjectBufferATI = _link_function('glNewObjectBufferATI', GLuint, [GLsizei, POINTER(GLvoid), GLenum], 'ATI_vertex_array_object')
# GL/glext.h:5812
glIsObjectBufferATI = _link_function('glIsObjectBufferATI', GLboolean, [GLuint], 'ATI_vertex_array_object')
# GL/glext.h:5813
glUpdateObjectBufferATI = _link_function('glUpdateObjectBufferATI', None, [GLuint, GLuint, GLsizei, POINTER(GLvoid), GLenum], 'ATI_vertex_array_object')
# GL/glext.h:5814
glGetObjectBufferfvATI = _link_function('glGetObjectBufferfvATI', None, [GLuint, GLenum, POINTER(GLfloat)], 'ATI_vertex_array_object')
# GL/glext.h:5815
glGetObjectBufferivATI = _link_function('glGetObjectBufferivATI', None, [GLuint, GLenum, POINTER(GLint)], 'ATI_vertex_array_object')
# GL/glext.h:5816
glFreeObjectBufferATI = _link_function('glFreeObjectBufferATI', None, [GLuint], 'ATI_vertex_array_object')
# GL/glext.h:5817
glArrayObjectATI = _link_function('glArrayObjectATI', None, [GLenum, GLint, GLenum, GLsizei, GLuint, GLuint], 'ATI_vertex_array_object')
# GL/glext.h:5818
glGetArrayObjectfvATI = _link_function('glGetArrayObjectfvATI', None, [GLenum, GLenum, POINTER(GLfloat)], 'ATI_vertex_array_object')
# GL/glext.h:5819
glGetArrayObjectivATI = _link_function('glGetArrayObjectivATI', None, [GLenum, GLenum, POINTER(GLint)], 'ATI_vertex_array_object')
# GL/glext.h:5820
glVariantArrayObjectATI = _link_function('glVariantArrayObjectATI', None, [GLuint, GLenum, GLsizei, GLuint, GLuint], 'ATI_vertex_array_object')
# GL/glext.h:5821
glGetVariantArrayObjectfvATI = _link_function('glGetVariantArrayObjectfvATI', None, [GLuint, GLenum, POINTER(GLfloat)], 'ATI_vertex_array_object')
# GL/glext.h:5822
glGetVariantArrayObjectivATI = _link_function('glGetVariantArrayObjectivATI', None, [GLuint, GLenum, POINTER(GLint)], 'ATI_vertex_array_object')
PFNGLNEWOBJECTBUFFERATIPROC = CFUNCTYPE(GLuint, GLsizei, POINTER(GLvoid), GLenum) # GL/glext.h:5824
PFNGLISOBJECTBUFFERATIPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:5825
PFNGLUPDATEOBJECTBUFFERATIPROC = CFUNCTYPE(None, GLuint, GLuint, GLsizei, POINTER(GLvoid), GLenum) # GL/glext.h:5826
PFNGLGETOBJECTBUFFERFVATIPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:5827
PFNGLGETOBJECTBUFFERIVATIPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:5828
PFNGLFREEOBJECTBUFFERATIPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5829
PFNGLARRAYOBJECTATIPROC = CFUNCTYPE(None, GLenum, GLint, GLenum, GLsizei, GLuint, GLuint) # GL/glext.h:5830
PFNGLGETARRAYOBJECTFVATIPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:5831
PFNGLGETARRAYOBJECTIVATIPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:5832
PFNGLVARIANTARRAYOBJECTATIPROC = CFUNCTYPE(None, GLuint, GLenum, GLsizei, GLuint, GLuint) # GL/glext.h:5833
PFNGLGETVARIANTARRAYOBJECTFVATIPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:5834
PFNGLGETVARIANTARRAYOBJECTIVATIPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:5835
# EXT_vertex_shader (GL/glext.h:5838)
GL_EXT_vertex_shader = 1 # GL/glext.h:5839
# GL/glext.h:5841
glBeginVertexShaderEXT = _link_function('glBeginVertexShaderEXT', None, [], 'EXT_vertex_shader')
# GL/glext.h:5842
glEndVertexShaderEXT = _link_function('glEndVertexShaderEXT', None, [], 'EXT_vertex_shader')
# GL/glext.h:5843
glBindVertexShaderEXT = _link_function('glBindVertexShaderEXT', None, [GLuint], 'EXT_vertex_shader')
# GL/glext.h:5844
glGenVertexShadersEXT = _link_function('glGenVertexShadersEXT', GLuint, [GLuint], 'EXT_vertex_shader')
# GL/glext.h:5845
glDeleteVertexShaderEXT = _link_function('glDeleteVertexShaderEXT', None, [GLuint], 'EXT_vertex_shader')
# GL/glext.h:5846
glShaderOp1EXT = _link_function('glShaderOp1EXT', None, [GLenum, GLuint, GLuint], 'EXT_vertex_shader')
# GL/glext.h:5847
glShaderOp2EXT = _link_function('glShaderOp2EXT', None, [GLenum, GLuint, GLuint, GLuint], 'EXT_vertex_shader')
# GL/glext.h:5848
glShaderOp3EXT = _link_function('glShaderOp3EXT', None, [GLenum, GLuint, GLuint, GLuint, GLuint], 'EXT_vertex_shader')
# GL/glext.h:5849
glSwizzleEXT = _link_function('glSwizzleEXT', None, [GLuint, GLuint, GLenum, GLenum, GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:5850
glWriteMaskEXT = _link_function('glWriteMaskEXT', None, [GLuint, GLuint, GLenum, GLenum, GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:5851
glInsertComponentEXT = _link_function('glInsertComponentEXT', None, [GLuint, GLuint, GLuint], 'EXT_vertex_shader')
# GL/glext.h:5852
glExtractComponentEXT = _link_function('glExtractComponentEXT', None, [GLuint, GLuint, GLuint], 'EXT_vertex_shader')
# GL/glext.h:5853
glGenSymbolsEXT = _link_function('glGenSymbolsEXT', GLuint, [GLenum, GLenum, GLenum, GLuint], 'EXT_vertex_shader')
# GL/glext.h:5854
glSetInvariantEXT = _link_function('glSetInvariantEXT', None, [GLuint, GLenum, POINTER(GLvoid)], 'EXT_vertex_shader')
# GL/glext.h:5855
glSetLocalConstantEXT = _link_function('glSetLocalConstantEXT', None, [GLuint, GLenum, POINTER(GLvoid)], 'EXT_vertex_shader')
# GL/glext.h:5856
glVariantbvEXT = _link_function('glVariantbvEXT', None, [GLuint, POINTER(GLbyte)], 'EXT_vertex_shader')
# GL/glext.h:5857
glVariantsvEXT = _link_function('glVariantsvEXT', None, [GLuint, POINTER(GLshort)], 'EXT_vertex_shader')
# GL/glext.h:5858
glVariantivEXT = _link_function('glVariantivEXT', None, [GLuint, POINTER(GLint)], 'EXT_vertex_shader')
# GL/glext.h:5859
glVariantfvEXT = _link_function('glVariantfvEXT', None, [GLuint, POINTER(GLfloat)], 'EXT_vertex_shader')
# GL/glext.h:5860
glVariantdvEXT = _link_function('glVariantdvEXT', None, [GLuint, POINTER(GLdouble)], 'EXT_vertex_shader')
# GL/glext.h:5861
glVariantubvEXT = _link_function('glVariantubvEXT', None, [GLuint, POINTER(GLubyte)], 'EXT_vertex_shader')
# GL/glext.h:5862
glVariantusvEXT = _link_function('glVariantusvEXT', None, [GLuint, POINTER(GLushort)], 'EXT_vertex_shader')
# GL/glext.h:5863
glVariantuivEXT = _link_function('glVariantuivEXT', None, [GLuint, POINTER(GLuint)], 'EXT_vertex_shader')
# GL/glext.h:5864
glVariantPointerEXT = _link_function('glVariantPointerEXT', None, [GLuint, GLenum, GLuint, POINTER(GLvoid)], 'EXT_vertex_shader')
# GL/glext.h:5865
glEnableVariantClientStateEXT = _link_function('glEnableVariantClientStateEXT', None, [GLuint], 'EXT_vertex_shader')
# GL/glext.h:5866
glDisableVariantClientStateEXT = _link_function('glDisableVariantClientStateEXT', None, [GLuint], 'EXT_vertex_shader')
# GL/glext.h:5867
glBindLightParameterEXT = _link_function('glBindLightParameterEXT', GLuint, [GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:5868
glBindMaterialParameterEXT = _link_function('glBindMaterialParameterEXT', GLuint, [GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:5869
glBindTexGenParameterEXT = _link_function('glBindTexGenParameterEXT', GLuint, [GLenum, GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:5870
glBindTextureUnitParameterEXT = _link_function('glBindTextureUnitParameterEXT', GLuint, [GLenum, GLenum], 'EXT_vertex_shader')
# GL/glext.h:5871
glBindParameterEXT = _link_function('glBindParameterEXT', GLuint, [GLenum], 'EXT_vertex_shader')
# GL/glext.h:5872
glIsVariantEnabledEXT = _link_function('glIsVariantEnabledEXT', GLboolean, [GLuint, GLenum], 'EXT_vertex_shader')
# GL/glext.h:5873
glGetVariantBooleanvEXT = _link_function('glGetVariantBooleanvEXT', None, [GLuint, GLenum, POINTER(GLboolean)], 'EXT_vertex_shader')
# GL/glext.h:5874
glGetVariantIntegervEXT = _link_function('glGetVariantIntegervEXT', None, [GLuint, GLenum, POINTER(GLint)], 'EXT_vertex_shader')
# GL/glext.h:5875
glGetVariantFloatvEXT = _link_function('glGetVariantFloatvEXT', None, [GLuint, GLenum, POINTER(GLfloat)], 'EXT_vertex_shader')
# GL/glext.h:5876
glGetVariantPointervEXT = _link_function('glGetVariantPointervEXT', None, [GLuint, GLenum, POINTER(POINTER(GLvoid))], 'EXT_vertex_shader')
# GL/glext.h:5877
glGetInvariantBooleanvEXT = _link_function('glGetInvariantBooleanvEXT', None, [GLuint, GLenum, POINTER(GLboolean)], 'EXT_vertex_shader')
# GL/glext.h:5878
glGetInvariantIntegervEXT = _link_function('glGetInvariantIntegervEXT', None, [GLuint, GLenum, POINTER(GLint)], 'EXT_vertex_shader')
# GL/glext.h:5879
glGetInvariantFloatvEXT = _link_function('glGetInvariantFloatvEXT', None, [GLuint, GLenum, POINTER(GLfloat)], 'EXT_vertex_shader')
# GL/glext.h:5880
glGetLocalConstantBooleanvEXT = _link_function('glGetLocalConstantBooleanvEXT', None, [GLuint, GLenum, POINTER(GLboolean)], 'EXT_vertex_shader')
# GL/glext.h:5881
glGetLocalConstantIntegervEXT = _link_function('glGetLocalConstantIntegervEXT', None, [GLuint, GLenum, POINTER(GLint)], 'EXT_vertex_shader')
# GL/glext.h:5882
glGetLocalConstantFloatvEXT = _link_function('glGetLocalConstantFloatvEXT', None, [GLuint, GLenum, POINTER(GLfloat)], 'EXT_vertex_shader')
PFNGLBEGINVERTEXSHADEREXTPROC = CFUNCTYPE(None) # GL/glext.h:5884
PFNGLENDVERTEXSHADEREXTPROC = CFUNCTYPE(None) # GL/glext.h:5885
PFNGLBINDVERTEXSHADEREXTPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5886
PFNGLGENVERTEXSHADERSEXTPROC = CFUNCTYPE(GLuint, GLuint) # GL/glext.h:5887
PFNGLDELETEVERTEXSHADEREXTPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5888
PFNGLSHADEROP1EXTPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint) # GL/glext.h:5889
PFNGLSHADEROP2EXTPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint) # GL/glext.h:5890
PFNGLSHADEROP3EXTPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLuint, GLuint) # GL/glext.h:5891
PFNGLSWIZZLEEXTPROC = CFUNCTYPE(None, GLuint, GLuint, GLenum, GLenum, GLenum, GLenum) # GL/glext.h:5892
PFNGLWRITEMASKEXTPROC = CFUNCTYPE(None, GLuint, GLuint, GLenum, GLenum, GLenum, GLenum) # GL/glext.h:5893
PFNGLINSERTCOMPONENTEXTPROC = CFUNCTYPE(None, GLuint, GLuint, GLuint) # GL/glext.h:5894
PFNGLEXTRACTCOMPONENTEXTPROC = CFUNCTYPE(None, GLuint, GLuint, GLuint) # GL/glext.h:5895
PFNGLGENSYMBOLSEXTPROC = CFUNCTYPE(GLuint, GLenum, GLenum, GLenum, GLuint) # GL/glext.h:5896
PFNGLSETINVARIANTEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLvoid)) # GL/glext.h:5897
PFNGLSETLOCALCONSTANTEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLvoid)) # GL/glext.h:5898
PFNGLVARIANTBVEXTPROC = CFUNCTYPE(None, GLuint, POINTER(GLbyte)) # GL/glext.h:5899
PFNGLVARIANTSVEXTPROC = CFUNCTYPE(None, GLuint, POINTER(GLshort)) # GL/glext.h:5900
PFNGLVARIANTIVEXTPROC = CFUNCTYPE(None, GLuint, POINTER(GLint)) # GL/glext.h:5901
PFNGLVARIANTFVEXTPROC = CFUNCTYPE(None, GLuint, POINTER(GLfloat)) # GL/glext.h:5902
PFNGLVARIANTDVEXTPROC = CFUNCTYPE(None, GLuint, POINTER(GLdouble)) # GL/glext.h:5903
PFNGLVARIANTUBVEXTPROC = CFUNCTYPE(None, GLuint, POINTER(GLubyte)) # GL/glext.h:5904
PFNGLVARIANTUSVEXTPROC = CFUNCTYPE(None, GLuint, POINTER(GLushort)) # GL/glext.h:5905
PFNGLVARIANTUIVEXTPROC = CFUNCTYPE(None, GLuint, POINTER(GLuint)) # GL/glext.h:5906
PFNGLVARIANTPOINTEREXTPROC = CFUNCTYPE(None, GLuint, GLenum, GLuint, POINTER(GLvoid)) # GL/glext.h:5907
PFNGLENABLEVARIANTCLIENTSTATEEXTPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5908
PFNGLDISABLEVARIANTCLIENTSTATEEXTPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5909
PFNGLBINDLIGHTPARAMETEREXTPROC = CFUNCTYPE(GLuint, GLenum, GLenum) # GL/glext.h:5910
PFNGLBINDMATERIALPARAMETEREXTPROC = CFUNCTYPE(GLuint, GLenum, GLenum) # GL/glext.h:5911
PFNGLBINDTEXGENPARAMETEREXTPROC = CFUNCTYPE(GLuint, GLenum, GLenum, GLenum) # GL/glext.h:5912
PFNGLBINDTEXTUREUNITPARAMETEREXTPROC = CFUNCTYPE(GLuint, GLenum, GLenum) # GL/glext.h:5913
PFNGLBINDPARAMETEREXTPROC = CFUNCTYPE(GLuint, GLenum) # GL/glext.h:5914
PFNGLISVARIANTENABLEDEXTPROC = CFUNCTYPE(GLboolean, GLuint, GLenum) # GL/glext.h:5915
PFNGLGETVARIANTBOOLEANVEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLboolean)) # GL/glext.h:5916
PFNGLGETVARIANTINTEGERVEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:5917
PFNGLGETVARIANTFLOATVEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:5918
PFNGLGETVARIANTPOINTERVEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(POINTER(GLvoid))) # GL/glext.h:5919
PFNGLGETINVARIANTBOOLEANVEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLboolean)) # GL/glext.h:5920
PFNGLGETINVARIANTINTEGERVEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:5921
PFNGLGETINVARIANTFLOATVEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:5922
PFNGLGETLOCALCONSTANTBOOLEANVEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLboolean)) # GL/glext.h:5923
PFNGLGETLOCALCONSTANTINTEGERVEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:5924
PFNGLGETLOCALCONSTANTFLOATVEXTPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:5925
# ATI_vertex_streams (GL/glext.h:5928)
GL_ATI_vertex_streams = 1 # GL/glext.h:5929
# GL/glext.h:5931
glVertexStream1sATI = _link_function('glVertexStream1sATI', None, [GLenum, GLshort], 'ATI_vertex_streams')
# GL/glext.h:5932
glVertexStream1svATI = _link_function('glVertexStream1svATI', None, [GLenum, POINTER(GLshort)], 'ATI_vertex_streams')
# GL/glext.h:5933
glVertexStream1iATI = _link_function('glVertexStream1iATI', None, [GLenum, GLint], 'ATI_vertex_streams')
# GL/glext.h:5934
glVertexStream1ivATI = _link_function('glVertexStream1ivATI', None, [GLenum, POINTER(GLint)], 'ATI_vertex_streams')
# GL/glext.h:5935
glVertexStream1fATI = _link_function('glVertexStream1fATI', None, [GLenum, GLfloat], 'ATI_vertex_streams')
# GL/glext.h:5936
glVertexStream1fvATI = _link_function('glVertexStream1fvATI', None, [GLenum, POINTER(GLfloat)], 'ATI_vertex_streams')
# GL/glext.h:5937
glVertexStream1dATI = _link_function('glVertexStream1dATI', None, [GLenum, GLdouble], 'ATI_vertex_streams')
# GL/glext.h:5938
glVertexStream1dvATI = _link_function('glVertexStream1dvATI', None, [GLenum, POINTER(GLdouble)], 'ATI_vertex_streams')
# GL/glext.h:5939
glVertexStream2sATI = _link_function('glVertexStream2sATI', None, [GLenum, GLshort, GLshort], 'ATI_vertex_streams')
# GL/glext.h:5940
glVertexStream2svATI = _link_function('glVertexStream2svATI', None, [GLenum, POINTER(GLshort)], 'ATI_vertex_streams')
# GL/glext.h:5941
glVertexStream2iATI = _link_function('glVertexStream2iATI', None, [GLenum, GLint, GLint], 'ATI_vertex_streams')
# GL/glext.h:5942
glVertexStream2ivATI = _link_function('glVertexStream2ivATI', None, [GLenum, POINTER(GLint)], 'ATI_vertex_streams')
# GL/glext.h:5943
glVertexStream2fATI = _link_function('glVertexStream2fATI', None, [GLenum, GLfloat, GLfloat], 'ATI_vertex_streams')
# GL/glext.h:5944
glVertexStream2fvATI = _link_function('glVertexStream2fvATI', None, [GLenum, POINTER(GLfloat)], 'ATI_vertex_streams')
# GL/glext.h:5945
glVertexStream2dATI = _link_function('glVertexStream2dATI', None, [GLenum, GLdouble, GLdouble], 'ATI_vertex_streams')
# GL/glext.h:5946
glVertexStream2dvATI = _link_function('glVertexStream2dvATI', None, [GLenum, POINTER(GLdouble)], 'ATI_vertex_streams')
# GL/glext.h:5947
glVertexStream3sATI = _link_function('glVertexStream3sATI', None, [GLenum, GLshort, GLshort, GLshort], 'ATI_vertex_streams')
# GL/glext.h:5948
glVertexStream3svATI = _link_function('glVertexStream3svATI', None, [GLenum, POINTER(GLshort)], 'ATI_vertex_streams')
# GL/glext.h:5949
glVertexStream3iATI = _link_function('glVertexStream3iATI', None, [GLenum, GLint, GLint, GLint], 'ATI_vertex_streams')
# GL/glext.h:5950
glVertexStream3ivATI = _link_function('glVertexStream3ivATI', None, [GLenum, POINTER(GLint)], 'ATI_vertex_streams')
# GL/glext.h:5951
glVertexStream3fATI = _link_function('glVertexStream3fATI', None, [GLenum, GLfloat, GLfloat, GLfloat], 'ATI_vertex_streams')
# GL/glext.h:5952
glVertexStream3fvATI = _link_function('glVertexStream3fvATI', None, [GLenum, POINTER(GLfloat)], 'ATI_vertex_streams')
# GL/glext.h:5953
glVertexStream3dATI = _link_function('glVertexStream3dATI', None, [GLenum, GLdouble, GLdouble, GLdouble], 'ATI_vertex_streams')
# GL/glext.h:5954
glVertexStream3dvATI = _link_function('glVertexStream3dvATI', None, [GLenum, POINTER(GLdouble)], 'ATI_vertex_streams')
# GL/glext.h:5955
glVertexStream4sATI = _link_function('glVertexStream4sATI', None, [GLenum, GLshort, GLshort, GLshort, GLshort], 'ATI_vertex_streams')
# GL/glext.h:5956
glVertexStream4svATI = _link_function('glVertexStream4svATI', None, [GLenum, POINTER(GLshort)], 'ATI_vertex_streams')
# GL/glext.h:5957
glVertexStream4iATI = _link_function('glVertexStream4iATI', None, [GLenum, GLint, GLint, GLint, GLint], 'ATI_vertex_streams')
# GL/glext.h:5958
glVertexStream4ivATI = _link_function('glVertexStream4ivATI', None, [GLenum, POINTER(GLint)], 'ATI_vertex_streams')
# GL/glext.h:5959
glVertexStream4fATI = _link_function('glVertexStream4fATI', None, [GLenum, GLfloat, GLfloat, GLfloat, GLfloat], 'ATI_vertex_streams')
# GL/glext.h:5960
glVertexStream4fvATI = _link_function('glVertexStream4fvATI', None, [GLenum, POINTER(GLfloat)], 'ATI_vertex_streams')
# GL/glext.h:5961
glVertexStream4dATI = _link_function('glVertexStream4dATI', None, [GLenum, GLdouble, GLdouble, GLdouble, GLdouble], 'ATI_vertex_streams')
# GL/glext.h:5962
glVertexStream4dvATI = _link_function('glVertexStream4dvATI', None, [GLenum, POINTER(GLdouble)], 'ATI_vertex_streams')
# GL/glext.h:5963
glNormalStream3bATI = _link_function('glNormalStream3bATI', None, [GLenum, GLbyte, GLbyte, GLbyte], 'ATI_vertex_streams')
# GL/glext.h:5964
glNormalStream3bvATI = _link_function('glNormalStream3bvATI', None, [GLenum, POINTER(GLbyte)], 'ATI_vertex_streams')
# GL/glext.h:5965
glNormalStream3sATI = _link_function('glNormalStream3sATI', None, [GLenum, GLshort, GLshort, GLshort], 'ATI_vertex_streams')
# GL/glext.h:5966
glNormalStream3svATI = _link_function('glNormalStream3svATI', None, [GLenum, POINTER(GLshort)], 'ATI_vertex_streams')
# GL/glext.h:5967
glNormalStream3iATI = _link_function('glNormalStream3iATI', None, [GLenum, GLint, GLint, GLint], 'ATI_vertex_streams')
# GL/glext.h:5968
glNormalStream3ivATI = _link_function('glNormalStream3ivATI', None, [GLenum, POINTER(GLint)], 'ATI_vertex_streams')
# GL/glext.h:5969
glNormalStream3fATI = _link_function('glNormalStream3fATI', None, [GLenum, GLfloat, GLfloat, GLfloat], 'ATI_vertex_streams')
# GL/glext.h:5970
glNormalStream3fvATI = _link_function('glNormalStream3fvATI', None, [GLenum, POINTER(GLfloat)], 'ATI_vertex_streams')
# GL/glext.h:5971
glNormalStream3dATI = _link_function('glNormalStream3dATI', None, [GLenum, GLdouble, GLdouble, GLdouble], 'ATI_vertex_streams')
# GL/glext.h:5972
glNormalStream3dvATI = _link_function('glNormalStream3dvATI', None, [GLenum, POINTER(GLdouble)], 'ATI_vertex_streams')
# GL/glext.h:5973
glClientActiveVertexStreamATI = _link_function('glClientActiveVertexStreamATI', None, [GLenum], 'ATI_vertex_streams')
# GL/glext.h:5974
glVertexBlendEnviATI = _link_function('glVertexBlendEnviATI', None, [GLenum, GLint], 'ATI_vertex_streams')
# GL/glext.h:5975
glVertexBlendEnvfATI = _link_function('glVertexBlendEnvfATI', None, [GLenum, GLfloat], 'ATI_vertex_streams')
PFNGLVERTEXSTREAM1SATIPROC = CFUNCTYPE(None, GLenum, GLshort) # GL/glext.h:5977
PFNGLVERTEXSTREAM1SVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:5978
PFNGLVERTEXSTREAM1IATIPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:5979
PFNGLVERTEXSTREAM1IVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:5980
PFNGLVERTEXSTREAM1FATIPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:5981
PFNGLVERTEXSTREAM1FVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:5982
PFNGLVERTEXSTREAM1DATIPROC = CFUNCTYPE(None, GLenum, GLdouble) # GL/glext.h:5983
# ---------------------------------------------------------------------------
# Auto-generated OpenGL extension bindings (ctypes).
#
# Each section below corresponds to one extension block in GL/glext.h (the
# header line number is recorded in the trailing comment of every statement)
# and follows a fixed generated pattern:
#
#   * ``GL_<vendor>_<extension> = 1``  -- presence flag mirroring the C
#     ``#define`` that announces the extension in the header.
#   * ``gl...XXX = _link_function(name, restype, argtypes, extension)`` --
#     a lazily-resolved entry point. ``_link_function`` is defined earlier
#     in this file (outside this view); presumably it defers symbol lookup
#     until first call so that missing extensions fail at call time, not at
#     import time -- TODO confirm against the module header.
#   * ``PFNGL...PROC = CFUNCTYPE(restype, *argtypes)`` -- a ctypes mirror of
#     the C function-pointer typedef of the same name, kept for callers that
#     want to cast/resolve pointers themselves.
#
# NOTE(review): this file is generated from GL/glext.h; do not edit these
# statements by hand -- regenerate instead.
# ---------------------------------------------------------------------------

# ATI_vertex_streams: per-stream vertex attribute function-pointer typedefs
# (the matching gl* bindings for this extension begin before this view).
PFNGLVERTEXSTREAM1DVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:5984
PFNGLVERTEXSTREAM2SATIPROC = CFUNCTYPE(None, GLenum, GLshort, GLshort) # GL/glext.h:5985
PFNGLVERTEXSTREAM2SVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:5986
PFNGLVERTEXSTREAM2IATIPROC = CFUNCTYPE(None, GLenum, GLint, GLint) # GL/glext.h:5987
PFNGLVERTEXSTREAM2IVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:5988
PFNGLVERTEXSTREAM2FATIPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat) # GL/glext.h:5989
PFNGLVERTEXSTREAM2FVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:5990
PFNGLVERTEXSTREAM2DATIPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble) # GL/glext.h:5991
PFNGLVERTEXSTREAM2DVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:5992
PFNGLVERTEXSTREAM3SATIPROC = CFUNCTYPE(None, GLenum, GLshort, GLshort, GLshort) # GL/glext.h:5993
PFNGLVERTEXSTREAM3SVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:5994
PFNGLVERTEXSTREAM3IATIPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint) # GL/glext.h:5995
PFNGLVERTEXSTREAM3IVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:5996
PFNGLVERTEXSTREAM3FATIPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat, GLfloat) # GL/glext.h:5997
PFNGLVERTEXSTREAM3FVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:5998
PFNGLVERTEXSTREAM3DATIPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble, GLdouble) # GL/glext.h:5999
PFNGLVERTEXSTREAM3DVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:6000
PFNGLVERTEXSTREAM4SATIPROC = CFUNCTYPE(None, GLenum, GLshort, GLshort, GLshort, GLshort) # GL/glext.h:6001
PFNGLVERTEXSTREAM4SVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:6002
PFNGLVERTEXSTREAM4IATIPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint) # GL/glext.h:6003
PFNGLVERTEXSTREAM4IVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:6004
PFNGLVERTEXSTREAM4FATIPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:6005
PFNGLVERTEXSTREAM4FVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:6006
PFNGLVERTEXSTREAM4DATIPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:6007
PFNGLVERTEXSTREAM4DVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:6008
PFNGLNORMALSTREAM3BATIPROC = CFUNCTYPE(None, GLenum, GLbyte, GLbyte, GLbyte) # GL/glext.h:6009
PFNGLNORMALSTREAM3BVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLbyte)) # GL/glext.h:6010
PFNGLNORMALSTREAM3SATIPROC = CFUNCTYPE(None, GLenum, GLshort, GLshort, GLshort) # GL/glext.h:6011
PFNGLNORMALSTREAM3SVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLshort)) # GL/glext.h:6012
PFNGLNORMALSTREAM3IATIPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint) # GL/glext.h:6013
PFNGLNORMALSTREAM3IVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:6014
PFNGLNORMALSTREAM3FATIPROC = CFUNCTYPE(None, GLenum, GLfloat, GLfloat, GLfloat) # GL/glext.h:6015
PFNGLNORMALSTREAM3FVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLfloat)) # GL/glext.h:6016
PFNGLNORMALSTREAM3DATIPROC = CFUNCTYPE(None, GLenum, GLdouble, GLdouble, GLdouble) # GL/glext.h:6017
PFNGLNORMALSTREAM3DVATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLdouble)) # GL/glext.h:6018
PFNGLCLIENTACTIVEVERTEXSTREAMATIPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:6019
PFNGLVERTEXBLENDENVIATIPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:6020
PFNGLVERTEXBLENDENVFATIPROC = CFUNCTYPE(None, GLenum, GLfloat) # GL/glext.h:6021
# ATI_element_array (GL/glext.h:6024)
GL_ATI_element_array = 1 # GL/glext.h:6025
# GL/glext.h:6027
glElementPointerATI = _link_function('glElementPointerATI', None, [GLenum, POINTER(GLvoid)], 'ATI_element_array')
# GL/glext.h:6028
glDrawElementArrayATI = _link_function('glDrawElementArrayATI', None, [GLenum, GLsizei], 'ATI_element_array')
# GL/glext.h:6029
glDrawRangeElementArrayATI = _link_function('glDrawRangeElementArrayATI', None, [GLenum, GLuint, GLuint, GLsizei], 'ATI_element_array')
PFNGLELEMENTPOINTERATIPROC = CFUNCTYPE(None, GLenum, POINTER(GLvoid)) # GL/glext.h:6031
PFNGLDRAWELEMENTARRAYATIPROC = CFUNCTYPE(None, GLenum, GLsizei) # GL/glext.h:6032
PFNGLDRAWRANGEELEMENTARRAYATIPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLsizei) # GL/glext.h:6033
# SUN_mesh_array (GL/glext.h:6036)
GL_SUN_mesh_array = 1 # GL/glext.h:6037
# GL/glext.h:6039
glDrawMeshArraysSUN = _link_function('glDrawMeshArraysSUN', None, [GLenum, GLint, GLsizei, GLsizei], 'SUN_mesh_array')
PFNGLDRAWMESHARRAYSSUNPROC = CFUNCTYPE(None, GLenum, GLint, GLsizei, GLsizei) # GL/glext.h:6041
# SUN_slice_accum (GL/glext.h:6044)
GL_SUN_slice_accum = 1 # GL/glext.h:6045
# NV_multisample_filter_hint (GL/glext.h:6048)
GL_NV_multisample_filter_hint = 1 # GL/glext.h:6049
# NV_depth_clamp (GL/glext.h:6052)
GL_NV_depth_clamp = 1 # GL/glext.h:6053
# NV_occlusion_query (GL/glext.h:6056)
GL_NV_occlusion_query = 1 # GL/glext.h:6057
# GL/glext.h:6059
glGenOcclusionQueriesNV = _link_function('glGenOcclusionQueriesNV', None, [GLsizei, POINTER(GLuint)], 'NV_occlusion_query')
# GL/glext.h:6060
glDeleteOcclusionQueriesNV = _link_function('glDeleteOcclusionQueriesNV', None, [GLsizei, POINTER(GLuint)], 'NV_occlusion_query')
# GL/glext.h:6061
glIsOcclusionQueryNV = _link_function('glIsOcclusionQueryNV', GLboolean, [GLuint], 'NV_occlusion_query')
# GL/glext.h:6062
glBeginOcclusionQueryNV = _link_function('glBeginOcclusionQueryNV', None, [GLuint], 'NV_occlusion_query')
# GL/glext.h:6063
glEndOcclusionQueryNV = _link_function('glEndOcclusionQueryNV', None, [], 'NV_occlusion_query')
# GL/glext.h:6064
glGetOcclusionQueryivNV = _link_function('glGetOcclusionQueryivNV', None, [GLuint, GLenum, POINTER(GLint)], 'NV_occlusion_query')
# GL/glext.h:6065
glGetOcclusionQueryuivNV = _link_function('glGetOcclusionQueryuivNV', None, [GLuint, GLenum, POINTER(GLuint)], 'NV_occlusion_query')
PFNGLGENOCCLUSIONQUERIESNVPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:6067
PFNGLDELETEOCCLUSIONQUERIESNVPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:6068
PFNGLISOCCLUSIONQUERYNVPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:6069
PFNGLBEGINOCCLUSIONQUERYNVPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6070
PFNGLENDOCCLUSIONQUERYNVPROC = CFUNCTYPE(None) # GL/glext.h:6071
PFNGLGETOCCLUSIONQUERYIVNVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:6072
PFNGLGETOCCLUSIONQUERYUIVNVPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLuint)) # GL/glext.h:6073
# NV_point_sprite (GL/glext.h:6076)
GL_NV_point_sprite = 1 # GL/glext.h:6077
# GL/glext.h:6079
glPointParameteriNV = _link_function('glPointParameteriNV', None, [GLenum, GLint], 'NV_point_sprite')
# GL/glext.h:6080
glPointParameterivNV = _link_function('glPointParameterivNV', None, [GLenum, POINTER(GLint)], 'NV_point_sprite')
PFNGLPOINTPARAMETERINVPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:6082
PFNGLPOINTPARAMETERIVNVPROC = CFUNCTYPE(None, GLenum, POINTER(GLint)) # GL/glext.h:6083
# NV_texture_shader3 (GL/glext.h:6086)
GL_NV_texture_shader3 = 1 # GL/glext.h:6087
# NV_vertex_program1_1 (GL/glext.h:6090)
GL_NV_vertex_program1_1 = 1 # GL/glext.h:6091
# EXT_shadow_funcs (GL/glext.h:6094)
GL_EXT_shadow_funcs = 1 # GL/glext.h:6095
# EXT_stencil_two_side (GL/glext.h:6098)
GL_EXT_stencil_two_side = 1 # GL/glext.h:6099
# GL/glext.h:6101
glActiveStencilFaceEXT = _link_function('glActiveStencilFaceEXT', None, [GLenum], 'EXT_stencil_two_side')
PFNGLACTIVESTENCILFACEEXTPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:6103
# ATI_text_fragment_shader (GL/glext.h:6106)
GL_ATI_text_fragment_shader = 1 # GL/glext.h:6107
# APPLE_client_storage (GL/glext.h:6110)
GL_APPLE_client_storage = 1 # GL/glext.h:6111
# APPLE_element_array (GL/glext.h:6114)
GL_APPLE_element_array = 1 # GL/glext.h:6115
# GL/glext.h:6117
glElementPointerAPPLE = _link_function('glElementPointerAPPLE', None, [GLenum, POINTER(GLvoid)], 'APPLE_element_array')
# GL/glext.h:6118
glDrawElementArrayAPPLE = _link_function('glDrawElementArrayAPPLE', None, [GLenum, GLint, GLsizei], 'APPLE_element_array')
# GL/glext.h:6119
glDrawRangeElementArrayAPPLE = _link_function('glDrawRangeElementArrayAPPLE', None, [GLenum, GLuint, GLuint, GLint, GLsizei], 'APPLE_element_array')
# GL/glext.h:6120
glMultiDrawElementArrayAPPLE = _link_function('glMultiDrawElementArrayAPPLE', None, [GLenum, POINTER(GLint), POINTER(GLsizei), GLsizei], 'APPLE_element_array')
# GL/glext.h:6121
glMultiDrawRangeElementArrayAPPLE = _link_function('glMultiDrawRangeElementArrayAPPLE', None, [GLenum, GLuint, GLuint, POINTER(GLint), POINTER(GLsizei), GLsizei], 'APPLE_element_array')
PFNGLELEMENTPOINTERAPPLEPROC = CFUNCTYPE(None, GLenum, POINTER(GLvoid)) # GL/glext.h:6123
PFNGLDRAWELEMENTARRAYAPPLEPROC = CFUNCTYPE(None, GLenum, GLint, GLsizei) # GL/glext.h:6124
PFNGLDRAWRANGEELEMENTARRAYAPPLEPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLint, GLsizei) # GL/glext.h:6125
PFNGLMULTIDRAWELEMENTARRAYAPPLEPROC = CFUNCTYPE(None, GLenum, POINTER(GLint), POINTER(GLsizei), GLsizei) # GL/glext.h:6126
PFNGLMULTIDRAWRANGEELEMENTARRAYAPPLEPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, POINTER(GLint), POINTER(GLsizei), GLsizei) # GL/glext.h:6127
# APPLE_fence (GL/glext.h:6130)
GL_APPLE_fence = 1 # GL/glext.h:6131
# GL/glext.h:6133
glGenFencesAPPLE = _link_function('glGenFencesAPPLE', None, [GLsizei, POINTER(GLuint)], 'APPLE_fence')
# GL/glext.h:6134
glDeleteFencesAPPLE = _link_function('glDeleteFencesAPPLE', None, [GLsizei, POINTER(GLuint)], 'APPLE_fence')
# GL/glext.h:6135
glSetFenceAPPLE = _link_function('glSetFenceAPPLE', None, [GLuint], 'APPLE_fence')
# GL/glext.h:6136
glIsFenceAPPLE = _link_function('glIsFenceAPPLE', GLboolean, [GLuint], 'APPLE_fence')
# GL/glext.h:6137
glTestFenceAPPLE = _link_function('glTestFenceAPPLE', GLboolean, [GLuint], 'APPLE_fence')
# GL/glext.h:6138
glFinishFenceAPPLE = _link_function('glFinishFenceAPPLE', None, [GLuint], 'APPLE_fence')
# GL/glext.h:6139
glTestObjectAPPLE = _link_function('glTestObjectAPPLE', GLboolean, [GLenum, GLuint], 'APPLE_fence')
# GL/glext.h:6140
glFinishObjectAPPLE = _link_function('glFinishObjectAPPLE', None, [GLenum, GLint], 'APPLE_fence')
PFNGLGENFENCESAPPLEPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:6142
PFNGLDELETEFENCESAPPLEPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:6143
PFNGLSETFENCEAPPLEPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6144
PFNGLISFENCEAPPLEPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:6145
PFNGLTESTFENCEAPPLEPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:6146
PFNGLFINISHFENCEAPPLEPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6147
PFNGLTESTOBJECTAPPLEPROC = CFUNCTYPE(GLboolean, GLenum, GLuint) # GL/glext.h:6148
PFNGLFINISHOBJECTAPPLEPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:6149
# APPLE_vertex_array_object (GL/glext.h:6152)
GL_APPLE_vertex_array_object = 1 # GL/glext.h:6153
# GL/glext.h:6155
glBindVertexArrayAPPLE = _link_function('glBindVertexArrayAPPLE', None, [GLuint], 'APPLE_vertex_array_object')
# GL/glext.h:6156
glDeleteVertexArraysAPPLE = _link_function('glDeleteVertexArraysAPPLE', None, [GLsizei, POINTER(GLuint)], 'APPLE_vertex_array_object')
# GL/glext.h:6157
glGenVertexArraysAPPLE = _link_function('glGenVertexArraysAPPLE', None, [GLsizei, POINTER(GLuint)], 'APPLE_vertex_array_object')
# GL/glext.h:6158
glIsVertexArrayAPPLE = _link_function('glIsVertexArrayAPPLE', GLboolean, [GLuint], 'APPLE_vertex_array_object')
PFNGLBINDVERTEXARRAYAPPLEPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6160
PFNGLDELETEVERTEXARRAYSAPPLEPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:6161
PFNGLGENVERTEXARRAYSAPPLEPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:6162
PFNGLISVERTEXARRAYAPPLEPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:6163
# APPLE_vertex_array_range (GL/glext.h:6166)
GL_APPLE_vertex_array_range = 1 # GL/glext.h:6167
# GL/glext.h:6169
glVertexArrayRangeAPPLE = _link_function('glVertexArrayRangeAPPLE', None, [GLsizei, POINTER(GLvoid)], 'APPLE_vertex_array_range')
# GL/glext.h:6170
glFlushVertexArrayRangeAPPLE = _link_function('glFlushVertexArrayRangeAPPLE', None, [GLsizei, POINTER(GLvoid)], 'APPLE_vertex_array_range')
# GL/glext.h:6171
glVertexArrayParameteriAPPLE = _link_function('glVertexArrayParameteriAPPLE', None, [GLenum, GLint], 'APPLE_vertex_array_range')
PFNGLVERTEXARRAYRANGEAPPLEPROC = CFUNCTYPE(None, GLsizei, POINTER(GLvoid)) # GL/glext.h:6173
PFNGLFLUSHVERTEXARRAYRANGEAPPLEPROC = CFUNCTYPE(None, GLsizei, POINTER(GLvoid)) # GL/glext.h:6174
PFNGLVERTEXARRAYPARAMETERIAPPLEPROC = CFUNCTYPE(None, GLenum, GLint) # GL/glext.h:6175
# APPLE_ycbcr_422 (GL/glext.h:6178)
GL_APPLE_ycbcr_422 = 1 # GL/glext.h:6179
# S3_s3tc (GL/glext.h:6182)
GL_S3_s3tc = 1 # GL/glext.h:6183
# ATI_draw_buffers (GL/glext.h:6186)
GL_ATI_draw_buffers = 1 # GL/glext.h:6187
# GL/glext.h:6189
glDrawBuffersATI = _link_function('glDrawBuffersATI', None, [GLsizei, POINTER(GLenum)], 'ATI_draw_buffers')
PFNGLDRAWBUFFERSATIPROC = CFUNCTYPE(None, GLsizei, POINTER(GLenum)) # GL/glext.h:6191
# ATI_pixel_format_float (GL/glext.h:6194)
GL_ATI_pixel_format_float = 1 # GL/glext.h:6195
# ATI_texture_env_combine3 (GL/glext.h:6201)
GL_ATI_texture_env_combine3 = 1 # GL/glext.h:6202
# ATI_texture_float (GL/glext.h:6205)
GL_ATI_texture_float = 1 # GL/glext.h:6206
# NV_float_buffer (GL/glext.h:6209)
GL_NV_float_buffer = 1 # GL/glext.h:6210
# NV_fragment_program (GL/glext.h:6213)
GL_NV_fragment_program = 1 # GL/glext.h:6214
# GL/glext.h:6217
glProgramNamedParameter4fNV = _link_function('glProgramNamedParameter4fNV', None, [GLuint, GLsizei, POINTER(GLubyte), GLfloat, GLfloat, GLfloat, GLfloat], 'NV_fragment_program')
# GL/glext.h:6218
glProgramNamedParameter4dNV = _link_function('glProgramNamedParameter4dNV', None, [GLuint, GLsizei, POINTER(GLubyte), GLdouble, GLdouble, GLdouble, GLdouble], 'NV_fragment_program')
# GL/glext.h:6219
glProgramNamedParameter4fvNV = _link_function('glProgramNamedParameter4fvNV', None, [GLuint, GLsizei, POINTER(GLubyte), POINTER(GLfloat)], 'NV_fragment_program')
# GL/glext.h:6220
glProgramNamedParameter4dvNV = _link_function('glProgramNamedParameter4dvNV', None, [GLuint, GLsizei, POINTER(GLubyte), POINTER(GLdouble)], 'NV_fragment_program')
# GL/glext.h:6221
glGetProgramNamedParameterfvNV = _link_function('glGetProgramNamedParameterfvNV', None, [GLuint, GLsizei, POINTER(GLubyte), POINTER(GLfloat)], 'NV_fragment_program')
# GL/glext.h:6222
glGetProgramNamedParameterdvNV = _link_function('glGetProgramNamedParameterdvNV', None, [GLuint, GLsizei, POINTER(GLubyte), POINTER(GLdouble)], 'NV_fragment_program')
PFNGLPROGRAMNAMEDPARAMETER4FNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLubyte), GLfloat, GLfloat, GLfloat, GLfloat) # GL/glext.h:6224
PFNGLPROGRAMNAMEDPARAMETER4DNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLubyte), GLdouble, GLdouble, GLdouble, GLdouble) # GL/glext.h:6225
PFNGLPROGRAMNAMEDPARAMETER4FVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLubyte), POINTER(GLfloat)) # GL/glext.h:6226
PFNGLPROGRAMNAMEDPARAMETER4DVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLubyte), POINTER(GLdouble)) # GL/glext.h:6227
PFNGLGETPROGRAMNAMEDPARAMETERFVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLubyte), POINTER(GLfloat)) # GL/glext.h:6228
PFNGLGETPROGRAMNAMEDPARAMETERDVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLubyte), POINTER(GLdouble)) # GL/glext.h:6229
# NV_half_float (GL/glext.h:6232)
# (GLhalfNV is NVIDIA's 16-bit half-float scalar type; its ctypes definition
# lives earlier in the file, outside this view.)
GL_NV_half_float = 1 # GL/glext.h:6233
# GL/glext.h:6235
glVertex2hNV = _link_function('glVertex2hNV', None, [GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6236
glVertex2hvNV = _link_function('glVertex2hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6237
glVertex3hNV = _link_function('glVertex3hNV', None, [GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6238
glVertex3hvNV = _link_function('glVertex3hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6239
glVertex4hNV = _link_function('glVertex4hNV', None, [GLhalfNV, GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6240
glVertex4hvNV = _link_function('glVertex4hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6241
glNormal3hNV = _link_function('glNormal3hNV', None, [GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6242
glNormal3hvNV = _link_function('glNormal3hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6243
glColor3hNV = _link_function('glColor3hNV', None, [GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6244
glColor3hvNV = _link_function('glColor3hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6245
glColor4hNV = _link_function('glColor4hNV', None, [GLhalfNV, GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6246
glColor4hvNV = _link_function('glColor4hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6247
glTexCoord1hNV = _link_function('glTexCoord1hNV', None, [GLhalfNV], 'NV_half_float')
# GL/glext.h:6248
glTexCoord1hvNV = _link_function('glTexCoord1hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6249
glTexCoord2hNV = _link_function('glTexCoord2hNV', None, [GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6250
glTexCoord2hvNV = _link_function('glTexCoord2hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6251
glTexCoord3hNV = _link_function('glTexCoord3hNV', None, [GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6252
glTexCoord3hvNV = _link_function('glTexCoord3hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6253
glTexCoord4hNV = _link_function('glTexCoord4hNV', None, [GLhalfNV, GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6254
glTexCoord4hvNV = _link_function('glTexCoord4hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6255
glMultiTexCoord1hNV = _link_function('glMultiTexCoord1hNV', None, [GLenum, GLhalfNV], 'NV_half_float')
# GL/glext.h:6256
glMultiTexCoord1hvNV = _link_function('glMultiTexCoord1hvNV', None, [GLenum, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6257
glMultiTexCoord2hNV = _link_function('glMultiTexCoord2hNV', None, [GLenum, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6258
glMultiTexCoord2hvNV = _link_function('glMultiTexCoord2hvNV', None, [GLenum, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6259
glMultiTexCoord3hNV = _link_function('glMultiTexCoord3hNV', None, [GLenum, GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6260
glMultiTexCoord3hvNV = _link_function('glMultiTexCoord3hvNV', None, [GLenum, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6261
glMultiTexCoord4hNV = _link_function('glMultiTexCoord4hNV', None, [GLenum, GLhalfNV, GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6262
glMultiTexCoord4hvNV = _link_function('glMultiTexCoord4hvNV', None, [GLenum, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6263
glFogCoordhNV = _link_function('glFogCoordhNV', None, [GLhalfNV], 'NV_half_float')
# GL/glext.h:6264
glFogCoordhvNV = _link_function('glFogCoordhvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6265
glSecondaryColor3hNV = _link_function('glSecondaryColor3hNV', None, [GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6266
glSecondaryColor3hvNV = _link_function('glSecondaryColor3hvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6267
glVertexWeighthNV = _link_function('glVertexWeighthNV', None, [GLhalfNV], 'NV_half_float')
# GL/glext.h:6268
glVertexWeighthvNV = _link_function('glVertexWeighthvNV', None, [POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6269
glVertexAttrib1hNV = _link_function('glVertexAttrib1hNV', None, [GLuint, GLhalfNV], 'NV_half_float')
# GL/glext.h:6270
glVertexAttrib1hvNV = _link_function('glVertexAttrib1hvNV', None, [GLuint, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6271
glVertexAttrib2hNV = _link_function('glVertexAttrib2hNV', None, [GLuint, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6272
glVertexAttrib2hvNV = _link_function('glVertexAttrib2hvNV', None, [GLuint, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6273
glVertexAttrib3hNV = _link_function('glVertexAttrib3hNV', None, [GLuint, GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6274
glVertexAttrib3hvNV = _link_function('glVertexAttrib3hvNV', None, [GLuint, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6275
glVertexAttrib4hNV = _link_function('glVertexAttrib4hNV', None, [GLuint, GLhalfNV, GLhalfNV, GLhalfNV, GLhalfNV], 'NV_half_float')
# GL/glext.h:6276
glVertexAttrib4hvNV = _link_function('glVertexAttrib4hvNV', None, [GLuint, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6277
glVertexAttribs1hvNV = _link_function('glVertexAttribs1hvNV', None, [GLuint, GLsizei, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6278
glVertexAttribs2hvNV = _link_function('glVertexAttribs2hvNV', None, [GLuint, GLsizei, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6279
glVertexAttribs3hvNV = _link_function('glVertexAttribs3hvNV', None, [GLuint, GLsizei, POINTER(GLhalfNV)], 'NV_half_float')
# GL/glext.h:6280
glVertexAttribs4hvNV = _link_function('glVertexAttribs4hvNV', None, [GLuint, GLsizei, POINTER(GLhalfNV)], 'NV_half_float')
PFNGLVERTEX2HNVPROC = CFUNCTYPE(None, GLhalfNV, GLhalfNV) # GL/glext.h:6282
PFNGLVERTEX2HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6283
PFNGLVERTEX3HNVPROC = CFUNCTYPE(None, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6284
PFNGLVERTEX3HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6285
PFNGLVERTEX4HNVPROC = CFUNCTYPE(None, GLhalfNV, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6286
PFNGLVERTEX4HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6287
PFNGLNORMAL3HNVPROC = CFUNCTYPE(None, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6288
PFNGLNORMAL3HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6289
PFNGLCOLOR3HNVPROC = CFUNCTYPE(None, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6290
PFNGLCOLOR3HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6291
PFNGLCOLOR4HNVPROC = CFUNCTYPE(None, GLhalfNV, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6292
PFNGLCOLOR4HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6293
PFNGLTEXCOORD1HNVPROC = CFUNCTYPE(None, GLhalfNV) # GL/glext.h:6294
PFNGLTEXCOORD1HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6295
PFNGLTEXCOORD2HNVPROC = CFUNCTYPE(None, GLhalfNV, GLhalfNV) # GL/glext.h:6296
PFNGLTEXCOORD2HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6297
PFNGLTEXCOORD3HNVPROC = CFUNCTYPE(None, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6298
PFNGLTEXCOORD3HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6299
PFNGLTEXCOORD4HNVPROC = CFUNCTYPE(None, GLhalfNV, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6300
PFNGLTEXCOORD4HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6301
PFNGLMULTITEXCOORD1HNVPROC = CFUNCTYPE(None, GLenum, GLhalfNV) # GL/glext.h:6302
PFNGLMULTITEXCOORD1HVNVPROC = CFUNCTYPE(None, GLenum, POINTER(GLhalfNV)) # GL/glext.h:6303
PFNGLMULTITEXCOORD2HNVPROC = CFUNCTYPE(None, GLenum, GLhalfNV, GLhalfNV) # GL/glext.h:6304
PFNGLMULTITEXCOORD2HVNVPROC = CFUNCTYPE(None, GLenum, POINTER(GLhalfNV)) # GL/glext.h:6305
PFNGLMULTITEXCOORD3HNVPROC = CFUNCTYPE(None, GLenum, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6306
PFNGLMULTITEXCOORD3HVNVPROC = CFUNCTYPE(None, GLenum, POINTER(GLhalfNV)) # GL/glext.h:6307
PFNGLMULTITEXCOORD4HNVPROC = CFUNCTYPE(None, GLenum, GLhalfNV, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6308
PFNGLMULTITEXCOORD4HVNVPROC = CFUNCTYPE(None, GLenum, POINTER(GLhalfNV)) # GL/glext.h:6309
PFNGLFOGCOORDHNVPROC = CFUNCTYPE(None, GLhalfNV) # GL/glext.h:6310
PFNGLFOGCOORDHVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6311
PFNGLSECONDARYCOLOR3HNVPROC = CFUNCTYPE(None, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6312
PFNGLSECONDARYCOLOR3HVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6313
PFNGLVERTEXWEIGHTHNVPROC = CFUNCTYPE(None, GLhalfNV) # GL/glext.h:6314
PFNGLVERTEXWEIGHTHVNVPROC = CFUNCTYPE(None, POINTER(GLhalfNV)) # GL/glext.h:6315
PFNGLVERTEXATTRIB1HNVPROC = CFUNCTYPE(None, GLuint, GLhalfNV) # GL/glext.h:6316
PFNGLVERTEXATTRIB1HVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLhalfNV)) # GL/glext.h:6317
PFNGLVERTEXATTRIB2HNVPROC = CFUNCTYPE(None, GLuint, GLhalfNV, GLhalfNV) # GL/glext.h:6318
PFNGLVERTEXATTRIB2HVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLhalfNV)) # GL/glext.h:6319
PFNGLVERTEXATTRIB3HNVPROC = CFUNCTYPE(None, GLuint, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6320
PFNGLVERTEXATTRIB3HVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLhalfNV)) # GL/glext.h:6321
PFNGLVERTEXATTRIB4HNVPROC = CFUNCTYPE(None, GLuint, GLhalfNV, GLhalfNV, GLhalfNV, GLhalfNV) # GL/glext.h:6322
PFNGLVERTEXATTRIB4HVNVPROC = CFUNCTYPE(None, GLuint, POINTER(GLhalfNV)) # GL/glext.h:6323
PFNGLVERTEXATTRIBS1HVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLhalfNV)) # GL/glext.h:6324
PFNGLVERTEXATTRIBS2HVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLhalfNV)) # GL/glext.h:6325
PFNGLVERTEXATTRIBS3HVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLhalfNV)) # GL/glext.h:6326
PFNGLVERTEXATTRIBS4HVNVPROC = CFUNCTYPE(None, GLuint, GLsizei, POINTER(GLhalfNV)) # GL/glext.h:6327
# NV_pixel_data_range (GL/glext.h:6330)
GL_NV_pixel_data_range = 1 # GL/glext.h:6331
# GL/glext.h:6333
glPixelDataRangeNV = _link_function('glPixelDataRangeNV', None, [GLenum, GLsizei, POINTER(GLvoid)], 'NV_pixel_data_range')
# GL/glext.h:6334
glFlushPixelDataRangeNV = _link_function('glFlushPixelDataRangeNV', None, [GLenum], 'NV_pixel_data_range')
PFNGLPIXELDATARANGENVPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:6336
PFNGLFLUSHPIXELDATARANGENVPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:6337
# NV_primitive_restart (GL/glext.h:6340)
GL_NV_primitive_restart = 1 # GL/glext.h:6341
# GL/glext.h:6343
glPrimitiveRestartNV = _link_function('glPrimitiveRestartNV', None, [], 'NV_primitive_restart')
# GL/glext.h:6344
glPrimitiveRestartIndexNV = _link_function('glPrimitiveRestartIndexNV', None, [GLuint], 'NV_primitive_restart')
PFNGLPRIMITIVERESTARTNVPROC = CFUNCTYPE(None) # GL/glext.h:6346
PFNGLPRIMITIVERESTARTINDEXNVPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6347
# NV_texture_expand_normal (GL/glext.h:6350)
GL_NV_texture_expand_normal = 1 # GL/glext.h:6351
# NV_vertex_program2 (GL/glext.h:6354)
GL_NV_vertex_program2 = 1 # GL/glext.h:6355
# ATI_map_object_buffer (GL/glext.h:6358)
GL_ATI_map_object_buffer = 1 # GL/glext.h:6359
# GL/glext.h:6361
glMapObjectBufferATI = _link_function('glMapObjectBufferATI', POINTER(GLvoid), [GLuint], 'ATI_map_object_buffer')
# GL/glext.h:6362
glUnmapObjectBufferATI = _link_function('glUnmapObjectBufferATI', None, [GLuint], 'ATI_map_object_buffer')
PFNGLMAPOBJECTBUFFERATIPROC = CFUNCTYPE(POINTER(GLvoid), GLuint) # GL/glext.h:6364
PFNGLUNMAPOBJECTBUFFERATIPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:6365
# ATI_separate_stencil (GL/glext.h:6368)
GL_ATI_separate_stencil = 1 # GL/glext.h:6369
# GL/glext.h:6371
glStencilOpSeparateATI = _link_function('glStencilOpSeparateATI', None, [GLenum, GLenum, GLenum, GLenum], 'ATI_separate_stencil')
# GL/glext.h:6372
glStencilFuncSeparateATI = _link_function('glStencilFuncSeparateATI', None, [GLenum, GLenum, GLint, GLuint], 'ATI_separate_stencil')
PFNGLSTENCILOPSEPARATEATIPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLenum) # GL/glext.h:6374
PFNGLSTENCILFUNCSEPARATEATIPROC = CFUNCTYPE(None, GLenum, GLenum, GLint, GLuint) # GL/glext.h:6375
# ATI_vertex_attrib_array_object (GL/glext.h:6378)
GL_ATI_vertex_attrib_array_object = 1 # GL/glext.h:6379
# GL/glext.h:6381
glVertexAttribArrayObjectATI = _link_function('glVertexAttribArrayObjectATI', None, [GLuint, GLint, GLenum, GLboolean, GLsizei, GLuint, GLuint], 'ATI_vertex_attrib_array_object')
# GL/glext.h:6382
glGetVertexAttribArrayObjectfvATI = _link_function('glGetVertexAttribArrayObjectfvATI', None, [GLuint, GLenum, POINTER(GLfloat)], 'ATI_vertex_attrib_array_object')
# GL/glext.h:6383
glGetVertexAttribArrayObjectivATI = _link_function('glGetVertexAttribArrayObjectivATI', None, [GLuint, GLenum, POINTER(GLint)], 'ATI_vertex_attrib_array_object')
PFNGLVERTEXATTRIBARRAYOBJECTATIPROC = CFUNCTYPE(None, GLuint, GLint, GLenum, GLboolean, GLsizei, GLuint, GLuint) # GL/glext.h:6385
PFNGLGETVERTEXATTRIBARRAYOBJECTFVATIPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLfloat)) # GL/glext.h:6386
PFNGLGETVERTEXATTRIBARRAYOBJECTIVATIPROC = CFUNCTYPE(None, GLuint, GLenum, POINTER(GLint)) # GL/glext.h:6387
# OES_read_format (GL/glext.h:6390)
GL_OES_read_format = 1 # GL/glext.h:6391
# EXT_depth_bounds_test (GL/glext.h:6394)
GL_EXT_depth_bounds_test = 1 # GL/glext.h:6395
# GLclampd (clamped double) comes from GL/gl.h rather than glext.h, hence the
# different source annotation; the generator emits it here because this is its
# first use in the file.
GLclampd = c_double # /usr/include/GL/gl.h:66
# GL/glext.h:6397
glDepthBoundsEXT = _link_function('glDepthBoundsEXT', None, [GLclampd, GLclampd], 'EXT_depth_bounds_test')
PFNGLDEPTHBOUNDSEXTPROC = CFUNCTYPE(None, GLclampd, GLclampd) # GL/glext.h:6399
# EXT_texture_mirror_clamp (GL/glext.h:6402)
GL_EXT_texture_mirror_clamp = 1 # GL/glext.h:6403
# EXT_blend_equation_separate (GL/glext.h:6406)
GL_EXT_blend_equation_separate = 1 # GL/glext.h:6407
# GL/glext.h:6409
glBlendEquationSeparateEXT = _link_function('glBlendEquationSeparateEXT', None, [GLenum, GLenum], 'EXT_blend_equation_separate')
PFNGLBLENDEQUATIONSEPARATEEXTPROC = CFUNCTYPE(None, GLenum, GLenum) # GL/glext.h:6411
# MESA_pack_invert (GL/glext.h:6414)
GL_MESA_pack_invert = 1 # GL/glext.h:6415
# MESA_ycbcr_texture (GL/glext.h:6418)
GL_MESA_ycbcr_texture = 1 # GL/glext.h:6419
# EXT_pixel_buffer_object (GL/glext.h:6422)
GL_EXT_pixel_buffer_object = 1 # GL/glext.h:6423
# NV_fragment_program_option (GL/glext.h:6426)
GL_NV_fragment_program_option = 1 # GL/glext.h:6427
# NV_fragment_program2 (GL/glext.h:6430)
GL_NV_fragment_program2 = 1 # GL/glext.h:6431
# NV_vertex_program2_option (GL/glext.h:6434)
GL_NV_vertex_program2_option = 1 # GL/glext.h:6435
# NV_vertex_program3 (GL/glext.h:6438)
GL_NV_vertex_program3 = 1 # GL/glext.h:6439
# EXT_framebuffer_object (GL/glext.h:6442)
GL_EXT_framebuffer_object = 1 # GL/glext.h:6443
# GL/glext.h:6445
glIsRenderbufferEXT = _link_function('glIsRenderbufferEXT', GLboolean, [GLuint], 'EXT_framebuffer_object')
# GL/glext.h:6446
glBindRenderbufferEXT = _link_function('glBindRenderbufferEXT', None, [GLenum, GLuint], 'EXT_framebuffer_object')
# GL/glext.h:6447
glDeleteRenderbuffersEXT = _link_function('glDeleteRenderbuffersEXT', None, [GLsizei, POINTER(GLuint)], 'EXT_framebuffer_object')
# GL/glext.h:6448
glGenRenderbuffersEXT = _link_function('glGenRenderbuffersEXT', None, [GLsizei, POINTER(GLuint)], 'EXT_framebuffer_object')
# GL/glext.h:6449
glRenderbufferStorageEXT = _link_function('glRenderbufferStorageEXT', None, [GLenum, GLenum, GLsizei, GLsizei], 'EXT_framebuffer_object')
# GL/glext.h:6450
glGetRenderbufferParameterivEXT = _link_function('glGetRenderbufferParameterivEXT', None, [GLenum, GLenum, POINTER(GLint)], 'EXT_framebuffer_object')
# GL/glext.h:6451
glIsFramebufferEXT = _link_function('glIsFramebufferEXT', GLboolean, [GLuint], 'EXT_framebuffer_object')
# GL/glext.h:6452
glBindFramebufferEXT = _link_function('glBindFramebufferEXT', None, [GLenum, GLuint], 'EXT_framebuffer_object')
# GL/glext.h:6453
glDeleteFramebuffersEXT = _link_function('glDeleteFramebuffersEXT', None, [GLsizei, POINTER(GLuint)], 'EXT_framebuffer_object')
# GL/glext.h:6454
glGenFramebuffersEXT = _link_function('glGenFramebuffersEXT', None, [GLsizei, POINTER(GLuint)], 'EXT_framebuffer_object')
# GL/glext.h:6455
glCheckFramebufferStatusEXT = _link_function('glCheckFramebufferStatusEXT', GLenum, [GLenum], 'EXT_framebuffer_object')
# GL/glext.h:6456
glFramebufferTexture1DEXT = _link_function('glFramebufferTexture1DEXT', None, [GLenum, GLenum, GLenum, GLuint, GLint], 'EXT_framebuffer_object')
# GL/glext.h:6457
glFramebufferTexture2DEXT = _link_function('glFramebufferTexture2DEXT', None, [GLenum, GLenum, GLenum, GLuint, GLint], 'EXT_framebuffer_object')
# GL/glext.h:6458
glFramebufferTexture3DEXT = _link_function('glFramebufferTexture3DEXT', None, [GLenum, GLenum, GLenum, GLuint, GLint, GLint], 'EXT_framebuffer_object')
# GL/glext.h:6459
glFramebufferRenderbufferEXT = _link_function('glFramebufferRenderbufferEXT', None, [GLenum, GLenum, GLenum, GLuint], 'EXT_framebuffer_object')
# GL/glext.h:6460
glGetFramebufferAttachmentParameterivEXT = _link_function('glGetFramebufferAttachmentParameterivEXT', None, [GLenum, GLenum, GLenum, POINTER(GLint)], 'EXT_framebuffer_object')
# GL/glext.h:6461
glGenerateMipmapEXT = _link_function('glGenerateMipmapEXT', None, [GLenum], 'EXT_framebuffer_object')
PFNGLISRENDERBUFFEREXTPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:6463
PFNGLBINDRENDERBUFFEREXTPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:6464
PFNGLDELETERENDERBUFFERSEXTPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:6465
PFNGLGENRENDERBUFFERSEXTPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:6466
PFNGLRENDERBUFFERSTORAGEEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLsizei, GLsizei) # GL/glext.h:6467
PFNGLGETRENDERBUFFERPARAMETERIVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:6468
PFNGLISFRAMEBUFFEREXTPROC = CFUNCTYPE(GLboolean, GLuint) # GL/glext.h:6469
PFNGLBINDFRAMEBUFFEREXTPROC = CFUNCTYPE(None, GLenum, GLuint) # GL/glext.h:6470
PFNGLDELETEFRAMEBUFFERSEXTPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:6471
PFNGLGENFRAMEBUFFERSEXTPROC = CFUNCTYPE(None, GLsizei, POINTER(GLuint)) # GL/glext.h:6472
PFNGLCHECKFRAMEBUFFERSTATUSEXTPROC = CFUNCTYPE(GLenum, GLenum) # GL/glext.h:6473
PFNGLFRAMEBUFFERTEXTURE1DEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLuint, GLint) # GL/glext.h:6474
PFNGLFRAMEBUFFERTEXTURE2DEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLuint, GLint) # GL/glext.h:6475
PFNGLFRAMEBUFFERTEXTURE3DEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLuint, GLint, GLint) # GL/glext.h:6476
PFNGLFRAMEBUFFERRENDERBUFFEREXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, GLuint) # GL/glext.h:6477
PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:6478
PFNGLGENERATEMIPMAPEXTPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:6479
# GREMEDY_string_marker (GL/glext.h:6482)
GL_GREMEDY_string_marker = 1 # GL/glext.h:6483
# GL/glext.h:6485
glStringMarkerGREMEDY = _link_function('glStringMarkerGREMEDY', None, [GLsizei, POINTER(GLvoid)], 'GREMEDY_string_marker')
PFNGLSTRINGMARKERGREMEDYPROC = CFUNCTYPE(None, GLsizei, POINTER(GLvoid)) # GL/glext.h:6487
# GLEXT_LEGACY (/usr/include/GL/gl.h:1633)
__all__ = ['GL_GLEXT_VERSION', 'GL_UNSIGNED_BYTE_3_3_2',
'GL_UNSIGNED_SHORT_4_4_4_4', 'GL_UNSIGNED_SHORT_5_5_5_1',
'GL_UNSIGNED_INT_8_8_8_8', 'GL_UNSIGNED_INT_10_10_10_2', 'GL_RESCALE_NORMAL',
'GL_TEXTURE_BINDING_3D', 'GL_PACK_SKIP_IMAGES', 'GL_PACK_IMAGE_HEIGHT',
'GL_UNPACK_SKIP_IMAGES', 'GL_UNPACK_IMAGE_HEIGHT', 'GL_TEXTURE_3D',
'GL_PROXY_TEXTURE_3D', 'GL_TEXTURE_DEPTH', 'GL_TEXTURE_WRAP_R',
'GL_MAX_3D_TEXTURE_SIZE', 'GL_UNSIGNED_BYTE_2_3_3_REV',
'GL_UNSIGNED_SHORT_5_6_5', 'GL_UNSIGNED_SHORT_5_6_5_REV',
'GL_UNSIGNED_SHORT_4_4_4_4_REV', 'GL_UNSIGNED_SHORT_1_5_5_5_REV',
'GL_UNSIGNED_INT_8_8_8_8_REV', 'GL_UNSIGNED_INT_2_10_10_10_REV', 'GL_BGR',
'GL_BGRA', 'GL_MAX_ELEMENTS_VERTICES', 'GL_MAX_ELEMENTS_INDICES',
'GL_CLAMP_TO_EDGE', 'GL_TEXTURE_MIN_LOD', 'GL_TEXTURE_MAX_LOD',
'GL_TEXTURE_BASE_LEVEL', 'GL_TEXTURE_MAX_LEVEL',
'GL_LIGHT_MODEL_COLOR_CONTROL', 'GL_SINGLE_COLOR',
'GL_SEPARATE_SPECULAR_COLOR', 'GL_SMOOTH_POINT_SIZE_RANGE',
'GL_SMOOTH_POINT_SIZE_GRANULARITY', 'GL_SMOOTH_LINE_WIDTH_RANGE',
'GL_SMOOTH_LINE_WIDTH_GRANULARITY', 'GL_ALIASED_POINT_SIZE_RANGE',
'GL_ALIASED_LINE_WIDTH_RANGE', 'GL_CONSTANT_COLOR',
'GL_ONE_MINUS_CONSTANT_COLOR', 'GL_CONSTANT_ALPHA',
'GL_ONE_MINUS_CONSTANT_ALPHA', 'GL_BLEND_COLOR', 'GL_FUNC_ADD', 'GL_MIN',
'GL_MAX', 'GL_BLEND_EQUATION', 'GL_FUNC_SUBTRACT', 'GL_FUNC_REVERSE_SUBTRACT',
'GL_CONVOLUTION_1D', 'GL_CONVOLUTION_2D', 'GL_SEPARABLE_2D',
'GL_CONVOLUTION_BORDER_MODE', 'GL_CONVOLUTION_FILTER_SCALE',
'GL_CONVOLUTION_FILTER_BIAS', 'GL_REDUCE', 'GL_CONVOLUTION_FORMAT',
'GL_CONVOLUTION_WIDTH', 'GL_CONVOLUTION_HEIGHT', 'GL_MAX_CONVOLUTION_WIDTH',
'GL_MAX_CONVOLUTION_HEIGHT', 'GL_POST_CONVOLUTION_RED_SCALE',
'GL_POST_CONVOLUTION_GREEN_SCALE', 'GL_POST_CONVOLUTION_BLUE_SCALE',
'GL_POST_CONVOLUTION_ALPHA_SCALE', 'GL_POST_CONVOLUTION_RED_BIAS',
'GL_POST_CONVOLUTION_GREEN_BIAS', 'GL_POST_CONVOLUTION_BLUE_BIAS',
'GL_POST_CONVOLUTION_ALPHA_BIAS', 'GL_HISTOGRAM', 'GL_PROXY_HISTOGRAM',
'GL_HISTOGRAM_WIDTH', 'GL_HISTOGRAM_FORMAT', 'GL_HISTOGRAM_RED_SIZE',
'GL_HISTOGRAM_GREEN_SIZE', 'GL_HISTOGRAM_BLUE_SIZE',
'GL_HISTOGRAM_ALPHA_SIZE', 'GL_HISTOGRAM_LUMINANCE_SIZE', 'GL_HISTOGRAM_SINK',
'GL_MINMAX', 'GL_MINMAX_FORMAT', 'GL_MINMAX_SINK', 'GL_TABLE_TOO_LARGE',
'GL_COLOR_MATRIX', 'GL_COLOR_MATRIX_STACK_DEPTH',
'GL_MAX_COLOR_MATRIX_STACK_DEPTH', 'GL_POST_COLOR_MATRIX_RED_SCALE',
'GL_POST_COLOR_MATRIX_GREEN_SCALE', 'GL_POST_COLOR_MATRIX_BLUE_SCALE',
'GL_POST_COLOR_MATRIX_ALPHA_SCALE', 'GL_POST_COLOR_MATRIX_RED_BIAS',
'GL_POST_COLOR_MATRIX_GREEN_BIAS', 'GL_POST_COLOR_MATRIX_BLUE_BIAS',
'GL_POST_COLOR_MATRIX_ALPHA_BIAS', 'GL_COLOR_TABLE',
'GL_POST_CONVOLUTION_COLOR_TABLE', 'GL_POST_COLOR_MATRIX_COLOR_TABLE',
'GL_PROXY_COLOR_TABLE', 'GL_PROXY_POST_CONVOLUTION_COLOR_TABLE',
'GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE', 'GL_COLOR_TABLE_SCALE',
'GL_COLOR_TABLE_BIAS', 'GL_COLOR_TABLE_FORMAT', 'GL_COLOR_TABLE_WIDTH',
'GL_COLOR_TABLE_RED_SIZE', 'GL_COLOR_TABLE_GREEN_SIZE',
'GL_COLOR_TABLE_BLUE_SIZE', 'GL_COLOR_TABLE_ALPHA_SIZE',
'GL_COLOR_TABLE_LUMINANCE_SIZE', 'GL_COLOR_TABLE_INTENSITY_SIZE',
'GL_CONSTANT_BORDER', 'GL_REPLICATE_BORDER', 'GL_CONVOLUTION_BORDER_COLOR',
'GL_TEXTURE0', 'GL_TEXTURE1', 'GL_TEXTURE2', 'GL_TEXTURE3', 'GL_TEXTURE4',
'GL_TEXTURE5', 'GL_TEXTURE6', 'GL_TEXTURE7', 'GL_TEXTURE8', 'GL_TEXTURE9',
'GL_TEXTURE10', 'GL_TEXTURE11', 'GL_TEXTURE12', 'GL_TEXTURE13',
'GL_TEXTURE14', 'GL_TEXTURE15', 'GL_TEXTURE16', 'GL_TEXTURE17',
'GL_TEXTURE18', 'GL_TEXTURE19', 'GL_TEXTURE20', 'GL_TEXTURE21',
'GL_TEXTURE22', 'GL_TEXTURE23', 'GL_TEXTURE24', 'GL_TEXTURE25',
'GL_TEXTURE26', 'GL_TEXTURE27', 'GL_TEXTURE28', 'GL_TEXTURE29',
'GL_TEXTURE30', 'GL_TEXTURE31', 'GL_ACTIVE_TEXTURE',
'GL_CLIENT_ACTIVE_TEXTURE', 'GL_MAX_TEXTURE_UNITS',
'GL_TRANSPOSE_MODELVIEW_MATRIX', 'GL_TRANSPOSE_PROJECTION_MATRIX',
'GL_TRANSPOSE_TEXTURE_MATRIX', 'GL_TRANSPOSE_COLOR_MATRIX', 'GL_MULTISAMPLE',
'GL_SAMPLE_ALPHA_TO_COVERAGE', 'GL_SAMPLE_ALPHA_TO_ONE', 'GL_SAMPLE_COVERAGE',
'GL_SAMPLE_BUFFERS', 'GL_SAMPLES', 'GL_SAMPLE_COVERAGE_VALUE',
'GL_SAMPLE_COVERAGE_INVERT', 'GL_MULTISAMPLE_BIT', 'GL_NORMAL_MAP',
'GL_REFLECTION_MAP', 'GL_TEXTURE_CUBE_MAP', 'GL_TEXTURE_BINDING_CUBE_MAP',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X', 'GL_TEXTURE_CUBE_MAP_NEGATIVE_X',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y', 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z', 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z',
'GL_PROXY_TEXTURE_CUBE_MAP', 'GL_MAX_CUBE_MAP_TEXTURE_SIZE',
'GL_COMPRESSED_ALPHA', 'GL_COMPRESSED_LUMINANCE',
'GL_COMPRESSED_LUMINANCE_ALPHA', 'GL_COMPRESSED_INTENSITY',
'GL_COMPRESSED_RGB', 'GL_COMPRESSED_RGBA', 'GL_TEXTURE_COMPRESSION_HINT',
'GL_TEXTURE_COMPRESSED_IMAGE_SIZE', 'GL_TEXTURE_COMPRESSED',
'GL_NUM_COMPRESSED_TEXTURE_FORMATS', 'GL_COMPRESSED_TEXTURE_FORMATS',
'GL_CLAMP_TO_BORDER', 'GL_COMBINE', 'GL_COMBINE_RGB', 'GL_COMBINE_ALPHA',
'GL_SOURCE0_RGB', 'GL_SOURCE1_RGB', 'GL_SOURCE2_RGB', 'GL_SOURCE0_ALPHA',
'GL_SOURCE1_ALPHA', 'GL_SOURCE2_ALPHA', 'GL_OPERAND0_RGB', 'GL_OPERAND1_RGB',
'GL_OPERAND2_RGB', 'GL_OPERAND0_ALPHA', 'GL_OPERAND1_ALPHA',
'GL_OPERAND2_ALPHA', 'GL_RGB_SCALE', 'GL_ADD_SIGNED', 'GL_INTERPOLATE',
'GL_SUBTRACT', 'GL_CONSTANT', 'GL_PRIMARY_COLOR', 'GL_PREVIOUS',
'GL_DOT3_RGB', 'GL_DOT3_RGBA', 'GL_BLEND_DST_RGB', 'GL_BLEND_SRC_RGB',
'GL_BLEND_DST_ALPHA', 'GL_BLEND_SRC_ALPHA', 'GL_POINT_SIZE_MIN',
'GL_POINT_SIZE_MAX', 'GL_POINT_FADE_THRESHOLD_SIZE',
'GL_POINT_DISTANCE_ATTENUATION', 'GL_GENERATE_MIPMAP',
'GL_GENERATE_MIPMAP_HINT', 'GL_DEPTH_COMPONENT16', 'GL_DEPTH_COMPONENT24',
'GL_DEPTH_COMPONENT32', 'GL_MIRRORED_REPEAT', 'GL_FOG_COORDINATE_SOURCE',
'GL_FOG_COORDINATE', 'GL_FRAGMENT_DEPTH', 'GL_CURRENT_FOG_COORDINATE',
'GL_FOG_COORDINATE_ARRAY_TYPE', 'GL_FOG_COORDINATE_ARRAY_STRIDE',
'GL_FOG_COORDINATE_ARRAY_POINTER', 'GL_FOG_COORDINATE_ARRAY', 'GL_COLOR_SUM',
'GL_CURRENT_SECONDARY_COLOR', 'GL_SECONDARY_COLOR_ARRAY_SIZE',
'GL_SECONDARY_COLOR_ARRAY_TYPE', 'GL_SECONDARY_COLOR_ARRAY_STRIDE',
'GL_SECONDARY_COLOR_ARRAY_POINTER', 'GL_SECONDARY_COLOR_ARRAY',
'GL_MAX_TEXTURE_LOD_BIAS', 'GL_TEXTURE_FILTER_CONTROL', 'GL_TEXTURE_LOD_BIAS',
'GL_INCR_WRAP', 'GL_DECR_WRAP', 'GL_TEXTURE_DEPTH_SIZE',
'GL_DEPTH_TEXTURE_MODE', 'GL_TEXTURE_COMPARE_MODE', 'GL_TEXTURE_COMPARE_FUNC',
'GL_COMPARE_R_TO_TEXTURE', 'GL_BUFFER_SIZE', 'GL_BUFFER_USAGE',
'GL_QUERY_COUNTER_BITS', 'GL_CURRENT_QUERY', 'GL_QUERY_RESULT',
'GL_QUERY_RESULT_AVAILABLE', 'GL_ARRAY_BUFFER', 'GL_ELEMENT_ARRAY_BUFFER',
'GL_ARRAY_BUFFER_BINDING', 'GL_ELEMENT_ARRAY_BUFFER_BINDING',
'GL_VERTEX_ARRAY_BUFFER_BINDING', 'GL_NORMAL_ARRAY_BUFFER_BINDING',
'GL_COLOR_ARRAY_BUFFER_BINDING', 'GL_INDEX_ARRAY_BUFFER_BINDING',
'GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING', 'GL_EDGE_FLAG_ARRAY_BUFFER_BINDING',
'GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING',
'GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING', 'GL_WEIGHT_ARRAY_BUFFER_BINDING',
'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING', 'GL_READ_ONLY', 'GL_WRITE_ONLY',
'GL_READ_WRITE', 'GL_BUFFER_ACCESS', 'GL_BUFFER_MAPPED',
'GL_BUFFER_MAP_POINTER', 'GL_STREAM_DRAW', 'GL_STREAM_READ', 'GL_STREAM_COPY',
'GL_STATIC_DRAW', 'GL_STATIC_READ', 'GL_STATIC_COPY', 'GL_DYNAMIC_DRAW',
'GL_DYNAMIC_READ', 'GL_DYNAMIC_COPY', 'GL_SAMPLES_PASSED', 'GL_FOG_COORD_SRC',
'GL_FOG_COORD', 'GL_CURRENT_FOG_COORD', 'GL_FOG_COORD_ARRAY_TYPE',
'GL_FOG_COORD_ARRAY_STRIDE', 'GL_FOG_COORD_ARRAY_POINTER',
'GL_FOG_COORD_ARRAY', 'GL_FOG_COORD_ARRAY_BUFFER_BINDING', 'GL_SRC0_RGB',
'GL_SRC1_RGB', 'GL_SRC2_RGB', 'GL_SRC0_ALPHA', 'GL_SRC1_ALPHA',
'GL_SRC2_ALPHA', 'GL_BLEND_EQUATION_RGB', 'GL_VERTEX_ATTRIB_ARRAY_ENABLED',
'GL_VERTEX_ATTRIB_ARRAY_SIZE', 'GL_VERTEX_ATTRIB_ARRAY_STRIDE',
'GL_VERTEX_ATTRIB_ARRAY_TYPE', 'GL_CURRENT_VERTEX_ATTRIB',
'GL_VERTEX_PROGRAM_POINT_SIZE', 'GL_VERTEX_PROGRAM_TWO_SIDE',
'GL_VERTEX_ATTRIB_ARRAY_POINTER', 'GL_STENCIL_BACK_FUNC',
'GL_STENCIL_BACK_FAIL', 'GL_STENCIL_BACK_PASS_DEPTH_FAIL',
'GL_STENCIL_BACK_PASS_DEPTH_PASS', 'GL_MAX_DRAW_BUFFERS', 'GL_DRAW_BUFFER0',
'GL_DRAW_BUFFER1', 'GL_DRAW_BUFFER2', 'GL_DRAW_BUFFER3', 'GL_DRAW_BUFFER4',
'GL_DRAW_BUFFER5', 'GL_DRAW_BUFFER6', 'GL_DRAW_BUFFER7', 'GL_DRAW_BUFFER8',
'GL_DRAW_BUFFER9', 'GL_DRAW_BUFFER10', 'GL_DRAW_BUFFER11', 'GL_DRAW_BUFFER12',
'GL_DRAW_BUFFER13', 'GL_DRAW_BUFFER14', 'GL_DRAW_BUFFER15',
'GL_BLEND_EQUATION_ALPHA', 'GL_POINT_SPRITE', 'GL_COORD_REPLACE',
'GL_MAX_VERTEX_ATTRIBS', 'GL_VERTEX_ATTRIB_ARRAY_NORMALIZED',
'GL_MAX_TEXTURE_COORDS', 'GL_MAX_TEXTURE_IMAGE_UNITS', 'GL_FRAGMENT_SHADER',
'GL_VERTEX_SHADER', 'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS',
'GL_MAX_VERTEX_UNIFORM_COMPONENTS', 'GL_MAX_VARYING_FLOATS',
'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS', 'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS',
'GL_SHADER_TYPE', 'GL_FLOAT_VEC2', 'GL_FLOAT_VEC3', 'GL_FLOAT_VEC4',
'GL_INT_VEC2', 'GL_INT_VEC3', 'GL_INT_VEC4', 'GL_BOOL', 'GL_BOOL_VEC2',
'GL_BOOL_VEC3', 'GL_BOOL_VEC4', 'GL_FLOAT_MAT2', 'GL_FLOAT_MAT3',
'GL_FLOAT_MAT4', 'GL_SAMPLER_1D', 'GL_SAMPLER_2D', 'GL_SAMPLER_3D',
'GL_SAMPLER_CUBE', 'GL_SAMPLER_1D_SHADOW', 'GL_SAMPLER_2D_SHADOW',
'GL_DELETE_STATUS', 'GL_COMPILE_STATUS', 'GL_LINK_STATUS',
'GL_VALIDATE_STATUS', 'GL_INFO_LOG_LENGTH', 'GL_ATTACHED_SHADERS',
'GL_ACTIVE_UNIFORMS', 'GL_ACTIVE_UNIFORM_MAX_LENGTH',
'GL_SHADER_SOURCE_LENGTH', 'GL_ACTIVE_ATTRIBUTES',
'GL_ACTIVE_ATTRIBUTE_MAX_LENGTH', 'GL_FRAGMENT_SHADER_DERIVATIVE_HINT',
'GL_SHADING_LANGUAGE_VERSION', 'GL_CURRENT_PROGRAM',
'GL_POINT_SPRITE_COORD_ORIGIN', 'GL_LOWER_LEFT', 'GL_UPPER_LEFT',
'GL_STENCIL_BACK_REF', 'GL_STENCIL_BACK_VALUE_MASK',
'GL_STENCIL_BACK_WRITEMASK', 'GL_TEXTURE0_ARB', 'GL_TEXTURE1_ARB',
'GL_TEXTURE2_ARB', 'GL_TEXTURE3_ARB', 'GL_TEXTURE4_ARB', 'GL_TEXTURE5_ARB',
'GL_TEXTURE6_ARB', 'GL_TEXTURE7_ARB', 'GL_TEXTURE8_ARB', 'GL_TEXTURE9_ARB',
'GL_TEXTURE10_ARB', 'GL_TEXTURE11_ARB', 'GL_TEXTURE12_ARB',
'GL_TEXTURE13_ARB', 'GL_TEXTURE14_ARB', 'GL_TEXTURE15_ARB',
'GL_TEXTURE16_ARB', 'GL_TEXTURE17_ARB', 'GL_TEXTURE18_ARB',
'GL_TEXTURE19_ARB', 'GL_TEXTURE20_ARB', 'GL_TEXTURE21_ARB',
'GL_TEXTURE22_ARB', 'GL_TEXTURE23_ARB', 'GL_TEXTURE24_ARB',
'GL_TEXTURE25_ARB', 'GL_TEXTURE26_ARB', 'GL_TEXTURE27_ARB',
'GL_TEXTURE28_ARB', 'GL_TEXTURE29_ARB', 'GL_TEXTURE30_ARB',
'GL_TEXTURE31_ARB', 'GL_ACTIVE_TEXTURE_ARB', 'GL_CLIENT_ACTIVE_TEXTURE_ARB',
'GL_MAX_TEXTURE_UNITS_ARB', 'GL_TRANSPOSE_MODELVIEW_MATRIX_ARB',
'GL_TRANSPOSE_PROJECTION_MATRIX_ARB', 'GL_TRANSPOSE_TEXTURE_MATRIX_ARB',
'GL_TRANSPOSE_COLOR_MATRIX_ARB', 'GL_MULTISAMPLE_ARB',
'GL_SAMPLE_ALPHA_TO_COVERAGE_ARB', 'GL_SAMPLE_ALPHA_TO_ONE_ARB',
'GL_SAMPLE_COVERAGE_ARB', 'GL_SAMPLE_BUFFERS_ARB', 'GL_SAMPLES_ARB',
'GL_SAMPLE_COVERAGE_VALUE_ARB', 'GL_SAMPLE_COVERAGE_INVERT_ARB',
'GL_MULTISAMPLE_BIT_ARB', 'GL_NORMAL_MAP_ARB', 'GL_REFLECTION_MAP_ARB',
'GL_TEXTURE_CUBE_MAP_ARB', 'GL_TEXTURE_BINDING_CUBE_MAP_ARB',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB', 'GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB', 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB', 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB',
'GL_PROXY_TEXTURE_CUBE_MAP_ARB', 'GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB',
'GL_COMPRESSED_ALPHA_ARB', 'GL_COMPRESSED_LUMINANCE_ARB',
'GL_COMPRESSED_LUMINANCE_ALPHA_ARB', 'GL_COMPRESSED_INTENSITY_ARB',
'GL_COMPRESSED_RGB_ARB', 'GL_COMPRESSED_RGBA_ARB',
'GL_TEXTURE_COMPRESSION_HINT_ARB', 'GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB',
'GL_TEXTURE_COMPRESSED_ARB', 'GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB',
'GL_COMPRESSED_TEXTURE_FORMATS_ARB', 'GL_CLAMP_TO_BORDER_ARB',
'GL_POINT_SIZE_MIN_ARB', 'GL_POINT_SIZE_MAX_ARB',
'GL_POINT_FADE_THRESHOLD_SIZE_ARB', 'GL_POINT_DISTANCE_ATTENUATION_ARB',
'GL_MAX_VERTEX_UNITS_ARB', 'GL_ACTIVE_VERTEX_UNITS_ARB',
'GL_WEIGHT_SUM_UNITY_ARB', 'GL_VERTEX_BLEND_ARB', 'GL_CURRENT_WEIGHT_ARB',
'GL_WEIGHT_ARRAY_TYPE_ARB', 'GL_WEIGHT_ARRAY_STRIDE_ARB',
'GL_WEIGHT_ARRAY_SIZE_ARB', 'GL_WEIGHT_ARRAY_POINTER_ARB',
'GL_WEIGHT_ARRAY_ARB', 'GL_MODELVIEW0_ARB', 'GL_MODELVIEW1_ARB',
'GL_MODELVIEW2_ARB', 'GL_MODELVIEW3_ARB', 'GL_MODELVIEW4_ARB',
'GL_MODELVIEW5_ARB', 'GL_MODELVIEW6_ARB', 'GL_MODELVIEW7_ARB',
'GL_MODELVIEW8_ARB', 'GL_MODELVIEW9_ARB', 'GL_MODELVIEW10_ARB',
'GL_MODELVIEW11_ARB', 'GL_MODELVIEW12_ARB', 'GL_MODELVIEW13_ARB',
'GL_MODELVIEW14_ARB', 'GL_MODELVIEW15_ARB', 'GL_MODELVIEW16_ARB',
'GL_MODELVIEW17_ARB', 'GL_MODELVIEW18_ARB', 'GL_MODELVIEW19_ARB',
'GL_MODELVIEW20_ARB', 'GL_MODELVIEW21_ARB', 'GL_MODELVIEW22_ARB',
'GL_MODELVIEW23_ARB', 'GL_MODELVIEW24_ARB', 'GL_MODELVIEW25_ARB',
'GL_MODELVIEW26_ARB', 'GL_MODELVIEW27_ARB', 'GL_MODELVIEW28_ARB',
'GL_MODELVIEW29_ARB', 'GL_MODELVIEW30_ARB', 'GL_MODELVIEW31_ARB',
'GL_MATRIX_PALETTE_ARB', 'GL_MAX_MATRIX_PALETTE_STACK_DEPTH_ARB',
'GL_MAX_PALETTE_MATRICES_ARB', 'GL_CURRENT_PALETTE_MATRIX_ARB',
'GL_MATRIX_INDEX_ARRAY_ARB', 'GL_CURRENT_MATRIX_INDEX_ARB',
'GL_MATRIX_INDEX_ARRAY_SIZE_ARB', 'GL_MATRIX_INDEX_ARRAY_TYPE_ARB',
'GL_MATRIX_INDEX_ARRAY_STRIDE_ARB', 'GL_MATRIX_INDEX_ARRAY_POINTER_ARB',
'GL_COMBINE_ARB', 'GL_COMBINE_RGB_ARB', 'GL_COMBINE_ALPHA_ARB',
'GL_SOURCE0_RGB_ARB', 'GL_SOURCE1_RGB_ARB', 'GL_SOURCE2_RGB_ARB',
'GL_SOURCE0_ALPHA_ARB', 'GL_SOURCE1_ALPHA_ARB', 'GL_SOURCE2_ALPHA_ARB',
'GL_OPERAND0_RGB_ARB', 'GL_OPERAND1_RGB_ARB', 'GL_OPERAND2_RGB_ARB',
'GL_OPERAND0_ALPHA_ARB', 'GL_OPERAND1_ALPHA_ARB', 'GL_OPERAND2_ALPHA_ARB',
'GL_RGB_SCALE_ARB', 'GL_ADD_SIGNED_ARB', 'GL_INTERPOLATE_ARB',
'GL_SUBTRACT_ARB', 'GL_CONSTANT_ARB', 'GL_PRIMARY_COLOR_ARB',
'GL_PREVIOUS_ARB', 'GL_DOT3_RGB_ARB', 'GL_DOT3_RGBA_ARB',
'GL_MIRRORED_REPEAT_ARB', 'GL_DEPTH_COMPONENT16_ARB',
'GL_DEPTH_COMPONENT24_ARB', 'GL_DEPTH_COMPONENT32_ARB',
'GL_TEXTURE_DEPTH_SIZE_ARB', 'GL_DEPTH_TEXTURE_MODE_ARB',
'GL_TEXTURE_COMPARE_MODE_ARB', 'GL_TEXTURE_COMPARE_FUNC_ARB',
'GL_COMPARE_R_TO_TEXTURE_ARB', 'GL_TEXTURE_COMPARE_FAIL_VALUE_ARB',
'GL_COLOR_SUM_ARB', 'GL_VERTEX_PROGRAM_ARB',
'GL_VERTEX_ATTRIB_ARRAY_ENABLED_ARB', 'GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB',
'GL_VERTEX_ATTRIB_ARRAY_STRIDE_ARB', 'GL_VERTEX_ATTRIB_ARRAY_TYPE_ARB',
'GL_CURRENT_VERTEX_ATTRIB_ARB', 'GL_PROGRAM_LENGTH_ARB',
'GL_PROGRAM_STRING_ARB', 'GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB',
'GL_MAX_PROGRAM_MATRICES_ARB', 'GL_CURRENT_MATRIX_STACK_DEPTH_ARB',
'GL_CURRENT_MATRIX_ARB', 'GL_VERTEX_PROGRAM_POINT_SIZE_ARB',
'GL_VERTEX_PROGRAM_TWO_SIDE_ARB', 'GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB',
'GL_PROGRAM_ERROR_POSITION_ARB', 'GL_PROGRAM_BINDING_ARB',
'GL_MAX_VERTEX_ATTRIBS_ARB', 'GL_VERTEX_ATTRIB_ARRAY_NORMALIZED_ARB',
'GL_PROGRAM_ERROR_STRING_ARB', 'GL_PROGRAM_FORMAT_ASCII_ARB',
'GL_PROGRAM_FORMAT_ARB', 'GL_PROGRAM_INSTRUCTIONS_ARB',
'GL_MAX_PROGRAM_INSTRUCTIONS_ARB', 'GL_PROGRAM_NATIVE_INSTRUCTIONS_ARB',
'GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB', 'GL_PROGRAM_TEMPORARIES_ARB',
'GL_MAX_PROGRAM_TEMPORARIES_ARB', 'GL_PROGRAM_NATIVE_TEMPORARIES_ARB',
'GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB', 'GL_PROGRAM_PARAMETERS_ARB',
'GL_MAX_PROGRAM_PARAMETERS_ARB', 'GL_PROGRAM_NATIVE_PARAMETERS_ARB',
'GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB', 'GL_PROGRAM_ATTRIBS_ARB',
'GL_MAX_PROGRAM_ATTRIBS_ARB', 'GL_PROGRAM_NATIVE_ATTRIBS_ARB',
'GL_MAX_PROGRAM_NATIVE_ATTRIBS_ARB', 'GL_PROGRAM_ADDRESS_REGISTERS_ARB',
'GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB',
'GL_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB',
'GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB',
'GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB', 'GL_MAX_PROGRAM_ENV_PARAMETERS_ARB',
'GL_PROGRAM_UNDER_NATIVE_LIMITS_ARB', 'GL_TRANSPOSE_CURRENT_MATRIX_ARB',
'GL_MATRIX0_ARB', 'GL_MATRIX1_ARB', 'GL_MATRIX2_ARB', 'GL_MATRIX3_ARB',
'GL_MATRIX4_ARB', 'GL_MATRIX5_ARB', 'GL_MATRIX6_ARB', 'GL_MATRIX7_ARB',
'GL_MATRIX8_ARB', 'GL_MATRIX9_ARB', 'GL_MATRIX10_ARB', 'GL_MATRIX11_ARB',
'GL_MATRIX12_ARB', 'GL_MATRIX13_ARB', 'GL_MATRIX14_ARB', 'GL_MATRIX15_ARB',
'GL_MATRIX16_ARB', 'GL_MATRIX17_ARB', 'GL_MATRIX18_ARB', 'GL_MATRIX19_ARB',
'GL_MATRIX20_ARB', 'GL_MATRIX21_ARB', 'GL_MATRIX22_ARB', 'GL_MATRIX23_ARB',
'GL_MATRIX24_ARB', 'GL_MATRIX25_ARB', 'GL_MATRIX26_ARB', 'GL_MATRIX27_ARB',
'GL_MATRIX28_ARB', 'GL_MATRIX29_ARB', 'GL_MATRIX30_ARB', 'GL_MATRIX31_ARB',
'GL_FRAGMENT_PROGRAM_ARB', 'GL_PROGRAM_ALU_INSTRUCTIONS_ARB',
'GL_PROGRAM_TEX_INSTRUCTIONS_ARB', 'GL_PROGRAM_TEX_INDIRECTIONS_ARB',
'GL_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB',
'GL_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB',
'GL_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB',
'GL_MAX_PROGRAM_ALU_INSTRUCTIONS_ARB', 'GL_MAX_PROGRAM_TEX_INSTRUCTIONS_ARB',
'GL_MAX_PROGRAM_TEX_INDIRECTIONS_ARB',
'GL_MAX_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB',
'GL_MAX_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB',
'GL_MAX_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB', 'GL_MAX_TEXTURE_COORDS_ARB',
'GL_MAX_TEXTURE_IMAGE_UNITS_ARB', 'GL_BUFFER_SIZE_ARB', 'GL_BUFFER_USAGE_ARB',
'GL_ARRAY_BUFFER_ARB', 'GL_ELEMENT_ARRAY_BUFFER_ARB',
'GL_ARRAY_BUFFER_BINDING_ARB', 'GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB',
'GL_VERTEX_ARRAY_BUFFER_BINDING_ARB', 'GL_NORMAL_ARRAY_BUFFER_BINDING_ARB',
'GL_COLOR_ARRAY_BUFFER_BINDING_ARB', 'GL_INDEX_ARRAY_BUFFER_BINDING_ARB',
'GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB',
'GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB',
'GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB',
'GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB',
'GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB',
'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING_ARB', 'GL_READ_ONLY_ARB',
'GL_WRITE_ONLY_ARB', 'GL_READ_WRITE_ARB', 'GL_BUFFER_ACCESS_ARB',
'GL_BUFFER_MAPPED_ARB', 'GL_BUFFER_MAP_POINTER_ARB', 'GL_STREAM_DRAW_ARB',
'GL_STREAM_READ_ARB', 'GL_STREAM_COPY_ARB', 'GL_STATIC_DRAW_ARB',
'GL_STATIC_READ_ARB', 'GL_STATIC_COPY_ARB', 'GL_DYNAMIC_DRAW_ARB',
'GL_DYNAMIC_READ_ARB', 'GL_DYNAMIC_COPY_ARB', 'GL_QUERY_COUNTER_BITS_ARB',
'GL_CURRENT_QUERY_ARB', 'GL_QUERY_RESULT_ARB',
'GL_QUERY_RESULT_AVAILABLE_ARB', 'GL_SAMPLES_PASSED_ARB',
'GL_PROGRAM_OBJECT_ARB', 'GL_SHADER_OBJECT_ARB', 'GL_OBJECT_TYPE_ARB',
'GL_OBJECT_SUBTYPE_ARB', 'GL_FLOAT_VEC2_ARB', 'GL_FLOAT_VEC3_ARB',
'GL_FLOAT_VEC4_ARB', 'GL_INT_VEC2_ARB', 'GL_INT_VEC3_ARB', 'GL_INT_VEC4_ARB',
'GL_BOOL_ARB', 'GL_BOOL_VEC2_ARB', 'GL_BOOL_VEC3_ARB', 'GL_BOOL_VEC4_ARB',
'GL_FLOAT_MAT2_ARB', 'GL_FLOAT_MAT3_ARB', 'GL_FLOAT_MAT4_ARB',
'GL_SAMPLER_1D_ARB', 'GL_SAMPLER_2D_ARB', 'GL_SAMPLER_3D_ARB',
'GL_SAMPLER_CUBE_ARB', 'GL_SAMPLER_1D_SHADOW_ARB', 'GL_SAMPLER_2D_SHADOW_ARB',
'GL_SAMPLER_2D_RECT_ARB', 'GL_SAMPLER_2D_RECT_SHADOW_ARB',
'GL_OBJECT_DELETE_STATUS_ARB', 'GL_OBJECT_COMPILE_STATUS_ARB',
'GL_OBJECT_LINK_STATUS_ARB', 'GL_OBJECT_VALIDATE_STATUS_ARB',
'GL_OBJECT_INFO_LOG_LENGTH_ARB', 'GL_OBJECT_ATTACHED_OBJECTS_ARB',
'GL_OBJECT_ACTIVE_UNIFORMS_ARB', 'GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH_ARB',
'GL_OBJECT_SHADER_SOURCE_LENGTH_ARB', 'GL_VERTEX_SHADER_ARB',
'GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB', 'GL_MAX_VARYING_FLOATS_ARB',
'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB',
'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB', 'GL_OBJECT_ACTIVE_ATTRIBUTES_ARB',
'GL_OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB', 'GL_FRAGMENT_SHADER_ARB',
'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB',
'GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB', 'GL_SHADING_LANGUAGE_VERSION_ARB',
'GL_POINT_SPRITE_ARB', 'GL_COORD_REPLACE_ARB', 'GL_MAX_DRAW_BUFFERS_ARB',
'GL_DRAW_BUFFER0_ARB', 'GL_DRAW_BUFFER1_ARB', 'GL_DRAW_BUFFER2_ARB',
'GL_DRAW_BUFFER3_ARB', 'GL_DRAW_BUFFER4_ARB', 'GL_DRAW_BUFFER5_ARB',
'GL_DRAW_BUFFER6_ARB', 'GL_DRAW_BUFFER7_ARB', 'GL_DRAW_BUFFER8_ARB',
'GL_DRAW_BUFFER9_ARB', 'GL_DRAW_BUFFER10_ARB', 'GL_DRAW_BUFFER11_ARB',
'GL_DRAW_BUFFER12_ARB', 'GL_DRAW_BUFFER13_ARB', 'GL_DRAW_BUFFER14_ARB',
'GL_DRAW_BUFFER15_ARB', 'GL_TEXTURE_RECTANGLE_ARB',
'GL_TEXTURE_BINDING_RECTANGLE_ARB', 'GL_PROXY_TEXTURE_RECTANGLE_ARB',
'GL_MAX_RECTANGLE_TEXTURE_SIZE_ARB', 'GL_RGBA_FLOAT_MODE_ARB',
'GL_CLAMP_VERTEX_COLOR_ARB', 'GL_CLAMP_FRAGMENT_COLOR_ARB',
'GL_CLAMP_READ_COLOR_ARB', 'GL_FIXED_ONLY_ARB', 'GL_HALF_FLOAT_ARB',
'GL_TEXTURE_RED_TYPE_ARB', 'GL_TEXTURE_GREEN_TYPE_ARB',
'GL_TEXTURE_BLUE_TYPE_ARB', 'GL_TEXTURE_ALPHA_TYPE_ARB',
'GL_TEXTURE_LUMINANCE_TYPE_ARB', 'GL_TEXTURE_INTENSITY_TYPE_ARB',
'GL_TEXTURE_DEPTH_TYPE_ARB', 'GL_UNSIGNED_NORMALIZED_ARB', 'GL_RGBA32F_ARB',
'GL_RGB32F_ARB', 'GL_ALPHA32F_ARB', 'GL_INTENSITY32F_ARB',
'GL_LUMINANCE32F_ARB', 'GL_LUMINANCE_ALPHA32F_ARB', 'GL_RGBA16F_ARB',
'GL_RGB16F_ARB', 'GL_ALPHA16F_ARB', 'GL_INTENSITY16F_ARB',
'GL_LUMINANCE16F_ARB', 'GL_LUMINANCE_ALPHA16F_ARB',
'GL_PIXEL_PACK_BUFFER_ARB', 'GL_PIXEL_UNPACK_BUFFER_ARB',
'GL_PIXEL_PACK_BUFFER_BINDING_ARB', 'GL_PIXEL_UNPACK_BUFFER_BINDING_ARB',
'GL_ABGR_EXT', 'GL_CONSTANT_COLOR_EXT', 'GL_ONE_MINUS_CONSTANT_COLOR_EXT',
'GL_CONSTANT_ALPHA_EXT', 'GL_ONE_MINUS_CONSTANT_ALPHA_EXT',
'GL_BLEND_COLOR_EXT', 'GL_POLYGON_OFFSET_EXT', 'GL_POLYGON_OFFSET_FACTOR_EXT',
'GL_POLYGON_OFFSET_BIAS_EXT', 'GL_ALPHA4_EXT', 'GL_ALPHA8_EXT',
'GL_ALPHA12_EXT', 'GL_ALPHA16_EXT', 'GL_LUMINANCE4_EXT', 'GL_LUMINANCE8_EXT',
'GL_LUMINANCE12_EXT', 'GL_LUMINANCE16_EXT', 'GL_LUMINANCE4_ALPHA4_EXT',
'GL_LUMINANCE6_ALPHA2_EXT', 'GL_LUMINANCE8_ALPHA8_EXT',
'GL_LUMINANCE12_ALPHA4_EXT', 'GL_LUMINANCE12_ALPHA12_EXT',
'GL_LUMINANCE16_ALPHA16_EXT', 'GL_INTENSITY_EXT', 'GL_INTENSITY4_EXT',
'GL_INTENSITY8_EXT', 'GL_INTENSITY12_EXT', 'GL_INTENSITY16_EXT',
'GL_RGB2_EXT', 'GL_RGB4_EXT', 'GL_RGB5_EXT', 'GL_RGB8_EXT', 'GL_RGB10_EXT',
'GL_RGB12_EXT', 'GL_RGB16_EXT', 'GL_RGBA2_EXT', 'GL_RGBA4_EXT',
'GL_RGB5_A1_EXT', 'GL_RGBA8_EXT', 'GL_RGB10_A2_EXT', 'GL_RGBA12_EXT',
'GL_RGBA16_EXT', 'GL_TEXTURE_RED_SIZE_EXT', 'GL_TEXTURE_GREEN_SIZE_EXT',
'GL_TEXTURE_BLUE_SIZE_EXT', 'GL_TEXTURE_ALPHA_SIZE_EXT',
'GL_TEXTURE_LUMINANCE_SIZE_EXT', 'GL_TEXTURE_INTENSITY_SIZE_EXT',
'GL_REPLACE_EXT', 'GL_PROXY_TEXTURE_1D_EXT', 'GL_PROXY_TEXTURE_2D_EXT',
'GL_TEXTURE_TOO_LARGE_EXT', 'GL_PACK_SKIP_IMAGES_EXT',
'GL_PACK_IMAGE_HEIGHT_EXT', 'GL_UNPACK_SKIP_IMAGES_EXT',
'GL_UNPACK_IMAGE_HEIGHT_EXT', 'GL_TEXTURE_3D_EXT', 'GL_PROXY_TEXTURE_3D_EXT',
'GL_TEXTURE_DEPTH_EXT', 'GL_TEXTURE_WRAP_R_EXT', 'GL_MAX_3D_TEXTURE_SIZE_EXT',
'GL_FILTER4_SGIS', 'GL_TEXTURE_FILTER4_SIZE_SGIS', 'GL_HISTOGRAM_EXT',
'GL_PROXY_HISTOGRAM_EXT', 'GL_HISTOGRAM_WIDTH_EXT', 'GL_HISTOGRAM_FORMAT_EXT',
'GL_HISTOGRAM_RED_SIZE_EXT', 'GL_HISTOGRAM_GREEN_SIZE_EXT',
'GL_HISTOGRAM_BLUE_SIZE_EXT', 'GL_HISTOGRAM_ALPHA_SIZE_EXT',
'GL_HISTOGRAM_LUMINANCE_SIZE_EXT', 'GL_HISTOGRAM_SINK_EXT', 'GL_MINMAX_EXT',
'GL_MINMAX_FORMAT_EXT', 'GL_MINMAX_SINK_EXT', 'GL_TABLE_TOO_LARGE_EXT',
'GL_CONVOLUTION_1D_EXT', 'GL_CONVOLUTION_2D_EXT', 'GL_SEPARABLE_2D_EXT',
'GL_CONVOLUTION_BORDER_MODE_EXT', 'GL_CONVOLUTION_FILTER_SCALE_EXT',
'GL_CONVOLUTION_FILTER_BIAS_EXT', 'GL_REDUCE_EXT',
'GL_CONVOLUTION_FORMAT_EXT', 'GL_CONVOLUTION_WIDTH_EXT',
'GL_CONVOLUTION_HEIGHT_EXT', 'GL_MAX_CONVOLUTION_WIDTH_EXT',
'GL_MAX_CONVOLUTION_HEIGHT_EXT', 'GL_POST_CONVOLUTION_RED_SCALE_EXT',
'GL_POST_CONVOLUTION_GREEN_SCALE_EXT', 'GL_POST_CONVOLUTION_BLUE_SCALE_EXT',
'GL_POST_CONVOLUTION_ALPHA_SCALE_EXT', 'GL_POST_CONVOLUTION_RED_BIAS_EXT',
'GL_POST_CONVOLUTION_GREEN_BIAS_EXT', 'GL_POST_CONVOLUTION_BLUE_BIAS_EXT',
'GL_POST_CONVOLUTION_ALPHA_BIAS_EXT', 'GL_COLOR_MATRIX_SGI',
'GL_COLOR_MATRIX_STACK_DEPTH_SGI', 'GL_MAX_COLOR_MATRIX_STACK_DEPTH_SGI',
'GL_POST_COLOR_MATRIX_RED_SCALE_SGI', 'GL_POST_COLOR_MATRIX_GREEN_SCALE_SGI',
'GL_POST_COLOR_MATRIX_BLUE_SCALE_SGI', 'GL_POST_COLOR_MATRIX_ALPHA_SCALE_SGI',
'GL_POST_COLOR_MATRIX_RED_BIAS_SGI', 'GL_POST_COLOR_MATRIX_GREEN_BIAS_SGI',
'GL_POST_COLOR_MATRIX_BLUE_BIAS_SGI', 'GL_POST_COLOR_MATRIX_ALPHA_BIAS_SGI',
'GL_COLOR_TABLE_SGI', 'GL_POST_CONVOLUTION_COLOR_TABLE_SGI',
'GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI', 'GL_PROXY_COLOR_TABLE_SGI',
'GL_PROXY_POST_CONVOLUTION_COLOR_TABLE_SGI',
'GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE_SGI', 'GL_COLOR_TABLE_SCALE_SGI',
'GL_COLOR_TABLE_BIAS_SGI', 'GL_COLOR_TABLE_FORMAT_SGI',
'GL_COLOR_TABLE_WIDTH_SGI', 'GL_COLOR_TABLE_RED_SIZE_SGI',
'GL_COLOR_TABLE_GREEN_SIZE_SGI', 'GL_COLOR_TABLE_BLUE_SIZE_SGI',
'GL_COLOR_TABLE_ALPHA_SIZE_SGI', 'GL_COLOR_TABLE_LUMINANCE_SIZE_SGI',
'GL_COLOR_TABLE_INTENSITY_SIZE_SGI', 'GL_PIXEL_TEXTURE_SGIS',
'GL_PIXEL_FRAGMENT_RGB_SOURCE_SGIS', 'GL_PIXEL_FRAGMENT_ALPHA_SOURCE_SGIS',
'GL_PIXEL_GROUP_COLOR_SGIS', 'GL_PIXEL_TEX_GEN_SGIX',
'GL_PIXEL_TEX_GEN_MODE_SGIX', 'GL_PACK_SKIP_VOLUMES_SGIS',
'GL_PACK_IMAGE_DEPTH_SGIS', 'GL_UNPACK_SKIP_VOLUMES_SGIS',
'GL_UNPACK_IMAGE_DEPTH_SGIS', 'GL_TEXTURE_4D_SGIS',
'GL_PROXY_TEXTURE_4D_SGIS', 'GL_TEXTURE_4DSIZE_SGIS',
'GL_TEXTURE_WRAP_Q_SGIS', 'GL_MAX_4D_TEXTURE_SIZE_SGIS',
'GL_TEXTURE_4D_BINDING_SGIS', 'GL_TEXTURE_COLOR_TABLE_SGI',
'GL_PROXY_TEXTURE_COLOR_TABLE_SGI', 'GL_CMYK_EXT', 'GL_CMYKA_EXT',
'GL_PACK_CMYK_HINT_EXT', 'GL_UNPACK_CMYK_HINT_EXT', 'GL_TEXTURE_PRIORITY_EXT',
'GL_TEXTURE_RESIDENT_EXT', 'GL_TEXTURE_1D_BINDING_EXT',
'GL_TEXTURE_2D_BINDING_EXT', 'GL_TEXTURE_3D_BINDING_EXT',
'GL_DETAIL_TEXTURE_2D_SGIS', 'GL_DETAIL_TEXTURE_2D_BINDING_SGIS',
'GL_LINEAR_DETAIL_SGIS', 'GL_LINEAR_DETAIL_ALPHA_SGIS',
'GL_LINEAR_DETAIL_COLOR_SGIS', 'GL_DETAIL_TEXTURE_LEVEL_SGIS',
'GL_DETAIL_TEXTURE_MODE_SGIS', 'GL_DETAIL_TEXTURE_FUNC_POINTS_SGIS',
'GL_LINEAR_SHARPEN_SGIS', 'GL_LINEAR_SHARPEN_ALPHA_SGIS',
'GL_LINEAR_SHARPEN_COLOR_SGIS', 'GL_SHARPEN_TEXTURE_FUNC_POINTS_SGIS',
'GL_UNSIGNED_BYTE_3_3_2_EXT', 'GL_UNSIGNED_SHORT_4_4_4_4_EXT',
'GL_UNSIGNED_SHORT_5_5_5_1_EXT', 'GL_UNSIGNED_INT_8_8_8_8_EXT',
'GL_UNSIGNED_INT_10_10_10_2_EXT', 'GL_TEXTURE_MIN_LOD_SGIS',
'GL_TEXTURE_MAX_LOD_SGIS', 'GL_TEXTURE_BASE_LEVEL_SGIS',
'GL_TEXTURE_MAX_LEVEL_SGIS', 'GL_MULTISAMPLE_SGIS',
'GL_SAMPLE_ALPHA_TO_MASK_SGIS', 'GL_SAMPLE_ALPHA_TO_ONE_SGIS',
'GL_SAMPLE_MASK_SGIS', 'GL_1PASS_SGIS', 'GL_2PASS_0_SGIS', 'GL_2PASS_1_SGIS',
'GL_4PASS_0_SGIS', 'GL_4PASS_1_SGIS', 'GL_4PASS_2_SGIS', 'GL_4PASS_3_SGIS',
'GL_SAMPLE_BUFFERS_SGIS', 'GL_SAMPLES_SGIS', 'GL_SAMPLE_MASK_VALUE_SGIS',
'GL_SAMPLE_MASK_INVERT_SGIS', 'GL_SAMPLE_PATTERN_SGIS',
'GL_RESCALE_NORMAL_EXT', 'GL_VERTEX_ARRAY_EXT', 'GL_NORMAL_ARRAY_EXT',
'GL_COLOR_ARRAY_EXT', 'GL_INDEX_ARRAY_EXT', 'GL_TEXTURE_COORD_ARRAY_EXT',
'GL_EDGE_FLAG_ARRAY_EXT', 'GL_VERTEX_ARRAY_SIZE_EXT',
'GL_VERTEX_ARRAY_TYPE_EXT', 'GL_VERTEX_ARRAY_STRIDE_EXT',
'GL_VERTEX_ARRAY_COUNT_EXT', 'GL_NORMAL_ARRAY_TYPE_EXT',
'GL_NORMAL_ARRAY_STRIDE_EXT', 'GL_NORMAL_ARRAY_COUNT_EXT',
'GL_COLOR_ARRAY_SIZE_EXT', 'GL_COLOR_ARRAY_TYPE_EXT',
'GL_COLOR_ARRAY_STRIDE_EXT', 'GL_COLOR_ARRAY_COUNT_EXT',
'GL_INDEX_ARRAY_TYPE_EXT', 'GL_INDEX_ARRAY_STRIDE_EXT',
'GL_INDEX_ARRAY_COUNT_EXT', 'GL_TEXTURE_COORD_ARRAY_SIZE_EXT',
'GL_TEXTURE_COORD_ARRAY_TYPE_EXT', 'GL_TEXTURE_COORD_ARRAY_STRIDE_EXT',
'GL_TEXTURE_COORD_ARRAY_COUNT_EXT', 'GL_EDGE_FLAG_ARRAY_STRIDE_EXT',
'GL_EDGE_FLAG_ARRAY_COUNT_EXT', 'GL_VERTEX_ARRAY_POINTER_EXT',
'GL_NORMAL_ARRAY_POINTER_EXT', 'GL_COLOR_ARRAY_POINTER_EXT',
'GL_INDEX_ARRAY_POINTER_EXT', 'GL_TEXTURE_COORD_ARRAY_POINTER_EXT',
'GL_EDGE_FLAG_ARRAY_POINTER_EXT', 'GL_GENERATE_MIPMAP_SGIS',
'GL_GENERATE_MIPMAP_HINT_SGIS', 'GL_LINEAR_CLIPMAP_LINEAR_SGIX',
'GL_TEXTURE_CLIPMAP_CENTER_SGIX', 'GL_TEXTURE_CLIPMAP_FRAME_SGIX',
'GL_TEXTURE_CLIPMAP_OFFSET_SGIX', 'GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX',
'GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX', 'GL_TEXTURE_CLIPMAP_DEPTH_SGIX',
'GL_MAX_CLIPMAP_DEPTH_SGIX', 'GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX',
'GL_NEAREST_CLIPMAP_NEAREST_SGIX', 'GL_NEAREST_CLIPMAP_LINEAR_SGIX',
'GL_LINEAR_CLIPMAP_NEAREST_SGIX', 'GL_TEXTURE_COMPARE_SGIX',
'GL_TEXTURE_COMPARE_OPERATOR_SGIX', 'GL_TEXTURE_LEQUAL_R_SGIX',
'GL_TEXTURE_GEQUAL_R_SGIX', 'GL_CLAMP_TO_EDGE_SGIS',
'GL_CLAMP_TO_BORDER_SGIS', 'GL_FUNC_ADD_EXT', 'GL_MIN_EXT', 'GL_MAX_EXT',
'GL_BLEND_EQUATION_EXT', 'GL_FUNC_SUBTRACT_EXT',
'GL_FUNC_REVERSE_SUBTRACT_EXT', 'GL_INTERLACE_SGIX',
'GL_PIXEL_TILE_BEST_ALIGNMENT_SGIX', 'GL_PIXEL_TILE_CACHE_INCREMENT_SGIX',
'GL_PIXEL_TILE_WIDTH_SGIX', 'GL_PIXEL_TILE_HEIGHT_SGIX',
'GL_PIXEL_TILE_GRID_WIDTH_SGIX', 'GL_PIXEL_TILE_GRID_HEIGHT_SGIX',
'GL_PIXEL_TILE_GRID_DEPTH_SGIX', 'GL_PIXEL_TILE_CACHE_SIZE_SGIX',
'GL_DUAL_ALPHA4_SGIS', 'GL_DUAL_ALPHA8_SGIS', 'GL_DUAL_ALPHA12_SGIS',
'GL_DUAL_ALPHA16_SGIS', 'GL_DUAL_LUMINANCE4_SGIS', 'GL_DUAL_LUMINANCE8_SGIS',
'GL_DUAL_LUMINANCE12_SGIS', 'GL_DUAL_LUMINANCE16_SGIS',
'GL_DUAL_INTENSITY4_SGIS', 'GL_DUAL_INTENSITY8_SGIS',
'GL_DUAL_INTENSITY12_SGIS', 'GL_DUAL_INTENSITY16_SGIS',
'GL_DUAL_LUMINANCE_ALPHA4_SGIS', 'GL_DUAL_LUMINANCE_ALPHA8_SGIS',
'GL_QUAD_ALPHA4_SGIS', 'GL_QUAD_ALPHA8_SGIS', 'GL_QUAD_LUMINANCE4_SGIS',
'GL_QUAD_LUMINANCE8_SGIS', 'GL_QUAD_INTENSITY4_SGIS',
'GL_QUAD_INTENSITY8_SGIS', 'GL_DUAL_TEXTURE_SELECT_SGIS',
'GL_QUAD_TEXTURE_SELECT_SGIS', 'GL_SPRITE_SGIX', 'GL_SPRITE_MODE_SGIX',
'GL_SPRITE_AXIS_SGIX', 'GL_SPRITE_TRANSLATION_SGIX', 'GL_SPRITE_AXIAL_SGIX',
'GL_SPRITE_OBJECT_ALIGNED_SGIX', 'GL_SPRITE_EYE_ALIGNED_SGIX',
'GL_TEXTURE_MULTI_BUFFER_HINT_SGIX', 'GL_POINT_SIZE_MIN_EXT',
'GL_POINT_SIZE_MAX_EXT', 'GL_POINT_FADE_THRESHOLD_SIZE_EXT',
'GL_DISTANCE_ATTENUATION_EXT', 'GL_POINT_SIZE_MIN_SGIS',
'GL_POINT_SIZE_MAX_SGIS', 'GL_POINT_FADE_THRESHOLD_SIZE_SGIS',
'GL_DISTANCE_ATTENUATION_SGIS', 'GL_INSTRUMENT_BUFFER_POINTER_SGIX',
'GL_INSTRUMENT_MEASUREMENTS_SGIX', 'GL_POST_TEXTURE_FILTER_BIAS_SGIX',
'GL_POST_TEXTURE_FILTER_SCALE_SGIX', 'GL_POST_TEXTURE_FILTER_BIAS_RANGE_SGIX',
'GL_POST_TEXTURE_FILTER_SCALE_RANGE_SGIX', 'GL_FRAMEZOOM_SGIX',
'GL_FRAMEZOOM_FACTOR_SGIX', 'GL_MAX_FRAMEZOOM_FACTOR_SGIX',
'GL_TEXTURE_DEFORMATION_BIT_SGIX', 'GL_GEOMETRY_DEFORMATION_BIT_SGIX',
'GL_GEOMETRY_DEFORMATION_SGIX', 'GL_TEXTURE_DEFORMATION_SGIX',
'GL_DEFORMATIONS_MASK_SGIX', 'GL_MAX_DEFORMATION_ORDER_SGIX',
'GL_REFERENCE_PLANE_SGIX', 'GL_REFERENCE_PLANE_EQUATION_SGIX',
'GL_DEPTH_COMPONENT16_SGIX', 'GL_DEPTH_COMPONENT24_SGIX',
'GL_DEPTH_COMPONENT32_SGIX', 'GL_FOG_FUNC_SGIS', 'GL_FOG_FUNC_POINTS_SGIS',
'GL_MAX_FOG_FUNC_POINTS_SGIS', 'GL_FOG_OFFSET_SGIX',
'GL_FOG_OFFSET_VALUE_SGIX', 'GL_IMAGE_SCALE_X_HP', 'GL_IMAGE_SCALE_Y_HP',
'GL_IMAGE_TRANSLATE_X_HP', 'GL_IMAGE_TRANSLATE_Y_HP',
'GL_IMAGE_ROTATE_ANGLE_HP', 'GL_IMAGE_ROTATE_ORIGIN_X_HP',
'GL_IMAGE_ROTATE_ORIGIN_Y_HP', 'GL_IMAGE_MAG_FILTER_HP',
'GL_IMAGE_MIN_FILTER_HP', 'GL_IMAGE_CUBIC_WEIGHT_HP', 'GL_CUBIC_HP',
'GL_AVERAGE_HP', 'GL_IMAGE_TRANSFORM_2D_HP',
'GL_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP',
'GL_PROXY_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP', 'GL_IGNORE_BORDER_HP',
'GL_CONSTANT_BORDER_HP', 'GL_REPLICATE_BORDER_HP',
'GL_CONVOLUTION_BORDER_COLOR_HP', 'GL_TEXTURE_ENV_BIAS_SGIX',
'GL_VERTEX_DATA_HINT_PGI', 'GL_VERTEX_CONSISTENT_HINT_PGI',
'GL_MATERIAL_SIDE_HINT_PGI', 'GL_MAX_VERTEX_HINT_PGI', 'GL_COLOR3_BIT_PGI',
'GL_COLOR4_BIT_PGI', 'GL_EDGEFLAG_BIT_PGI', 'GL_INDEX_BIT_PGI',
'GL_MAT_AMBIENT_BIT_PGI', 'GL_MAT_AMBIENT_AND_DIFFUSE_BIT_PGI',
'GL_MAT_DIFFUSE_BIT_PGI', 'GL_MAT_EMISSION_BIT_PGI',
'GL_MAT_COLOR_INDEXES_BIT_PGI', 'GL_MAT_SHININESS_BIT_PGI',
'GL_MAT_SPECULAR_BIT_PGI', 'GL_NORMAL_BIT_PGI', 'GL_TEXCOORD1_BIT_PGI',
'GL_TEXCOORD2_BIT_PGI', 'GL_TEXCOORD3_BIT_PGI', 'GL_TEXCOORD4_BIT_PGI',
'GL_VERTEX23_BIT_PGI', 'GL_VERTEX4_BIT_PGI',
'GL_PREFER_DOUBLEBUFFER_HINT_PGI', 'GL_CONSERVE_MEMORY_HINT_PGI',
'GL_RECLAIM_MEMORY_HINT_PGI', 'GL_NATIVE_GRAPHICS_HANDLE_PGI',
'GL_NATIVE_GRAPHICS_BEGIN_HINT_PGI', 'GL_NATIVE_GRAPHICS_END_HINT_PGI',
'GL_ALWAYS_FAST_HINT_PGI', 'GL_ALWAYS_SOFT_HINT_PGI',
'GL_ALLOW_DRAW_OBJ_HINT_PGI', 'GL_ALLOW_DRAW_WIN_HINT_PGI',
'GL_ALLOW_DRAW_FRG_HINT_PGI', 'GL_ALLOW_DRAW_MEM_HINT_PGI',
'GL_STRICT_DEPTHFUNC_HINT_PGI', 'GL_STRICT_LIGHTING_HINT_PGI',
'GL_STRICT_SCISSOR_HINT_PGI', 'GL_FULL_STIPPLE_HINT_PGI',
'GL_CLIP_NEAR_HINT_PGI', 'GL_CLIP_FAR_HINT_PGI', 'GL_WIDE_LINE_HINT_PGI',
'GL_BACK_NORMALS_HINT_PGI', 'GL_COLOR_INDEX1_EXT', 'GL_COLOR_INDEX2_EXT',
'GL_COLOR_INDEX4_EXT', 'GL_COLOR_INDEX8_EXT', 'GL_COLOR_INDEX12_EXT',
'GL_COLOR_INDEX16_EXT', 'GL_TEXTURE_INDEX_SIZE_EXT',
'GL_CLIP_VOLUME_CLIPPING_HINT_EXT', 'GL_LIST_PRIORITY_SGIX',
'GL_IR_INSTRUMENT1_SGIX', 'GL_CALLIGRAPHIC_FRAGMENT_SGIX',
'GL_TEXTURE_LOD_BIAS_S_SGIX', 'GL_TEXTURE_LOD_BIAS_T_SGIX',
'GL_TEXTURE_LOD_BIAS_R_SGIX', 'GL_SHADOW_AMBIENT_SGIX',
'GL_INDEX_MATERIAL_EXT', 'GL_INDEX_MATERIAL_PARAMETER_EXT',
'GL_INDEX_MATERIAL_FACE_EXT', 'GL_INDEX_TEST_EXT', 'GL_INDEX_TEST_FUNC_EXT',
'GL_INDEX_TEST_REF_EXT', 'GL_IUI_V2F_EXT', 'GL_IUI_V3F_EXT',
'GL_IUI_N3F_V2F_EXT', 'GL_IUI_N3F_V3F_EXT', 'GL_T2F_IUI_V2F_EXT',
'GL_T2F_IUI_V3F_EXT', 'GL_T2F_IUI_N3F_V2F_EXT', 'GL_T2F_IUI_N3F_V3F_EXT',
'GL_ARRAY_ELEMENT_LOCK_FIRST_EXT', 'GL_ARRAY_ELEMENT_LOCK_COUNT_EXT',
'GL_CULL_VERTEX_EXT', 'GL_CULL_VERTEX_EYE_POSITION_EXT',
'GL_CULL_VERTEX_OBJECT_POSITION_EXT', 'GL_YCRCB_422_SGIX',
'GL_YCRCB_444_SGIX', 'GL_FRAGMENT_LIGHTING_SGIX',
'GL_FRAGMENT_COLOR_MATERIAL_SGIX', 'GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX',
'GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX', 'GL_MAX_FRAGMENT_LIGHTS_SGIX',
'GL_MAX_ACTIVE_LIGHTS_SGIX', 'GL_CURRENT_RASTER_NORMAL_SGIX',
'GL_LIGHT_ENV_MODE_SGIX', 'GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX',
'GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX',
'GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX',
'GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX',
'GL_FRAGMENT_LIGHT0_SGIX', 'GL_FRAGMENT_LIGHT1_SGIX',
'GL_FRAGMENT_LIGHT2_SGIX', 'GL_FRAGMENT_LIGHT3_SGIX',
'GL_FRAGMENT_LIGHT4_SGIX', 'GL_FRAGMENT_LIGHT5_SGIX',
'GL_FRAGMENT_LIGHT6_SGIX', 'GL_FRAGMENT_LIGHT7_SGIX',
'GL_RASTER_POSITION_UNCLIPPED_IBM', 'GL_TEXTURE_LIGHTING_MODE_HP',
'GL_TEXTURE_POST_SPECULAR_HP', 'GL_TEXTURE_PRE_SPECULAR_HP',
'GL_MAX_ELEMENTS_VERTICES_EXT', 'GL_MAX_ELEMENTS_INDICES_EXT', 'GL_PHONG_WIN',
'GL_PHONG_HINT_WIN', 'GL_FOG_SPECULAR_TEXTURE_WIN',
'GL_FRAGMENT_MATERIAL_EXT', 'GL_FRAGMENT_NORMAL_EXT', 'GL_FRAGMENT_COLOR_EXT',
'GL_ATTENUATION_EXT', 'GL_SHADOW_ATTENUATION_EXT',
'GL_TEXTURE_APPLICATION_MODE_EXT', 'GL_TEXTURE_LIGHT_EXT',
'GL_TEXTURE_MATERIAL_FACE_EXT', 'GL_TEXTURE_MATERIAL_PARAMETER_EXT',
'GL_ALPHA_MIN_SGIX', 'GL_ALPHA_MAX_SGIX', 'GL_PIXEL_TEX_GEN_Q_CEILING_SGIX',
'GL_PIXEL_TEX_GEN_Q_ROUND_SGIX', 'GL_PIXEL_TEX_GEN_Q_FLOOR_SGIX',
'GL_PIXEL_TEX_GEN_ALPHA_REPLACE_SGIX',
'GL_PIXEL_TEX_GEN_ALPHA_NO_REPLACE_SGIX', 'GL_PIXEL_TEX_GEN_ALPHA_LS_SGIX',
'GL_PIXEL_TEX_GEN_ALPHA_MS_SGIX', 'GL_BGR_EXT', 'GL_BGRA_EXT',
'GL_ASYNC_MARKER_SGIX', 'GL_ASYNC_TEX_IMAGE_SGIX',
'GL_ASYNC_DRAW_PIXELS_SGIX', 'GL_ASYNC_READ_PIXELS_SGIX',
'GL_MAX_ASYNC_TEX_IMAGE_SGIX', 'GL_MAX_ASYNC_DRAW_PIXELS_SGIX',
'GL_MAX_ASYNC_READ_PIXELS_SGIX', 'GL_ASYNC_HISTOGRAM_SGIX',
'GL_MAX_ASYNC_HISTOGRAM_SGIX', 'GL_PARALLEL_ARRAYS_INTEL',
'GL_VERTEX_ARRAY_PARALLEL_POINTERS_INTEL',
'GL_NORMAL_ARRAY_PARALLEL_POINTERS_INTEL',
'GL_COLOR_ARRAY_PARALLEL_POINTERS_INTEL',
'GL_TEXTURE_COORD_ARRAY_PARALLEL_POINTERS_INTEL', 'GL_OCCLUSION_TEST_HP',
'GL_OCCLUSION_TEST_RESULT_HP', 'GL_PIXEL_TRANSFORM_2D_EXT',
'GL_PIXEL_MAG_FILTER_EXT', 'GL_PIXEL_MIN_FILTER_EXT',
'GL_PIXEL_CUBIC_WEIGHT_EXT', 'GL_CUBIC_EXT', 'GL_AVERAGE_EXT',
'GL_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT',
'GL_MAX_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT',
'GL_PIXEL_TRANSFORM_2D_MATRIX_EXT', 'GL_SHARED_TEXTURE_PALETTE_EXT',
'GL_LIGHT_MODEL_COLOR_CONTROL_EXT', 'GL_SINGLE_COLOR_EXT',
'GL_SEPARATE_SPECULAR_COLOR_EXT', 'GL_COLOR_SUM_EXT',
'GL_CURRENT_SECONDARY_COLOR_EXT', 'GL_SECONDARY_COLOR_ARRAY_SIZE_EXT',
'GL_SECONDARY_COLOR_ARRAY_TYPE_EXT', 'GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT',
'GL_SECONDARY_COLOR_ARRAY_POINTER_EXT', 'GL_SECONDARY_COLOR_ARRAY_EXT',
'GL_PERTURB_EXT', 'GL_TEXTURE_NORMAL_EXT', 'GL_FOG_COORDINATE_SOURCE_EXT',
'GL_FOG_COORDINATE_EXT', 'GL_FRAGMENT_DEPTH_EXT',
'GL_CURRENT_FOG_COORDINATE_EXT', 'GL_FOG_COORDINATE_ARRAY_TYPE_EXT',
'GL_FOG_COORDINATE_ARRAY_STRIDE_EXT', 'GL_FOG_COORDINATE_ARRAY_POINTER_EXT',
'GL_FOG_COORDINATE_ARRAY_EXT', 'GL_SCREEN_COORDINATES_REND',
'GL_INVERTED_SCREEN_W_REND', 'GL_TANGENT_ARRAY_EXT', 'GL_BINORMAL_ARRAY_EXT',
'GL_CURRENT_TANGENT_EXT', 'GL_CURRENT_BINORMAL_EXT',
'GL_TANGENT_ARRAY_TYPE_EXT', 'GL_TANGENT_ARRAY_STRIDE_EXT',
'GL_BINORMAL_ARRAY_TYPE_EXT', 'GL_BINORMAL_ARRAY_STRIDE_EXT',
'GL_TANGENT_ARRAY_POINTER_EXT', 'GL_BINORMAL_ARRAY_POINTER_EXT',
'GL_MAP1_TANGENT_EXT', 'GL_MAP2_TANGENT_EXT', 'GL_MAP1_BINORMAL_EXT',
'GL_MAP2_BINORMAL_EXT', 'GL_COMBINE_EXT', 'GL_COMBINE_RGB_EXT',
'GL_COMBINE_ALPHA_EXT', 'GL_RGB_SCALE_EXT', 'GL_ADD_SIGNED_EXT',
'GL_INTERPOLATE_EXT', 'GL_CONSTANT_EXT', 'GL_PRIMARY_COLOR_EXT',
'GL_PREVIOUS_EXT', 'GL_SOURCE0_RGB_EXT', 'GL_SOURCE1_RGB_EXT',
'GL_SOURCE2_RGB_EXT', 'GL_SOURCE0_ALPHA_EXT', 'GL_SOURCE1_ALPHA_EXT',
'GL_SOURCE2_ALPHA_EXT', 'GL_OPERAND0_RGB_EXT', 'GL_OPERAND1_RGB_EXT',
'GL_OPERAND2_RGB_EXT', 'GL_OPERAND0_ALPHA_EXT', 'GL_OPERAND1_ALPHA_EXT',
'GL_OPERAND2_ALPHA_EXT', 'GL_LIGHT_MODEL_SPECULAR_VECTOR_APPLE',
'GL_TRANSFORM_HINT_APPLE', 'GL_FOG_SCALE_SGIX', 'GL_FOG_SCALE_VALUE_SGIX',
'GL_UNPACK_CONSTANT_DATA_SUNX', 'GL_TEXTURE_CONSTANT_DATA_SUNX',
'GL_GLOBAL_ALPHA_SUN', 'GL_GLOBAL_ALPHA_FACTOR_SUN', 'GL_RESTART_SUN',
'GL_REPLACE_MIDDLE_SUN', 'GL_REPLACE_OLDEST_SUN', 'GL_TRIANGLE_LIST_SUN',
'GL_REPLACEMENT_CODE_SUN', 'GL_REPLACEMENT_CODE_ARRAY_SUN',
'GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN', 'GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN',
'GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN', 'GL_R1UI_V3F_SUN',
'GL_R1UI_C4UB_V3F_SUN', 'GL_R1UI_C3F_V3F_SUN', 'GL_R1UI_N3F_V3F_SUN',
'GL_R1UI_C4F_N3F_V3F_SUN', 'GL_R1UI_T2F_V3F_SUN', 'GL_R1UI_T2F_N3F_V3F_SUN',
'GL_R1UI_T2F_C4F_N3F_V3F_SUN', 'GL_BLEND_DST_RGB_EXT', 'GL_BLEND_SRC_RGB_EXT',
'GL_BLEND_DST_ALPHA_EXT', 'GL_BLEND_SRC_ALPHA_EXT', 'GL_RED_MIN_CLAMP_INGR',
'GL_GREEN_MIN_CLAMP_INGR', 'GL_BLUE_MIN_CLAMP_INGR',
'GL_ALPHA_MIN_CLAMP_INGR', 'GL_RED_MAX_CLAMP_INGR', 'GL_GREEN_MAX_CLAMP_INGR',
'GL_BLUE_MAX_CLAMP_INGR', 'GL_ALPHA_MAX_CLAMP_INGR', 'GL_INTERLACE_READ_INGR',
'GL_INCR_WRAP_EXT', 'GL_DECR_WRAP_EXT', 'GL_422_EXT', 'GL_422_REV_EXT',
'GL_422_AVERAGE_EXT', 'GL_422_REV_AVERAGE_EXT', 'GL_NORMAL_MAP_NV',
'GL_REFLECTION_MAP_NV', 'GL_NORMAL_MAP_EXT', 'GL_REFLECTION_MAP_EXT',
'GL_TEXTURE_CUBE_MAP_EXT', 'GL_TEXTURE_BINDING_CUBE_MAP_EXT',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X_EXT', 'GL_TEXTURE_CUBE_MAP_NEGATIVE_X_EXT',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y_EXT', 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_EXT',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z_EXT', 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_EXT',
'GL_PROXY_TEXTURE_CUBE_MAP_EXT', 'GL_MAX_CUBE_MAP_TEXTURE_SIZE_EXT',
'GL_WRAP_BORDER_SUN', 'GL_MAX_TEXTURE_LOD_BIAS_EXT',
'GL_TEXTURE_FILTER_CONTROL_EXT', 'GL_TEXTURE_LOD_BIAS_EXT',
'GL_TEXTURE_MAX_ANISOTROPY_EXT', 'GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT',
'GL_MODELVIEW0_STACK_DEPTH_EXT', 'GL_MODELVIEW1_STACK_DEPTH_EXT',
'GL_MODELVIEW0_MATRIX_EXT', 'GL_MODELVIEW1_MATRIX_EXT',
'GL_VERTEX_WEIGHTING_EXT', 'GL_MODELVIEW0_EXT', 'GL_MODELVIEW1_EXT',
'GL_CURRENT_VERTEX_WEIGHT_EXT', 'GL_VERTEX_WEIGHT_ARRAY_EXT',
'GL_VERTEX_WEIGHT_ARRAY_SIZE_EXT', 'GL_VERTEX_WEIGHT_ARRAY_TYPE_EXT',
'GL_VERTEX_WEIGHT_ARRAY_STRIDE_EXT', 'GL_VERTEX_WEIGHT_ARRAY_POINTER_EXT',
'GL_MAX_SHININESS_NV', 'GL_MAX_SPOT_EXPONENT_NV', 'GL_VERTEX_ARRAY_RANGE_NV',
'GL_VERTEX_ARRAY_RANGE_LENGTH_NV', 'GL_VERTEX_ARRAY_RANGE_VALID_NV',
'GL_MAX_VERTEX_ARRAY_RANGE_ELEMENT_NV', 'GL_VERTEX_ARRAY_RANGE_POINTER_NV',
'GL_REGISTER_COMBINERS_NV', 'GL_VARIABLE_A_NV', 'GL_VARIABLE_B_NV',
'GL_VARIABLE_C_NV', 'GL_VARIABLE_D_NV', 'GL_VARIABLE_E_NV',
'GL_VARIABLE_F_NV', 'GL_VARIABLE_G_NV', 'GL_CONSTANT_COLOR0_NV',
'GL_CONSTANT_COLOR1_NV', 'GL_PRIMARY_COLOR_NV', 'GL_SECONDARY_COLOR_NV',
'GL_SPARE0_NV', 'GL_SPARE1_NV', 'GL_DISCARD_NV', 'GL_E_TIMES_F_NV',
'GL_SPARE0_PLUS_SECONDARY_COLOR_NV', 'GL_UNSIGNED_IDENTITY_NV',
'GL_UNSIGNED_INVERT_NV', 'GL_EXPAND_NORMAL_NV', 'GL_EXPAND_NEGATE_NV',
'GL_HALF_BIAS_NORMAL_NV', 'GL_HALF_BIAS_NEGATE_NV', 'GL_SIGNED_IDENTITY_NV',
'GL_SIGNED_NEGATE_NV', 'GL_SCALE_BY_TWO_NV', 'GL_SCALE_BY_FOUR_NV',
'GL_SCALE_BY_ONE_HALF_NV', 'GL_BIAS_BY_NEGATIVE_ONE_HALF_NV',
'GL_COMBINER_INPUT_NV', 'GL_COMBINER_MAPPING_NV',
'GL_COMBINER_COMPONENT_USAGE_NV', 'GL_COMBINER_AB_DOT_PRODUCT_NV',
'GL_COMBINER_CD_DOT_PRODUCT_NV', 'GL_COMBINER_MUX_SUM_NV',
'GL_COMBINER_SCALE_NV', 'GL_COMBINER_BIAS_NV', 'GL_COMBINER_AB_OUTPUT_NV',
'GL_COMBINER_CD_OUTPUT_NV', 'GL_COMBINER_SUM_OUTPUT_NV',
'GL_MAX_GENERAL_COMBINERS_NV', 'GL_NUM_GENERAL_COMBINERS_NV',
'GL_COLOR_SUM_CLAMP_NV', 'GL_COMBINER0_NV', 'GL_COMBINER1_NV',
'GL_COMBINER2_NV', 'GL_COMBINER3_NV', 'GL_COMBINER4_NV', 'GL_COMBINER5_NV',
'GL_COMBINER6_NV', 'GL_COMBINER7_NV', 'GL_FOG_DISTANCE_MODE_NV',
'GL_EYE_RADIAL_NV', 'GL_EYE_PLANE_ABSOLUTE_NV', 'GL_EMBOSS_LIGHT_NV',
'GL_EMBOSS_CONSTANT_NV', 'GL_EMBOSS_MAP_NV', 'GL_COMBINE4_NV',
'GL_SOURCE3_RGB_NV', 'GL_SOURCE3_ALPHA_NV', 'GL_OPERAND3_RGB_NV',
'GL_OPERAND3_ALPHA_NV', 'GL_COMPRESSED_RGB_S3TC_DXT1_EXT',
'GL_COMPRESSED_RGBA_S3TC_DXT1_EXT', 'GL_COMPRESSED_RGBA_S3TC_DXT3_EXT',
'GL_COMPRESSED_RGBA_S3TC_DXT5_EXT', 'GL_CULL_VERTEX_IBM',
'GL_VERTEX_ARRAY_LIST_IBM', 'GL_NORMAL_ARRAY_LIST_IBM',
'GL_COLOR_ARRAY_LIST_IBM', 'GL_INDEX_ARRAY_LIST_IBM',
'GL_TEXTURE_COORD_ARRAY_LIST_IBM', 'GL_EDGE_FLAG_ARRAY_LIST_IBM',
'GL_FOG_COORDINATE_ARRAY_LIST_IBM', 'GL_SECONDARY_COLOR_ARRAY_LIST_IBM',
'GL_VERTEX_ARRAY_LIST_STRIDE_IBM', 'GL_NORMAL_ARRAY_LIST_STRIDE_IBM',
'GL_COLOR_ARRAY_LIST_STRIDE_IBM', 'GL_INDEX_ARRAY_LIST_STRIDE_IBM',
'GL_TEXTURE_COORD_ARRAY_LIST_STRIDE_IBM',
'GL_EDGE_FLAG_ARRAY_LIST_STRIDE_IBM',
'GL_FOG_COORDINATE_ARRAY_LIST_STRIDE_IBM',
'GL_SECONDARY_COLOR_ARRAY_LIST_STRIDE_IBM', 'GL_PACK_SUBSAMPLE_RATE_SGIX',
'GL_UNPACK_SUBSAMPLE_RATE_SGIX', 'GL_PIXEL_SUBSAMPLE_4444_SGIX',
'GL_PIXEL_SUBSAMPLE_2424_SGIX', 'GL_PIXEL_SUBSAMPLE_4242_SGIX',
'GL_YCRCB_SGIX', 'GL_YCRCBA_SGIX', 'GL_DEPTH_PASS_INSTRUMENT_SGIX',
'GL_DEPTH_PASS_INSTRUMENT_COUNTERS_SGIX', 'GL_DEPTH_PASS_INSTRUMENT_MAX_SGIX',
'GL_COMPRESSED_RGB_FXT1_3DFX', 'GL_COMPRESSED_RGBA_FXT1_3DFX',
'GL_MULTISAMPLE_3DFX', 'GL_SAMPLE_BUFFERS_3DFX', 'GL_SAMPLES_3DFX',
'GL_MULTISAMPLE_BIT_3DFX', 'GL_MULTISAMPLE_EXT',
'GL_SAMPLE_ALPHA_TO_MASK_EXT', 'GL_SAMPLE_ALPHA_TO_ONE_EXT',
'GL_SAMPLE_MASK_EXT', 'GL_1PASS_EXT', 'GL_2PASS_0_EXT', 'GL_2PASS_1_EXT',
'GL_4PASS_0_EXT', 'GL_4PASS_1_EXT', 'GL_4PASS_2_EXT', 'GL_4PASS_3_EXT',
'GL_SAMPLE_BUFFERS_EXT', 'GL_SAMPLES_EXT', 'GL_SAMPLE_MASK_VALUE_EXT',
'GL_SAMPLE_MASK_INVERT_EXT', 'GL_SAMPLE_PATTERN_EXT',
'GL_MULTISAMPLE_BIT_EXT', 'GL_VERTEX_PRECLIP_SGIX',
'GL_VERTEX_PRECLIP_HINT_SGIX', 'GL_CONVOLUTION_HINT_SGIX',
'GL_PACK_RESAMPLE_SGIX', 'GL_UNPACK_RESAMPLE_SGIX',
'GL_RESAMPLE_REPLICATE_SGIX', 'GL_RESAMPLE_ZERO_FILL_SGIX',
'GL_RESAMPLE_DECIMATE_SGIX', 'GL_EYE_DISTANCE_TO_POINT_SGIS',
'GL_OBJECT_DISTANCE_TO_POINT_SGIS', 'GL_EYE_DISTANCE_TO_LINE_SGIS',
'GL_OBJECT_DISTANCE_TO_LINE_SGIS', 'GL_EYE_POINT_SGIS',
'GL_OBJECT_POINT_SGIS', 'GL_EYE_LINE_SGIS', 'GL_OBJECT_LINE_SGIS',
'GL_TEXTURE_COLOR_WRITEMASK_SGIS', 'GL_DOT3_RGB_EXT', 'GL_DOT3_RGBA_EXT',
'GL_MIRROR_CLAMP_ATI', 'GL_MIRROR_CLAMP_TO_EDGE_ATI', 'GL_ALL_COMPLETED_NV',
'GL_FENCE_STATUS_NV', 'GL_FENCE_CONDITION_NV', 'GL_MIRRORED_REPEAT_IBM',
'GL_EVAL_2D_NV', 'GL_EVAL_TRIANGULAR_2D_NV', 'GL_MAP_TESSELLATION_NV',
'GL_MAP_ATTRIB_U_ORDER_NV', 'GL_MAP_ATTRIB_V_ORDER_NV',
'GL_EVAL_FRACTIONAL_TESSELLATION_NV', 'GL_EVAL_VERTEX_ATTRIB0_NV',
'GL_EVAL_VERTEX_ATTRIB1_NV', 'GL_EVAL_VERTEX_ATTRIB2_NV',
'GL_EVAL_VERTEX_ATTRIB3_NV', 'GL_EVAL_VERTEX_ATTRIB4_NV',
'GL_EVAL_VERTEX_ATTRIB5_NV', 'GL_EVAL_VERTEX_ATTRIB6_NV',
'GL_EVAL_VERTEX_ATTRIB7_NV', 'GL_EVAL_VERTEX_ATTRIB8_NV',
'GL_EVAL_VERTEX_ATTRIB9_NV', 'GL_EVAL_VERTEX_ATTRIB10_NV',
'GL_EVAL_VERTEX_ATTRIB11_NV', 'GL_EVAL_VERTEX_ATTRIB12_NV',
'GL_EVAL_VERTEX_ATTRIB13_NV', 'GL_EVAL_VERTEX_ATTRIB14_NV',
'GL_EVAL_VERTEX_ATTRIB15_NV', 'GL_MAX_MAP_TESSELLATION_NV',
'GL_MAX_RATIONAL_EVAL_ORDER_NV', 'GL_DEPTH_STENCIL_NV',
'GL_UNSIGNED_INT_24_8_NV', 'GL_PER_STAGE_CONSTANTS_NV',
'GL_TEXTURE_RECTANGLE_NV', 'GL_TEXTURE_BINDING_RECTANGLE_NV',
'GL_PROXY_TEXTURE_RECTANGLE_NV', 'GL_MAX_RECTANGLE_TEXTURE_SIZE_NV',
'GL_OFFSET_TEXTURE_RECTANGLE_NV', 'GL_OFFSET_TEXTURE_RECTANGLE_SCALE_NV',
'GL_DOT_PRODUCT_TEXTURE_RECTANGLE_NV',
'GL_RGBA_UNSIGNED_DOT_PRODUCT_MAPPING_NV', 'GL_UNSIGNED_INT_S8_S8_8_8_NV',
'GL_UNSIGNED_INT_8_8_S8_S8_REV_NV', 'GL_DSDT_MAG_INTENSITY_NV',
'GL_SHADER_CONSISTENT_NV', 'GL_TEXTURE_SHADER_NV', 'GL_SHADER_OPERATION_NV',
'GL_CULL_MODES_NV', 'GL_OFFSET_TEXTURE_MATRIX_NV',
'GL_OFFSET_TEXTURE_SCALE_NV', 'GL_OFFSET_TEXTURE_BIAS_NV',
'GL_OFFSET_TEXTURE_2D_MATRIX_NV', 'GL_OFFSET_TEXTURE_2D_SCALE_NV',
'GL_OFFSET_TEXTURE_2D_BIAS_NV', 'GL_PREVIOUS_TEXTURE_INPUT_NV',
'GL_CONST_EYE_NV', 'GL_PASS_THROUGH_NV', 'GL_CULL_FRAGMENT_NV',
'GL_OFFSET_TEXTURE_2D_NV', 'GL_DEPENDENT_AR_TEXTURE_2D_NV',
'GL_DEPENDENT_GB_TEXTURE_2D_NV', 'GL_DOT_PRODUCT_NV',
'GL_DOT_PRODUCT_DEPTH_REPLACE_NV', 'GL_DOT_PRODUCT_TEXTURE_2D_NV',
'GL_DOT_PRODUCT_TEXTURE_CUBE_MAP_NV', 'GL_DOT_PRODUCT_DIFFUSE_CUBE_MAP_NV',
'GL_DOT_PRODUCT_REFLECT_CUBE_MAP_NV',
'GL_DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV', 'GL_HILO_NV', 'GL_DSDT_NV',
'GL_DSDT_MAG_NV', 'GL_DSDT_MAG_VIB_NV', 'GL_HILO16_NV', 'GL_SIGNED_HILO_NV',
'GL_SIGNED_HILO16_NV', 'GL_SIGNED_RGBA_NV', 'GL_SIGNED_RGBA8_NV',
'GL_SIGNED_RGB_NV', 'GL_SIGNED_RGB8_NV', 'GL_SIGNED_LUMINANCE_NV',
'GL_SIGNED_LUMINANCE8_NV', 'GL_SIGNED_LUMINANCE_ALPHA_NV',
'GL_SIGNED_LUMINANCE8_ALPHA8_NV', 'GL_SIGNED_ALPHA_NV', 'GL_SIGNED_ALPHA8_NV',
'GL_SIGNED_INTENSITY_NV', 'GL_SIGNED_INTENSITY8_NV', 'GL_DSDT8_NV',
'GL_DSDT8_MAG8_NV', 'GL_DSDT8_MAG8_INTENSITY8_NV',
'GL_SIGNED_RGB_UNSIGNED_ALPHA_NV', 'GL_SIGNED_RGB8_UNSIGNED_ALPHA8_NV',
'GL_HI_SCALE_NV', 'GL_LO_SCALE_NV', 'GL_DS_SCALE_NV', 'GL_DT_SCALE_NV',
'GL_MAGNITUDE_SCALE_NV', 'GL_VIBRANCE_SCALE_NV', 'GL_HI_BIAS_NV',
'GL_LO_BIAS_NV', 'GL_DS_BIAS_NV', 'GL_DT_BIAS_NV', 'GL_MAGNITUDE_BIAS_NV',
'GL_VIBRANCE_BIAS_NV', 'GL_TEXTURE_BORDER_VALUES_NV', 'GL_TEXTURE_HI_SIZE_NV',
'GL_TEXTURE_LO_SIZE_NV', 'GL_TEXTURE_DS_SIZE_NV', 'GL_TEXTURE_DT_SIZE_NV',
'GL_TEXTURE_MAG_SIZE_NV', 'GL_DOT_PRODUCT_TEXTURE_3D_NV',
'GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV', 'GL_VERTEX_PROGRAM_NV',
'GL_VERTEX_STATE_PROGRAM_NV', 'GL_ATTRIB_ARRAY_SIZE_NV',
'GL_ATTRIB_ARRAY_STRIDE_NV', 'GL_ATTRIB_ARRAY_TYPE_NV',
'GL_CURRENT_ATTRIB_NV', 'GL_PROGRAM_LENGTH_NV', 'GL_PROGRAM_STRING_NV',
'GL_MODELVIEW_PROJECTION_NV', 'GL_IDENTITY_NV', 'GL_INVERSE_NV',
'GL_TRANSPOSE_NV', 'GL_INVERSE_TRANSPOSE_NV',
'GL_MAX_TRACK_MATRIX_STACK_DEPTH_NV', 'GL_MAX_TRACK_MATRICES_NV',
'GL_MATRIX0_NV', 'GL_MATRIX1_NV', 'GL_MATRIX2_NV', 'GL_MATRIX3_NV',
'GL_MATRIX4_NV', 'GL_MATRIX5_NV', 'GL_MATRIX6_NV', 'GL_MATRIX7_NV',
'GL_CURRENT_MATRIX_STACK_DEPTH_NV', 'GL_CURRENT_MATRIX_NV',
'GL_VERTEX_PROGRAM_POINT_SIZE_NV', 'GL_VERTEX_PROGRAM_TWO_SIDE_NV',
'GL_PROGRAM_PARAMETER_NV', 'GL_ATTRIB_ARRAY_POINTER_NV',
'GL_PROGRAM_TARGET_NV', 'GL_PROGRAM_RESIDENT_NV', 'GL_TRACK_MATRIX_NV',
'GL_TRACK_MATRIX_TRANSFORM_NV', 'GL_VERTEX_PROGRAM_BINDING_NV',
'GL_PROGRAM_ERROR_POSITION_NV', 'GL_VERTEX_ATTRIB_ARRAY0_NV',
'GL_VERTEX_ATTRIB_ARRAY1_NV', 'GL_VERTEX_ATTRIB_ARRAY2_NV',
'GL_VERTEX_ATTRIB_ARRAY3_NV', 'GL_VERTEX_ATTRIB_ARRAY4_NV',
'GL_VERTEX_ATTRIB_ARRAY5_NV', 'GL_VERTEX_ATTRIB_ARRAY6_NV',
'GL_VERTEX_ATTRIB_ARRAY7_NV', 'GL_VERTEX_ATTRIB_ARRAY8_NV',
'GL_VERTEX_ATTRIB_ARRAY9_NV', 'GL_VERTEX_ATTRIB_ARRAY10_NV',
'GL_VERTEX_ATTRIB_ARRAY11_NV', 'GL_VERTEX_ATTRIB_ARRAY12_NV',
'GL_VERTEX_ATTRIB_ARRAY13_NV', 'GL_VERTEX_ATTRIB_ARRAY14_NV',
'GL_VERTEX_ATTRIB_ARRAY15_NV', 'GL_MAP1_VERTEX_ATTRIB0_4_NV',
'GL_MAP1_VERTEX_ATTRIB1_4_NV', 'GL_MAP1_VERTEX_ATTRIB2_4_NV',
'GL_MAP1_VERTEX_ATTRIB3_4_NV', 'GL_MAP1_VERTEX_ATTRIB4_4_NV',
'GL_MAP1_VERTEX_ATTRIB5_4_NV', 'GL_MAP1_VERTEX_ATTRIB6_4_NV',
'GL_MAP1_VERTEX_ATTRIB7_4_NV', 'GL_MAP1_VERTEX_ATTRIB8_4_NV',
'GL_MAP1_VERTEX_ATTRIB9_4_NV', 'GL_MAP1_VERTEX_ATTRIB10_4_NV',
'GL_MAP1_VERTEX_ATTRIB11_4_NV', 'GL_MAP1_VERTEX_ATTRIB12_4_NV',
'GL_MAP1_VERTEX_ATTRIB13_4_NV', 'GL_MAP1_VERTEX_ATTRIB14_4_NV',
'GL_MAP1_VERTEX_ATTRIB15_4_NV', 'GL_MAP2_VERTEX_ATTRIB0_4_NV',
'GL_MAP2_VERTEX_ATTRIB1_4_NV', 'GL_MAP2_VERTEX_ATTRIB2_4_NV',
'GL_MAP2_VERTEX_ATTRIB3_4_NV', 'GL_MAP2_VERTEX_ATTRIB4_4_NV',
'GL_MAP2_VERTEX_ATTRIB5_4_NV', 'GL_MAP2_VERTEX_ATTRIB6_4_NV',
'GL_MAP2_VERTEX_ATTRIB7_4_NV', 'GL_MAP2_VERTEX_ATTRIB8_4_NV',
'GL_MAP2_VERTEX_ATTRIB9_4_NV', 'GL_MAP2_VERTEX_ATTRIB10_4_NV',
'GL_MAP2_VERTEX_ATTRIB11_4_NV', 'GL_MAP2_VERTEX_ATTRIB12_4_NV',
'GL_MAP2_VERTEX_ATTRIB13_4_NV', 'GL_MAP2_VERTEX_ATTRIB14_4_NV',
'GL_MAP2_VERTEX_ATTRIB15_4_NV', 'GL_TEXTURE_MAX_CLAMP_S_SGIX',
'GL_TEXTURE_MAX_CLAMP_T_SGIX', 'GL_TEXTURE_MAX_CLAMP_R_SGIX',
'GL_SCALEBIAS_HINT_SGIX', 'GL_INTERLACE_OML', 'GL_INTERLACE_READ_OML',
'GL_FORMAT_SUBSAMPLE_24_24_OML', 'GL_FORMAT_SUBSAMPLE_244_244_OML',
'GL_PACK_RESAMPLE_OML', 'GL_UNPACK_RESAMPLE_OML', 'GL_RESAMPLE_REPLICATE_OML',
'GL_RESAMPLE_ZERO_FILL_OML', 'GL_RESAMPLE_AVERAGE_OML',
'GL_RESAMPLE_DECIMATE_OML', 'GL_DEPTH_STENCIL_TO_RGBA_NV',
'GL_DEPTH_STENCIL_TO_BGRA_NV', 'GL_BUMP_ROT_MATRIX_ATI',
'GL_BUMP_ROT_MATRIX_SIZE_ATI', 'GL_BUMP_NUM_TEX_UNITS_ATI',
'GL_BUMP_TEX_UNITS_ATI', 'GL_DUDV_ATI', 'GL_DU8DV8_ATI', 'GL_BUMP_ENVMAP_ATI',
'GL_BUMP_TARGET_ATI', 'GL_FRAGMENT_SHADER_ATI', 'GL_REG_0_ATI',
'GL_REG_1_ATI', 'GL_REG_2_ATI', 'GL_REG_3_ATI', 'GL_REG_4_ATI',
'GL_REG_5_ATI', 'GL_REG_6_ATI', 'GL_REG_7_ATI', 'GL_REG_8_ATI',
'GL_REG_9_ATI', 'GL_REG_10_ATI', 'GL_REG_11_ATI', 'GL_REG_12_ATI',
'GL_REG_13_ATI', 'GL_REG_14_ATI', 'GL_REG_15_ATI', 'GL_REG_16_ATI',
'GL_REG_17_ATI', 'GL_REG_18_ATI', 'GL_REG_19_ATI', 'GL_REG_20_ATI',
'GL_REG_21_ATI', 'GL_REG_22_ATI', 'GL_REG_23_ATI', 'GL_REG_24_ATI',
'GL_REG_25_ATI', 'GL_REG_26_ATI', 'GL_REG_27_ATI', 'GL_REG_28_ATI',
'GL_REG_29_ATI', 'GL_REG_30_ATI', 'GL_REG_31_ATI', 'GL_CON_0_ATI',
'GL_CON_1_ATI', 'GL_CON_2_ATI', 'GL_CON_3_ATI', 'GL_CON_4_ATI',
'GL_CON_5_ATI', 'GL_CON_6_ATI', 'GL_CON_7_ATI', 'GL_CON_8_ATI',
'GL_CON_9_ATI', 'GL_CON_10_ATI', 'GL_CON_11_ATI', 'GL_CON_12_ATI',
'GL_CON_13_ATI', 'GL_CON_14_ATI', 'GL_CON_15_ATI', 'GL_CON_16_ATI',
'GL_CON_17_ATI', 'GL_CON_18_ATI', 'GL_CON_19_ATI', 'GL_CON_20_ATI',
'GL_CON_21_ATI', 'GL_CON_22_ATI', 'GL_CON_23_ATI', 'GL_CON_24_ATI',
'GL_CON_25_ATI', 'GL_CON_26_ATI', 'GL_CON_27_ATI', 'GL_CON_28_ATI',
'GL_CON_29_ATI', 'GL_CON_30_ATI', 'GL_CON_31_ATI', 'GL_MOV_ATI', 'GL_ADD_ATI',
'GL_MUL_ATI', 'GL_SUB_ATI', 'GL_DOT3_ATI', 'GL_DOT4_ATI', 'GL_MAD_ATI',
'GL_LERP_ATI', 'GL_CND_ATI', 'GL_CND0_ATI', 'GL_DOT2_ADD_ATI',
'GL_SECONDARY_INTERPOLATOR_ATI', 'GL_NUM_FRAGMENT_REGISTERS_ATI',
'GL_NUM_FRAGMENT_CONSTANTS_ATI', 'GL_NUM_PASSES_ATI',
'GL_NUM_INSTRUCTIONS_PER_PASS_ATI', 'GL_NUM_INSTRUCTIONS_TOTAL_ATI',
'GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI', 'GL_NUM_LOOPBACK_COMPONENTS_ATI',
'GL_COLOR_ALPHA_PAIRING_ATI', 'GL_SWIZZLE_STR_ATI', 'GL_SWIZZLE_STQ_ATI',
'GL_SWIZZLE_STR_DR_ATI', 'GL_SWIZZLE_STQ_DQ_ATI', 'GL_SWIZZLE_STRQ_ATI',
'GL_SWIZZLE_STRQ_DQ_ATI', 'GL_RED_BIT_ATI', 'GL_GREEN_BIT_ATI',
'GL_BLUE_BIT_ATI', 'GL_2X_BIT_ATI', 'GL_4X_BIT_ATI', 'GL_8X_BIT_ATI',
'GL_HALF_BIT_ATI', 'GL_QUARTER_BIT_ATI', 'GL_EIGHTH_BIT_ATI',
'GL_SATURATE_BIT_ATI', 'GL_COMP_BIT_ATI', 'GL_NEGATE_BIT_ATI',
'GL_BIAS_BIT_ATI', 'GL_PN_TRIANGLES_ATI',
'GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI', 'GL_PN_TRIANGLES_POINT_MODE_ATI',
'GL_PN_TRIANGLES_NORMAL_MODE_ATI', 'GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI',
'GL_PN_TRIANGLES_POINT_MODE_LINEAR_ATI',
'GL_PN_TRIANGLES_POINT_MODE_CUBIC_ATI',
'GL_PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI',
'GL_PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI', 'GL_STATIC_ATI',
'GL_DYNAMIC_ATI', 'GL_PRESERVE_ATI', 'GL_DISCARD_ATI',
'GL_OBJECT_BUFFER_SIZE_ATI', 'GL_OBJECT_BUFFER_USAGE_ATI',
'GL_ARRAY_OBJECT_BUFFER_ATI', 'GL_ARRAY_OBJECT_OFFSET_ATI',
'GL_VERTEX_SHADER_EXT', 'GL_VERTEX_SHADER_BINDING_EXT', 'GL_OP_INDEX_EXT',
'GL_OP_NEGATE_EXT', 'GL_OP_DOT3_EXT', 'GL_OP_DOT4_EXT', 'GL_OP_MUL_EXT',
'GL_OP_ADD_EXT', 'GL_OP_MADD_EXT', 'GL_OP_FRAC_EXT', 'GL_OP_MAX_EXT',
'GL_OP_MIN_EXT', 'GL_OP_SET_GE_EXT', 'GL_OP_SET_LT_EXT', 'GL_OP_CLAMP_EXT',
'GL_OP_FLOOR_EXT', 'GL_OP_ROUND_EXT', 'GL_OP_EXP_BASE_2_EXT',
'GL_OP_LOG_BASE_2_EXT', 'GL_OP_POWER_EXT', 'GL_OP_RECIP_EXT',
'GL_OP_RECIP_SQRT_EXT', 'GL_OP_SUB_EXT', 'GL_OP_CROSS_PRODUCT_EXT',
'GL_OP_MULTIPLY_MATRIX_EXT', 'GL_OP_MOV_EXT', 'GL_OUTPUT_VERTEX_EXT',
'GL_OUTPUT_COLOR0_EXT', 'GL_OUTPUT_COLOR1_EXT',
'GL_OUTPUT_TEXTURE_COORD0_EXT', 'GL_OUTPUT_TEXTURE_COORD1_EXT',
'GL_OUTPUT_TEXTURE_COORD2_EXT', 'GL_OUTPUT_TEXTURE_COORD3_EXT',
'GL_OUTPUT_TEXTURE_COORD4_EXT', 'GL_OUTPUT_TEXTURE_COORD5_EXT',
'GL_OUTPUT_TEXTURE_COORD6_EXT', 'GL_OUTPUT_TEXTURE_COORD7_EXT',
'GL_OUTPUT_TEXTURE_COORD8_EXT', 'GL_OUTPUT_TEXTURE_COORD9_EXT',
'GL_OUTPUT_TEXTURE_COORD10_EXT', 'GL_OUTPUT_TEXTURE_COORD11_EXT',
'GL_OUTPUT_TEXTURE_COORD12_EXT', 'GL_OUTPUT_TEXTURE_COORD13_EXT',
'GL_OUTPUT_TEXTURE_COORD14_EXT', 'GL_OUTPUT_TEXTURE_COORD15_EXT',
'GL_OUTPUT_TEXTURE_COORD16_EXT', 'GL_OUTPUT_TEXTURE_COORD17_EXT',
'GL_OUTPUT_TEXTURE_COORD18_EXT', 'GL_OUTPUT_TEXTURE_COORD19_EXT',
'GL_OUTPUT_TEXTURE_COORD20_EXT', 'GL_OUTPUT_TEXTURE_COORD21_EXT',
'GL_OUTPUT_TEXTURE_COORD22_EXT', 'GL_OUTPUT_TEXTURE_COORD23_EXT',
'GL_OUTPUT_TEXTURE_COORD24_EXT', 'GL_OUTPUT_TEXTURE_COORD25_EXT',
'GL_OUTPUT_TEXTURE_COORD26_EXT', 'GL_OUTPUT_TEXTURE_COORD27_EXT',
'GL_OUTPUT_TEXTURE_COORD28_EXT', 'GL_OUTPUT_TEXTURE_COORD29_EXT',
'GL_OUTPUT_TEXTURE_COORD30_EXT', 'GL_OUTPUT_TEXTURE_COORD31_EXT',
'GL_OUTPUT_FOG_EXT', 'GL_SCALAR_EXT', 'GL_VECTOR_EXT', 'GL_MATRIX_EXT',
'GL_VARIANT_EXT', 'GL_INVARIANT_EXT', 'GL_LOCAL_CONSTANT_EXT', 'GL_LOCAL_EXT',
'GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT', 'GL_MAX_VERTEX_SHADER_VARIANTS_EXT',
'GL_MAX_VERTEX_SHADER_INVARIANTS_EXT',
'GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT', 'GL_MAX_VERTEX_SHADER_LOCALS_EXT',
'GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT',
'GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT',
'GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',
'GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT',
'GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT',
'GL_VERTEX_SHADER_INSTRUCTIONS_EXT', 'GL_VERTEX_SHADER_VARIANTS_EXT',
'GL_VERTEX_SHADER_INVARIANTS_EXT', 'GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',
'GL_VERTEX_SHADER_LOCALS_EXT', 'GL_VERTEX_SHADER_OPTIMIZED_EXT', 'GL_X_EXT',
'GL_Y_EXT', 'GL_Z_EXT', 'GL_W_EXT', 'GL_NEGATIVE_X_EXT', 'GL_NEGATIVE_Y_EXT',
'GL_NEGATIVE_Z_EXT', 'GL_NEGATIVE_W_EXT', 'GL_ZERO_EXT', 'GL_ONE_EXT',
'GL_NEGATIVE_ONE_EXT', 'GL_NORMALIZED_RANGE_EXT', 'GL_FULL_RANGE_EXT',
'GL_CURRENT_VERTEX_EXT', 'GL_MVP_MATRIX_EXT', 'GL_VARIANT_VALUE_EXT',
'GL_VARIANT_DATATYPE_EXT', 'GL_VARIANT_ARRAY_STRIDE_EXT',
'GL_VARIANT_ARRAY_TYPE_EXT', 'GL_VARIANT_ARRAY_EXT',
'GL_VARIANT_ARRAY_POINTER_EXT', 'GL_INVARIANT_VALUE_EXT',
'GL_INVARIANT_DATATYPE_EXT', 'GL_LOCAL_CONSTANT_VALUE_EXT',
'GL_LOCAL_CONSTANT_DATATYPE_EXT', 'GL_MAX_VERTEX_STREAMS_ATI',
'GL_VERTEX_STREAM0_ATI', 'GL_VERTEX_STREAM1_ATI', 'GL_VERTEX_STREAM2_ATI',
'GL_VERTEX_STREAM3_ATI', 'GL_VERTEX_STREAM4_ATI', 'GL_VERTEX_STREAM5_ATI',
'GL_VERTEX_STREAM6_ATI', 'GL_VERTEX_STREAM7_ATI', 'GL_VERTEX_SOURCE_ATI',
'GL_ELEMENT_ARRAY_ATI', 'GL_ELEMENT_ARRAY_TYPE_ATI',
'GL_ELEMENT_ARRAY_POINTER_ATI', 'GL_QUAD_MESH_SUN', 'GL_TRIANGLE_MESH_SUN',
'GL_SLICE_ACCUM_SUN', 'GL_MULTISAMPLE_FILTER_HINT_NV', 'GL_DEPTH_CLAMP_NV',
'GL_PIXEL_COUNTER_BITS_NV', 'GL_CURRENT_OCCLUSION_QUERY_ID_NV',
'GL_PIXEL_COUNT_NV', 'GL_PIXEL_COUNT_AVAILABLE_NV', 'GL_POINT_SPRITE_NV',
'GL_COORD_REPLACE_NV', 'GL_POINT_SPRITE_R_MODE_NV',
'GL_OFFSET_PROJECTIVE_TEXTURE_2D_NV',
'GL_OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV',
'GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV',
'GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_SCALE_NV',
'GL_OFFSET_HILO_TEXTURE_2D_NV', 'GL_OFFSET_HILO_TEXTURE_RECTANGLE_NV',
'GL_OFFSET_HILO_PROJECTIVE_TEXTURE_2D_NV',
'GL_OFFSET_HILO_PROJECTIVE_TEXTURE_RECTANGLE_NV',
'GL_DEPENDENT_HILO_TEXTURE_2D_NV', 'GL_DEPENDENT_RGB_TEXTURE_3D_NV',
'GL_DEPENDENT_RGB_TEXTURE_CUBE_MAP_NV', 'GL_DOT_PRODUCT_PASS_THROUGH_NV',
'GL_DOT_PRODUCT_TEXTURE_1D_NV', 'GL_DOT_PRODUCT_AFFINE_DEPTH_REPLACE_NV',
'GL_HILO8_NV', 'GL_SIGNED_HILO8_NV', 'GL_FORCE_BLUE_TO_ONE_NV',
'GL_STENCIL_TEST_TWO_SIDE_EXT', 'GL_ACTIVE_STENCIL_FACE_EXT',
'GL_TEXT_FRAGMENT_SHADER_ATI', 'GL_UNPACK_CLIENT_STORAGE_APPLE',
'GL_ELEMENT_ARRAY_APPLE', 'GL_ELEMENT_ARRAY_TYPE_APPLE',
'GL_ELEMENT_ARRAY_POINTER_APPLE', 'GL_DRAW_PIXELS_APPLE', 'GL_FENCE_APPLE',
'GL_VERTEX_ARRAY_BINDING_APPLE', 'GL_VERTEX_ARRAY_RANGE_APPLE',
'GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE', 'GL_VERTEX_ARRAY_STORAGE_HINT_APPLE',
'GL_VERTEX_ARRAY_RANGE_POINTER_APPLE', 'GL_STORAGE_CACHED_APPLE',
'GL_STORAGE_SHARED_APPLE', 'GL_YCBCR_422_APPLE',
'GL_UNSIGNED_SHORT_8_8_APPLE', 'GL_UNSIGNED_SHORT_8_8_REV_APPLE',
'GL_RGB_S3TC', 'GL_RGB4_S3TC', 'GL_RGBA_S3TC', 'GL_RGBA4_S3TC',
'GL_MAX_DRAW_BUFFERS_ATI', 'GL_DRAW_BUFFER0_ATI', 'GL_DRAW_BUFFER1_ATI',
'GL_DRAW_BUFFER2_ATI', 'GL_DRAW_BUFFER3_ATI', 'GL_DRAW_BUFFER4_ATI',
'GL_DRAW_BUFFER5_ATI', 'GL_DRAW_BUFFER6_ATI', 'GL_DRAW_BUFFER7_ATI',
'GL_DRAW_BUFFER8_ATI', 'GL_DRAW_BUFFER9_ATI', 'GL_DRAW_BUFFER10_ATI',
'GL_DRAW_BUFFER11_ATI', 'GL_DRAW_BUFFER12_ATI', 'GL_DRAW_BUFFER13_ATI',
'GL_DRAW_BUFFER14_ATI', 'GL_DRAW_BUFFER15_ATI', 'GL_TYPE_RGBA_FLOAT_ATI',
'GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI', 'GL_MODULATE_ADD_ATI',
'GL_MODULATE_SIGNED_ADD_ATI', 'GL_MODULATE_SUBTRACT_ATI',
'GL_RGBA_FLOAT32_ATI', 'GL_RGB_FLOAT32_ATI', 'GL_ALPHA_FLOAT32_ATI',
'GL_INTENSITY_FLOAT32_ATI', 'GL_LUMINANCE_FLOAT32_ATI',
'GL_LUMINANCE_ALPHA_FLOAT32_ATI', 'GL_RGBA_FLOAT16_ATI', 'GL_RGB_FLOAT16_ATI',
'GL_ALPHA_FLOAT16_ATI', 'GL_INTENSITY_FLOAT16_ATI',
'GL_LUMINANCE_FLOAT16_ATI', 'GL_LUMINANCE_ALPHA_FLOAT16_ATI', 'GL_FLOAT_R_NV',
'GL_FLOAT_RG_NV', 'GL_FLOAT_RGB_NV', 'GL_FLOAT_RGBA_NV', 'GL_FLOAT_R16_NV',
'GL_FLOAT_R32_NV', 'GL_FLOAT_RG16_NV', 'GL_FLOAT_RG32_NV',
'GL_FLOAT_RGB16_NV', 'GL_FLOAT_RGB32_NV', 'GL_FLOAT_RGBA16_NV',
'GL_FLOAT_RGBA32_NV', 'GL_TEXTURE_FLOAT_COMPONENTS_NV',
'GL_FLOAT_CLEAR_COLOR_VALUE_NV', 'GL_FLOAT_RGBA_MODE_NV',
'GL_MAX_FRAGMENT_PROGRAM_LOCAL_PARAMETERS_NV', 'GL_FRAGMENT_PROGRAM_NV',
'GL_MAX_TEXTURE_COORDS_NV', 'GL_MAX_TEXTURE_IMAGE_UNITS_NV',
'GL_FRAGMENT_PROGRAM_BINDING_NV', 'GL_PROGRAM_ERROR_STRING_NV',
'GL_HALF_FLOAT_NV', 'GL_WRITE_PIXEL_DATA_RANGE_NV',
'GL_READ_PIXEL_DATA_RANGE_NV', 'GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV',
'GL_READ_PIXEL_DATA_RANGE_LENGTH_NV', 'GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV',
'GL_READ_PIXEL_DATA_RANGE_POINTER_NV', 'GL_PRIMITIVE_RESTART_NV',
'GL_PRIMITIVE_RESTART_INDEX_NV', 'GL_TEXTURE_UNSIGNED_REMAP_MODE_NV',
'GL_STENCIL_BACK_FUNC_ATI', 'GL_STENCIL_BACK_FAIL_ATI',
'GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI', 'GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI',
'GL_IMPLEMENTATION_COLOR_READ_TYPE_OES',
'GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES', 'GL_DEPTH_BOUNDS_TEST_EXT',
'GL_DEPTH_BOUNDS_EXT', 'GL_MIRROR_CLAMP_EXT', 'GL_MIRROR_CLAMP_TO_EDGE_EXT',
'GL_MIRROR_CLAMP_TO_BORDER_EXT', 'GL_BLEND_EQUATION_RGB_EXT',
'GL_BLEND_EQUATION_ALPHA_EXT', 'GL_PACK_INVERT_MESA',
'GL_UNSIGNED_SHORT_8_8_MESA', 'GL_UNSIGNED_SHORT_8_8_REV_MESA',
'GL_YCBCR_MESA', 'GL_PIXEL_PACK_BUFFER_EXT', 'GL_PIXEL_UNPACK_BUFFER_EXT',
'GL_PIXEL_PACK_BUFFER_BINDING_EXT', 'GL_PIXEL_UNPACK_BUFFER_BINDING_EXT',
'GL_MAX_PROGRAM_EXEC_INSTRUCTIONS_NV', 'GL_MAX_PROGRAM_CALL_DEPTH_NV',
'GL_MAX_PROGRAM_IF_DEPTH_NV', 'GL_MAX_PROGRAM_LOOP_DEPTH_NV',
'GL_MAX_PROGRAM_LOOP_COUNT_NV', 'GL_INVALID_FRAMEBUFFER_OPERATION_EXT',
'GL_MAX_RENDERBUFFER_SIZE_EXT', 'GL_FRAMEBUFFER_BINDING_EXT',
'GL_RENDERBUFFER_BINDING_EXT', 'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT',
'GL_FRAMEBUFFER_COMPLETE_EXT', 'GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_DUPLICATE_ATTACHMENT_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT', 'GL_FRAMEBUFFER_UNSUPPORTED_EXT',
'GL_MAX_COLOR_ATTACHMENTS_EXT', 'GL_COLOR_ATTACHMENT0_EXT',
'GL_COLOR_ATTACHMENT1_EXT', 'GL_COLOR_ATTACHMENT2_EXT',
'GL_COLOR_ATTACHMENT3_EXT', 'GL_COLOR_ATTACHMENT4_EXT',
'GL_COLOR_ATTACHMENT5_EXT', 'GL_COLOR_ATTACHMENT6_EXT',
'GL_COLOR_ATTACHMENT7_EXT', 'GL_COLOR_ATTACHMENT8_EXT',
'GL_COLOR_ATTACHMENT9_EXT', 'GL_COLOR_ATTACHMENT10_EXT',
'GL_COLOR_ATTACHMENT11_EXT', 'GL_COLOR_ATTACHMENT12_EXT',
'GL_COLOR_ATTACHMENT13_EXT', 'GL_COLOR_ATTACHMENT14_EXT',
'GL_COLOR_ATTACHMENT15_EXT', 'GL_DEPTH_ATTACHMENT_EXT',
'GL_STENCIL_ATTACHMENT_EXT', 'GL_FRAMEBUFFER_EXT', 'GL_RENDERBUFFER_EXT',
'GL_RENDERBUFFER_WIDTH_EXT', 'GL_RENDERBUFFER_HEIGHT_EXT',
'GL_RENDERBUFFER_INTERNAL_FORMAT_EXT', 'GL_STENCIL_INDEX1_EXT',
'GL_STENCIL_INDEX4_EXT', 'GL_STENCIL_INDEX8_EXT', 'GL_STENCIL_INDEX16_EXT',
'GL_RENDERBUFFER_RED_SIZE_EXT', 'GL_RENDERBUFFER_GREEN_SIZE_EXT',
'GL_RENDERBUFFER_BLUE_SIZE_EXT', 'GL_RENDERBUFFER_ALPHA_SIZE_EXT',
'GL_RENDERBUFFER_DEPTH_SIZE_EXT', 'GL_RENDERBUFFER_STENCIL_SIZE_EXT',
'GLchar', 'GLintptr', 'GLsizeiptr', 'GLintptrARB', 'GLsizeiptrARB',
'GLcharARB', 'GLhandleARB', 'GLhalfARB', 'GLhalfNV', 'GL_VERSION_1_2',
'glBlendColor', 'glBlendEquation', 'glDrawRangeElements', 'glColorTable',
'glColorTableParameterfv', 'glColorTableParameteriv', 'glCopyColorTable',
'glGetColorTable', 'glGetColorTableParameterfv', 'glGetColorTableParameteriv',
'glColorSubTable', 'glCopyColorSubTable', 'glConvolutionFilter1D',
'glConvolutionFilter2D', 'glConvolutionParameterf',
'glConvolutionParameterfv', 'glConvolutionParameteri',
'glConvolutionParameteriv', 'glCopyConvolutionFilter1D',
'glCopyConvolutionFilter2D', 'glGetConvolutionFilter',
'glGetConvolutionParameterfv', 'glGetConvolutionParameteriv',
'glGetSeparableFilter', 'glSeparableFilter2D', 'glGetHistogram',
'glGetHistogramParameterfv', 'glGetHistogramParameteriv', 'glGetMinmax',
'glGetMinmaxParameterfv', 'glGetMinmaxParameteriv', 'glHistogram', 'glMinmax',
'glResetHistogram', 'glResetMinmax', 'glTexImage3D', 'glTexSubImage3D',
'glCopyTexSubImage3D', 'PFNGLBLENDCOLORPROC', 'PFNGLBLENDEQUATIONPROC',
'PFNGLDRAWRANGEELEMENTSPROC', 'PFNGLCOLORTABLEPROC',
'PFNGLCOLORTABLEPARAMETERFVPROC', 'PFNGLCOLORTABLEPARAMETERIVPROC',
'PFNGLCOPYCOLORTABLEPROC', 'PFNGLGETCOLORTABLEPROC',
'PFNGLGETCOLORTABLEPARAMETERFVPROC', 'PFNGLGETCOLORTABLEPARAMETERIVPROC',
'PFNGLCOLORSUBTABLEPROC', 'PFNGLCOPYCOLORSUBTABLEPROC',
'PFNGLCONVOLUTIONFILTER1DPROC', 'PFNGLCONVOLUTIONFILTER2DPROC',
'PFNGLCONVOLUTIONPARAMETERFPROC', 'PFNGLCONVOLUTIONPARAMETERFVPROC',
'PFNGLCONVOLUTIONPARAMETERIPROC', 'PFNGLCONVOLUTIONPARAMETERIVPROC',
'PFNGLCOPYCONVOLUTIONFILTER1DPROC', 'PFNGLCOPYCONVOLUTIONFILTER2DPROC',
'PFNGLGETCONVOLUTIONFILTERPROC', 'PFNGLGETCONVOLUTIONPARAMETERFVPROC',
'PFNGLGETCONVOLUTIONPARAMETERIVPROC', 'PFNGLGETSEPARABLEFILTERPROC',
'PFNGLSEPARABLEFILTER2DPROC', 'PFNGLGETHISTOGRAMPROC',
'PFNGLGETHISTOGRAMPARAMETERFVPROC', 'PFNGLGETHISTOGRAMPARAMETERIVPROC',
'PFNGLGETMINMAXPROC', 'PFNGLGETMINMAXPARAMETERFVPROC',
'PFNGLGETMINMAXPARAMETERIVPROC', 'PFNGLHISTOGRAMPROC', 'PFNGLMINMAXPROC',
'PFNGLRESETHISTOGRAMPROC', 'PFNGLRESETMINMAXPROC', 'PFNGLTEXIMAGE3DPROC',
'PFNGLTEXSUBIMAGE3DPROC', 'PFNGLCOPYTEXSUBIMAGE3DPROC', 'GL_VERSION_1_3',
'glActiveTexture', 'glClientActiveTexture', 'glMultiTexCoord1d',
'glMultiTexCoord1dv', 'glMultiTexCoord1f', 'glMultiTexCoord1fv',
'glMultiTexCoord1i', 'glMultiTexCoord1iv', 'glMultiTexCoord1s',
'glMultiTexCoord1sv', 'glMultiTexCoord2d', 'glMultiTexCoord2dv',
'glMultiTexCoord2f', 'glMultiTexCoord2fv', 'glMultiTexCoord2i',
'glMultiTexCoord2iv', 'glMultiTexCoord2s', 'glMultiTexCoord2sv',
'glMultiTexCoord3d', 'glMultiTexCoord3dv', 'glMultiTexCoord3f',
'glMultiTexCoord3fv', 'glMultiTexCoord3i', 'glMultiTexCoord3iv',
'glMultiTexCoord3s', 'glMultiTexCoord3sv', 'glMultiTexCoord4d',
'glMultiTexCoord4dv', 'glMultiTexCoord4f', 'glMultiTexCoord4fv',
'glMultiTexCoord4i', 'glMultiTexCoord4iv', 'glMultiTexCoord4s',
'glMultiTexCoord4sv', 'glLoadTransposeMatrixf', 'glLoadTransposeMatrixd',
'glMultTransposeMatrixf', 'glMultTransposeMatrixd', 'glSampleCoverage',
'glCompressedTexImage3D', 'glCompressedTexImage2D', 'glCompressedTexImage1D',
'glCompressedTexSubImage3D', 'glCompressedTexSubImage2D',
'glCompressedTexSubImage1D', 'glGetCompressedTexImage',
'PFNGLACTIVETEXTUREPROC', 'PFNGLCLIENTACTIVETEXTUREPROC',
'PFNGLMULTITEXCOORD1DPROC', 'PFNGLMULTITEXCOORD1DVPROC',
'PFNGLMULTITEXCOORD1FPROC', 'PFNGLMULTITEXCOORD1FVPROC',
'PFNGLMULTITEXCOORD1IPROC', 'PFNGLMULTITEXCOORD1IVPROC',
'PFNGLMULTITEXCOORD1SPROC', 'PFNGLMULTITEXCOORD1SVPROC',
'PFNGLMULTITEXCOORD2DPROC', 'PFNGLMULTITEXCOORD2DVPROC',
'PFNGLMULTITEXCOORD2FPROC', 'PFNGLMULTITEXCOORD2FVPROC',
'PFNGLMULTITEXCOORD2IPROC', 'PFNGLMULTITEXCOORD2IVPROC',
'PFNGLMULTITEXCOORD2SPROC', 'PFNGLMULTITEXCOORD2SVPROC',
'PFNGLMULTITEXCOORD3DPROC', 'PFNGLMULTITEXCOORD3DVPROC',
'PFNGLMULTITEXCOORD3FPROC', 'PFNGLMULTITEXCOORD3FVPROC',
'PFNGLMULTITEXCOORD3IPROC', 'PFNGLMULTITEXCOORD3IVPROC',
'PFNGLMULTITEXCOORD3SPROC', 'PFNGLMULTITEXCOORD3SVPROC',
'PFNGLMULTITEXCOORD4DPROC', 'PFNGLMULTITEXCOORD4DVPROC',
'PFNGLMULTITEXCOORD4FPROC', 'PFNGLMULTITEXCOORD4FVPROC',
'PFNGLMULTITEXCOORD4IPROC', 'PFNGLMULTITEXCOORD4IVPROC',
'PFNGLMULTITEXCOORD4SPROC', 'PFNGLMULTITEXCOORD4SVPROC',
'PFNGLLOADTRANSPOSEMATRIXFPROC', 'PFNGLLOADTRANSPOSEMATRIXDPROC',
'PFNGLMULTTRANSPOSEMATRIXFPROC', 'PFNGLMULTTRANSPOSEMATRIXDPROC',
'PFNGLSAMPLECOVERAGEPROC', 'PFNGLCOMPRESSEDTEXIMAGE3DPROC',
'PFNGLCOMPRESSEDTEXIMAGE2DPROC', 'PFNGLCOMPRESSEDTEXIMAGE1DPROC',
'PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC', 'PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC',
'PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC', 'PFNGLGETCOMPRESSEDTEXIMAGEPROC',
'GL_VERSION_1_4', 'glBlendFuncSeparate', 'glFogCoordf', 'glFogCoordfv',
'glFogCoordd', 'glFogCoorddv', 'glFogCoordPointer', 'glMultiDrawArrays',
'glMultiDrawElements', 'glPointParameterf', 'glPointParameterfv',
'glPointParameteri', 'glPointParameteriv', 'glSecondaryColor3b',
'glSecondaryColor3bv', 'glSecondaryColor3d', 'glSecondaryColor3dv',
'glSecondaryColor3f', 'glSecondaryColor3fv', 'glSecondaryColor3i',
'glSecondaryColor3iv', 'glSecondaryColor3s', 'glSecondaryColor3sv',
'glSecondaryColor3ub', 'glSecondaryColor3ubv', 'glSecondaryColor3ui',
'glSecondaryColor3uiv', 'glSecondaryColor3us', 'glSecondaryColor3usv',
'glSecondaryColorPointer', 'glWindowPos2d', 'glWindowPos2dv', 'glWindowPos2f',
'glWindowPos2fv', 'glWindowPos2i', 'glWindowPos2iv', 'glWindowPos2s',
'glWindowPos2sv', 'glWindowPos3d', 'glWindowPos3dv', 'glWindowPos3f',
'glWindowPos3fv', 'glWindowPos3i', 'glWindowPos3iv', 'glWindowPos3s',
'glWindowPos3sv', 'PFNGLBLENDFUNCSEPARATEPROC', 'PFNGLFOGCOORDFPROC',
'PFNGLFOGCOORDFVPROC', 'PFNGLFOGCOORDDPROC', 'PFNGLFOGCOORDDVPROC',
'PFNGLFOGCOORDPOINTERPROC', 'PFNGLMULTIDRAWARRAYSPROC',
'PFNGLMULTIDRAWELEMENTSPROC', 'PFNGLPOINTPARAMETERFPROC',
'PFNGLPOINTPARAMETERFVPROC', 'PFNGLPOINTPARAMETERIPROC',
'PFNGLPOINTPARAMETERIVPROC', 'PFNGLSECONDARYCOLOR3BPROC',
'PFNGLSECONDARYCOLOR3BVPROC', 'PFNGLSECONDARYCOLOR3DPROC',
'PFNGLSECONDARYCOLOR3DVPROC', 'PFNGLSECONDARYCOLOR3FPROC',
'PFNGLSECONDARYCOLOR3FVPROC', 'PFNGLSECONDARYCOLOR3IPROC',
'PFNGLSECONDARYCOLOR3IVPROC', 'PFNGLSECONDARYCOLOR3SPROC',
'PFNGLSECONDARYCOLOR3SVPROC', 'PFNGLSECONDARYCOLOR3UBPROC',
'PFNGLSECONDARYCOLOR3UBVPROC', 'PFNGLSECONDARYCOLOR3UIPROC',
'PFNGLSECONDARYCOLOR3UIVPROC', 'PFNGLSECONDARYCOLOR3USPROC',
'PFNGLSECONDARYCOLOR3USVPROC', 'PFNGLSECONDARYCOLORPOINTERPROC',
'PFNGLWINDOWPOS2DPROC', 'PFNGLWINDOWPOS2DVPROC', 'PFNGLWINDOWPOS2FPROC',
'PFNGLWINDOWPOS2FVPROC', 'PFNGLWINDOWPOS2IPROC', 'PFNGLWINDOWPOS2IVPROC',
'PFNGLWINDOWPOS2SPROC', 'PFNGLWINDOWPOS2SVPROC', 'PFNGLWINDOWPOS3DPROC',
'PFNGLWINDOWPOS3DVPROC', 'PFNGLWINDOWPOS3FPROC', 'PFNGLWINDOWPOS3FVPROC',
'PFNGLWINDOWPOS3IPROC', 'PFNGLWINDOWPOS3IVPROC', 'PFNGLWINDOWPOS3SPROC',
'PFNGLWINDOWPOS3SVPROC', 'GL_VERSION_1_5', 'glGenQueries', 'glDeleteQueries',
'glIsQuery', 'glBeginQuery', 'glEndQuery', 'glGetQueryiv',
'glGetQueryObjectiv', 'glGetQueryObjectuiv', 'glBindBuffer',
'glDeleteBuffers', 'glGenBuffers', 'glIsBuffer', 'glBufferData',
'glBufferSubData', 'glGetBufferSubData', 'glMapBuffer', 'glUnmapBuffer',
'glGetBufferParameteriv', 'glGetBufferPointerv', 'PFNGLGENQUERIESPROC',
'PFNGLDELETEQUERIESPROC', 'PFNGLISQUERYPROC', 'PFNGLBEGINQUERYPROC',
'PFNGLENDQUERYPROC', 'PFNGLGETQUERYIVPROC', 'PFNGLGETQUERYOBJECTIVPROC',
'PFNGLGETQUERYOBJECTUIVPROC', 'PFNGLBINDBUFFERPROC', 'PFNGLDELETEBUFFERSPROC',
'PFNGLGENBUFFERSPROC', 'PFNGLISBUFFERPROC', 'PFNGLBUFFERDATAPROC',
'PFNGLBUFFERSUBDATAPROC', 'PFNGLGETBUFFERSUBDATAPROC', 'PFNGLMAPBUFFERPROC',
'PFNGLUNMAPBUFFERPROC', 'PFNGLGETBUFFERPARAMETERIVPROC',
'PFNGLGETBUFFERPOINTERVPROC', 'GL_VERSION_2_0', 'glBlendEquationSeparate',
'glDrawBuffers', 'glStencilOpSeparate', 'glStencilFuncSeparate',
'glStencilMaskSeparate', 'glAttachShader', 'glBindAttribLocation',
'glCompileShader', 'glCreateProgram', 'glCreateShader', 'glDeleteProgram',
'glDeleteShader', 'glDetachShader', 'glDisableVertexAttribArray',
'glEnableVertexAttribArray', 'glGetActiveAttrib', 'glGetActiveUniform',
'glGetAttachedShaders', 'glGetAttribLocation', 'glGetProgramiv',
'glGetProgramInfoLog', 'glGetShaderiv', 'glGetShaderInfoLog',
'glGetShaderSource', 'glGetUniformLocation', 'glGetUniformfv',
'glGetUniformiv', 'glGetVertexAttribdv', 'glGetVertexAttribfv',
'glGetVertexAttribiv', 'glGetVertexAttribPointerv', 'glIsProgram',
'glIsShader', 'glLinkProgram', 'glShaderSource', 'glUseProgram',
'glUniform1f', 'glUniform2f', 'glUniform3f', 'glUniform4f', 'glUniform1i',
'glUniform2i', 'glUniform3i', 'glUniform4i', 'glUniform1fv', 'glUniform2fv',
'glUniform3fv', 'glUniform4fv', 'glUniform1iv', 'glUniform2iv',
'glUniform3iv', 'glUniform4iv', 'glUniformMatrix2fv', 'glUniformMatrix3fv',
'glUniformMatrix4fv', 'glValidateProgram', 'glVertexAttrib1d',
'glVertexAttrib1dv', 'glVertexAttrib1f', 'glVertexAttrib1fv',
'glVertexAttrib1s', 'glVertexAttrib1sv', 'glVertexAttrib2d',
'glVertexAttrib2dv', 'glVertexAttrib2f', 'glVertexAttrib2fv',
'glVertexAttrib2s', 'glVertexAttrib2sv', 'glVertexAttrib3d',
'glVertexAttrib3dv', 'glVertexAttrib3f', 'glVertexAttrib3fv',
'glVertexAttrib3s', 'glVertexAttrib3sv', 'glVertexAttrib4Nbv',
'glVertexAttrib4Niv', 'glVertexAttrib4Nsv', 'glVertexAttrib4Nub',
'glVertexAttrib4Nubv', 'glVertexAttrib4Nuiv', 'glVertexAttrib4Nusv',
'glVertexAttrib4bv', 'glVertexAttrib4d', 'glVertexAttrib4dv',
'glVertexAttrib4f', 'glVertexAttrib4fv', 'glVertexAttrib4iv',
'glVertexAttrib4s', 'glVertexAttrib4sv', 'glVertexAttrib4ubv',
'glVertexAttrib4uiv', 'glVertexAttrib4usv', 'glVertexAttribPointer',
'PFNGLBLENDEQUATIONSEPARATEPROC', 'PFNGLDRAWBUFFERSPROC',
'PFNGLSTENCILOPSEPARATEPROC', 'PFNGLSTENCILFUNCSEPARATEPROC',
'PFNGLSTENCILMASKSEPARATEPROC', 'PFNGLATTACHSHADERPROC',
'PFNGLBINDATTRIBLOCATIONPROC', 'PFNGLCOMPILESHADERPROC',
'PFNGLCREATEPROGRAMPROC', 'PFNGLCREATESHADERPROC', 'PFNGLDELETEPROGRAMPROC',
'PFNGLDELETESHADERPROC', 'PFNGLDETACHSHADERPROC',
'PFNGLDISABLEVERTEXATTRIBARRAYPROC', 'PFNGLENABLEVERTEXATTRIBARRAYPROC',
'PFNGLGETACTIVEATTRIBPROC', 'PFNGLGETACTIVEUNIFORMPROC',
'PFNGLGETATTACHEDSHADERSPROC', 'PFNGLGETATTRIBLOCATIONPROC',
'PFNGLGETPROGRAMIVPROC', 'PFNGLGETPROGRAMINFOLOGPROC', 'PFNGLGETSHADERIVPROC',
'PFNGLGETSHADERINFOLOGPROC', 'PFNGLGETSHADERSOURCEPROC',
'PFNGLGETUNIFORMLOCATIONPROC', 'PFNGLGETUNIFORMFVPROC',
'PFNGLGETUNIFORMIVPROC', 'PFNGLGETVERTEXATTRIBDVPROC',
'PFNGLGETVERTEXATTRIBFVPROC', 'PFNGLGETVERTEXATTRIBIVPROC',
'PFNGLGETVERTEXATTRIBPOINTERVPROC', 'PFNGLISPROGRAMPROC', 'PFNGLISSHADERPROC',
'PFNGLLINKPROGRAMPROC', 'PFNGLSHADERSOURCEPROC', 'PFNGLUSEPROGRAMPROC',
'PFNGLUNIFORM1FPROC', 'PFNGLUNIFORM2FPROC', 'PFNGLUNIFORM3FPROC',
'PFNGLUNIFORM4FPROC', 'PFNGLUNIFORM1IPROC', 'PFNGLUNIFORM2IPROC',
'PFNGLUNIFORM3IPROC', 'PFNGLUNIFORM4IPROC', 'PFNGLUNIFORM1FVPROC',
'PFNGLUNIFORM2FVPROC', 'PFNGLUNIFORM3FVPROC', 'PFNGLUNIFORM4FVPROC',
'PFNGLUNIFORM1IVPROC', 'PFNGLUNIFORM2IVPROC', 'PFNGLUNIFORM3IVPROC',
'PFNGLUNIFORM4IVPROC', 'PFNGLUNIFORMMATRIX2FVPROC',
'PFNGLUNIFORMMATRIX3FVPROC', 'PFNGLUNIFORMMATRIX4FVPROC',
'PFNGLVALIDATEPROGRAMPROC', 'PFNGLVERTEXATTRIB1DPROC',
'PFNGLVERTEXATTRIB1DVPROC', 'PFNGLVERTEXATTRIB1FPROC',
'PFNGLVERTEXATTRIB1FVPROC', 'PFNGLVERTEXATTRIB1SPROC',
'PFNGLVERTEXATTRIB1SVPROC', 'PFNGLVERTEXATTRIB2DPROC',
'PFNGLVERTEXATTRIB2DVPROC', 'PFNGLVERTEXATTRIB2FPROC',
'PFNGLVERTEXATTRIB2FVPROC', 'PFNGLVERTEXATTRIB2SPROC',
'PFNGLVERTEXATTRIB2SVPROC', 'PFNGLVERTEXATTRIB3DPROC',
'PFNGLVERTEXATTRIB3DVPROC', 'PFNGLVERTEXATTRIB3FPROC',
'PFNGLVERTEXATTRIB3FVPROC', 'PFNGLVERTEXATTRIB3SPROC',
'PFNGLVERTEXATTRIB3SVPROC', 'PFNGLVERTEXATTRIB4NBVPROC',
'PFNGLVERTEXATTRIB4NIVPROC', 'PFNGLVERTEXATTRIB4NSVPROC',
'PFNGLVERTEXATTRIB4NUBPROC', 'PFNGLVERTEXATTRIB4NUBVPROC',
'PFNGLVERTEXATTRIB4NUIVPROC', 'PFNGLVERTEXATTRIB4NUSVPROC',
'PFNGLVERTEXATTRIB4BVPROC', 'PFNGLVERTEXATTRIB4DPROC',
'PFNGLVERTEXATTRIB4DVPROC', 'PFNGLVERTEXATTRIB4FPROC',
'PFNGLVERTEXATTRIB4FVPROC', 'PFNGLVERTEXATTRIB4IVPROC',
'PFNGLVERTEXATTRIB4SPROC', 'PFNGLVERTEXATTRIB4SVPROC',
'PFNGLVERTEXATTRIB4UBVPROC', 'PFNGLVERTEXATTRIB4UIVPROC',
'PFNGLVERTEXATTRIB4USVPROC', 'PFNGLVERTEXATTRIBPOINTERPROC',
'GL_ARB_multitexture', 'glActiveTextureARB', 'glClientActiveTextureARB',
'glMultiTexCoord1dARB', 'glMultiTexCoord1dvARB', 'glMultiTexCoord1fARB',
'glMultiTexCoord1fvARB', 'glMultiTexCoord1iARB', 'glMultiTexCoord1ivARB',
'glMultiTexCoord1sARB', 'glMultiTexCoord1svARB', 'glMultiTexCoord2dARB',
'glMultiTexCoord2dvARB', 'glMultiTexCoord2fARB', 'glMultiTexCoord2fvARB',
'glMultiTexCoord2iARB', 'glMultiTexCoord2ivARB', 'glMultiTexCoord2sARB',
'glMultiTexCoord2svARB', 'glMultiTexCoord3dARB', 'glMultiTexCoord3dvARB',
'glMultiTexCoord3fARB', 'glMultiTexCoord3fvARB', 'glMultiTexCoord3iARB',
'glMultiTexCoord3ivARB', 'glMultiTexCoord3sARB', 'glMultiTexCoord3svARB',
'glMultiTexCoord4dARB', 'glMultiTexCoord4dvARB', 'glMultiTexCoord4fARB',
'glMultiTexCoord4fvARB', 'glMultiTexCoord4iARB', 'glMultiTexCoord4ivARB',
'glMultiTexCoord4sARB', 'glMultiTexCoord4svARB', 'PFNGLACTIVETEXTUREARBPROC',
'PFNGLCLIENTACTIVETEXTUREARBPROC', 'PFNGLMULTITEXCOORD1DARBPROC',
'PFNGLMULTITEXCOORD1DVARBPROC', 'PFNGLMULTITEXCOORD1FARBPROC',
'PFNGLMULTITEXCOORD1FVARBPROC', 'PFNGLMULTITEXCOORD1IARBPROC',
'PFNGLMULTITEXCOORD1IVARBPROC', 'PFNGLMULTITEXCOORD1SARBPROC',
'PFNGLMULTITEXCOORD1SVARBPROC', 'PFNGLMULTITEXCOORD2DARBPROC',
'PFNGLMULTITEXCOORD2DVARBPROC', 'PFNGLMULTITEXCOORD2FARBPROC',
'PFNGLMULTITEXCOORD2FVARBPROC', 'PFNGLMULTITEXCOORD2IARBPROC',
'PFNGLMULTITEXCOORD2IVARBPROC', 'PFNGLMULTITEXCOORD2SARBPROC',
'PFNGLMULTITEXCOORD2SVARBPROC', 'PFNGLMULTITEXCOORD3DARBPROC',
'PFNGLMULTITEXCOORD3DVARBPROC', 'PFNGLMULTITEXCOORD3FARBPROC',
'PFNGLMULTITEXCOORD3FVARBPROC', 'PFNGLMULTITEXCOORD3IARBPROC',
'PFNGLMULTITEXCOORD3IVARBPROC', 'PFNGLMULTITEXCOORD3SARBPROC',
'PFNGLMULTITEXCOORD3SVARBPROC', 'PFNGLMULTITEXCOORD4DARBPROC',
'PFNGLMULTITEXCOORD4DVARBPROC', 'PFNGLMULTITEXCOORD4FARBPROC',
'PFNGLMULTITEXCOORD4FVARBPROC', 'PFNGLMULTITEXCOORD4IARBPROC',
'PFNGLMULTITEXCOORD4IVARBPROC', 'PFNGLMULTITEXCOORD4SARBPROC',
'PFNGLMULTITEXCOORD4SVARBPROC', 'GL_ARB_transpose_matrix',
'glLoadTransposeMatrixfARB', 'glLoadTransposeMatrixdARB',
'glMultTransposeMatrixfARB', 'glMultTransposeMatrixdARB',
'PFNGLLOADTRANSPOSEMATRIXFARBPROC', 'PFNGLLOADTRANSPOSEMATRIXDARBPROC',
'PFNGLMULTTRANSPOSEMATRIXFARBPROC', 'PFNGLMULTTRANSPOSEMATRIXDARBPROC',
'GL_ARB_multisample', 'glSampleCoverageARB', 'PFNGLSAMPLECOVERAGEARBPROC',
'GL_ARB_texture_env_add', 'GL_ARB_texture_cube_map',
'GL_ARB_texture_compression', 'glCompressedTexImage3DARB',
'glCompressedTexImage2DARB', 'glCompressedTexImage1DARB',
'glCompressedTexSubImage3DARB', 'glCompressedTexSubImage2DARB',
'glCompressedTexSubImage1DARB', 'glGetCompressedTexImageARB',
'PFNGLCOMPRESSEDTEXIMAGE3DARBPROC', 'PFNGLCOMPRESSEDTEXIMAGE2DARBPROC',
'PFNGLCOMPRESSEDTEXIMAGE1DARBPROC', 'PFNGLCOMPRESSEDTEXSUBIMAGE3DARBPROC',
'PFNGLCOMPRESSEDTEXSUBIMAGE2DARBPROC', 'PFNGLCOMPRESSEDTEXSUBIMAGE1DARBPROC',
'PFNGLGETCOMPRESSEDTEXIMAGEARBPROC', 'GL_ARB_texture_border_clamp',
'GL_ARB_point_parameters', 'glPointParameterfARB', 'glPointParameterfvARB',
'PFNGLPOINTPARAMETERFARBPROC', 'PFNGLPOINTPARAMETERFVARBPROC',
'GL_ARB_vertex_blend', 'glWeightbvARB', 'glWeightsvARB', 'glWeightivARB',
'glWeightfvARB', 'glWeightdvARB', 'glWeightubvARB', 'glWeightusvARB',
'glWeightuivARB', 'glWeightPointerARB', 'glVertexBlendARB',
'PFNGLWEIGHTBVARBPROC', 'PFNGLWEIGHTSVARBPROC', 'PFNGLWEIGHTIVARBPROC',
'PFNGLWEIGHTFVARBPROC', 'PFNGLWEIGHTDVARBPROC', 'PFNGLWEIGHTUBVARBPROC',
'PFNGLWEIGHTUSVARBPROC', 'PFNGLWEIGHTUIVARBPROC', 'PFNGLWEIGHTPOINTERARBPROC',
'PFNGLVERTEXBLENDARBPROC', 'GL_ARB_matrix_palette',
'glCurrentPaletteMatrixARB', 'glMatrixIndexubvARB', 'glMatrixIndexusvARB',
'glMatrixIndexuivARB', 'glMatrixIndexPointerARB',
'PFNGLCURRENTPALETTEMATRIXARBPROC', 'PFNGLMATRIXINDEXUBVARBPROC',
'PFNGLMATRIXINDEXUSVARBPROC', 'PFNGLMATRIXINDEXUIVARBPROC',
'PFNGLMATRIXINDEXPOINTERARBPROC', 'GL_ARB_texture_env_combine',
'GL_ARB_texture_env_crossbar', 'GL_ARB_texture_env_dot3',
'GL_ARB_texture_mirrored_repeat', 'GL_ARB_depth_texture', 'GL_ARB_shadow',
'GL_ARB_shadow_ambient', 'GL_ARB_window_pos', 'glWindowPos2dARB',
'glWindowPos2dvARB', 'glWindowPos2fARB', 'glWindowPos2fvARB',
'glWindowPos2iARB', 'glWindowPos2ivARB', 'glWindowPos2sARB',
'glWindowPos2svARB', 'glWindowPos3dARB', 'glWindowPos3dvARB',
'glWindowPos3fARB', 'glWindowPos3fvARB', 'glWindowPos3iARB',
'glWindowPos3ivARB', 'glWindowPos3sARB', 'glWindowPos3svARB',
'PFNGLWINDOWPOS2DARBPROC', 'PFNGLWINDOWPOS2DVARBPROC',
'PFNGLWINDOWPOS2FARBPROC', 'PFNGLWINDOWPOS2FVARBPROC',
'PFNGLWINDOWPOS2IARBPROC', 'PFNGLWINDOWPOS2IVARBPROC',
'PFNGLWINDOWPOS2SARBPROC', 'PFNGLWINDOWPOS2SVARBPROC',
'PFNGLWINDOWPOS3DARBPROC', 'PFNGLWINDOWPOS3DVARBPROC',
'PFNGLWINDOWPOS3FARBPROC', 'PFNGLWINDOWPOS3FVARBPROC',
'PFNGLWINDOWPOS3IARBPROC', 'PFNGLWINDOWPOS3IVARBPROC',
'PFNGLWINDOWPOS3SARBPROC', 'PFNGLWINDOWPOS3SVARBPROC',
'GL_ARB_vertex_program', 'glVertexAttrib1dARB', 'glVertexAttrib1dvARB',
'glVertexAttrib1fARB', 'glVertexAttrib1fvARB', 'glVertexAttrib1sARB',
'glVertexAttrib1svARB', 'glVertexAttrib2dARB', 'glVertexAttrib2dvARB',
'glVertexAttrib2fARB', 'glVertexAttrib2fvARB', 'glVertexAttrib2sARB',
'glVertexAttrib2svARB', 'glVertexAttrib3dARB', 'glVertexAttrib3dvARB',
'glVertexAttrib3fARB', 'glVertexAttrib3fvARB', 'glVertexAttrib3sARB',
'glVertexAttrib3svARB', 'glVertexAttrib4NbvARB', 'glVertexAttrib4NivARB',
'glVertexAttrib4NsvARB', 'glVertexAttrib4NubARB', 'glVertexAttrib4NubvARB',
'glVertexAttrib4NuivARB', 'glVertexAttrib4NusvARB', 'glVertexAttrib4bvARB',
'glVertexAttrib4dARB', 'glVertexAttrib4dvARB', 'glVertexAttrib4fARB',
'glVertexAttrib4fvARB', 'glVertexAttrib4ivARB', 'glVertexAttrib4sARB',
'glVertexAttrib4svARB', 'glVertexAttrib4ubvARB', 'glVertexAttrib4uivARB',
'glVertexAttrib4usvARB', 'glVertexAttribPointerARB',
'glEnableVertexAttribArrayARB', 'glDisableVertexAttribArrayARB',
'glProgramStringARB', 'glBindProgramARB', 'glDeleteProgramsARB',
'glGenProgramsARB', 'glProgramEnvParameter4dARB',
'glProgramEnvParameter4dvARB', 'glProgramEnvParameter4fARB',
'glProgramEnvParameter4fvARB', 'glProgramLocalParameter4dARB',
'glProgramLocalParameter4dvARB', 'glProgramLocalParameter4fARB',
'glProgramLocalParameter4fvARB', 'glGetProgramEnvParameterdvARB',
'glGetProgramEnvParameterfvARB', 'glGetProgramLocalParameterdvARB',
'glGetProgramLocalParameterfvARB', 'glGetProgramivARB',
'glGetProgramStringARB', 'glGetVertexAttribdvARB', 'glGetVertexAttribfvARB',
'glGetVertexAttribivARB', 'glGetVertexAttribPointervARB', 'glIsProgramARB',
'PFNGLVERTEXATTRIB1DARBPROC', 'PFNGLVERTEXATTRIB1DVARBPROC',
'PFNGLVERTEXATTRIB1FARBPROC', 'PFNGLVERTEXATTRIB1FVARBPROC',
'PFNGLVERTEXATTRIB1SARBPROC', 'PFNGLVERTEXATTRIB1SVARBPROC',
'PFNGLVERTEXATTRIB2DARBPROC', 'PFNGLVERTEXATTRIB2DVARBPROC',
'PFNGLVERTEXATTRIB2FARBPROC', 'PFNGLVERTEXATTRIB2FVARBPROC',
'PFNGLVERTEXATTRIB2SARBPROC', 'PFNGLVERTEXATTRIB2SVARBPROC',
'PFNGLVERTEXATTRIB3DARBPROC', 'PFNGLVERTEXATTRIB3DVARBPROC',
'PFNGLVERTEXATTRIB3FARBPROC', 'PFNGLVERTEXATTRIB3FVARBPROC',
'PFNGLVERTEXATTRIB3SARBPROC', 'PFNGLVERTEXATTRIB3SVARBPROC',
'PFNGLVERTEXATTRIB4NBVARBPROC', 'PFNGLVERTEXATTRIB4NIVARBPROC',
'PFNGLVERTEXATTRIB4NSVARBPROC', 'PFNGLVERTEXATTRIB4NUBARBPROC',
'PFNGLVERTEXATTRIB4NUBVARBPROC', 'PFNGLVERTEXATTRIB4NUIVARBPROC',
'PFNGLVERTEXATTRIB4NUSVARBPROC', 'PFNGLVERTEXATTRIB4BVARBPROC',
'PFNGLVERTEXATTRIB4DARBPROC', 'PFNGLVERTEXATTRIB4DVARBPROC',
'PFNGLVERTEXATTRIB4FARBPROC', 'PFNGLVERTEXATTRIB4FVARBPROC',
'PFNGLVERTEXATTRIB4IVARBPROC', 'PFNGLVERTEXATTRIB4SARBPROC',
'PFNGLVERTEXATTRIB4SVARBPROC', 'PFNGLVERTEXATTRIB4UBVARBPROC',
'PFNGLVERTEXATTRIB4UIVARBPROC', 'PFNGLVERTEXATTRIB4USVARBPROC',
'PFNGLVERTEXATTRIBPOINTERARBPROC', 'PFNGLENABLEVERTEXATTRIBARRAYARBPROC',
'PFNGLDISABLEVERTEXATTRIBARRAYARBPROC', 'PFNGLPROGRAMSTRINGARBPROC',
'PFNGLBINDPROGRAMARBPROC', 'PFNGLDELETEPROGRAMSARBPROC',
'PFNGLGENPROGRAMSARBPROC', 'PFNGLPROGRAMENVPARAMETER4DARBPROC',
'PFNGLPROGRAMENVPARAMETER4DVARBPROC', 'PFNGLPROGRAMENVPARAMETER4FARBPROC',
'PFNGLPROGRAMENVPARAMETER4FVARBPROC', 'PFNGLPROGRAMLOCALPARAMETER4DARBPROC',
'PFNGLPROGRAMLOCALPARAMETER4DVARBPROC', 'PFNGLPROGRAMLOCALPARAMETER4FARBPROC',
'PFNGLPROGRAMLOCALPARAMETER4FVARBPROC',
'PFNGLGETPROGRAMENVPARAMETERDVARBPROC',
'PFNGLGETPROGRAMENVPARAMETERFVARBPROC',
'PFNGLGETPROGRAMLOCALPARAMETERDVARBPROC',
'PFNGLGETPROGRAMLOCALPARAMETERFVARBPROC', 'PFNGLGETPROGRAMIVARBPROC',
'PFNGLGETPROGRAMSTRINGARBPROC', 'PFNGLGETVERTEXATTRIBDVARBPROC',
'PFNGLGETVERTEXATTRIBFVARBPROC', 'PFNGLGETVERTEXATTRIBIVARBPROC',
'PFNGLGETVERTEXATTRIBPOINTERVARBPROC', 'PFNGLISPROGRAMARBPROC',
'GL_ARB_fragment_program', 'GL_ARB_vertex_buffer_object', 'glBindBufferARB',
'glDeleteBuffersARB', 'glGenBuffersARB', 'glIsBufferARB', 'glBufferDataARB',
'glBufferSubDataARB', 'glGetBufferSubDataARB', 'glMapBufferARB',
'glUnmapBufferARB', 'glGetBufferParameterivARB', 'glGetBufferPointervARB',
'PFNGLBINDBUFFERARBPROC', 'PFNGLDELETEBUFFERSARBPROC',
'PFNGLGENBUFFERSARBPROC', 'PFNGLISBUFFERARBPROC', 'PFNGLBUFFERDATAARBPROC',
'PFNGLBUFFERSUBDATAARBPROC', 'PFNGLGETBUFFERSUBDATAARBPROC',
'PFNGLMAPBUFFERARBPROC', 'PFNGLUNMAPBUFFERARBPROC',
'PFNGLGETBUFFERPARAMETERIVARBPROC', 'PFNGLGETBUFFERPOINTERVARBPROC',
'GL_ARB_occlusion_query', 'glGenQueriesARB', 'glDeleteQueriesARB',
'glIsQueryARB', 'glBeginQueryARB', 'glEndQueryARB', 'glGetQueryivARB',
'glGetQueryObjectivARB', 'glGetQueryObjectuivARB', 'PFNGLGENQUERIESARBPROC',
'PFNGLDELETEQUERIESARBPROC', 'PFNGLISQUERYARBPROC', 'PFNGLBEGINQUERYARBPROC',
'PFNGLENDQUERYARBPROC', 'PFNGLGETQUERYIVARBPROC',
'PFNGLGETQUERYOBJECTIVARBPROC', 'PFNGLGETQUERYOBJECTUIVARBPROC',
'GL_ARB_shader_objects', 'glDeleteObjectARB', 'glGetHandleARB',
'glDetachObjectARB', 'glCreateShaderObjectARB', 'glShaderSourceARB',
'glCompileShaderARB', 'glCreateProgramObjectARB', 'glAttachObjectARB',
'glLinkProgramARB', 'glUseProgramObjectARB', 'glValidateProgramARB',
'glUniform1fARB', 'glUniform2fARB', 'glUniform3fARB', 'glUniform4fARB',
'glUniform1iARB', 'glUniform2iARB', 'glUniform3iARB', 'glUniform4iARB',
'glUniform1fvARB', 'glUniform2fvARB', 'glUniform3fvARB', 'glUniform4fvARB',
'glUniform1ivARB', 'glUniform2ivARB', 'glUniform3ivARB', 'glUniform4ivARB',
'glUniformMatrix2fvARB', 'glUniformMatrix3fvARB', 'glUniformMatrix4fvARB',
'glGetObjectParameterfvARB', 'glGetObjectParameterivARB', 'glGetInfoLogARB',
'glGetAttachedObjectsARB', 'glGetUniformLocationARB', 'glGetActiveUniformARB',
'glGetUniformfvARB', 'glGetUniformivARB', 'glGetShaderSourceARB',
'PFNGLDELETEOBJECTARBPROC', 'PFNGLGETHANDLEARBPROC',
'PFNGLDETACHOBJECTARBPROC', 'PFNGLCREATESHADEROBJECTARBPROC',
'PFNGLSHADERSOURCEARBPROC', 'PFNGLCOMPILESHADERARBPROC',
'PFNGLCREATEPROGRAMOBJECTARBPROC', 'PFNGLATTACHOBJECTARBPROC',
'PFNGLLINKPROGRAMARBPROC', 'PFNGLUSEPROGRAMOBJECTARBPROC',
'PFNGLVALIDATEPROGRAMARBPROC', 'PFNGLUNIFORM1FARBPROC',
'PFNGLUNIFORM2FARBPROC', 'PFNGLUNIFORM3FARBPROC', 'PFNGLUNIFORM4FARBPROC',
'PFNGLUNIFORM1IARBPROC', 'PFNGLUNIFORM2IARBPROC', 'PFNGLUNIFORM3IARBPROC',
'PFNGLUNIFORM4IARBPROC', 'PFNGLUNIFORM1FVARBPROC', 'PFNGLUNIFORM2FVARBPROC',
'PFNGLUNIFORM3FVARBPROC', 'PFNGLUNIFORM4FVARBPROC', 'PFNGLUNIFORM1IVARBPROC',
'PFNGLUNIFORM2IVARBPROC', 'PFNGLUNIFORM3IVARBPROC', 'PFNGLUNIFORM4IVARBPROC',
'PFNGLUNIFORMMATRIX2FVARBPROC', 'PFNGLUNIFORMMATRIX3FVARBPROC',
'PFNGLUNIFORMMATRIX4FVARBPROC', 'PFNGLGETOBJECTPARAMETERFVARBPROC',
'PFNGLGETOBJECTPARAMETERIVARBPROC', 'PFNGLGETINFOLOGARBPROC',
'PFNGLGETATTACHEDOBJECTSARBPROC', 'PFNGLGETUNIFORMLOCATIONARBPROC',
'PFNGLGETACTIVEUNIFORMARBPROC', 'PFNGLGETUNIFORMFVARBPROC',
'PFNGLGETUNIFORMIVARBPROC', 'PFNGLGETSHADERSOURCEARBPROC',
'GL_ARB_vertex_shader', 'glBindAttribLocationARB', 'glGetActiveAttribARB',
'glGetAttribLocationARB', 'PFNGLBINDATTRIBLOCATIONARBPROC',
'PFNGLGETACTIVEATTRIBARBPROC', 'PFNGLGETATTRIBLOCATIONARBPROC',
'GL_ARB_fragment_shader', 'GL_ARB_shading_language_100',
'GL_ARB_texture_non_power_of_two', 'GL_ARB_point_sprite',
'GL_ARB_fragment_program_shadow', 'GL_ARB_draw_buffers', 'glDrawBuffersARB',
'PFNGLDRAWBUFFERSARBPROC', 'GL_ARB_texture_rectangle',
'GL_ARB_color_buffer_float', 'glClampColorARB', 'PFNGLCLAMPCOLORARBPROC',
'GL_ARB_half_float_pixel', 'GL_ARB_texture_float',
'GL_ARB_pixel_buffer_object', 'GL_EXT_abgr', 'GL_EXT_blend_color',
'glBlendColorEXT', 'PFNGLBLENDCOLOREXTPROC', 'GL_EXT_polygon_offset',
'glPolygonOffsetEXT', 'PFNGLPOLYGONOFFSETEXTPROC', 'GL_EXT_texture',
'GL_EXT_texture3D', 'glTexImage3DEXT', 'glTexSubImage3DEXT',
'PFNGLTEXIMAGE3DEXTPROC', 'PFNGLTEXSUBIMAGE3DEXTPROC',
'GL_SGIS_texture_filter4', 'glGetTexFilterFuncSGIS', 'glTexFilterFuncSGIS',
'PFNGLGETTEXFILTERFUNCSGISPROC', 'PFNGLTEXFILTERFUNCSGISPROC',
'GL_EXT_subtexture', 'glTexSubImage1DEXT', 'glTexSubImage2DEXT',
'PFNGLTEXSUBIMAGE1DEXTPROC', 'PFNGLTEXSUBIMAGE2DEXTPROC',
'GL_EXT_copy_texture', 'glCopyTexImage1DEXT', 'glCopyTexImage2DEXT',
'glCopyTexSubImage1DEXT', 'glCopyTexSubImage2DEXT', 'glCopyTexSubImage3DEXT',
'PFNGLCOPYTEXIMAGE1DEXTPROC', 'PFNGLCOPYTEXIMAGE2DEXTPROC',
'PFNGLCOPYTEXSUBIMAGE1DEXTPROC', 'PFNGLCOPYTEXSUBIMAGE2DEXTPROC',
'PFNGLCOPYTEXSUBIMAGE3DEXTPROC', 'GL_EXT_histogram', 'glGetHistogramEXT',
'glGetHistogramParameterfvEXT', 'glGetHistogramParameterivEXT',
'glGetMinmaxEXT', 'glGetMinmaxParameterfvEXT', 'glGetMinmaxParameterivEXT',
'glHistogramEXT', 'glMinmaxEXT', 'glResetHistogramEXT', 'glResetMinmaxEXT',
'PFNGLGETHISTOGRAMEXTPROC', 'PFNGLGETHISTOGRAMPARAMETERFVEXTPROC',
'PFNGLGETHISTOGRAMPARAMETERIVEXTPROC', 'PFNGLGETMINMAXEXTPROC',
'PFNGLGETMINMAXPARAMETERFVEXTPROC', 'PFNGLGETMINMAXPARAMETERIVEXTPROC',
'PFNGLHISTOGRAMEXTPROC', 'PFNGLMINMAXEXTPROC', 'PFNGLRESETHISTOGRAMEXTPROC',
'PFNGLRESETMINMAXEXTPROC', 'GL_EXT_convolution', 'glConvolutionFilter1DEXT',
'glConvolutionFilter2DEXT', 'glConvolutionParameterfEXT',
'glConvolutionParameterfvEXT', 'glConvolutionParameteriEXT',
'glConvolutionParameterivEXT', 'glCopyConvolutionFilter1DEXT',
'glCopyConvolutionFilter2DEXT', 'glGetConvolutionFilterEXT',
'glGetConvolutionParameterfvEXT', 'glGetConvolutionParameterivEXT',
'glGetSeparableFilterEXT', 'glSeparableFilter2DEXT',
'PFNGLCONVOLUTIONFILTER1DEXTPROC', 'PFNGLCONVOLUTIONFILTER2DEXTPROC',
'PFNGLCONVOLUTIONPARAMETERFEXTPROC', 'PFNGLCONVOLUTIONPARAMETERFVEXTPROC',
'PFNGLCONVOLUTIONPARAMETERIEXTPROC', 'PFNGLCONVOLUTIONPARAMETERIVEXTPROC',
'PFNGLCOPYCONVOLUTIONFILTER1DEXTPROC', 'PFNGLCOPYCONVOLUTIONFILTER2DEXTPROC',
'PFNGLGETCONVOLUTIONFILTEREXTPROC', 'PFNGLGETCONVOLUTIONPARAMETERFVEXTPROC',
'PFNGLGETCONVOLUTIONPARAMETERIVEXTPROC', 'PFNGLGETSEPARABLEFILTEREXTPROC',
'PFNGLSEPARABLEFILTER2DEXTPROC', 'GL_EXT_color_matrix', 'GL_SGI_color_table',
'glColorTableSGI', 'glColorTableParameterfvSGI', 'glColorTableParameterivSGI',
'glCopyColorTableSGI', 'glGetColorTableSGI', 'glGetColorTableParameterfvSGI',
'glGetColorTableParameterivSGI', 'PFNGLCOLORTABLESGIPROC',
'PFNGLCOLORTABLEPARAMETERFVSGIPROC', 'PFNGLCOLORTABLEPARAMETERIVSGIPROC',
'PFNGLCOPYCOLORTABLESGIPROC', 'PFNGLGETCOLORTABLESGIPROC',
'PFNGLGETCOLORTABLEPARAMETERFVSGIPROC',
'PFNGLGETCOLORTABLEPARAMETERIVSGIPROC', 'GL_SGIX_pixel_texture',
'glPixelTexGenSGIX', 'PFNGLPIXELTEXGENSGIXPROC', 'GL_SGIS_pixel_texture',
'glPixelTexGenParameteriSGIS', 'glPixelTexGenParameterivSGIS',
'glPixelTexGenParameterfSGIS', 'glPixelTexGenParameterfvSGIS',
'glGetPixelTexGenParameterivSGIS', 'glGetPixelTexGenParameterfvSGIS',
'PFNGLPIXELTEXGENPARAMETERISGISPROC', 'PFNGLPIXELTEXGENPARAMETERIVSGISPROC',
'PFNGLPIXELTEXGENPARAMETERFSGISPROC', 'PFNGLPIXELTEXGENPARAMETERFVSGISPROC',
'PFNGLGETPIXELTEXGENPARAMETERIVSGISPROC',
'PFNGLGETPIXELTEXGENPARAMETERFVSGISPROC', 'GL_SGIS_texture4D',
'glTexImage4DSGIS', 'glTexSubImage4DSGIS', 'PFNGLTEXIMAGE4DSGISPROC',
'PFNGLTEXSUBIMAGE4DSGISPROC', 'GL_SGI_texture_color_table', 'GL_EXT_cmyka',
'GL_EXT_texture_object', 'glAreTexturesResidentEXT', 'glBindTextureEXT',
'glDeleteTexturesEXT', 'glGenTexturesEXT', 'glIsTextureEXT',
'glPrioritizeTexturesEXT', 'PFNGLARETEXTURESRESIDENTEXTPROC',
'PFNGLBINDTEXTUREEXTPROC', 'PFNGLDELETETEXTURESEXTPROC',
'PFNGLGENTEXTURESEXTPROC', 'PFNGLISTEXTUREEXTPROC',
'PFNGLPRIORITIZETEXTURESEXTPROC', 'GL_SGIS_detail_texture',
'glDetailTexFuncSGIS', 'glGetDetailTexFuncSGIS', 'PFNGLDETAILTEXFUNCSGISPROC',
'PFNGLGETDETAILTEXFUNCSGISPROC', 'GL_SGIS_sharpen_texture',
'glSharpenTexFuncSGIS', 'glGetSharpenTexFuncSGIS',
'PFNGLSHARPENTEXFUNCSGISPROC', 'PFNGLGETSHARPENTEXFUNCSGISPROC',
'GL_EXT_packed_pixels', 'GL_SGIS_texture_lod', 'GL_SGIS_multisample',
'glSampleMaskSGIS', 'glSamplePatternSGIS', 'PFNGLSAMPLEMASKSGISPROC',
'PFNGLSAMPLEPATTERNSGISPROC', 'GL_EXT_rescale_normal', 'GL_EXT_vertex_array',
'glArrayElementEXT', 'glColorPointerEXT', 'glDrawArraysEXT',
'glEdgeFlagPointerEXT', 'glGetPointervEXT', 'glIndexPointerEXT',
'glNormalPointerEXT', 'glTexCoordPointerEXT', 'glVertexPointerEXT',
'PFNGLARRAYELEMENTEXTPROC', 'PFNGLCOLORPOINTEREXTPROC',
'PFNGLDRAWARRAYSEXTPROC', 'PFNGLEDGEFLAGPOINTEREXTPROC',
'PFNGLGETPOINTERVEXTPROC', 'PFNGLINDEXPOINTEREXTPROC',
'PFNGLNORMALPOINTEREXTPROC', 'PFNGLTEXCOORDPOINTEREXTPROC',
'PFNGLVERTEXPOINTEREXTPROC', 'GL_EXT_misc_attribute',
'GL_SGIS_generate_mipmap', 'GL_SGIX_clipmap', 'GL_SGIX_shadow',
'GL_SGIS_texture_edge_clamp', 'GL_SGIS_texture_border_clamp',
'GL_EXT_blend_minmax', 'glBlendEquationEXT', 'PFNGLBLENDEQUATIONEXTPROC',
'GL_EXT_blend_subtract', 'GL_EXT_blend_logic_op', 'GL_SGIX_interlace',
'GL_SGIX_pixel_tiles', 'GL_SGIX_texture_select', 'GL_SGIX_sprite',
'glSpriteParameterfSGIX', 'glSpriteParameterfvSGIX', 'glSpriteParameteriSGIX',
'glSpriteParameterivSGIX', 'PFNGLSPRITEPARAMETERFSGIXPROC',
'PFNGLSPRITEPARAMETERFVSGIXPROC', 'PFNGLSPRITEPARAMETERISGIXPROC',
'PFNGLSPRITEPARAMETERIVSGIXPROC', 'GL_SGIX_texture_multi_buffer',
'GL_EXT_point_parameters', 'glPointParameterfEXT', 'glPointParameterfvEXT',
'PFNGLPOINTPARAMETERFEXTPROC', 'PFNGLPOINTPARAMETERFVEXTPROC',
'GL_SGIS_point_parameters', 'glPointParameterfSGIS', 'glPointParameterfvSGIS',
'PFNGLPOINTPARAMETERFSGISPROC', 'PFNGLPOINTPARAMETERFVSGISPROC',
'GL_SGIX_instruments', 'glGetInstrumentsSGIX', 'glInstrumentsBufferSGIX',
'glPollInstrumentsSGIX', 'glReadInstrumentsSGIX', 'glStartInstrumentsSGIX',
'glStopInstrumentsSGIX', 'PFNGLGETINSTRUMENTSSGIXPROC',
'PFNGLINSTRUMENTSBUFFERSGIXPROC', 'PFNGLPOLLINSTRUMENTSSGIXPROC',
'PFNGLREADINSTRUMENTSSGIXPROC', 'PFNGLSTARTINSTRUMENTSSGIXPROC',
'PFNGLSTOPINSTRUMENTSSGIXPROC', 'GL_SGIX_texture_scale_bias',
'GL_SGIX_framezoom', 'glFrameZoomSGIX', 'PFNGLFRAMEZOOMSGIXPROC',
'GL_SGIX_tag_sample_buffer', 'glTagSampleBufferSGIX',
'PFNGLTAGSAMPLEBUFFERSGIXPROC', 'GL_SGIX_polynomial_ffd',
'glDeformationMap3dSGIX', 'glDeformationMap3fSGIX', 'glDeformSGIX',
'glLoadIdentityDeformationMapSGIX', 'PFNGLDEFORMATIONMAP3DSGIXPROC',
'PFNGLDEFORMATIONMAP3FSGIXPROC', 'PFNGLDEFORMSGIXPROC',
'PFNGLLOADIDENTITYDEFORMATIONMAPSGIXPROC', 'GL_SGIX_reference_plane',
'glReferencePlaneSGIX', 'PFNGLREFERENCEPLANESGIXPROC', 'GL_SGIX_flush_raster',
'glFlushRasterSGIX', 'PFNGLFLUSHRASTERSGIXPROC', 'GL_SGIX_depth_texture',
'GL_SGIS_fog_function', 'glFogFuncSGIS', 'glGetFogFuncSGIS',
'PFNGLFOGFUNCSGISPROC', 'PFNGLGETFOGFUNCSGISPROC', 'GL_SGIX_fog_offset',
'GL_HP_image_transform', 'glImageTransformParameteriHP',
'glImageTransformParameterfHP', 'glImageTransformParameterivHP',
'glImageTransformParameterfvHP', 'glGetImageTransformParameterivHP',
'glGetImageTransformParameterfvHP', 'PFNGLIMAGETRANSFORMPARAMETERIHPPROC',
'PFNGLIMAGETRANSFORMPARAMETERFHPPROC', 'PFNGLIMAGETRANSFORMPARAMETERIVHPPROC',
'PFNGLIMAGETRANSFORMPARAMETERFVHPPROC',
'PFNGLGETIMAGETRANSFORMPARAMETERIVHPPROC',
'PFNGLGETIMAGETRANSFORMPARAMETERFVHPPROC', 'GL_HP_convolution_border_modes',
'GL_SGIX_texture_add_env', 'GL_EXT_color_subtable', 'glColorSubTableEXT',
'glCopyColorSubTableEXT', 'PFNGLCOLORSUBTABLEEXTPROC',
'PFNGLCOPYCOLORSUBTABLEEXTPROC', 'GL_PGI_vertex_hints', 'GL_PGI_misc_hints',
'glHintPGI', 'PFNGLHINTPGIPROC', 'GL_EXT_paletted_texture', 'glColorTableEXT',
'glGetColorTableEXT', 'glGetColorTableParameterivEXT',
'glGetColorTableParameterfvEXT', 'PFNGLCOLORTABLEEXTPROC',
'PFNGLGETCOLORTABLEEXTPROC', 'PFNGLGETCOLORTABLEPARAMETERIVEXTPROC',
'PFNGLGETCOLORTABLEPARAMETERFVEXTPROC', 'GL_EXT_clip_volume_hint',
'GL_SGIX_list_priority', 'glGetListParameterfvSGIX',
'glGetListParameterivSGIX', 'glListParameterfSGIX', 'glListParameterfvSGIX',
'glListParameteriSGIX', 'glListParameterivSGIX',
'PFNGLGETLISTPARAMETERFVSGIXPROC', 'PFNGLGETLISTPARAMETERIVSGIXPROC',
'PFNGLLISTPARAMETERFSGIXPROC', 'PFNGLLISTPARAMETERFVSGIXPROC',
'PFNGLLISTPARAMETERISGIXPROC', 'PFNGLLISTPARAMETERIVSGIXPROC',
'GL_SGIX_ir_instrument1', 'GL_SGIX_calligraphic_fragment',
'GL_SGIX_texture_lod_bias', 'GL_SGIX_shadow_ambient', 'GL_EXT_index_texture',
'GL_EXT_index_material', 'glIndexMaterialEXT', 'PFNGLINDEXMATERIALEXTPROC',
'GL_EXT_index_func', 'glIndexFuncEXT', 'PFNGLINDEXFUNCEXTPROC',
'GL_EXT_index_array_formats', 'GL_EXT_compiled_vertex_array',
'glLockArraysEXT', 'glUnlockArraysEXT', 'PFNGLLOCKARRAYSEXTPROC',
'PFNGLUNLOCKARRAYSEXTPROC', 'GL_EXT_cull_vertex', 'glCullParameterdvEXT',
'glCullParameterfvEXT', 'PFNGLCULLPARAMETERDVEXTPROC',
'PFNGLCULLPARAMETERFVEXTPROC', 'GL_SGIX_ycrcb', 'GL_SGIX_fragment_lighting',
'glFragmentColorMaterialSGIX', 'glFragmentLightfSGIX',
'glFragmentLightfvSGIX', 'glFragmentLightiSGIX', 'glFragmentLightivSGIX',
'glFragmentLightModelfSGIX', 'glFragmentLightModelfvSGIX',
'glFragmentLightModeliSGIX', 'glFragmentLightModelivSGIX',
'glFragmentMaterialfSGIX', 'glFragmentMaterialfvSGIX',
'glFragmentMaterialiSGIX', 'glFragmentMaterialivSGIX',
'glGetFragmentLightfvSGIX', 'glGetFragmentLightivSGIX',
'glGetFragmentMaterialfvSGIX', 'glGetFragmentMaterialivSGIX',
'glLightEnviSGIX', 'PFNGLFRAGMENTCOLORMATERIALSGIXPROC',
'PFNGLFRAGMENTLIGHTFSGIXPROC', 'PFNGLFRAGMENTLIGHTFVSGIXPROC',
'PFNGLFRAGMENTLIGHTISGIXPROC', 'PFNGLFRAGMENTLIGHTIVSGIXPROC',
'PFNGLFRAGMENTLIGHTMODELFSGIXPROC', 'PFNGLFRAGMENTLIGHTMODELFVSGIXPROC',
'PFNGLFRAGMENTLIGHTMODELISGIXPROC', 'PFNGLFRAGMENTLIGHTMODELIVSGIXPROC',
'PFNGLFRAGMENTMATERIALFSGIXPROC', 'PFNGLFRAGMENTMATERIALFVSGIXPROC',
'PFNGLFRAGMENTMATERIALISGIXPROC', 'PFNGLFRAGMENTMATERIALIVSGIXPROC',
'PFNGLGETFRAGMENTLIGHTFVSGIXPROC', 'PFNGLGETFRAGMENTLIGHTIVSGIXPROC',
'PFNGLGETFRAGMENTMATERIALFVSGIXPROC', 'PFNGLGETFRAGMENTMATERIALIVSGIXPROC',
'PFNGLLIGHTENVISGIXPROC', 'GL_IBM_rasterpos_clip', 'GL_HP_texture_lighting',
'GL_EXT_draw_range_elements', 'glDrawRangeElementsEXT',
'PFNGLDRAWRANGEELEMENTSEXTPROC', 'GL_WIN_phong_shading',
'GL_WIN_specular_fog', 'GL_EXT_light_texture', 'glApplyTextureEXT',
'glTextureLightEXT', 'glTextureMaterialEXT', 'PFNGLAPPLYTEXTUREEXTPROC',
'PFNGLTEXTURELIGHTEXTPROC', 'PFNGLTEXTUREMATERIALEXTPROC',
'GL_SGIX_blend_alpha_minmax', 'GL_EXT_bgra', 'GL_SGIX_async',
'glAsyncMarkerSGIX', 'glFinishAsyncSGIX', 'glPollAsyncSGIX',
'glGenAsyncMarkersSGIX', 'glDeleteAsyncMarkersSGIX', 'glIsAsyncMarkerSGIX',
'PFNGLASYNCMARKERSGIXPROC', 'PFNGLFINISHASYNCSGIXPROC',
'PFNGLPOLLASYNCSGIXPROC', 'PFNGLGENASYNCMARKERSSGIXPROC',
'PFNGLDELETEASYNCMARKERSSGIXPROC', 'PFNGLISASYNCMARKERSGIXPROC',
'GL_SGIX_async_pixel', 'GL_SGIX_async_histogram', 'GL_INTEL_parallel_arrays',
'glVertexPointervINTEL', 'glNormalPointervINTEL', 'glColorPointervINTEL',
'glTexCoordPointervINTEL', 'PFNGLVERTEXPOINTERVINTELPROC',
'PFNGLNORMALPOINTERVINTELPROC', 'PFNGLCOLORPOINTERVINTELPROC',
'PFNGLTEXCOORDPOINTERVINTELPROC', 'GL_HP_occlusion_test',
'GL_EXT_pixel_transform', 'glPixelTransformParameteriEXT',
'glPixelTransformParameterfEXT', 'glPixelTransformParameterivEXT',
'glPixelTransformParameterfvEXT', 'PFNGLPIXELTRANSFORMPARAMETERIEXTPROC',
'PFNGLPIXELTRANSFORMPARAMETERFEXTPROC',
'PFNGLPIXELTRANSFORMPARAMETERIVEXTPROC',
'PFNGLPIXELTRANSFORMPARAMETERFVEXTPROC', 'GL_EXT_pixel_transform_color_table',
'GL_EXT_shared_texture_palette', 'GL_EXT_separate_specular_color',
'GL_EXT_secondary_color', 'glSecondaryColor3bEXT', 'glSecondaryColor3bvEXT',
'glSecondaryColor3dEXT', 'glSecondaryColor3dvEXT', 'glSecondaryColor3fEXT',
'glSecondaryColor3fvEXT', 'glSecondaryColor3iEXT', 'glSecondaryColor3ivEXT',
'glSecondaryColor3sEXT', 'glSecondaryColor3svEXT', 'glSecondaryColor3ubEXT',
'glSecondaryColor3ubvEXT', 'glSecondaryColor3uiEXT',
'glSecondaryColor3uivEXT', 'glSecondaryColor3usEXT',
'glSecondaryColor3usvEXT', 'glSecondaryColorPointerEXT',
'PFNGLSECONDARYCOLOR3BEXTPROC', 'PFNGLSECONDARYCOLOR3BVEXTPROC',
'PFNGLSECONDARYCOLOR3DEXTPROC', 'PFNGLSECONDARYCOLOR3DVEXTPROC',
'PFNGLSECONDARYCOLOR3FEXTPROC', 'PFNGLSECONDARYCOLOR3FVEXTPROC',
'PFNGLSECONDARYCOLOR3IEXTPROC', 'PFNGLSECONDARYCOLOR3IVEXTPROC',
'PFNGLSECONDARYCOLOR3SEXTPROC', 'PFNGLSECONDARYCOLOR3SVEXTPROC',
'PFNGLSECONDARYCOLOR3UBEXTPROC', 'PFNGLSECONDARYCOLOR3UBVEXTPROC',
'PFNGLSECONDARYCOLOR3UIEXTPROC', 'PFNGLSECONDARYCOLOR3UIVEXTPROC',
'PFNGLSECONDARYCOLOR3USEXTPROC', 'PFNGLSECONDARYCOLOR3USVEXTPROC',
'PFNGLSECONDARYCOLORPOINTEREXTPROC', 'GL_EXT_texture_perturb_normal',
'glTextureNormalEXT', 'PFNGLTEXTURENORMALEXTPROC', 'GL_EXT_multi_draw_arrays',
'glMultiDrawArraysEXT', 'glMultiDrawElementsEXT',
'PFNGLMULTIDRAWARRAYSEXTPROC', 'PFNGLMULTIDRAWELEMENTSEXTPROC',
'GL_EXT_fog_coord', 'glFogCoordfEXT', 'glFogCoordfvEXT', 'glFogCoorddEXT',
'glFogCoorddvEXT', 'glFogCoordPointerEXT', 'PFNGLFOGCOORDFEXTPROC',
'PFNGLFOGCOORDFVEXTPROC', 'PFNGLFOGCOORDDEXTPROC', 'PFNGLFOGCOORDDVEXTPROC',
'PFNGLFOGCOORDPOINTEREXTPROC', 'GL_REND_screen_coordinates',
'GL_EXT_coordinate_frame', 'glTangent3bEXT', 'glTangent3bvEXT',
'glTangent3dEXT', 'glTangent3dvEXT', 'glTangent3fEXT', 'glTangent3fvEXT',
'glTangent3iEXT', 'glTangent3ivEXT', 'glTangent3sEXT', 'glTangent3svEXT',
'glBinormal3bEXT', 'glBinormal3bvEXT', 'glBinormal3dEXT', 'glBinormal3dvEXT',
'glBinormal3fEXT', 'glBinormal3fvEXT', 'glBinormal3iEXT', 'glBinormal3ivEXT',
'glBinormal3sEXT', 'glBinormal3svEXT', 'glTangentPointerEXT',
'glBinormalPointerEXT', 'PFNGLTANGENT3BEXTPROC', 'PFNGLTANGENT3BVEXTPROC',
'PFNGLTANGENT3DEXTPROC', 'PFNGLTANGENT3DVEXTPROC', 'PFNGLTANGENT3FEXTPROC',
'PFNGLTANGENT3FVEXTPROC', 'PFNGLTANGENT3IEXTPROC', 'PFNGLTANGENT3IVEXTPROC',
'PFNGLTANGENT3SEXTPROC', 'PFNGLTANGENT3SVEXTPROC', 'PFNGLBINORMAL3BEXTPROC',
'PFNGLBINORMAL3BVEXTPROC', 'PFNGLBINORMAL3DEXTPROC',
'PFNGLBINORMAL3DVEXTPROC', 'PFNGLBINORMAL3FEXTPROC',
'PFNGLBINORMAL3FVEXTPROC', 'PFNGLBINORMAL3IEXTPROC',
'PFNGLBINORMAL3IVEXTPROC', 'PFNGLBINORMAL3SEXTPROC',
'PFNGLBINORMAL3SVEXTPROC', 'PFNGLTANGENTPOINTEREXTPROC',
'PFNGLBINORMALPOINTEREXTPROC', 'GL_EXT_texture_env_combine',
'GL_APPLE_specular_vector', 'GL_APPLE_transform_hint', 'GL_SGIX_fog_scale',
'GL_SUNX_constant_data', 'glFinishTextureSUNX', 'PFNGLFINISHTEXTURESUNXPROC',
'GL_SUN_global_alpha', 'glGlobalAlphaFactorbSUN', 'glGlobalAlphaFactorsSUN',
'glGlobalAlphaFactoriSUN', 'glGlobalAlphaFactorfSUN',
'glGlobalAlphaFactordSUN', 'glGlobalAlphaFactorubSUN',
'glGlobalAlphaFactorusSUN', 'glGlobalAlphaFactoruiSUN',
'PFNGLGLOBALALPHAFACTORBSUNPROC', 'PFNGLGLOBALALPHAFACTORSSUNPROC',
'PFNGLGLOBALALPHAFACTORISUNPROC', 'PFNGLGLOBALALPHAFACTORFSUNPROC',
'PFNGLGLOBALALPHAFACTORDSUNPROC', 'PFNGLGLOBALALPHAFACTORUBSUNPROC',
'PFNGLGLOBALALPHAFACTORUSSUNPROC', 'PFNGLGLOBALALPHAFACTORUISUNPROC',
'GL_SUN_triangle_list', 'glReplacementCodeuiSUN', 'glReplacementCodeusSUN',
'glReplacementCodeubSUN', 'glReplacementCodeuivSUN',
'glReplacementCodeusvSUN', 'glReplacementCodeubvSUN',
'glReplacementCodePointerSUN', 'PFNGLREPLACEMENTCODEUISUNPROC',
'PFNGLREPLACEMENTCODEUSSUNPROC', 'PFNGLREPLACEMENTCODEUBSUNPROC',
'PFNGLREPLACEMENTCODEUIVSUNPROC', 'PFNGLREPLACEMENTCODEUSVSUNPROC',
'PFNGLREPLACEMENTCODEUBVSUNPROC', 'PFNGLREPLACEMENTCODEPOINTERSUNPROC',
'GL_SUN_vertex', 'glColor4ubVertex2fSUN', 'glColor4ubVertex2fvSUN',
'glColor4ubVertex3fSUN', 'glColor4ubVertex3fvSUN', 'glColor3fVertex3fSUN',
'glColor3fVertex3fvSUN', 'glNormal3fVertex3fSUN', 'glNormal3fVertex3fvSUN',
'glColor4fNormal3fVertex3fSUN', 'glColor4fNormal3fVertex3fvSUN',
'glTexCoord2fVertex3fSUN', 'glTexCoord2fVertex3fvSUN',
'glTexCoord4fVertex4fSUN', 'glTexCoord4fVertex4fvSUN',
'glTexCoord2fColor4ubVertex3fSUN', 'glTexCoord2fColor4ubVertex3fvSUN',
'glTexCoord2fColor3fVertex3fSUN', 'glTexCoord2fColor3fVertex3fvSUN',
'glTexCoord2fNormal3fVertex3fSUN', 'glTexCoord2fNormal3fVertex3fvSUN',
'glTexCoord2fColor4fNormal3fVertex3fSUN',
'glTexCoord2fColor4fNormal3fVertex3fvSUN',
'glTexCoord4fColor4fNormal3fVertex4fSUN',
'glTexCoord4fColor4fNormal3fVertex4fvSUN', 'glReplacementCodeuiVertex3fSUN',
'glReplacementCodeuiVertex3fvSUN', 'glReplacementCodeuiColor4ubVertex3fSUN',
'glReplacementCodeuiColor4ubVertex3fvSUN',
'glReplacementCodeuiColor3fVertex3fSUN',
'glReplacementCodeuiColor3fVertex3fvSUN',
'glReplacementCodeuiNormal3fVertex3fSUN',
'glReplacementCodeuiNormal3fVertex3fvSUN',
'glReplacementCodeuiColor4fNormal3fVertex3fSUN',
'glReplacementCodeuiColor4fNormal3fVertex3fvSUN',
'glReplacementCodeuiTexCoord2fVertex3fSUN',
'glReplacementCodeuiTexCoord2fVertex3fvSUN',
'glReplacementCodeuiTexCoord2fNormal3fVertex3fSUN',
'glReplacementCodeuiTexCoord2fNormal3fVertex3fvSUN',
'glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fSUN',
'glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fvSUN',
'PFNGLCOLOR4UBVERTEX2FSUNPROC', 'PFNGLCOLOR4UBVERTEX2FVSUNPROC',
'PFNGLCOLOR4UBVERTEX3FSUNPROC', 'PFNGLCOLOR4UBVERTEX3FVSUNPROC',
'PFNGLCOLOR3FVERTEX3FSUNPROC', 'PFNGLCOLOR3FVERTEX3FVSUNPROC',
'PFNGLNORMAL3FVERTEX3FSUNPROC', 'PFNGLNORMAL3FVERTEX3FVSUNPROC',
'PFNGLCOLOR4FNORMAL3FVERTEX3FSUNPROC', 'PFNGLCOLOR4FNORMAL3FVERTEX3FVSUNPROC',
'PFNGLTEXCOORD2FVERTEX3FSUNPROC', 'PFNGLTEXCOORD2FVERTEX3FVSUNPROC',
'PFNGLTEXCOORD4FVERTEX4FSUNPROC', 'PFNGLTEXCOORD4FVERTEX4FVSUNPROC',
'PFNGLTEXCOORD2FCOLOR4UBVERTEX3FSUNPROC',
'PFNGLTEXCOORD2FCOLOR4UBVERTEX3FVSUNPROC',
'PFNGLTEXCOORD2FCOLOR3FVERTEX3FSUNPROC',
'PFNGLTEXCOORD2FCOLOR3FVERTEX3FVSUNPROC',
'PFNGLTEXCOORD2FNORMAL3FVERTEX3FSUNPROC',
'PFNGLTEXCOORD2FNORMAL3FVERTEX3FVSUNPROC',
'PFNGLTEXCOORD2FCOLOR4FNORMAL3FVERTEX3FSUNPROC',
'PFNGLTEXCOORD2FCOLOR4FNORMAL3FVERTEX3FVSUNPROC',
'PFNGLTEXCOORD4FCOLOR4FNORMAL3FVERTEX4FSUNPROC',
'PFNGLTEXCOORD4FCOLOR4FNORMAL3FVERTEX4FVSUNPROC',
'PFNGLREPLACEMENTCODEUIVERTEX3FSUNPROC',
'PFNGLREPLACEMENTCODEUIVERTEX3FVSUNPROC',
'PFNGLREPLACEMENTCODEUICOLOR4UBVERTEX3FSUNPROC',
'PFNGLREPLACEMENTCODEUICOLOR4UBVERTEX3FVSUNPROC',
'PFNGLREPLACEMENTCODEUICOLOR3FVERTEX3FSUNPROC',
'PFNGLREPLACEMENTCODEUICOLOR3FVERTEX3FVSUNPROC',
'PFNGLREPLACEMENTCODEUINORMAL3FVERTEX3FSUNPROC',
'PFNGLREPLACEMENTCODEUINORMAL3FVERTEX3FVSUNPROC',
'PFNGLREPLACEMENTCODEUICOLOR4FNORMAL3FVERTEX3FSUNPROC',
'PFNGLREPLACEMENTCODEUICOLOR4FNORMAL3FVERTEX3FVSUNPROC',
'PFNGLREPLACEMENTCODEUITEXCOORD2FVERTEX3FSUNPROC',
'PFNGLREPLACEMENTCODEUITEXCOORD2FVERTEX3FVSUNPROC',
'PFNGLREPLACEMENTCODEUITEXCOORD2FNORMAL3FVERTEX3FSUNPROC',
'PFNGLREPLACEMENTCODEUITEXCOORD2FNORMAL3FVERTEX3FVSUNPROC',
'PFNGLREPLACEMENTCODEUITEXCOORD2FCOLOR4FNORMAL3FVERTEX3FSUNPROC',
'PFNGLREPLACEMENTCODEUITEXCOORD2FCOLOR4FNORMAL3FVERTEX3FVSUNPROC',
'GL_EXT_blend_func_separate', 'glBlendFuncSeparateEXT',
'PFNGLBLENDFUNCSEPARATEEXTPROC', 'GL_INGR_blend_func_separate',
'glBlendFuncSeparateINGR', 'PFNGLBLENDFUNCSEPARATEINGRPROC',
'GL_INGR_color_clamp', 'GL_INGR_interlace_read', 'GL_EXT_stencil_wrap',
'GL_EXT_422_pixels', 'GL_NV_texgen_reflection',
'GL_SUN_convolution_border_modes', 'GL_EXT_texture_env_add',
'GL_EXT_texture_lod_bias', 'GL_EXT_texture_filter_anisotropic',
'GL_EXT_vertex_weighting', 'glVertexWeightfEXT', 'glVertexWeightfvEXT',
'glVertexWeightPointerEXT', 'PFNGLVERTEXWEIGHTFEXTPROC',
'PFNGLVERTEXWEIGHTFVEXTPROC', 'PFNGLVERTEXWEIGHTPOINTEREXTPROC',
'GL_NV_light_max_exponent', 'GL_NV_vertex_array_range',
'glFlushVertexArrayRangeNV', 'glVertexArrayRangeNV',
'PFNGLFLUSHVERTEXARRAYRANGENVPROC', 'PFNGLVERTEXARRAYRANGENVPROC',
'GL_NV_register_combiners', 'glCombinerParameterfvNV',
'glCombinerParameterfNV', 'glCombinerParameterivNV', 'glCombinerParameteriNV',
'glCombinerInputNV', 'glCombinerOutputNV', 'glFinalCombinerInputNV',
'glGetCombinerInputParameterfvNV', 'glGetCombinerInputParameterivNV',
'glGetCombinerOutputParameterfvNV', 'glGetCombinerOutputParameterivNV',
'glGetFinalCombinerInputParameterfvNV',
'glGetFinalCombinerInputParameterivNV', 'PFNGLCOMBINERPARAMETERFVNVPROC',
'PFNGLCOMBINERPARAMETERFNVPROC', 'PFNGLCOMBINERPARAMETERIVNVPROC',
'PFNGLCOMBINERPARAMETERINVPROC', 'PFNGLCOMBINERINPUTNVPROC',
'PFNGLCOMBINEROUTPUTNVPROC', 'PFNGLFINALCOMBINERINPUTNVPROC',
'PFNGLGETCOMBINERINPUTPARAMETERFVNVPROC',
'PFNGLGETCOMBINERINPUTPARAMETERIVNVPROC',
'PFNGLGETCOMBINEROUTPUTPARAMETERFVNVPROC',
'PFNGLGETCOMBINEROUTPUTPARAMETERIVNVPROC',
'PFNGLGETFINALCOMBINERINPUTPARAMETERFVNVPROC',
'PFNGLGETFINALCOMBINERINPUTPARAMETERIVNVPROC', 'GL_NV_fog_distance',
'GL_NV_texgen_emboss', 'GL_NV_blend_square', 'GL_NV_texture_env_combine4',
'GL_MESA_resize_buffers', 'glResizeBuffersMESA', 'PFNGLRESIZEBUFFERSMESAPROC',
'GL_MESA_window_pos', 'glWindowPos2dMESA', 'glWindowPos2dvMESA',
'glWindowPos2fMESA', 'glWindowPos2fvMESA', 'glWindowPos2iMESA',
'glWindowPos2ivMESA', 'glWindowPos2sMESA', 'glWindowPos2svMESA',
'glWindowPos3dMESA', 'glWindowPos3dvMESA', 'glWindowPos3fMESA',
'glWindowPos3fvMESA', 'glWindowPos3iMESA', 'glWindowPos3ivMESA',
'glWindowPos3sMESA', 'glWindowPos3svMESA', 'glWindowPos4dMESA',
'glWindowPos4dvMESA', 'glWindowPos4fMESA', 'glWindowPos4fvMESA',
'glWindowPos4iMESA', 'glWindowPos4ivMESA', 'glWindowPos4sMESA',
'glWindowPos4svMESA', 'PFNGLWINDOWPOS2DMESAPROC', 'PFNGLWINDOWPOS2DVMESAPROC',
'PFNGLWINDOWPOS2FMESAPROC', 'PFNGLWINDOWPOS2FVMESAPROC',
'PFNGLWINDOWPOS2IMESAPROC', 'PFNGLWINDOWPOS2IVMESAPROC',
'PFNGLWINDOWPOS2SMESAPROC', 'PFNGLWINDOWPOS2SVMESAPROC',
'PFNGLWINDOWPOS3DMESAPROC', 'PFNGLWINDOWPOS3DVMESAPROC',
'PFNGLWINDOWPOS3FMESAPROC', 'PFNGLWINDOWPOS3FVMESAPROC',
'PFNGLWINDOWPOS3IMESAPROC', 'PFNGLWINDOWPOS3IVMESAPROC',
'PFNGLWINDOWPOS3SMESAPROC', 'PFNGLWINDOWPOS3SVMESAPROC',
'PFNGLWINDOWPOS4DMESAPROC', 'PFNGLWINDOWPOS4DVMESAPROC',
'PFNGLWINDOWPOS4FMESAPROC', 'PFNGLWINDOWPOS4FVMESAPROC',
'PFNGLWINDOWPOS4IMESAPROC', 'PFNGLWINDOWPOS4IVMESAPROC',
'PFNGLWINDOWPOS4SMESAPROC', 'PFNGLWINDOWPOS4SVMESAPROC', 'GL_IBM_cull_vertex',
'GL_IBM_multimode_draw_arrays', 'glMultiModeDrawArraysIBM',
'glMultiModeDrawElementsIBM', 'PFNGLMULTIMODEDRAWARRAYSIBMPROC',
'PFNGLMULTIMODEDRAWELEMENTSIBMPROC', 'GL_IBM_vertex_array_lists',
'glColorPointerListIBM', 'glSecondaryColorPointerListIBM',
'glEdgeFlagPointerListIBM', 'glFogCoordPointerListIBM',
'glIndexPointerListIBM', 'glNormalPointerListIBM', 'glTexCoordPointerListIBM',
'glVertexPointerListIBM', 'PFNGLCOLORPOINTERLISTIBMPROC',
'PFNGLSECONDARYCOLORPOINTERLISTIBMPROC', 'PFNGLEDGEFLAGPOINTERLISTIBMPROC',
'PFNGLFOGCOORDPOINTERLISTIBMPROC', 'PFNGLINDEXPOINTERLISTIBMPROC',
'PFNGLNORMALPOINTERLISTIBMPROC', 'PFNGLTEXCOORDPOINTERLISTIBMPROC',
'PFNGLVERTEXPOINTERLISTIBMPROC', 'GL_SGIX_subsample', 'GL_SGIX_ycrcba',
'GL_SGIX_ycrcb_subsample', 'GL_SGIX_depth_pass_instrument',
'GL_3DFX_texture_compression_FXT1', 'GL_3DFX_multisample', 'GL_3DFX_tbuffer',
'glTbufferMask3DFX', 'PFNGLTBUFFERMASK3DFXPROC', 'GL_EXT_multisample',
'glSampleMaskEXT', 'glSamplePatternEXT', 'PFNGLSAMPLEMASKEXTPROC',
'PFNGLSAMPLEPATTERNEXTPROC', 'GL_SGIX_vertex_preclip',
'GL_SGIX_convolution_accuracy', 'GL_SGIX_resample',
'GL_SGIS_point_line_texgen', 'GL_SGIS_texture_color_mask',
'glTextureColorMaskSGIS', 'PFNGLTEXTURECOLORMASKSGISPROC',
'GL_SGIX_igloo_interface', 'glIglooInterfaceSGIX',
'PFNGLIGLOOINTERFACESGIXPROC', 'GL_EXT_texture_env_dot3',
'GL_ATI_texture_mirror_once', 'GL_NV_fence', 'glDeleteFencesNV',
'glGenFencesNV', 'glIsFenceNV', 'glTestFenceNV', 'glGetFenceivNV',
'glFinishFenceNV', 'glSetFenceNV', 'PFNGLDELETEFENCESNVPROC',
'PFNGLGENFENCESNVPROC', 'PFNGLISFENCENVPROC', 'PFNGLTESTFENCENVPROC',
'PFNGLGETFENCEIVNVPROC', 'PFNGLFINISHFENCENVPROC', 'PFNGLSETFENCENVPROC',
'GL_NV_evaluators', 'glMapControlPointsNV', 'glMapParameterivNV',
'glMapParameterfvNV', 'glGetMapControlPointsNV', 'glGetMapParameterivNV',
'glGetMapParameterfvNV', 'glGetMapAttribParameterivNV',
'glGetMapAttribParameterfvNV', 'glEvalMapsNV', 'PFNGLMAPCONTROLPOINTSNVPROC',
'PFNGLMAPPARAMETERIVNVPROC', 'PFNGLMAPPARAMETERFVNVPROC',
'PFNGLGETMAPCONTROLPOINTSNVPROC', 'PFNGLGETMAPPARAMETERIVNVPROC',
'PFNGLGETMAPPARAMETERFVNVPROC', 'PFNGLGETMAPATTRIBPARAMETERIVNVPROC',
'PFNGLGETMAPATTRIBPARAMETERFVNVPROC', 'PFNGLEVALMAPSNVPROC',
'GL_NV_packed_depth_stencil', 'GL_NV_register_combiners2',
'glCombinerStageParameterfvNV', 'glGetCombinerStageParameterfvNV',
'PFNGLCOMBINERSTAGEPARAMETERFVNVPROC',
'PFNGLGETCOMBINERSTAGEPARAMETERFVNVPROC', 'GL_NV_texture_compression_vtc',
'GL_NV_texture_rectangle', 'GL_NV_texture_shader', 'GL_NV_texture_shader2',
'GL_NV_vertex_array_range2', 'GL_NV_vertex_program',
'glAreProgramsResidentNV', 'glBindProgramNV', 'glDeleteProgramsNV',
'glExecuteProgramNV', 'glGenProgramsNV', 'glGetProgramParameterdvNV',
'glGetProgramParameterfvNV', 'glGetProgramivNV', 'glGetProgramStringNV',
'glGetTrackMatrixivNV', 'glGetVertexAttribdvNV', 'glGetVertexAttribfvNV',
'glGetVertexAttribivNV', 'glGetVertexAttribPointervNV', 'glIsProgramNV',
'glLoadProgramNV', 'glProgramParameter4dNV', 'glProgramParameter4dvNV',
'glProgramParameter4fNV', 'glProgramParameter4fvNV',
'glProgramParameters4dvNV', 'glProgramParameters4fvNV',
'glRequestResidentProgramsNV', 'glTrackMatrixNV', 'glVertexAttribPointerNV',
'glVertexAttrib1dNV', 'glVertexAttrib1dvNV', 'glVertexAttrib1fNV',
'glVertexAttrib1fvNV', 'glVertexAttrib1sNV', 'glVertexAttrib1svNV',
'glVertexAttrib2dNV', 'glVertexAttrib2dvNV', 'glVertexAttrib2fNV',
'glVertexAttrib2fvNV', 'glVertexAttrib2sNV', 'glVertexAttrib2svNV',
'glVertexAttrib3dNV', 'glVertexAttrib3dvNV', 'glVertexAttrib3fNV',
'glVertexAttrib3fvNV', 'glVertexAttrib3sNV', 'glVertexAttrib3svNV',
'glVertexAttrib4dNV', 'glVertexAttrib4dvNV', 'glVertexAttrib4fNV',
'glVertexAttrib4fvNV', 'glVertexAttrib4sNV', 'glVertexAttrib4svNV',
'glVertexAttrib4ubNV', 'glVertexAttrib4ubvNV', 'glVertexAttribs1dvNV',
'glVertexAttribs1fvNV', 'glVertexAttribs1svNV', 'glVertexAttribs2dvNV',
'glVertexAttribs2fvNV', 'glVertexAttribs2svNV', 'glVertexAttribs3dvNV',
'glVertexAttribs3fvNV', 'glVertexAttribs3svNV', 'glVertexAttribs4dvNV',
'glVertexAttribs4fvNV', 'glVertexAttribs4svNV', 'glVertexAttribs4ubvNV',
'PFNGLAREPROGRAMSRESIDENTNVPROC', 'PFNGLBINDPROGRAMNVPROC',
'PFNGLDELETEPROGRAMSNVPROC', 'PFNGLEXECUTEPROGRAMNVPROC',
'PFNGLGENPROGRAMSNVPROC', 'PFNGLGETPROGRAMPARAMETERDVNVPROC',
'PFNGLGETPROGRAMPARAMETERFVNVPROC', 'PFNGLGETPROGRAMIVNVPROC',
'PFNGLGETPROGRAMSTRINGNVPROC', 'PFNGLGETTRACKMATRIXIVNVPROC',
'PFNGLGETVERTEXATTRIBDVNVPROC', 'PFNGLGETVERTEXATTRIBFVNVPROC',
'PFNGLGETVERTEXATTRIBIVNVPROC', 'PFNGLGETVERTEXATTRIBPOINTERVNVPROC',
'PFNGLISPROGRAMNVPROC', 'PFNGLLOADPROGRAMNVPROC',
'PFNGLPROGRAMPARAMETER4DNVPROC', 'PFNGLPROGRAMPARAMETER4DVNVPROC',
'PFNGLPROGRAMPARAMETER4FNVPROC', 'PFNGLPROGRAMPARAMETER4FVNVPROC',
'PFNGLPROGRAMPARAMETERS4DVNVPROC', 'PFNGLPROGRAMPARAMETERS4FVNVPROC',
'PFNGLREQUESTRESIDENTPROGRAMSNVPROC', 'PFNGLTRACKMATRIXNVPROC',
'PFNGLVERTEXATTRIBPOINTERNVPROC', 'PFNGLVERTEXATTRIB1DNVPROC',
'PFNGLVERTEXATTRIB1DVNVPROC', 'PFNGLVERTEXATTRIB1FNVPROC',
'PFNGLVERTEXATTRIB1FVNVPROC', 'PFNGLVERTEXATTRIB1SNVPROC',
'PFNGLVERTEXATTRIB1SVNVPROC', 'PFNGLVERTEXATTRIB2DNVPROC',
'PFNGLVERTEXATTRIB2DVNVPROC', 'PFNGLVERTEXATTRIB2FNVPROC',
'PFNGLVERTEXATTRIB2FVNVPROC', 'PFNGLVERTEXATTRIB2SNVPROC',
'PFNGLVERTEXATTRIB2SVNVPROC', 'PFNGLVERTEXATTRIB3DNVPROC',
'PFNGLVERTEXATTRIB3DVNVPROC', 'PFNGLVERTEXATTRIB3FNVPROC',
'PFNGLVERTEXATTRIB3FVNVPROC', 'PFNGLVERTEXATTRIB3SNVPROC',
'PFNGLVERTEXATTRIB3SVNVPROC', 'PFNGLVERTEXATTRIB4DNVPROC',
'PFNGLVERTEXATTRIB4DVNVPROC', 'PFNGLVERTEXATTRIB4FNVPROC',
'PFNGLVERTEXATTRIB4FVNVPROC', 'PFNGLVERTEXATTRIB4SNVPROC',
'PFNGLVERTEXATTRIB4SVNVPROC', 'PFNGLVERTEXATTRIB4UBNVPROC',
'PFNGLVERTEXATTRIB4UBVNVPROC', 'PFNGLVERTEXATTRIBS1DVNVPROC',
'PFNGLVERTEXATTRIBS1FVNVPROC', 'PFNGLVERTEXATTRIBS1SVNVPROC',
'PFNGLVERTEXATTRIBS2DVNVPROC', 'PFNGLVERTEXATTRIBS2FVNVPROC',
'PFNGLVERTEXATTRIBS2SVNVPROC', 'PFNGLVERTEXATTRIBS3DVNVPROC',
'PFNGLVERTEXATTRIBS3FVNVPROC', 'PFNGLVERTEXATTRIBS3SVNVPROC',
'PFNGLVERTEXATTRIBS4DVNVPROC', 'PFNGLVERTEXATTRIBS4FVNVPROC',
'PFNGLVERTEXATTRIBS4SVNVPROC', 'PFNGLVERTEXATTRIBS4UBVNVPROC',
'GL_SGIX_texture_coordinate_clamp', 'GL_SGIX_scalebias_hint',
'GL_OML_interlace', 'GL_OML_subsample', 'GL_OML_resample',
'GL_NV_copy_depth_to_color', 'GL_ATI_envmap_bumpmap',
'glTexBumpParameterivATI', 'glTexBumpParameterfvATI',
'glGetTexBumpParameterivATI', 'glGetTexBumpParameterfvATI',
'PFNGLTEXBUMPPARAMETERIVATIPROC', 'PFNGLTEXBUMPPARAMETERFVATIPROC',
'PFNGLGETTEXBUMPPARAMETERIVATIPROC', 'PFNGLGETTEXBUMPPARAMETERFVATIPROC',
'GL_ATI_fragment_shader', 'glGenFragmentShadersATI',
'glBindFragmentShaderATI', 'glDeleteFragmentShaderATI',
'glBeginFragmentShaderATI', 'glEndFragmentShaderATI', 'glPassTexCoordATI',
'glSampleMapATI', 'glColorFragmentOp1ATI', 'glColorFragmentOp2ATI',
'glColorFragmentOp3ATI', 'glAlphaFragmentOp1ATI', 'glAlphaFragmentOp2ATI',
'glAlphaFragmentOp3ATI', 'glSetFragmentShaderConstantATI',
'PFNGLGENFRAGMENTSHADERSATIPROC', 'PFNGLBINDFRAGMENTSHADERATIPROC',
'PFNGLDELETEFRAGMENTSHADERATIPROC', 'PFNGLBEGINFRAGMENTSHADERATIPROC',
'PFNGLENDFRAGMENTSHADERATIPROC', 'PFNGLPASSTEXCOORDATIPROC',
'PFNGLSAMPLEMAPATIPROC', 'PFNGLCOLORFRAGMENTOP1ATIPROC',
'PFNGLCOLORFRAGMENTOP2ATIPROC', 'PFNGLCOLORFRAGMENTOP3ATIPROC',
'PFNGLALPHAFRAGMENTOP1ATIPROC', 'PFNGLALPHAFRAGMENTOP2ATIPROC',
'PFNGLALPHAFRAGMENTOP3ATIPROC', 'PFNGLSETFRAGMENTSHADERCONSTANTATIPROC',
'GL_ATI_pn_triangles', 'glPNTrianglesiATI', 'glPNTrianglesfATI',
'PFNGLPNTRIANGLESIATIPROC', 'PFNGLPNTRIANGLESFATIPROC',
'GL_ATI_vertex_array_object', 'glNewObjectBufferATI', 'glIsObjectBufferATI',
'glUpdateObjectBufferATI', 'glGetObjectBufferfvATI', 'glGetObjectBufferivATI',
'glFreeObjectBufferATI', 'glArrayObjectATI', 'glGetArrayObjectfvATI',
'glGetArrayObjectivATI', 'glVariantArrayObjectATI',
'glGetVariantArrayObjectfvATI', 'glGetVariantArrayObjectivATI',
'PFNGLNEWOBJECTBUFFERATIPROC', 'PFNGLISOBJECTBUFFERATIPROC',
'PFNGLUPDATEOBJECTBUFFERATIPROC', 'PFNGLGETOBJECTBUFFERFVATIPROC',
'PFNGLGETOBJECTBUFFERIVATIPROC', 'PFNGLFREEOBJECTBUFFERATIPROC',
'PFNGLARRAYOBJECTATIPROC', 'PFNGLGETARRAYOBJECTFVATIPROC',
'PFNGLGETARRAYOBJECTIVATIPROC', 'PFNGLVARIANTARRAYOBJECTATIPROC',
'PFNGLGETVARIANTARRAYOBJECTFVATIPROC', 'PFNGLGETVARIANTARRAYOBJECTIVATIPROC',
'GL_EXT_vertex_shader', 'glBeginVertexShaderEXT', 'glEndVertexShaderEXT',
'glBindVertexShaderEXT', 'glGenVertexShadersEXT', 'glDeleteVertexShaderEXT',
'glShaderOp1EXT', 'glShaderOp2EXT', 'glShaderOp3EXT', 'glSwizzleEXT',
'glWriteMaskEXT', 'glInsertComponentEXT', 'glExtractComponentEXT',
'glGenSymbolsEXT', 'glSetInvariantEXT', 'glSetLocalConstantEXT',
'glVariantbvEXT', 'glVariantsvEXT', 'glVariantivEXT', 'glVariantfvEXT',
'glVariantdvEXT', 'glVariantubvEXT', 'glVariantusvEXT', 'glVariantuivEXT',
'glVariantPointerEXT', 'glEnableVariantClientStateEXT',
'glDisableVariantClientStateEXT', 'glBindLightParameterEXT',
'glBindMaterialParameterEXT', 'glBindTexGenParameterEXT',
'glBindTextureUnitParameterEXT', 'glBindParameterEXT',
'glIsVariantEnabledEXT', 'glGetVariantBooleanvEXT', 'glGetVariantIntegervEXT',
'glGetVariantFloatvEXT', 'glGetVariantPointervEXT',
'glGetInvariantBooleanvEXT', 'glGetInvariantIntegervEXT',
'glGetInvariantFloatvEXT', 'glGetLocalConstantBooleanvEXT',
'glGetLocalConstantIntegervEXT', 'glGetLocalConstantFloatvEXT',
'PFNGLBEGINVERTEXSHADEREXTPROC', 'PFNGLENDVERTEXSHADEREXTPROC',
'PFNGLBINDVERTEXSHADEREXTPROC', 'PFNGLGENVERTEXSHADERSEXTPROC',
'PFNGLDELETEVERTEXSHADEREXTPROC', 'PFNGLSHADEROP1EXTPROC',
'PFNGLSHADEROP2EXTPROC', 'PFNGLSHADEROP3EXTPROC', 'PFNGLSWIZZLEEXTPROC',
'PFNGLWRITEMASKEXTPROC', 'PFNGLINSERTCOMPONENTEXTPROC',
'PFNGLEXTRACTCOMPONENTEXTPROC', 'PFNGLGENSYMBOLSEXTPROC',
'PFNGLSETINVARIANTEXTPROC', 'PFNGLSETLOCALCONSTANTEXTPROC',
'PFNGLVARIANTBVEXTPROC', 'PFNGLVARIANTSVEXTPROC', 'PFNGLVARIANTIVEXTPROC',
'PFNGLVARIANTFVEXTPROC', 'PFNGLVARIANTDVEXTPROC', 'PFNGLVARIANTUBVEXTPROC',
'PFNGLVARIANTUSVEXTPROC', 'PFNGLVARIANTUIVEXTPROC',
'PFNGLVARIANTPOINTEREXTPROC', 'PFNGLENABLEVARIANTCLIENTSTATEEXTPROC',
'PFNGLDISABLEVARIANTCLIENTSTATEEXTPROC', 'PFNGLBINDLIGHTPARAMETEREXTPROC',
'PFNGLBINDMATERIALPARAMETEREXTPROC', 'PFNGLBINDTEXGENPARAMETEREXTPROC',
'PFNGLBINDTEXTUREUNITPARAMETEREXTPROC', 'PFNGLBINDPARAMETEREXTPROC',
'PFNGLISVARIANTENABLEDEXTPROC', 'PFNGLGETVARIANTBOOLEANVEXTPROC',
'PFNGLGETVARIANTINTEGERVEXTPROC', 'PFNGLGETVARIANTFLOATVEXTPROC',
'PFNGLGETVARIANTPOINTERVEXTPROC', 'PFNGLGETINVARIANTBOOLEANVEXTPROC',
'PFNGLGETINVARIANTINTEGERVEXTPROC', 'PFNGLGETINVARIANTFLOATVEXTPROC',
'PFNGLGETLOCALCONSTANTBOOLEANVEXTPROC',
'PFNGLGETLOCALCONSTANTINTEGERVEXTPROC', 'PFNGLGETLOCALCONSTANTFLOATVEXTPROC',
'GL_ATI_vertex_streams', 'glVertexStream1sATI', 'glVertexStream1svATI',
'glVertexStream1iATI', 'glVertexStream1ivATI', 'glVertexStream1fATI',
'glVertexStream1fvATI', 'glVertexStream1dATI', 'glVertexStream1dvATI',
'glVertexStream2sATI', 'glVertexStream2svATI', 'glVertexStream2iATI',
'glVertexStream2ivATI', 'glVertexStream2fATI', 'glVertexStream2fvATI',
'glVertexStream2dATI', 'glVertexStream2dvATI', 'glVertexStream3sATI',
'glVertexStream3svATI', 'glVertexStream3iATI', 'glVertexStream3ivATI',
'glVertexStream3fATI', 'glVertexStream3fvATI', 'glVertexStream3dATI',
'glVertexStream3dvATI', 'glVertexStream4sATI', 'glVertexStream4svATI',
'glVertexStream4iATI', 'glVertexStream4ivATI', 'glVertexStream4fATI',
'glVertexStream4fvATI', 'glVertexStream4dATI', 'glVertexStream4dvATI',
'glNormalStream3bATI', 'glNormalStream3bvATI', 'glNormalStream3sATI',
'glNormalStream3svATI', 'glNormalStream3iATI', 'glNormalStream3ivATI',
'glNormalStream3fATI', 'glNormalStream3fvATI', 'glNormalStream3dATI',
'glNormalStream3dvATI', 'glClientActiveVertexStreamATI',
'glVertexBlendEnviATI', 'glVertexBlendEnvfATI', 'PFNGLVERTEXSTREAM1SATIPROC',
'PFNGLVERTEXSTREAM1SVATIPROC', 'PFNGLVERTEXSTREAM1IATIPROC',
'PFNGLVERTEXSTREAM1IVATIPROC', 'PFNGLVERTEXSTREAM1FATIPROC',
'PFNGLVERTEXSTREAM1FVATIPROC', 'PFNGLVERTEXSTREAM1DATIPROC',
'PFNGLVERTEXSTREAM1DVATIPROC', 'PFNGLVERTEXSTREAM2SATIPROC',
'PFNGLVERTEXSTREAM2SVATIPROC', 'PFNGLVERTEXSTREAM2IATIPROC',
'PFNGLVERTEXSTREAM2IVATIPROC', 'PFNGLVERTEXSTREAM2FATIPROC',
'PFNGLVERTEXSTREAM2FVATIPROC', 'PFNGLVERTEXSTREAM2DATIPROC',
'PFNGLVERTEXSTREAM2DVATIPROC', 'PFNGLVERTEXSTREAM3SATIPROC',
'PFNGLVERTEXSTREAM3SVATIPROC', 'PFNGLVERTEXSTREAM3IATIPROC',
'PFNGLVERTEXSTREAM3IVATIPROC', 'PFNGLVERTEXSTREAM3FATIPROC',
'PFNGLVERTEXSTREAM3FVATIPROC', 'PFNGLVERTEXSTREAM3DATIPROC',
'PFNGLVERTEXSTREAM3DVATIPROC', 'PFNGLVERTEXSTREAM4SATIPROC',
'PFNGLVERTEXSTREAM4SVATIPROC', 'PFNGLVERTEXSTREAM4IATIPROC',
'PFNGLVERTEXSTREAM4IVATIPROC', 'PFNGLVERTEXSTREAM4FATIPROC',
'PFNGLVERTEXSTREAM4FVATIPROC', 'PFNGLVERTEXSTREAM4DATIPROC',
'PFNGLVERTEXSTREAM4DVATIPROC', 'PFNGLNORMALSTREAM3BATIPROC',
'PFNGLNORMALSTREAM3BVATIPROC', 'PFNGLNORMALSTREAM3SATIPROC',
'PFNGLNORMALSTREAM3SVATIPROC', 'PFNGLNORMALSTREAM3IATIPROC',
'PFNGLNORMALSTREAM3IVATIPROC', 'PFNGLNORMALSTREAM3FATIPROC',
'PFNGLNORMALSTREAM3FVATIPROC', 'PFNGLNORMALSTREAM3DATIPROC',
'PFNGLNORMALSTREAM3DVATIPROC', 'PFNGLCLIENTACTIVEVERTEXSTREAMATIPROC',
'PFNGLVERTEXBLENDENVIATIPROC', 'PFNGLVERTEXBLENDENVFATIPROC',
'GL_ATI_element_array', 'glElementPointerATI', 'glDrawElementArrayATI',
'glDrawRangeElementArrayATI', 'PFNGLELEMENTPOINTERATIPROC',
'PFNGLDRAWELEMENTARRAYATIPROC', 'PFNGLDRAWRANGEELEMENTARRAYATIPROC',
'GL_SUN_mesh_array', 'glDrawMeshArraysSUN', 'PFNGLDRAWMESHARRAYSSUNPROC',
'GL_SUN_slice_accum', 'GL_NV_multisample_filter_hint', 'GL_NV_depth_clamp',
'GL_NV_occlusion_query', 'glGenOcclusionQueriesNV',
'glDeleteOcclusionQueriesNV', 'glIsOcclusionQueryNV',
'glBeginOcclusionQueryNV', 'glEndOcclusionQueryNV', 'glGetOcclusionQueryivNV',
'glGetOcclusionQueryuivNV', 'PFNGLGENOCCLUSIONQUERIESNVPROC',
'PFNGLDELETEOCCLUSIONQUERIESNVPROC', 'PFNGLISOCCLUSIONQUERYNVPROC',
'PFNGLBEGINOCCLUSIONQUERYNVPROC', 'PFNGLENDOCCLUSIONQUERYNVPROC',
'PFNGLGETOCCLUSIONQUERYIVNVPROC', 'PFNGLGETOCCLUSIONQUERYUIVNVPROC',
'GL_NV_point_sprite', 'glPointParameteriNV', 'glPointParameterivNV',
'PFNGLPOINTPARAMETERINVPROC', 'PFNGLPOINTPARAMETERIVNVPROC',
'GL_NV_texture_shader3', 'GL_NV_vertex_program1_1', 'GL_EXT_shadow_funcs',
'GL_EXT_stencil_two_side', 'glActiveStencilFaceEXT',
'PFNGLACTIVESTENCILFACEEXTPROC', 'GL_ATI_text_fragment_shader',
'GL_APPLE_client_storage', 'GL_APPLE_element_array', 'glElementPointerAPPLE',
'glDrawElementArrayAPPLE', 'glDrawRangeElementArrayAPPLE',
'glMultiDrawElementArrayAPPLE', 'glMultiDrawRangeElementArrayAPPLE',
'PFNGLELEMENTPOINTERAPPLEPROC', 'PFNGLDRAWELEMENTARRAYAPPLEPROC',
'PFNGLDRAWRANGEELEMENTARRAYAPPLEPROC', 'PFNGLMULTIDRAWELEMENTARRAYAPPLEPROC',
'PFNGLMULTIDRAWRANGEELEMENTARRAYAPPLEPROC', 'GL_APPLE_fence',
'glGenFencesAPPLE', 'glDeleteFencesAPPLE', 'glSetFenceAPPLE',
'glIsFenceAPPLE', 'glTestFenceAPPLE', 'glFinishFenceAPPLE',
'glTestObjectAPPLE', 'glFinishObjectAPPLE', 'PFNGLGENFENCESAPPLEPROC',
'PFNGLDELETEFENCESAPPLEPROC', 'PFNGLSETFENCEAPPLEPROC',
'PFNGLISFENCEAPPLEPROC', 'PFNGLTESTFENCEAPPLEPROC',
'PFNGLFINISHFENCEAPPLEPROC', 'PFNGLTESTOBJECTAPPLEPROC',
'PFNGLFINISHOBJECTAPPLEPROC', 'GL_APPLE_vertex_array_object',
'glBindVertexArrayAPPLE', 'glDeleteVertexArraysAPPLE',
'glGenVertexArraysAPPLE', 'glIsVertexArrayAPPLE',
'PFNGLBINDVERTEXARRAYAPPLEPROC', 'PFNGLDELETEVERTEXARRAYSAPPLEPROC',
'PFNGLGENVERTEXARRAYSAPPLEPROC', 'PFNGLISVERTEXARRAYAPPLEPROC',
'GL_APPLE_vertex_array_range', 'glVertexArrayRangeAPPLE',
'glFlushVertexArrayRangeAPPLE', 'glVertexArrayParameteriAPPLE',
'PFNGLVERTEXARRAYRANGEAPPLEPROC', 'PFNGLFLUSHVERTEXARRAYRANGEAPPLEPROC',
'PFNGLVERTEXARRAYPARAMETERIAPPLEPROC', 'GL_APPLE_ycbcr_422', 'GL_S3_s3tc',
'GL_ATI_draw_buffers', 'glDrawBuffersATI', 'PFNGLDRAWBUFFERSATIPROC',
'GL_ATI_pixel_format_float', 'GL_ATI_texture_env_combine3',
'GL_ATI_texture_float', 'GL_NV_float_buffer', 'GL_NV_fragment_program',
'glProgramNamedParameter4fNV', 'glProgramNamedParameter4dNV',
'glProgramNamedParameter4fvNV', 'glProgramNamedParameter4dvNV',
'glGetProgramNamedParameterfvNV', 'glGetProgramNamedParameterdvNV',
'PFNGLPROGRAMNAMEDPARAMETER4FNVPROC', 'PFNGLPROGRAMNAMEDPARAMETER4DNVPROC',
'PFNGLPROGRAMNAMEDPARAMETER4FVNVPROC', 'PFNGLPROGRAMNAMEDPARAMETER4DVNVPROC',
'PFNGLGETPROGRAMNAMEDPARAMETERFVNVPROC',
'PFNGLGETPROGRAMNAMEDPARAMETERDVNVPROC', 'GL_NV_half_float', 'glVertex2hNV',
'glVertex2hvNV', 'glVertex3hNV', 'glVertex3hvNV', 'glVertex4hNV',
'glVertex4hvNV', 'glNormal3hNV', 'glNormal3hvNV', 'glColor3hNV',
'glColor3hvNV', 'glColor4hNV', 'glColor4hvNV', 'glTexCoord1hNV',
'glTexCoord1hvNV', 'glTexCoord2hNV', 'glTexCoord2hvNV', 'glTexCoord3hNV',
'glTexCoord3hvNV', 'glTexCoord4hNV', 'glTexCoord4hvNV', 'glMultiTexCoord1hNV',
'glMultiTexCoord1hvNV', 'glMultiTexCoord2hNV', 'glMultiTexCoord2hvNV',
'glMultiTexCoord3hNV', 'glMultiTexCoord3hvNV', 'glMultiTexCoord4hNV',
'glMultiTexCoord4hvNV', 'glFogCoordhNV', 'glFogCoordhvNV',
'glSecondaryColor3hNV', 'glSecondaryColor3hvNV', 'glVertexWeighthNV',
'glVertexWeighthvNV', 'glVertexAttrib1hNV', 'glVertexAttrib1hvNV',
'glVertexAttrib2hNV', 'glVertexAttrib2hvNV', 'glVertexAttrib3hNV',
'glVertexAttrib3hvNV', 'glVertexAttrib4hNV', 'glVertexAttrib4hvNV',
'glVertexAttribs1hvNV', 'glVertexAttribs2hvNV', 'glVertexAttribs3hvNV',
'glVertexAttribs4hvNV', 'PFNGLVERTEX2HNVPROC', 'PFNGLVERTEX2HVNVPROC',
'PFNGLVERTEX3HNVPROC', 'PFNGLVERTEX3HVNVPROC', 'PFNGLVERTEX4HNVPROC',
'PFNGLVERTEX4HVNVPROC', 'PFNGLNORMAL3HNVPROC', 'PFNGLNORMAL3HVNVPROC',
'PFNGLCOLOR3HNVPROC', 'PFNGLCOLOR3HVNVPROC', 'PFNGLCOLOR4HNVPROC',
'PFNGLCOLOR4HVNVPROC', 'PFNGLTEXCOORD1HNVPROC', 'PFNGLTEXCOORD1HVNVPROC',
'PFNGLTEXCOORD2HNVPROC', 'PFNGLTEXCOORD2HVNVPROC', 'PFNGLTEXCOORD3HNVPROC',
'PFNGLTEXCOORD3HVNVPROC', 'PFNGLTEXCOORD4HNVPROC', 'PFNGLTEXCOORD4HVNVPROC',
'PFNGLMULTITEXCOORD1HNVPROC', 'PFNGLMULTITEXCOORD1HVNVPROC',
'PFNGLMULTITEXCOORD2HNVPROC', 'PFNGLMULTITEXCOORD2HVNVPROC',
'PFNGLMULTITEXCOORD3HNVPROC', 'PFNGLMULTITEXCOORD3HVNVPROC',
'PFNGLMULTITEXCOORD4HNVPROC', 'PFNGLMULTITEXCOORD4HVNVPROC',
'PFNGLFOGCOORDHNVPROC', 'PFNGLFOGCOORDHVNVPROC',
'PFNGLSECONDARYCOLOR3HNVPROC', 'PFNGLSECONDARYCOLOR3HVNVPROC',
'PFNGLVERTEXWEIGHTHNVPROC', 'PFNGLVERTEXWEIGHTHVNVPROC',
'PFNGLVERTEXATTRIB1HNVPROC', 'PFNGLVERTEXATTRIB1HVNVPROC',
'PFNGLVERTEXATTRIB2HNVPROC', 'PFNGLVERTEXATTRIB2HVNVPROC',
'PFNGLVERTEXATTRIB3HNVPROC', 'PFNGLVERTEXATTRIB3HVNVPROC',
'PFNGLVERTEXATTRIB4HNVPROC', 'PFNGLVERTEXATTRIB4HVNVPROC',
'PFNGLVERTEXATTRIBS1HVNVPROC', 'PFNGLVERTEXATTRIBS2HVNVPROC',
'PFNGLVERTEXATTRIBS3HVNVPROC', 'PFNGLVERTEXATTRIBS4HVNVPROC',
'GL_NV_pixel_data_range', 'glPixelDataRangeNV', 'glFlushPixelDataRangeNV',
'PFNGLPIXELDATARANGENVPROC', 'PFNGLFLUSHPIXELDATARANGENVPROC',
'GL_NV_primitive_restart', 'glPrimitiveRestartNV',
'glPrimitiveRestartIndexNV', 'PFNGLPRIMITIVERESTARTNVPROC',
'PFNGLPRIMITIVERESTARTINDEXNVPROC', 'GL_NV_texture_expand_normal',
'GL_NV_vertex_program2', 'GL_ATI_map_object_buffer', 'glMapObjectBufferATI',
'glUnmapObjectBufferATI', 'PFNGLMAPOBJECTBUFFERATIPROC',
'PFNGLUNMAPOBJECTBUFFERATIPROC', 'GL_ATI_separate_stencil',
'glStencilOpSeparateATI', 'glStencilFuncSeparateATI',
'PFNGLSTENCILOPSEPARATEATIPROC', 'PFNGLSTENCILFUNCSEPARATEATIPROC',
'GL_ATI_vertex_attrib_array_object', 'glVertexAttribArrayObjectATI',
'glGetVertexAttribArrayObjectfvATI', 'glGetVertexAttribArrayObjectivATI',
'PFNGLVERTEXATTRIBARRAYOBJECTATIPROC',
'PFNGLGETVERTEXATTRIBARRAYOBJECTFVATIPROC',
'PFNGLGETVERTEXATTRIBARRAYOBJECTIVATIPROC', 'GL_OES_read_format',
'GL_EXT_depth_bounds_test', 'glDepthBoundsEXT', 'PFNGLDEPTHBOUNDSEXTPROC',
'GL_EXT_texture_mirror_clamp', 'GL_EXT_blend_equation_separate',
'glBlendEquationSeparateEXT', 'PFNGLBLENDEQUATIONSEPARATEEXTPROC',
'GL_MESA_pack_invert', 'GL_MESA_ycbcr_texture', 'GL_EXT_pixel_buffer_object',
'GL_NV_fragment_program_option', 'GL_NV_fragment_program2',
'GL_NV_vertex_program2_option', 'GL_NV_vertex_program3',
'GL_EXT_framebuffer_object', 'glIsRenderbufferEXT', 'glBindRenderbufferEXT',
'glDeleteRenderbuffersEXT', 'glGenRenderbuffersEXT',
'glRenderbufferStorageEXT', 'glGetRenderbufferParameterivEXT',
'glIsFramebufferEXT', 'glBindFramebufferEXT', 'glDeleteFramebuffersEXT',
'glGenFramebuffersEXT', 'glCheckFramebufferStatusEXT',
'glFramebufferTexture1DEXT', 'glFramebufferTexture2DEXT',
'glFramebufferTexture3DEXT', 'glFramebufferRenderbufferEXT',
'glGetFramebufferAttachmentParameterivEXT', 'glGenerateMipmapEXT',
'PFNGLISRENDERBUFFEREXTPROC', 'PFNGLBINDRENDERBUFFEREXTPROC',
'PFNGLDELETERENDERBUFFERSEXTPROC', 'PFNGLGENRENDERBUFFERSEXTPROC',
'PFNGLRENDERBUFFERSTORAGEEXTPROC', 'PFNGLGETRENDERBUFFERPARAMETERIVEXTPROC',
'PFNGLISFRAMEBUFFEREXTPROC', 'PFNGLBINDFRAMEBUFFEREXTPROC',
'PFNGLDELETEFRAMEBUFFERSEXTPROC', 'PFNGLGENFRAMEBUFFERSEXTPROC',
'PFNGLCHECKFRAMEBUFFERSTATUSEXTPROC', 'PFNGLFRAMEBUFFERTEXTURE1DEXTPROC',
'PFNGLFRAMEBUFFERTEXTURE2DEXTPROC', 'PFNGLFRAMEBUFFERTEXTURE3DEXTPROC',
'PFNGLFRAMEBUFFERRENDERBUFFEREXTPROC',
'PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVEXTPROC',
'PFNGLGENERATEMIPMAPEXTPROC', 'GL_GREMEDY_string_marker',
'glStringMarkerGREMEDY', 'PFNGLSTRINGMARKERGREMEDYPROC']
# END GENERATED CONTENT (do not edit above this line)
from sympy import Lambda, Symbol, Function, WildFunction, Derivative, sqrt, \
log, exp, Rational, Real, sign, Basic, sin, cos, diff, I, re, im, \
oo, zoo, nan, E, expand, pi, raises, O, Sum
from sympy.utilities.pytest import XFAIL
from sympy.abc import x, y
from sympy.core.function import PoleError
def test_log():
    """Sanity-check the sign of log at a few fixed arguments."""
    assert log(1).is_zero
    assert log(2) > 0
    assert log(0.5).is_negative == True
def test_exp_log():
    """exp and log cancel each other on a real symbol."""
    t = Symbol("x", real=True)
    assert log(exp(t)) == t
    assert exp(log(t)) == t
def test_log_expansion():
    """expand() splits logs of products and powers of positive symbols."""
    p = Symbol("x", positive=True)
    q = Symbol("y", positive=True)
    # ok in interactive, fails in py.test
    #assert log(p*q) != log(p)+log(q)
    #assert log(p**2) != 2*log(p)
    assert log(p * q).expand() == log(p) + log(q)
    assert log(p ** 2).expand() == 2 * log(p)
    assert (log(p ** -5) ** -1).expand() == -1 / log(p) / 5
def test_log_hashing_bug():
    """A symbol and nested logs of it must hash and compare as distinct."""
    s = Symbol("y")
    assert s != log(log(s))
    assert hash(s) != hash(log(log(s)))
    assert log(s) != log(log(log(s)))

    expr = 1 / log(log(s) + log(log(s)))
    assert expr.base.func is log
    expr = 1 / log(log(s) + log(log(log(s))))
    assert expr.base.func is log

    t = Symbol("x")
    expr = log(log(t))
    assert expr.func is log
    assert t.func is not log
    assert hash(log(log(t))) != hash(t)
    assert expr != t
def test_sign():
    """sign of a positive quantity (log 2) evaluates to 1."""
    assert sign(log(2)) == 1
def test_exp_bug():
    """Regression: exp(1*log(x)) must collapse back to x."""
    s = Symbol("x")
    assert exp(1 * log(s)) == s
def test_exp_expand():
    """expand() splits exp of a sum; the unexpanded form stays distinct."""
    a = Symbol("x")
    b = Symbol("y")
    expr = exp(log(Rational(2)) * (1 + a) - log(Rational(2)) * a)
    assert expr.expand() == 2
    assert exp(a + b) != exp(a) * exp(b)
    assert exp(a + b).expand() == exp(a) * exp(b)
def test_f_expand_complex():
    """Complex expansion: real/imaginary split and Euler's formula."""
    g = Function('f')
    r = Symbol('x', real=True)
    w = Symbol('z')
    assert g(r).expand(complex=True) == I * im(g(r)) + re(g(r))
    assert exp(r).expand(complex=True) == exp(r)
    assert exp(I * r).expand(complex=True) == cos(r) + I * sin(r)
    assert exp(w).expand(complex=True) == (cos(im(w)) * exp(re(w))
                                           + I * sin(im(w)) * exp(re(w)))
def test_bug1():
x = Symbol("x")
w = Symbol("w")
e = sqrt(-log(w))
assert e.subs(log(w),-x) == sqrt(x)
e = sqrt(-5*log(w))
assert e.subs(log(w),-x) == sqrt(5*x)
def test_general_function():
nu = Function('nu', nargs=1)
x = Symbol("x")
y = Symbol("y")
e = nu(x)
edx = e.diff(x)
edy = e.diff(y)
edxdx = e.diff(x).diff(x)
edxdy = e.diff(x).diff(y)
assert e == nu(x)
assert edx != nu(x)
assert edx == diff(nu(x), x)
assert edy == 0
assert edxdx == diff(diff(nu(x), x), x)
assert edxdy == 0
def test_function_nargs():
f = Function('f')
x = Symbol('x')
assert f.nargs == None
assert f(x).nargs == 1
assert f(x, x, x, x).nargs == 4
def test_derivative_subs_bug():
x = Symbol("x y")
l = Function('l', nargs=1)
n = Function('n', nargs=1)
e = diff(n(x), x)
assert e.subs(n(x), l(x)) != e
assert e.subs(n(x), l(x)) == diff(l(x), x)
assert e.subs(n(x), -l(x)) == diff(-l(x), x)
assert e.subs(x, y) == diff(n(y), y)
def test_derivative_subs_self_bug():
f = Function('f')
d = diff(f(x), x)
assert d.subs(d, y) == y
def test_derivative_linearity():
x = Symbol("x")
y = Symbol("y")
n = Function('n', nargs=1)
assert diff(-n(x), x) == -diff(n(x), x)
assert diff(8*n(x), x) == 8*diff(n(x), x)
assert diff(8*n(x), x) != 7*diff(n(x), x)
assert diff(8*n(x)*x, x) == 8*n(x) + 8*x*diff(n(x), x)
assert diff(8*n(x)*y*x, x) == 8*y*n(x) + 8*y*x*diff(n(x), x)
def test_derivative_evaluate():
x = Symbol('x')
assert Derivative(sin(x), x) != diff(sin(x), x)
assert Derivative(sin(x), x).doit() == diff(sin(x), x)
f = Function('f')
assert Derivative(Derivative(f(x), x), x) == diff(f(x), x, x)
assert Derivative(sin(x), x, 0) == sin(x)
def test_diff_symbols():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
f = Function('f')
g = Function('g')
assert diff(f(x, y, z), x, y, z) == Derivative(f(x, y, z), x, y, z)
assert diff(f(x, y, z), x, x, x) == Derivative(f(x, y, z), x, x, x)
assert diff(f(x, y, z), x, 3) == Derivative(f(x, y, z), x, 3)
assert diff([f(x, y, z), g(x, y, z)], [x, y, z, (x, x), (y, 2), (z, 3),
(x, y, z, 2), (x, x, x)]) == \
[[Derivative(f(x, y, z), x), Derivative(f(x, y, z), y),
Derivative(f(x, y, z), z), Derivative(f(x, y, z), x, x),
Derivative(f(x, y, z), y, y), Derivative(f(x, y, z), z, z, z),
Derivative(f(x, y, z), x, y, z, z), Derivative(f(x, y, z), x, x, x)],
[Derivative(g(x, y, z), x), Derivative(g(x, y, z), y),
Derivative(g(x, y, z), z), Derivative(g(x, y, z), x, x),
Derivative(g(x, y, z), y, y), Derivative(g(x, y, z), z, z, z),
Derivative(g(x, y, z), x, y, z, z), Derivative(g(x, y, z), x, x, x)]]
# issue 1929
assert diff(-z + x/y, (z, x, y)) == [-1, 1/y, -x/y**2]
assert diff(f(x, y, z), x, y, z, 2) == Derivative(f(x, y, z), x, y, z, z)
assert diff(f(x, y, z), x, y, z, 2, evaluate=False) == \
Derivative(f(x, y, z), x, y, z, z)
assert Derivative(f(x, y, z), x, y, z)._eval_derivative(z) == \
Derivative(f(x, y, z), x, y, z, z)
assert Derivative(Derivative(f(x, y, z), x), y)._eval_derivative(z) == \
Derivative(f(x, y, z), x, y, z)
@XFAIL
def test_combine():
# XXX combine no longer exists
x = Symbol("x")
y = Symbol("y")
assert exp(x)*exp(-x) != 1
assert (exp(x)*exp(-x)).combine() == 1
assert exp(x)**2 != exp(2*x)
assert (exp(x)**2).combine() == exp(2*x)
assert exp(x)*exp(-x/2)*exp(-x/2) != 1
assert (exp(x)*exp(-x/2)*exp(-x/2)).combine() == 1
assert (2*log(x)).combine() == log(x**2)
assert exp(2*log(x)) != x**2
assert exp(2*log(x)).combine() == x**2
assert exp(x)*exp(-x)-1 !=0
assert (exp(x)*exp(-x)-1).combine() == 0
assert (2*exp(x)*exp(-x)).combine() == 2
assert (x/exp(x)*exp(-x)).combine() == x*exp(-2*x)
def test_Lambda():
e = Lambda(x, x**2)
f = Function('f')
assert e(4) == 16
assert e(x) == x**2
assert e(y) == y**2
assert Lambda(x, x**2) == Lambda(x, x**2)
assert Lambda(x, x**2) == Lambda(y, y**2)
assert Lambda(x, x**2) != Lambda(y, y**2+1)
assert Lambda(x,y,x**y) == Lambda(y,x,y**x)
assert Lambda(x,y,x**y) != Lambda(x,y,y**x)
assert Lambda(x,y,x**y)(x,y) == x**y
assert Lambda(x,y,x**y)(x) == Lambda(y,x**y)
assert Lambda(x,y,x**y)(x)(y) == x**y
assert Lambda(x,y,x**y)(x)(3) == x**3
assert Lambda(x,y,x**y)(3)(y) == 3**y
assert Lambda(x,y,x**y)(3)(3) == 3**3
assert Lambda(x,y,x**y)(3,3) == 3**3
assert Lambda(x,y,x**y)(x,3) == x**3
assert Lambda(x,y,x**y)(3,y) == 3**y
assert Lambda(x,f(x))(x) == f(x)
assert Lambda(x,f(x))() == Lambda(x,f(x))
assert Lambda(x,x**2)(e(x)) == x**4
assert e(e(x)) == x**4
assert Lambda(x,y,f(x)+f(y))(x) == Lambda(y,f(x)+f(y))
#doesn't work yet:
#class F(Function):
# pass
#assert Lambda(x, F(x)) == F
assert Lambda(x, y, x+y).nargs == 2
z = Symbol('z')
t = Symbol('t')
p = x, y, z, t
assert Lambda(p, t*(x+y+z))(*p) == t * (x + y + z)
def test_expand_function():
assert expand(x+y) == x + y
assert expand(x+y, complex=True) == I*im(x) + I*im(y) + re(x) + re(y)
def test_function_comparable():
x = Symbol('x')
assert sin(x).is_comparable == False
assert cos(x).is_comparable == False
assert sin(Real('0.1')).is_comparable == True
assert cos(Real('0.1')).is_comparable == True
assert sin(E).is_comparable == True
assert cos(E).is_comparable == True
assert sin(Rational(1,3)).is_comparable == True
assert cos(Rational(1,3)).is_comparable == True
@XFAIL
def test_function_comparable_fail():
x = Symbol('x')
assert sin(oo).is_comparable == False
assert sin(-oo).is_comparable == False
assert sin(zoo).is_comparable == False
assert sin(nan).is_comparable == False
def test_deriv1():
f=Function('f')
g=Function('g')
x = Symbol('x')
assert f(g(x)).diff(x) == Derivative(f(g(x)), g(x)) * Derivative(g(x), x)
def test_deriv2():
f=Function('f')
g=Function('g')
x = Symbol('x')
assert f(x).diff(x) == Derivative(f(x), x)
assert f(2*x).diff(x) == 2*Derivative(f(2*x), 2*x)
assert (f(x)**3).diff(x) == 3*f(x)**2*f(x).diff(x)
assert (f(2*x)**3).diff(x) == 6*f(2*x)**2*Derivative(f(2*x), 2*x)
assert f(2+x).diff(x) == Derivative(f(2+x), 2+x)
assert f(2+3*x).diff(x) == 3*Derivative(f(2+3*x), 2+3*x)
assert f(sin(x)).diff(x) == Derivative(f(sin(x)), sin(x)) * cos(x)
assert f(3*sin(x)).diff(x) == 3*Derivative(f(3*sin(x)), 3*sin(x)) * cos(x)
def test_deriv3():
f=Function('f')
g=Function('g')
x = Symbol('x')
assert (x**3).diff(x) == 3*x**2
assert (x**3).diff(x, evaluate=False) != 3*x**2
assert (x**3).diff(x, evaluate=False) == Derivative(x**3, x)
assert diff(x**3, x) == 3*x**2
assert diff(x**3, x, evaluate=False) != 3*x**2
assert diff(x**3, x, evaluate=False) == Derivative(x**3, x)
def test_suppressed_evaluation():
a = sin(0, evaluate=False)
assert a != 0
assert a.func is sin
assert a.args == (0,)
def test_function_evalf():
def eq(a,b,eps):
return abs(a-b) < eps
assert eq(sin(1).evalf(15), Real("0.841470984807897"), 1e-13)
assert eq(sin(2).evalf(25), Real("0.9092974268256816953960199",25), 1e-23)
assert eq(sin(1+I).evalf(15), Real("1.29845758141598") + Real("0.634963914784736")*I, 1e-13)
assert eq(exp(1+I).evalf(15), Real("1.46869393991588") + Real("2.28735528717884239")*I, 1e-13)
assert eq(exp(-0.5+1.5*I).evalf(15), Real("0.0429042815937374") + Real("0.605011292285002")*I, 1e-13)
assert eq(log(pi+sqrt(2)*I).evalf(15), Real("1.23699044022052") + Real("0.422985442737893")*I, 1e-13)
assert eq(cos(100).evalf(15), Real("0.86231887228768"), 1e-13)
def test_extensibility_eval():
class MyFunc(Function):
@classmethod
def eval(cls, *args):
return (0,0,0)
assert MyFunc(0) == (0,0,0)
def test_function_non_commutative():
x = Symbol('x', commutative=False)
f = Function('f')
assert f(x).is_commutative == False
assert sin(x).is_commutative == False
assert exp(x).is_commutative == False
assert log(x).is_commutative == False
def test_function__eval_nseries():
x = Symbol('x')
assert sin(x)._eval_nseries(x,0,2) == x + O(x**2)
assert sin(x+1)._eval_nseries(x,0,2) == x*cos(1) + sin(1) + O(x**2)
assert sin(pi*(1-x))._eval_nseries(x,0,2) == pi*x + O(x**2)
raises(PoleError, 'sin(1/x)._eval_nseries(x,0,2)')
def test_doit():
n = Symbol('n', integer = True)
f = Sum(2 * n * x, (n, 1, 3))
d = Derivative(f, x)
assert d.doit() == 12
assert d.doit(deep = False) == d
| bsd-3-clause | f2cb20485d53a370d82c74b08b020bdb | 30.210227 | 105 | 0.533588 | 2.476555 | false | true | false | false |
wtforms/wtforms | tests/test_widgets.py | 1 | 8398 | import pytest
from markupsafe import Markup
from wtforms.widgets.core import CheckboxInput
from wtforms.widgets.core import FileInput
from wtforms.widgets.core import HiddenInput
from wtforms.widgets.core import html_params
from wtforms.widgets.core import Input
from wtforms.widgets.core import ListWidget
from wtforms.widgets.core import NumberInput
from wtforms.widgets.core import PasswordInput
from wtforms.widgets.core import RadioInput
from wtforms.widgets.core import RangeInput
from wtforms.widgets.core import Select
from wtforms.widgets.core import TableWidget
from wtforms.widgets.core import TextArea
from wtforms.widgets.core import TextInput
class TestHTMLParams:
def test_basic(self):
assert html_params(foo=9, k="wuuu") == 'foo="9" k="wuuu"'
assert html_params(class_="foo") == 'class="foo"'
assert html_params(class__="foo") == 'class="foo"'
assert html_params(for_="foo") == 'for="foo"'
assert html_params(readonly=False, foo=9) == 'foo="9"'
assert (
html_params(accept="image/png, image/jpeg", required=True)
== 'accept="image/png, image/jpeg" required'
)
def test_data_prefix(self):
assert html_params(data_foo=22) == 'data-foo="22"'
assert html_params(data_foo_bar=1) == 'data-foo-bar="1"'
def test_aria_prefix(self):
assert html_params(aria_foo="bar") == 'aria-foo="bar"'
assert html_params(aria_foo_bar="foobar") == 'aria-foo-bar="foobar"'
def test_quoting(self):
assert html_params(foo='hi&bye"quot') == 'foo="hi&bye"quot"'
class TestListWidget:
def test_listwidget(self, dummy_field_class):
# ListWidget just expects an iterable of field-like objects as its
# 'field' so that is what we will give it
field = dummy_field_class(
[dummy_field_class(x, label="l" + x) for x in ["foo", "bar"]], id="hai"
)
assert (
ListWidget()(field)
== '<ul id="hai"><li>lfoo foo</li><li>lbar bar</li></ul>'
)
w = ListWidget(html_tag="ol", prefix_label=False)
assert w(field) == '<ol id="hai"><li>foo lfoo</li><li>bar lbar</li></ol>'
class TestTableWidget:
def test_tablewidget(self, dummy_field_class):
inner_fields = [
dummy_field_class(data="hidden1", field_type="HiddenField"),
dummy_field_class(data="foo", label="lfoo"),
dummy_field_class(data="bar", label="lbar"),
dummy_field_class(data="hidden2", field_type="HiddenField"),
]
field = dummy_field_class(inner_fields, id="hai")
assert (
TableWidget()(field)
== '<table id="hai"><tr><th>lfoo</th><td>hidden1foo</td></tr>'
"<tr><th>lbar</th><td>bar</td></tr></table>hidden2"
)
class TestBasicWidgets:
"""Test most of the basic input widget types"""
def test_input_type(self):
with pytest.raises(AttributeError):
Input().input_type
test_input = Input(input_type="test")
assert test_input.input_type == "test"
def test_html_marking(self, basic_widget_dummy_field):
html = TextInput()(basic_widget_dummy_field)
assert hasattr(html, "__html__")
assert html.__html__() is html
def test_text_input(self, basic_widget_dummy_field):
assert (
TextInput()(basic_widget_dummy_field)
== '<input id="id" name="bar" type="text" value="foo">'
)
def test_password_input(self, basic_widget_dummy_field):
assert 'type="password"' in PasswordInput()(basic_widget_dummy_field)
assert 'value=""' in PasswordInput()(basic_widget_dummy_field)
assert 'value="foo"' in PasswordInput(hide_value=False)(
basic_widget_dummy_field
)
def test_hidden_input(self, basic_widget_dummy_field):
assert 'type="hidden"' in HiddenInput()(basic_widget_dummy_field)
assert "hidden" in HiddenInput().field_flags
def test_checkbox_input(self, basic_widget_dummy_field):
assert (
CheckboxInput()(basic_widget_dummy_field, value="v")
== '<input checked id="id" name="bar" type="checkbox" value="v">'
)
# set falsy value to dummy field
basic_widget_dummy_field.data = ""
assert "checked" not in CheckboxInput()(basic_widget_dummy_field)
basic_widget_dummy_field.data = False
assert "checked" not in CheckboxInput()(basic_widget_dummy_field)
def test_radio_input(self, basic_widget_dummy_field):
basic_widget_dummy_field.checked = True
expected = '<input checked id="id" name="bar" type="radio" value="foo">'
assert RadioInput()(basic_widget_dummy_field) == expected
basic_widget_dummy_field.checked = False
assert RadioInput()(basic_widget_dummy_field) == expected.replace(
" checked", ""
)
def test_textarea(self, basic_widget_dummy_field):
# Make sure textareas escape properly and render properly
basic_widget_dummy_field.data = "hi<>bye"
basic_widget_dummy_field.name = "f"
basic_widget_dummy_field.id = ""
assert (
TextArea()(basic_widget_dummy_field)
== '<textarea id="" name="f">\r\nhi<>bye</textarea>'
)
def test_file(self, basic_widget_dummy_field):
assert (
FileInput()(basic_widget_dummy_field)
== '<input id="id" name="bar" type="file">'
)
assert (
FileInput(multiple=True)(basic_widget_dummy_field)
== '<input id="id" multiple name="bar" type="file">'
)
class TestSelect:
def test_select(self, select_dummy_field):
select_dummy_field.name = "f"
assert (
Select()(select_dummy_field)
== '<select id="" name="f"><option selected value="foo">lfoo</option>'
'<option value="bar">lbar</option></select>'
)
assert (
Select(multiple=True)(select_dummy_field)
== '<select id="" multiple name="f"><option selected value="foo">'
'lfoo</option><option value="bar">lbar</option></select>'
)
def test_render_option(self):
# value, label, selected
assert (
Select.render_option("bar", "foo", False)
== '<option value="bar">foo</option>'
)
assert (
Select.render_option(True, "foo", True)
== '<option selected value="True">foo</option>'
)
assert (
Select.render_option("bar", '<i class="bar"></i>foo', False)
== '<option value="bar"><i class="bar"></i>foo</option>'
)
assert (
Select.render_option("bar", Markup('<i class="bar"></i>foo'), False)
== '<option value="bar"><i class="bar"></i>foo</option>'
)
class TestHTML5:
def test_number(self, html5_dummy_field):
i1 = NumberInput(step="any")
assert (
i1(html5_dummy_field)
== '<input id="id" name="bar" step="any" type="number" value="42">'
)
i2 = NumberInput(step=2)
assert (
i2(html5_dummy_field, step=3)
== '<input id="id" name="bar" step="3" type="number" value="42">'
)
i3 = NumberInput(min=10)
assert (
i3(html5_dummy_field)
== '<input id="id" min="10" name="bar" type="number" value="42">'
)
assert (
i3(html5_dummy_field, min=5)
== '<input id="id" min="5" name="bar" type="number" value="42">'
)
i4 = NumberInput(max=100)
assert (
i4(html5_dummy_field)
== '<input id="id" max="100" name="bar" type="number" value="42">'
)
assert (
i4(html5_dummy_field, max=50)
== '<input id="id" max="50" name="bar" type="number" value="42">'
)
def test_range(self, html5_dummy_field):
i1 = RangeInput(step="any")
assert (
i1(html5_dummy_field)
== '<input id="id" name="bar" step="any" type="range" value="42">'
)
i2 = RangeInput(step=2)
assert (
i2(html5_dummy_field, step=3)
== '<input id="id" name="bar" step="3" type="range" value="42">'
)
| bsd-3-clause | 74e324cecbf4d86b3f5eb6424578e8ae | 34.73617 | 88 | 0.576447 | 3.632353 | false | true | false | false |
fredrik-johansson/mpmath | mpmath/libmp/libmpf.py | 1 | 45021 | """
Low-level functions for arbitrary-precision floating-point arithmetic.
"""
__docformat__ = 'plaintext'
import math
from bisect import bisect
import sys
# Importing random is slow
#from random import getrandbits
getrandbits = None
from .backend import (MPZ, MPZ_TYPE, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE,
BACKEND, STRICT, HASH_MODULUS, HASH_BITS, gmpy, sage, sage_utils)
from .libintmath import (giant_steps,
trailtable, bctable, lshift, rshift, bitcount, trailing,
sqrt_fixed, numeral, isqrt, isqrt_fast, sqrtrem,
bin_to_radix)
# We don't pickle tuples directly for the following reasons:
# 1: pickle uses str() for ints, which is inefficient when they are large
# 2: pickle doesn't work for gmpy mpzs
# Both problems are solved by using hex()
if BACKEND == 'sage':
def to_pickable(x):
sign, man, exp, bc = x
return sign, hex(man), exp, bc
else:
def to_pickable(x):
sign, man, exp, bc = x
return sign, hex(man)[2:], exp, bc
def from_pickable(x):
sign, man, exp, bc = x
return (sign, MPZ(man, 16), exp, bc)
class ComplexResult(ValueError):
pass
try:
intern
except NameError:
intern = lambda x: x
# All supported rounding modes
round_nearest = intern('n')
round_floor = intern('f')
round_ceiling = intern('c')
round_up = intern('u')
round_down = intern('d')
round_fast = round_down
def prec_to_dps(n):
"""Return number of accurate decimals that can be represented
with a precision of n bits."""
return max(1, int(round(int(n)/3.3219280948873626)-1))
def dps_to_prec(n):
"""Return the number of bits required to represent n decimals
accurately."""
return max(1, int(round((int(n)+1)*3.3219280948873626)))
def repr_dps(n):
"""Return the number of decimal digits required to represent
a number with n-bit precision so that it can be uniquely
reconstructed from the representation."""
dps = prec_to_dps(n)
if dps == 15:
return 17
return dps + 3
#----------------------------------------------------------------------------#
# Some commonly needed float values #
#----------------------------------------------------------------------------#
# Regular number format:
# (-1)**sign * mantissa * 2**exponent, plus bitcount of mantissa
fzero = (0, MPZ_ZERO, 0, 0)
fnzero = (1, MPZ_ZERO, 0, 0)
fone = (0, MPZ_ONE, 0, 1)
fnone = (1, MPZ_ONE, 0, 1)
ftwo = (0, MPZ_ONE, 1, 1)
ften = (0, MPZ_FIVE, 1, 3)
fhalf = (0, MPZ_ONE, -1, 1)
# Arbitrary encoding for special numbers: zero mantissa, nonzero exponent
fnan = (0, MPZ_ZERO, -123, -1)
finf = (0, MPZ_ZERO, -456, -2)
fninf = (1, MPZ_ZERO, -789, -3)
# Was 1e1000; this is broken in Python 2.4
math_float_inf = 1e300 * 1e300
#----------------------------------------------------------------------------#
# Rounding #
#----------------------------------------------------------------------------#
# This function can be used to round a mantissa generally. However,
# we will try to do most rounding inline for efficiency.
def round_int(x, n, rnd):
if rnd == round_nearest:
if x >= 0:
t = x >> (n-1)
if t & 1 and ((t & 2) or (x & h_mask[n<300][n])):
return (t>>1)+1
else:
return t>>1
else:
return -round_int(-x, n, rnd)
if rnd == round_floor:
return x >> n
if rnd == round_ceiling:
return -((-x) >> n)
if rnd == round_down:
if x >= 0:
return x >> n
return -((-x) >> n)
if rnd == round_up:
if x >= 0:
return -((-x) >> n)
return x >> n
# These masks are used to pick out segments of numbers to determine
# which direction to round when rounding to nearest.
class h_mask_big:
def __getitem__(self, n):
return (MPZ_ONE<<(n-1))-1
h_mask_small = [0]+[((MPZ_ONE<<(_-1))-1) for _ in range(1, 300)]
h_mask = [h_mask_big(), h_mask_small]
# The >> operator rounds to floor. shifts_down[rnd][sign]
# tells whether this is the right direction to use, or if the
# number should be negated before shifting
shifts_down = {round_floor:(1,0), round_ceiling:(0,1),
round_down:(1,1), round_up:(0,0)}
#----------------------------------------------------------------------------#
# Normalization of raw mpfs #
#----------------------------------------------------------------------------#
# This function is called almost every time an mpf is created.
# It has been optimized accordingly.
def _normalize(sign, man, exp, bc, prec, rnd):
"""
Create a raw mpf tuple with value (-1)**sign * man * 2**exp and
normalized mantissa. The mantissa is rounded in the specified
direction if its size exceeds the precision. Trailing zero bits
are also stripped from the mantissa to ensure that the
representation is canonical.
Conditions on the input:
* The input must represent a regular (finite) number
* The sign bit must be 0 or 1
* The mantissa must be positive
* The exponent must be an integer
* The bitcount must be exact
If these conditions are not met, use from_man_exp, mpf_pos, or any
of the conversion functions to create normalized raw mpf tuples.
"""
if not man:
return fzero
# Cut mantissa down to size if larger than target precision
n = bc - prec
if n > 0:
if rnd == round_nearest:
t = man >> (n-1)
if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
man = (t>>1)+1
else:
man = t>>1
elif shifts_down[rnd][sign]:
man >>= n
else:
man = -((-man)>>n)
exp += n
bc = prec
# Strip trailing bits
if not man & 1:
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
# Bit count can be wrong if the input mantissa was 1 less than
# a power of 2 and got rounded up, thereby adding an extra bit.
# With trailing bits removed, all powers of two have mantissa 1,
# so this is easy to check for.
if man == 1:
bc = 1
return sign, man, exp, bc
def _normalize1(sign, man, exp, bc, prec, rnd):
"""same as normalize, but with the added condition that
man is odd or zero
"""
if not man:
return fzero
if bc <= prec:
return sign, man, exp, bc
n = bc - prec
if rnd == round_nearest:
t = man >> (n-1)
if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
man = (t>>1)+1
else:
man = t>>1
elif shifts_down[rnd][sign]:
man >>= n
else:
man = -((-man)>>n)
exp += n
bc = prec
# Strip trailing bits
if not man & 1:
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
# Bit count can be wrong if the input mantissa was 1 less than
# a power of 2 and got rounded up, thereby adding an extra bit.
# With trailing bits removed, all powers of two have mantissa 1,
# so this is easy to check for.
if man == 1:
bc = 1
return sign, man, exp, bc
try:
_exp_types = (int, long)
except NameError:
_exp_types = (int,)
def strict_normalize(sign, man, exp, bc, prec, rnd):
"""Additional checks on the components of an mpf. Enable tests by setting
the environment variable MPMATH_STRICT to Y."""
assert type(man) == MPZ_TYPE
assert type(bc) in _exp_types
assert type(exp) in _exp_types
assert bc == bitcount(man)
return _normalize(sign, man, exp, bc, prec, rnd)
def strict_normalize1(sign, man, exp, bc, prec, rnd):
"""Additional checks on the components of an mpf. Enable tests by setting
the environment variable MPMATH_STRICT to Y."""
assert type(man) == MPZ_TYPE
assert type(bc) in _exp_types
assert type(exp) in _exp_types
assert bc == bitcount(man)
assert (not man) or (man & 1)
return _normalize1(sign, man, exp, bc, prec, rnd)
if BACKEND == 'gmpy' and '_mpmath_normalize' in dir(gmpy):
_normalize = gmpy._mpmath_normalize
_normalize1 = gmpy._mpmath_normalize
if BACKEND == 'sage':
_normalize = _normalize1 = sage_utils.normalize
if STRICT:
normalize = strict_normalize
normalize1 = strict_normalize1
else:
normalize = _normalize
normalize1 = _normalize1
#----------------------------------------------------------------------------#
# Conversion functions #
#----------------------------------------------------------------------------#
def from_man_exp(man, exp, prec=None, rnd=round_fast):
"""Create raw mpf from (man, exp) pair. The mantissa may be signed.
If no precision is specified, the mantissa is stored exactly."""
man = MPZ(man)
sign = 0
if man < 0:
sign = 1
man = -man
if man < 1024:
bc = bctable[int(man)]
else:
bc = bitcount(man)
if not prec:
if not man:
return fzero
if not man & 1:
if man & 2:
return (sign, man >> 1, exp + 1, bc - 1)
t = trailtable[int(man & 255)]
if not t:
while not man & 255:
man >>= 8
exp += 8
bc -= 8
t = trailtable[int(man & 255)]
man >>= t
exp += t
bc -= t
return (sign, man, exp, bc)
return normalize(sign, man, exp, bc, prec, rnd)
int_cache = dict((n, from_man_exp(n, 0)) for n in range(-10, 257))
if BACKEND == 'gmpy' and '_mpmath_create' in dir(gmpy):
from_man_exp = gmpy._mpmath_create
if BACKEND == 'sage':
from_man_exp = sage_utils.from_man_exp
def from_int(n, prec=0, rnd=round_fast):
"""Create a raw mpf from an integer. If no precision is specified,
the mantissa is stored exactly."""
if not prec:
if n in int_cache:
return int_cache[n]
return from_man_exp(n, 0, prec, rnd)
def to_man_exp(s):
"""Return (man, exp) of a raw mpf. Raise an error if inf/nan."""
sign, man, exp, bc = s
if (not man) and exp:
raise ValueError("mantissa and exponent are undefined for %s" % man)
return man, exp
def to_int(s, rnd=None):
"""Convert a raw mpf to the nearest int. Rounding is done down by
default (same as int(float) in Python), but can be changed. If the
input is inf/nan, an exception is raised."""
sign, man, exp, bc = s
if (not man) and exp:
raise ValueError("cannot convert inf or nan to int")
if exp >= 0:
if sign:
return (-man) << exp
return man << exp
# Make default rounding fast
if not rnd:
if sign:
return -(man >> (-exp))
else:
return man >> (-exp)
if sign:
return round_int(-man, -exp, rnd)
else:
return round_int(man, -exp, rnd)
def mpf_round_int(s, rnd):
sign, man, exp, bc = s
if (not man) and exp:
return s
if exp >= 0:
return s
mag = exp+bc
if mag < 1:
if rnd == round_ceiling:
if sign: return fzero
else: return fone
elif rnd == round_floor:
if sign: return fnone
else: return fzero
elif rnd == round_nearest:
if mag < 0 or man == MPZ_ONE: return fzero
elif sign: return fnone
else: return fone
else:
raise NotImplementedError
return mpf_pos(s, min(bc, mag), rnd)
def mpf_floor(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_floor)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_ceil(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_ceiling)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_nint(s, prec=0, rnd=round_fast):
v = mpf_round_int(s, round_nearest)
if prec:
v = mpf_pos(v, prec, rnd)
return v
def mpf_frac(s, prec=0, rnd=round_fast):
return mpf_sub(s, mpf_floor(s), prec, rnd)
def from_float(x, prec=53, rnd=round_fast):
"""Create a raw mpf from a Python float, rounding if necessary.
If prec >= 53, the result is guaranteed to represent exactly the
same number as the input. If prec is not specified, use prec=53."""
# frexp only raises an exception for nan on some platforms
if x != x:
return fnan
# in Python2.5 math.frexp gives an exception for float infinity
# in Python2.6 it returns (float infinity, 0)
try:
m, e = math.frexp(x)
except:
if x == math_float_inf: return finf
if x == -math_float_inf: return fninf
return fnan
if x == math_float_inf: return finf
if x == -math_float_inf: return fninf
return from_man_exp(int(m*(1<<53)), e-53, prec, rnd)
def from_npfloat(x, prec=113, rnd=round_fast):
"""Create a raw mpf from a numpy float, rounding if necessary.
If prec >= 113, the result is guaranteed to represent exactly the
same number as the input. If prec is not specified, use prec=113."""
y = float(x)
if x == y: # ldexp overflows for float16
return from_float(y, prec, rnd)
import numpy as np
if np.isfinite(x):
m, e = np.frexp(x)
return from_man_exp(int(np.ldexp(m, 113)), int(e-113), prec, rnd)
if np.isposinf(x): return finf
if np.isneginf(x): return fninf
return fnan
def from_Decimal(x, prec=None, rnd=round_fast):
"""Create a raw mpf from a Decimal, rounding if necessary.
If prec is not specified, use the equivalent bit precision
of the number of significant digits in x."""
if x.is_nan(): return fnan
if x.is_infinite(): return fninf if x.is_signed() else finf
if prec is None:
prec = int(len(x.as_tuple()[1])*3.3219280948873626)
return from_str(str(x), prec, rnd)
def to_float(s, strict=False, rnd=round_fast):
"""
Convert a raw mpf to a Python float. The result is exact if the
bitcount of s is <= 53 and no underflow/overflow occurs.
If the number is too large or too small to represent as a regular
float, it will be converted to inf or 0.0. Setting strict=True
forces an OverflowError to be raised instead.
Warning: with a directed rounding mode, the correct nearest representable
floating-point number in the specified direction might not be computed
in case of overflow or (gradual) underflow.
"""
sign, man, exp, bc = s
if not man:
if s == fzero: return 0.0
if s == finf: return math_float_inf
if s == fninf: return -math_float_inf
return math_float_inf/math_float_inf
if bc > 53:
sign, man, exp, bc = normalize1(sign, man, exp, bc, 53, rnd)
if sign:
man = -man
try:
return math.ldexp(man, exp)
except OverflowError:
if strict:
raise
# Overflow to infinity
if exp + bc > 0:
if sign:
return -math_float_inf
else:
return math_float_inf
# Underflow to zero
return 0.0
def from_rational(p, q, prec, rnd=round_fast):
"""Create a raw mpf from a rational number p/q, round if
necessary."""
return mpf_div(from_int(p), from_int(q), prec, rnd)
def to_rational(s):
"""Convert a raw mpf to a rational number. Return integers (p, q)
such that s = p/q exactly."""
sign, man, exp, bc = s
if sign:
man = -man
if bc == -1:
raise ValueError("cannot convert %s to a rational number" % man)
if exp >= 0:
return man * (1<<exp), 1
else:
return man, 1<<(-exp)
def to_fixed(s, prec):
"""Convert a raw mpf to a fixed-point big integer"""
sign, man, exp, bc = s
offset = exp + prec
if sign:
if offset >= 0: return (-man) << offset
else: return (-man) >> (-offset)
else:
if offset >= 0: return man << offset
else: return man >> (-offset)
##############################################################################
##############################################################################
#----------------------------------------------------------------------------#
# Arithmetic operations, etc. #
#----------------------------------------------------------------------------#
def mpf_rand(prec):
"""Return a raw mpf chosen randomly from [0, 1), with prec bits
in the mantissa."""
global getrandbits
if not getrandbits:
import random
getrandbits = random.getrandbits
return from_man_exp(getrandbits(prec), -prec, prec, round_floor)
def mpf_eq(s, t):
"""Test equality of two raw mpfs. This is simply tuple comparison
unless either number is nan, in which case the result is False."""
if not s[1] or not t[1]:
if s == fnan or t == fnan:
return False
return s == t
def mpf_hash(s):
# Duplicate the new hash algorithm introduces in Python 3.2.
if sys.version_info >= (3, 2):
ssign, sman, sexp, sbc = s
# Handle special numbers
if not sman:
if s == fnan: return sys.hash_info.nan
if s == finf: return sys.hash_info.inf
if s == fninf: return -sys.hash_info.inf
h = sman % HASH_MODULUS
if sexp >= 0:
sexp = sexp % HASH_BITS
else:
sexp = HASH_BITS - 1 - ((-1 - sexp) % HASH_BITS)
h = (h << sexp) % HASH_MODULUS
if ssign: h = -h
if h == -1: h = -2
return int(h)
else:
try:
# Try to be compatible with hash values for floats and ints
return hash(to_float(s, strict=1))
except OverflowError:
# We must unfortunately sacrifice compatibility with ints here.
# We could do hash(man << exp) when the exponent is positive, but
# this would cause unreasonable inefficiency for large numbers.
return hash(s)
def mpf_cmp(s, t):
"""Compare the raw mpfs s and t. Return -1 if s < t, 0 if s == t,
and 1 if s > t. (Same convention as Python's cmp() function.)"""
# In principle, a comparison amounts to determining the sign of s-t.
# A full subtraction is relatively slow, however, so we first try to
# look at the components.
ssign, sman, sexp, sbc = s
tsign, tman, texp, tbc = t
# Handle zeros and special numbers
if not sman or not tman:
if s == fzero: return -mpf_sign(t)
if t == fzero: return mpf_sign(s)
if s == t: return 0
# Follow same convention as Python's cmp for float nan
if t == fnan: return 1
if s == finf: return 1
if t == fninf: return 1
return -1
# Different sides of zero
if ssign != tsign:
if not ssign: return 1
return -1
# This reduces to direct integer comparison
if sexp == texp:
if sman == tman:
return 0
if sman > tman:
if ssign: return -1
else: return 1
else:
if ssign: return 1
else: return -1
# Check position of the highest set bit in each number. If
# different, there is certainly an inequality.
a = sbc + sexp
b = tbc + texp
if ssign:
if a < b: return 1
if a > b: return -1
else:
if a < b: return -1
if a > b: return 1
# Both numbers have the same highest bit. Subtract to find
# how the lower bits compare.
delta = mpf_sub(s, t, 5, round_floor)
if delta[0]:
return -1
return 1
def mpf_lt(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) < 0
def mpf_le(s, t):
if s == fnan or t == fnan:
return False
return mpf_cmp(s, t) <= 0
def mpf_gt(s, t):
    """Return True if s > t; comparisons involving nan are always False."""
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) > 0
def mpf_ge(s, t):
    """Return True if s >= t; comparisons involving nan are always False."""
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) >= 0
def mpf_min_max(seq):
    """Return (minimum, maximum) of a nonempty sequence of raw mpfs.

    Comparisons use mpf_lt/mpf_gt, so nan elements never displace the
    running extremes (comparisons with nan are False). Raises
    IndexError for an empty sequence.
    """
    # Renamed locals: the original shadowed the builtins min/max.
    lo = hi = seq[0]
    for x in seq[1:]:
        if mpf_lt(x, lo): lo = x
        if mpf_gt(x, hi): hi = x
    return lo, hi
def mpf_pos(s, prec=0, rnd=round_fast):
    """Calculate 0+s for a raw mpf (i.e., just round s to the specified
    precision). With prec=0 the value is returned unchanged."""
    if not prec:
        return s
    sign, man, exp, bc = s
    # Infinities and nan (zero mantissa, nonzero exponent) pass through
    if (not man) and exp:
        return s
    return normalize1(sign, man, exp, bc, prec, rnd)
def mpf_neg(s, prec=None, rnd=round_fast):
    """Negate a raw mpf (return -s), rounding the result to the
    specified precision. The prec argument can be omitted to do the
    operation exactly."""
    sign, man, exp, bc = s
    if not man:
        # Zero, inf, -inf, nan: only the infinities change sign
        if exp:
            if s == finf:
                return fninf
            if s == fninf:
                return finf
        return s
    flipped = 1 - sign
    if not prec:
        return (flipped, man, exp, bc)
    return normalize1(flipped, man, exp, bc, prec, rnd)
def mpf_abs(s, prec=None, rnd=round_fast):
    """Return abs(s) of the raw mpf s, rounded to the specified
    precision. The prec argument can be omitted to generate an
    exact result."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # Specials: -inf becomes +inf; +inf and nan are unchanged
        return finf if s == fninf else s
    if prec:
        return normalize1(0, man, exp, bc, prec, rnd)
    # Exact result: clear the sign bit only when it is set
    return (0, man, exp, bc) if sign else s
def mpf_sign(s):
    """Return -1, 0, or 1 (as a Python int, not a raw mpf) depending on
    whether s is negative, zero, or positive. (Nan is taken to give 0.)"""
    sign, man, exp, bc = s
    if man:
        # sign bit 0 -> +1, sign bit 1 -> -1
        return 1 - 2*sign
    # Zero and nan give 0; infinities carry a definite sign
    if s == finf:
        return 1
    if s == fninf:
        return -1
    return 0
def mpf_add(s, t, prec=0, rnd=round_fast, _sub=0):
    """
    Add the two raw mpf values s and t.
    With prec=0, no rounding is performed. Note that this can
    produce a very large mantissa (potentially too large to fit
    in memory) if exponents are far apart.

    With _sub=1 the sign of t is flipped, so this computes s-t
    (used by mpf_sub).
    """
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    tsign ^= _sub
    # Standard case: two nonzero, regular numbers
    if sman and tman:
        offset = sexp - texp
        if offset:
            if offset > 0:
                # Outside precision range; only need to perturb.
                # If t lies entirely beyond the rounding precision of s,
                # shift s a few guard bits and nudge the last bit in the
                # direction of t so the final rounding comes out right.
                if offset > 100 and prec:
                    delta = sbc + sexp - tbc - texp
                    if delta > prec + 4:
                        offset = prec + 4
                        sman <<= offset
                        if tsign == ssign: sman += 1
                        else: sman -= 1
                        return normalize1(ssign, sman, sexp-offset,
                            bitcount(sman), prec, rnd)
                # Add
                if ssign == tsign:
                    man = tman + (sman << offset)
                # Subtract
                else:
                    if ssign: man = tman - (sman << offset)
                    else: man = (sman << offset) - tman
                    if man >= 0:
                        ssign = 0
                    else:
                        man = -man
                        ssign = 1
                bc = bitcount(man)
                return normalize1(ssign, man, texp, bc, prec or bc, rnd)
            elif offset < 0:
                # Mirror of the offset > 0 case with the roles of s and
                # t exchanged.
                # Outside precision range; only need to perturb
                if offset < -100 and prec:
                    delta = tbc + texp - sbc - sexp
                    if delta > prec + 4:
                        offset = prec + 4
                        tman <<= offset
                        if ssign == tsign: tman += 1
                        else: tman -= 1
                        return normalize1(tsign, tman, texp-offset,
                            bitcount(tman), prec, rnd)
                # Add
                if ssign == tsign:
                    man = sman + (tman << -offset)
                # Subtract
                else:
                    if tsign: man = sman - (tman << -offset)
                    else: man = (tman << -offset) - sman
                    if man >= 0:
                        ssign = 0
                    else:
                        man = -man
                        ssign = 1
                bc = bitcount(man)
                return normalize1(ssign, man, sexp, bc, prec or bc, rnd)
        # Equal exponents; no shifting necessary
        if ssign == tsign:
            man = tman + sman
        else:
            if ssign: man = tman - sman
            else: man = sman - tman
            if man >= 0:
                ssign = 0
            else:
                man = -man
                ssign = 1
        bc = bitcount(man)
        return normalize(ssign, man, texp, bc, prec or bc, rnd)
    # Handle zeros and special numbers.
    # mpf_neg makes t's sign flip (from _sub) visible in the tuple
    # comparisons below.
    if _sub:
        t = mpf_neg(t)
    if not sman:
        if sexp:
            # s is inf/-inf/nan: inf + (-inf) gives nan, otherwise s wins
            if s == t or tman or not texp:
                return s
            return fnan
        if tman:
            return normalize1(tsign, tman, texp, tbc, prec or tbc, rnd)
        return t
    if texp:
        return t
    if sman:
        return normalize1(ssign, sman, sexp, sbc, prec or sbc, rnd)
    return s
def mpf_sub(s, t, prec=0, rnd=round_fast):
    """Return the difference of two raw mpfs, s-t. This function is
    simply a wrapper of mpf_add that changes the sign of t."""
    # The _sub flag makes mpf_add flip t's sign internally.
    return mpf_add(s, t, prec, rnd, _sub=1)
def mpf_sum(xs, prec=0, rnd=round_fast, absolute=False):
    """
    Sum a list of mpf values efficiently and accurately
    (typically no temporary roundoff occurs). If prec=0,
    the final result will not be rounded either.
    There may be roundoff error or cancellation if extremely
    large exponent differences occur.
    With absolute=True, sums the absolute values.
    """
    # Running exact sum is man * 2**exp; special values (inf/nan) are
    # accumulated separately and dominate the result if present.
    man = 0
    exp = 0
    max_extra_prec = prec*2 or 1000000 # XXX
    special = None
    for x in xs:
        xsign, xman, xexp, xbc = x
        if xman:
            if xsign and not absolute:
                xman = -xman
            delta = xexp - exp
            if xexp >= exp:
                # x much larger than existing sum?
                # first: quick test
                if (delta > max_extra_prec) and \
                    ((not man) or delta-bitcount(abs(man)) > max_extra_prec):
                    # Current sum cannot affect the rounded result; drop it
                    man = xman
                    exp = xexp
                else:
                    man += (xman << delta)
            else:
                delta = -delta
                # x much smaller than existing sum?
                if delta-xbc > max_extra_prec:
                    if not man:
                        man, exp = xman, xexp
                else:
                    man = (man << delta) + xman
                    exp = xexp
        elif xexp:
            if absolute:
                x = mpf_abs(x)
            # Combine specials with mpf_add at minimal precision
            special = mpf_add(special or fzero, x, 1)
    # Will be inf or nan
    if special:
        return special
    return from_man_exp(man, exp, prec, rnd)
def gmpy_mpf_mul(s, t, prec=0, rnd=round_fast):
    """Multiply two raw mpfs. Backend variant used when gmpy provides a
    fast bitcount; with prec=0 the exact (unrounded) product is
    returned."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    sign = ssign ^ tsign
    man = sman*tman
    if man:
        bc = bitcount(man)
        if prec:
            return normalize1(sign, man, sexp+texp, bc, prec, rnd)
        else:
            return (sign, man, sexp+texp, bc)
    # At least one operand is zero or special
    s_special = (not sman) and sexp
    t_special = (not tman) and texp
    if not s_special and not t_special:
        return fzero
    if fnan in (s, t): return fnan
    # Ensure the special operand is s, so t holds the other factor
    if (not tman) and texp: s, t = t, s
    if t == fzero: return fnan
    # inf times a signed nonzero value: pick the correctly signed infinity
    return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def gmpy_mpf_mul_int(s, n, prec, rnd=round_fast):
    """Multiply by a Python integer."""
    sign, man, exp, bc = s
    if not man:
        # Zero or special value: delegate to the full multiplication
        return mpf_mul(s, from_int(n), prec, rnd)
    if n == 0:
        return fzero
    if n < 0:
        n = -n
        sign = 1 - sign
    man = man * n
    return normalize(sign, man, exp, bitcount(man), prec, rnd)
def python_mpf_mul(s, t, prec=0, rnd=round_fast):
    """Multiply two raw mpfs. Pure-Python backend variant that avoids a
    full bitcount() call by tracking bit counts incrementally."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    sign = ssign ^ tsign
    man = sman*tman
    if man:
        # The product's bit count is sbc+tbc-1 or sbc+tbc; the shift
        # test cheaply determines which without calling bitcount()
        bc = sbc + tbc - 1
        bc += int(man>>bc)
        if prec:
            return normalize1(sign, man, sexp+texp, bc, prec, rnd)
        else:
            return (sign, man, sexp+texp, bc)
    # At least one operand is zero or special
    s_special = (not sman) and sexp
    t_special = (not tman) and texp
    if not s_special and not t_special:
        return fzero
    if fnan in (s, t): return fnan
    # Ensure the special operand is s, so t holds the other factor
    if (not tman) and texp: s, t = t, s
    if t == fzero: return fnan
    # inf times a signed nonzero value: pick the correctly signed infinity
    return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def python_mpf_mul_int(s, n, prec, rnd=round_fast):
    """Multiply by a Python integer. Pure-Python backend variant that
    updates the bit count incrementally instead of recomputing it."""
    sign, man, exp, bc = s
    if not man:
        # Zero or special value: delegate to the full multiplication
        return mpf_mul(s, from_int(n), prec, rnd)
    if not n:
        return fzero
    if n < 0:
        sign ^= 1
        n = -n
    man *= n
    # Generally n will be small: use the precomputed bit-count table
    if n < 1024:
        bc += bctable[int(n)] - 1
    else:
        bc += bitcount(n) - 1
    # The estimate may be one too low; the shift test corrects it
    bc += int(man>>bc)
    return normalize(sign, man, exp, bc, prec, rnd)
# Select the multiplication implementations for the active backend:
# gmpy has a fast bitcount so the straightforward version is best; the
# pure-Python version instead tracks bit counts incrementally.
if BACKEND == 'gmpy':
    mpf_mul = gmpy_mpf_mul
    mpf_mul_int = gmpy_mpf_mul_int
else:
    mpf_mul = python_mpf_mul
    mpf_mul_int = python_mpf_mul_int
def mpf_shift(s, n):
    """Quickly multiply the raw mpf s by 2**n without rounding."""
    sign, man, exp, bc = s
    # Zero and specials are fixed points under scaling by powers of two
    if man:
        return (sign, man, exp + n, bc)
    return s
def mpf_frexp(x):
    """Convert x = y*2**n to (y, n) with abs(y) in [0.5, 1) if nonzero.

    Returns (fzero, 0) for zero. Raises ValueError for infinities and
    nan, which have no such decomposition.
    """
    sign, man, exp, bc = x
    if not man:
        if x == fzero:
            return (fzero, 0)
        # Fix: give the ValueError an explanatory message (was bare).
        raise ValueError("mpf_frexp() of an infinite or nan value")
    # bc+exp is the position of the leading bit, i.e. the exponent n
    return mpf_shift(x, -bc-exp), bc+exp
def mpf_div(s, t, prec, rnd=round_fast):
    """Floating-point division of raw mpfs, correctly rounded to prec
    bits. Raises ZeroDivisionError for finite/0 and 0/0."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    # Zeros and special values
    if not sman or not tman:
        if s == fzero:
            if t == fzero: raise ZeroDivisionError
            if t == fnan: return fnan
            return fzero
        if t == fzero:
            raise ZeroDivisionError
        s_special = (not sman) and sexp
        t_special = (not tman) and texp
        if s_special and t_special:
            return fnan
        if s == fnan or t == fnan:
            return fnan
        if not t_special:
            if t == fzero:
                return fnan
            # inf / finite: signed infinity
            return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
        # finite / inf
        return fzero
    sign = ssign ^ tsign
    if tman == 1:
        # Dividing by a power of two only shifts the exponent
        return normalize1(sign, sman, sexp-texp, sbc, prec, rnd)
    # Same strategy as for addition: if there is a remainder, perturb
    # the result a few bits outside the precision range before rounding
    extra = prec - sbc + tbc + 5
    if extra < 5:
        extra = 5
    quot, rem = divmod(sman<<extra, tman)
    if rem:
        # Set the lowest guard bit so the rounding sees an inexact value
        quot = (quot<<1) + 1
        extra += 1
        return normalize1(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
    return normalize(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
def mpf_rdiv_int(n, t, prec, rnd=round_fast):
    """Floating-point division n/t with a Python integer as numerator."""
    sign, man, exp, bc = t
    if not n or not man:
        # Zero numerator or zero/special denominator: use the general path
        return mpf_div(from_int(n), t, prec, rnd)
    if n < 0:
        sign ^= 1
        n = -n
    extra = prec + bc + 5
    quot, rem = divmod(n<<extra, man)
    if rem:
        # Same inexactness perturbation as in mpf_div
        quot = (quot<<1) + 1
        extra += 1
        return normalize1(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
    return normalize(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
def mpf_mod(s, t, prec, rnd=round_fast):
    """Return s mod t for raw mpfs (Python % semantics: the result has
    the sign of t's contribution via integer %). Specials give nan."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if ((not sman) and sexp) or ((not tman) and texp):
        return fnan
    # Important special case: do nothing if t is larger
    if ssign == tsign and texp > sexp+sbc:
        return s
    # Another important special case: this allows us to do e.g. x % 1.0
    # to find the fractional part of x, and it will work when x is huge.
    if tman == 1 and sexp > texp+tbc:
        return fzero
    # Align both operands to a common exponent and use integer %
    base = min(sexp, texp)
    sman = (-1)**ssign * sman
    tman = (-1)**tsign * tman
    man = (sman << (sexp-base)) % (tman << (texp-base))
    if man >= 0:
        sign = 0
    else:
        man = -man
        sign = 1
    return normalize(sign, man, base, bitcount(man), prec, rnd)
# Rounding direction to use for an intermediate value when the final
# result is its reciprocal: directed roundings flip (rounding 1/x up
# requires rounding x down, etc.), nearest stays nearest.
reciprocal_rnd = {
  round_down : round_up,
  round_up : round_down,
  round_floor : round_ceiling,
  round_ceiling : round_floor,
  round_nearest : round_nearest
}
# Rounding direction to use for an intermediate value when the final
# result is its negation: floor and ceiling swap; down, up and nearest
# are symmetric about zero and stay the same.
negative_rnd = {
  round_down : round_down,
  round_up : round_up,
  round_floor : round_ceiling,
  round_ceiling : round_floor,
  round_nearest : round_nearest
}
def mpf_pow_int(s, n, prec, rnd=round_fast):
    """Compute s**n, where s is a raw mpf and n is a Python integer.

    Special values follow the usual conventions (inf**0 = nan, etc.).
    For large n, binary exponentiation with directed rounding is used
    so the result stays a rigorous bound for interval arithmetic.
    """
    sign, man, exp, bc = s
    if (not man) and exp:
        if s == finf:
            if n > 0: return s
            if n == 0: return fnan
            return fzero
        if s == fninf:
            # Sign of the result alternates with the parity of n
            if n > 0: return [finf, fninf][n & 1]
            if n == 0: return fnan
            return fzero
        return fnan
    n = int(n)
    if n == 0: return fone
    if n == 1: return mpf_pos(s, prec, rnd)
    if n == 2:
        _, man, exp, bc = s
        if not man:
            return fzero
        man = man*man
        if man == 1:
            return (0, MPZ_ONE, exp+exp, 1)
        # Squaring doubles the bit count, possibly minus one
        bc = bc + bc - 2
        bc += bctable[int(man>>bc)]
        return normalize1(0, man, exp+exp, bc, prec, rnd)
    if n == -1: return mpf_div(fone, s, prec, rnd)
    if n < 0:
        # Compute s**(-n) with the reciprocal rounding direction, then invert
        inverse = mpf_pow_int(s, -n, prec+5, reciprocal_rnd[rnd])
        return mpf_div(fone, inverse, prec, rnd)
    # Result is negative exactly when the base is negative and n is odd
    result_sign = sign & n
    # Use exact integer power when the exact mantissa is small
    if man == 1:
        return (result_sign, MPZ_ONE, exp*n, 1)
    if bc*n < 1000:
        man **= n
        return normalize1(result_sign, man, exp*n, bitcount(man), prec, rnd)
    # Use directed rounding all the way through to maintain rigorous
    # bounds for interval arithmetic
    rounds_down = (rnd == round_nearest) or \
        shifts_down[rnd][result_sign]
    # Now we perform binary exponentiation. Need to estimate precision
    # to avoid rounding errors from temporary operations. Roughly log_2(n)
    # operations are performed.
    workprec = prec + 4*bitcount(n) + 4
    _, pm, pe, pbc = fone
    while 1:
        if n & 1:
            # Multiply the accumulated product by the current power
            pm = pm*man
            pe = pe+exp
            pbc += bc - 2
            pbc = pbc + bctable[int(pm >> pbc)]
            if pbc > workprec:
                # Truncate toward the chosen rounding direction
                if rounds_down:
                    pm = pm >> (pbc-workprec)
                else:
                    pm = -((-pm) >> (pbc-workprec))
                pe += pbc - workprec
                pbc = workprec
            n -= 1
            if not n:
                break
        # Square the current power
        man = man*man
        exp = exp+exp
        bc = bc + bc - 2
        bc = bc + bctable[int(man >> bc)]
        if bc > workprec:
            if rounds_down:
                man = man >> (bc-workprec)
            else:
                man = -((-man) >> (bc-workprec))
            exp += bc - workprec
            bc = workprec
        n = n // 2
    return normalize(result_sign, pm, pe, pbc, prec, rnd)
def mpf_perturb(x, eps_sign, prec, rnd):
    """
    For nonzero x, calculate x + eps with directed rounding, where
    eps < prec relatively and eps has the given sign (0 for
    positive, 1 for negative).
    With rounding to nearest, this is taken to simply normalize
    x to the given precision.
    """
    if rnd == round_nearest:
        return mpf_pos(x, prec, rnd)
    sign, man, exp, bc = x
    # eps is one unit just below the last retained bit of x
    eps = (eps_sign, MPZ_ONE, exp+bc-prec-1, 1)
    # Only actually add eps when it pushes the rounding away from x;
    # otherwise plain rounding already gives the perturbed result
    if sign:
        away = (rnd in (round_down, round_ceiling)) ^ eps_sign
    else:
        away = (rnd in (round_up, round_ceiling)) ^ eps_sign
    if away:
        return mpf_add(x, eps, prec, rnd)
    else:
        return mpf_pos(x, prec, rnd)
#----------------------------------------------------------------------------#
# Radix conversion #
#----------------------------------------------------------------------------#
def to_digits_exp(s, dps):
    """Helper function for representing the floating-point number s as
    a decimal with dps digits. Returns (sign, string, exponent) where
    sign is '' or '-', string is the digit string, and exponent is
    the decimal exponent as an int.
    If inexact, the decimal representation is rounded toward zero."""
    # Extract sign first so it doesn't mess up the string digit count
    if s[0]:
        sign = '-'
        s = mpf_neg(s)
    else:
        sign = ''
    _sign, man, exp, bc = s
    if not man:
        return '', '0', 0
    bitprec = int(dps * math.log(10,2)) + 10
    # Cut down to size
    # TODO: account for precision when doing this
    exp_from_1 = exp + bc
    if abs(exp_from_1) > 3500:
        from .libelefun import mpf_ln2, mpf_ln10
        # Set b = int(exp * log(2)/log(10))
        # If exp is huge, we must use high-precision arithmetic to
        # find the nearest power of ten
        expprec = bitcount(abs(exp)) + 5
        tmp = from_int(exp)
        tmp = mpf_mul(tmp, mpf_ln2(expprec))
        tmp = mpf_div(tmp, mpf_ln10(expprec), expprec)
        b = to_int(tmp)
        # Divide out 10**b so the remaining number has a small exponent
        s = mpf_div(s, mpf_pow_int(ften, b, bitprec), bitprec)
        _sign, man, exp, bc = s
        exponent = b
    else:
        exponent = 0
    # First, calculate mantissa digits by converting to a binary
    # fixed-point number and then converting that number to
    # a decimal fixed-point number.
    fixprec = max(bitprec - exp - bc, 0)
    fixdps = int(fixprec / math.log(10,2) + 0.5)
    sf = to_fixed(s, fixprec)
    sd = bin_to_radix(sf, fixprec, 10, fixdps)
    digits = numeral(sd, base=10, size=dps)
    # Adjust the exponent for the position of the leading digit
    exponent += len(digits) - fixdps - 1
    return sign, digits, exponent
def to_str(s, dps, strip_zeros=True, min_fixed=None, max_fixed=None,
    show_zero_exponent=False):
    """
    Convert a raw mpf to a decimal floating-point literal with at
    most `dps` decimal digits in the mantissa (not counting extra zeros
    that may be inserted for visual purposes).
    The number will be printed in fixed-point format if the position
    of the leading digit is strictly between min_fixed
    (default = min(-dps/3,-5)) and max_fixed (default = dps).
    To force fixed-point format always, set min_fixed = -inf,
    max_fixed = +inf. To force floating-point format, set
    min_fixed >= max_fixed.
    The literal is formatted so that it can be parsed back to a number
    by to_str, float() or Decimal().
    """
    # Special numbers
    if not s[1]:
        if s == fzero:
            if dps: t = '0.0'
            else: t = '.0'
            if show_zero_exponent:
                t += 'e+0'
            return t
        if s == finf: return '+inf'
        if s == fninf: return '-inf'
        if s == fnan: return 'nan'
        raise ValueError
    if min_fixed is None: min_fixed = min(-(dps//3), -5)
    if max_fixed is None: max_fixed = dps
    # to_digits_exp rounds to floor.
    # This sometimes kills some instances of "...00001"
    sign, digits, exponent = to_digits_exp(s, dps+3)
    # No digits: show only .0; round exponent to nearest
    if not dps:
        if digits[0] in '56789':
            exponent += 1
        digits = ".0"
    else:
        # Rounding up kills some instances of "...99999"
        if len(digits) > dps and digits[dps] in '56789':
            digits = digits[:dps]
            # Propagate the carry through trailing 9s
            i = dps - 1
            while i >= 0 and digits[i] == '9':
                i -= 1
            if i >= 0:
                digits = digits[:i] + str(int(digits[i]) + 1) + '0' * (dps - i - 1)
            else:
                # All digits were 9s: the carry creates a new leading 1
                digits = '1' + '0' * (dps - 1)
                exponent += 1
        else:
            digits = digits[:dps]
        # Prettify numbers close to unit magnitude
        if min_fixed < exponent < max_fixed:
            if exponent < 0:
                digits = ("0"*int(-exponent)) + digits
                split = 1
            else:
                split = exponent + 1
                if split > dps:
                    digits += "0"*(split-dps)
            exponent = 0
        else:
            split = 1
        digits = (digits[:split] + "." + digits[split:])
        if strip_zeros:
            # Clean up trailing zeros
            digits = digits.rstrip('0')
            if digits[-1] == ".":
                digits += "0"
    if exponent == 0 and dps and not show_zero_exponent: return sign + digits
    if exponent >= 0: return sign + digits + "e+" + str(exponent)
    if exponent < 0: return sign + digits + "e" + str(exponent)
def str_to_man_exp(x, base=10):
    """Helper function for from_str: parse a float literal into an
    integer mantissa and decimal exponent (man, exp)."""
    x = x.lower().rstrip('l')
    # Verify that the input is a valid float literal (raises otherwise)
    float(x)
    # Split off an explicit exponent, if any
    mant, _, expstr = x.partition('e')
    exp = int(expstr) if expstr else 0
    # Fold digits after the radix point into the exponent
    if '.' in mant:
        intpart, _, fracpart = mant.partition('.')
        fracpart = fracpart.rstrip('0')
        exp -= len(fracpart)
        mant = intpart + fracpart
    return MPZ(int(mant, base)), exp
# Special-value literals recognized (case-insensitively) by from_str
special_str = {'inf':finf, '+inf':finf, '-inf':fninf, 'nan':fnan}
def from_str(x, prec, rnd=round_fast):
    """Create a raw mpf from a decimal literal, rounding in the
    specified direction if the input number cannot be represented
    exactly as a binary floating-point number with the given number of
    bits. The literal syntax accepted is the same as for Python
    floats.
    TODO: the rounding does not work properly for large exponents.
    """
    x = x.lower().strip()
    if x in special_str:
        return special_str[x]
    # Also accept rational literals of the form "p/q"
    if '/' in x:
        p, q = x.split('/')
        p, q = p.rstrip('l'), q.rstrip('l')
        return from_rational(int(p), int(q), prec, rnd)
    man, exp = str_to_man_exp(x, base=10)
    # XXX: appropriate cutoffs & track direction
    # note no factors of 5
    if abs(exp) > 400:
        # Huge exponent: scale by a correctly rounded power of ten
        s = from_int(man, prec+10)
        s = mpf_mul(s, mpf_pow_int(ften, exp, prec+10), prec, rnd)
    else:
        if exp >= 0:
            s = from_int(man * 10**exp, prec, rnd)
        else:
            s = from_rational(man, 10**-exp, prec, rnd)
    return s
# Binary string conversion. These are currently mainly used for debugging
# and could use some improvement in the future
def from_bstr(x):
    """Parse a binary literal ("-101e3" style) into a raw mpf.
    Mainly a debugging helper; see to_bstr for the inverse."""
    man, exp = str_to_man_exp(x, base=2)
    man = MPZ(man)
    sign = 1 if man < 0 else 0
    if sign:
        man = -man
    bc = bitcount(man)
    return normalize(sign, man, exp, bc, bc, round_floor)
def to_bstr(x):
    """Render a raw mpf as a binary literal ("-101e3" style).
    Mainly a debugging helper; see from_bstr for the inverse."""
    sign, man, exp, bc = x
    prefix = '-' if sign else ''
    return prefix + numeral(man, size=bitcount(man), base=2) + ("e%i" % exp)
#----------------------------------------------------------------------------#
# Square roots #
#----------------------------------------------------------------------------#
def mpf_sqrt(s, prec, rnd=round_fast):
    """
    Compute the square root of a nonnegative mpf value. The
    result is correctly rounded.

    Raises ComplexResult for negative input.
    """
    sign, man, exp, bc = s
    if sign:
        raise ComplexResult("square root of a negative number")
    if not man:
        # Zero and +inf are their own square roots; nan propagates
        return s
    if exp & 1:
        # Make the exponent even so it can be halved exactly
        exp -= 1
        man <<= 1
        bc += 1
    elif man == 1:
        # Exact power of four: just halve the exponent
        return normalize1(sign, man, exp//2, bc, prec, rnd)
    shift = max(4, 2*prec-bc+4)
    shift += shift & 1
    if rnd in 'fd':
        # Floor/down rounding: the truncating isqrt already rounds correctly
        man = isqrt(man<<shift)
    else:
        man, rem = sqrtrem(man<<shift)
        # Perturb up
        if rem:
            man = (man<<1)+1
            shift += 2
    return from_man_exp(man, (exp-shift)//2, prec, rnd)
def mpf_hypot(x, y, prec, rnd=round_fast):
    """Compute the Euclidean norm sqrt(x**2 + y**2) of two raw mpfs
    x and y."""
    # Degenerate cases reduce to a single absolute value
    if x == fzero:
        return mpf_abs(y, prec, rnd)
    if y == fzero:
        return mpf_abs(x, prec, rnd)
    # A few guard bits on the sum keep the final rounding accurate
    xx = mpf_mul(x, x)
    yy = mpf_mul(y, y)
    return mpf_sqrt(mpf_add(xx, yy, prec+4), prec, rnd)
# When running inside Sage, replace the core arithmetic functions with
# the C-implemented versions from Sage's mpmath extension module, if
# that module is available.
if BACKEND == 'sage':
    try:
        import sage.libs.mpmath.ext_libmp as ext_lib
        mpf_add = ext_lib.mpf_add
        mpf_sub = ext_lib.mpf_sub
        mpf_mul = ext_lib.mpf_mul
        mpf_div = ext_lib.mpf_div
        mpf_sqrt = ext_lib.mpf_sqrt
    except ImportError:
        # Fall back silently to the pure-Python implementations above
        pass
| bsd-3-clause | 77f41804e845fdcd144fbc62a30d3014 | 30.839463 | 84 | 0.528353 | 3.526357 | false | false | false | false |
desihub/desisim | py/desisim/scripts/quickspectra.py | 1 | 19534 | from __future__ import absolute_import, division, print_function
import sys, os
import argparse
import time
import numpy as np
import astropy.table
import astropy.time
import astropy.units as u
import astropy.io.fits as pyfits
import desisim.specsim
import desisim.simexp
import desisim.obs
import desisim.io
import desisim.util
from desiutil.log import get_logger
import desispec.io
import desispec.io.util
import desimodel.io
import desitarget
from desispec.spectra import Spectra
from desispec.resolution import Resolution
def _fft_gaussian_smooth(array, sigmapix, pad_size=10):
iwavesize, nspec = array.shape
# Pad the input array to get rid of annoying edge effects
# Pad values are set to the edge value
arrsize = iwavesize+2*pad_size
padded_arr = np.empty((arrsize, nspec))
padded_arr[:pad_size, :] = array[0, :]
padded_arr[iwavesize+pad_size:, :] = array[-1, :]
padded_arr[pad_size:iwavesize+pad_size, :] = array
kvals = np.fft.rfftfreq(arrsize)
kernel_k = np.exp(-(kvals*sigmapix)**2/2.)
snumsource_k = np.fft.rfft(padded_arr, axis=0)*kernel_k[:, None]
return np.fft.irfft(snumsource_k, n=arrsize, axis=0)[pad_size:-pad_size]
# NOTE(review): the camera_output tables are modified in place (their
# columns behave as views), so callers observe the smoothed values
# without reassignment; attempting reassignment raises AttributeError.
# Assumes dwave_out is not None.
def _smooth_source_variance(camera_output, sigma_A, dwave_out):
    """Gaussian-smooth the per-arm source-electron counts by sigma_A
    (Angstrom) and propagate the change into variance_electrons and
    flux_inverse_variance.

    This decouples the noise estimate from small-scale source
    structure, reducing signal-noise coupling (e.g. for the Lya
    forest).
    """
    # arm_output shape is (wave.size, nspec)
    for i in range(3):  # presumably the three arms b, r, z -- TODO confirm
        arm_output = camera_output[i]
        # num_source_electrons goes into poisson noise.
        # Remove it from the variance first, smooth, then add it back.
        arm_output['variance_electrons'] -= arm_output['num_source_electrons']
        sigmapix = sigma_A/dwave_out
        arm_output['num_source_electrons'] = _fft_gaussian_smooth(arm_output['num_source_electrons'], sigmapix)
        # add smoothed source electrons back to variance
        arm_output['variance_electrons'] += arm_output['num_source_electrons']
        # Recompute ivar in flux units from the updated electron variance
        arm_output['flux_inverse_variance'] = (
            arm_output['flux_calibration'] ** -2 *
            arm_output['variance_electrons'] ** -1)
def sim_spectra(wave, flux, program, spectra_filename, obsconditions=None,
        sourcetype=None, targetid=None, redshift=None, expid=0, seed=0, skyerr=0.0, ra=None,
        dec=None, meta=None, fibermap_columns=None, fullsim=False, use_poisson=True, specsim_config_file="desi", dwave_out=None, save_resolution=True, source_contribution_smoothing=0):
    """
    Simulate spectra from an input set of wavelength and flux and writes a FITS file in the Spectra format that can
    be used as input to the redshift fitter.
    Args:
        wave : 1D np.array of wavelength in Angstrom (in vacuum) in observer frame (i.e. redshifted)
        flux : 1D or 2D np.array. 1D array must have same size as wave, 2D array must have shape[1]=wave.size
               flux has to be in units of 10^-17 ergs/s/cm2/A
        spectra_filename : path to output FITS file in the Spectra format
        program : dark, lrg, qso, gray, grey, elg, bright, mws, bgs
            ignored if obsconditions is not None
    Optional:
        obsconditions : dictionnary of observation conditions with SEEING EXPTIME AIRMASS MOONFRAC MOONALT MOONSEP
        sourcetype : list of string, allowed values are (sky,elg,lrg,qso,bgs,star), type of sources, used for fiber aperture loss , default is star
        targetid : list of targetids for each target. default of None has them generated as str(range(nspec))
        redshift : list/array with each index being the redshifts for that target
        expid : this expid number will be saved in the Spectra fibermap
        seed : random seed
        skyerr : fractional sky subtraction error
        ra : numpy array with targets RA (deg)
        dec : numpy array with targets Dec (deg)
        meta : dictionnary, saved in primary fits header of the spectra file
        fibermap_columns : add these columns to the fibermap
        fullsim : if True, write full simulation data in extra file per camera
        use_poisson : if False, do not use numpy.random.poisson to simulate the Poisson noise. This is useful to get reproducible random
                      realizations.
        save_resolution : if True it will save the Resolution matrix for each spectra.
                          If False returns a resolution matrix (useful for mocks to save disk space).
        source_contribution_smoothing : If > 0, contribution of source electrons to the noise and variance is
                                        Gaussian smoothed by this value. This reduces signal-noise coupling especially for Lya forest.
    """
    log = get_logger()
    # Promote a single 1D spectrum to shape (1, nwave)
    if len(flux.shape)==1 :
        flux=flux.reshape((1,flux.size))
    nspec=flux.shape[0]
    log.info("Starting simulation of {} spectra".format(nspec))
    if sourcetype is None :
        sourcetype = np.array(["star" for i in range(nspec)])
    log.debug("sourcetype = {}".format(sourcetype))
    tileid = 0
    telera = 0
    teledec = 0
    dateobs = time.gmtime()
    night = desisim.obs.get_night(utc=dateobs)
    program = program.lower()
    # Build a frame fibermap describing the simulated targets
    frame_fibermap = desispec.io.fibermap.empty_fibermap(nspec)
    frame_fibermap.meta["FLAVOR"]="custom"
    frame_fibermap.meta["NIGHT"]=night
    frame_fibermap.meta["EXPID"]=expid
    # add DESI_TARGET
    tm = desitarget.targetmask.desi_mask
    frame_fibermap['DESI_TARGET'][sourcetype=="star"]=tm.STD_FAINT
    frame_fibermap['DESI_TARGET'][sourcetype=="lrg"]=tm.LRG
    frame_fibermap['DESI_TARGET'][sourcetype=="elg"]=tm.ELG
    frame_fibermap['DESI_TARGET'][sourcetype=="qso"]=tm.QSO
    frame_fibermap['DESI_TARGET'][sourcetype=="sky"]=tm.SKY
    frame_fibermap['DESI_TARGET'][sourcetype=="bgs"]=tm.BGS_ANY
    if fibermap_columns is not None :
        for k in fibermap_columns.keys() :
            frame_fibermap[k] = fibermap_columns[k]
    if targetid is None:
        targetid = np.arange(nspec).astype(int)
    # add TARGETID
    frame_fibermap['TARGETID'] = targetid
    # spectra fibermap has two extra fields : night and expid
    # This would be cleaner if desispec would provide the spectra equivalent
    # of desispec.io.empty_fibermap()
    spectra_fibermap = desispec.io.empty_fibermap(nspec)
    spectra_fibermap = desispec.io.util.add_columns(spectra_fibermap,
                       ['NIGHT', 'EXPID', 'TILEID'],
                       [np.int32(night), np.int32(expid), np.int32(tileid)],
                       )
    for s in range(nspec):
        for tp in frame_fibermap.dtype.fields:
            spectra_fibermap[s][tp] = frame_fibermap[s][tp]
    if ra is not None :
        spectra_fibermap["TARGET_RA"] = ra
        spectra_fibermap["FIBER_RA"] = ra
    if dec is not None :
        spectra_fibermap["TARGET_DEC"] = dec
        spectra_fibermap["FIBER_DEC"] = dec
    # Resolve observing conditions from the program name, or look up a
    # named reference set when a string is given
    if obsconditions is None:
        if program in ['dark', 'lrg', 'qso']:
            obsconditions = desisim.simexp.reference_conditions['DARK']
        elif program in ['elg', 'gray', 'grey']:
            obsconditions = desisim.simexp.reference_conditions['GRAY']
        elif program in ['mws', 'bgs', 'bright']:
            obsconditions = desisim.simexp.reference_conditions['BRIGHT']
        else:
            raise ValueError('unknown program {}'.format(program))
    elif isinstance(obsconditions, str):
        try:
            obsconditions = desisim.simexp.reference_conditions[obsconditions.upper()]
        except KeyError:
            raise ValueError('obsconditions {} not in {}'.format(
                obsconditions.upper(),
                list(desisim.simexp.reference_conditions.keys())))
    # Determine the instrument wavelength coverage
    try:
        params = desimodel.io.load_desiparams()
        wavemin = params['ccd']['b']['wavemin']
        wavemax = params['ccd']['z']['wavemax']
    except KeyError:
        wavemin = desimodel.io.load_throughput('b').wavemin
        wavemax = desimodel.io.load_throughput('z').wavemax
    if specsim_config_file == "eboss":
        wavemin = 3500
        wavemax = 10000
    # Zero-pad the input spectra so they cover the full instrument range
    if wave[0] > wavemin:
        log.warning('Minimum input wavelength {}>{}; padding with zeros'.format(
                wave[0], wavemin))
        dwave = wave[1] - wave[0]
        npad = int((wave[0] - wavemin)/dwave + 1)
        wavepad = np.arange(npad) * dwave
        wavepad += wave[0] - dwave - wavepad[-1]
        fluxpad = np.zeros((flux.shape[0], len(wavepad)), dtype=flux.dtype)
        wave = np.concatenate([wavepad, wave])
        flux = np.hstack([fluxpad, flux])
        assert flux.shape[1] == len(wave)
        assert np.allclose(dwave, np.diff(wave))
        assert wave[0] <= wavemin
    if wave[-1] < wavemax:
        log.warning('Maximum input wavelength {}<{}; padding with zeros'.format(
                wave[-1], wavemax))
        dwave = wave[-1] - wave[-2]
        npad = int( (wavemax - wave[-1])/dwave + 1 )
        wavepad = wave[-1] + dwave + np.arange(npad)*dwave
        fluxpad = np.zeros((flux.shape[0], len(wavepad)), dtype=flux.dtype)
        wave = np.concatenate([wave, wavepad])
        flux = np.hstack([flux, fluxpad])
        assert flux.shape[1] == len(wave)
        assert np.allclose(dwave, np.diff(wave))
        assert wavemax <= wave[-1]
    # Trim to the instrument range and attach physical units
    ii = (wavemin <= wave) & (wave <= wavemax)
    flux_unit = 1e-17 * u.erg / (u.Angstrom * u.s * u.cm ** 2 )
    wave = wave[ii]*u.Angstrom
    flux = flux[:,ii]*flux_unit
    # Run the specsim-based simulation and add noise realizations
    sim = desisim.simexp.simulate_spectra(wave, flux, fibermap=frame_fibermap,
        obsconditions=obsconditions, redshift=redshift, seed=seed,
        psfconvolve=True, specsim_config_file=specsim_config_file, dwave_out=dwave_out)
    random_state = np.random.RandomState(seed)
    sim.generate_random_noise(random_state,use_poisson=use_poisson)
    # Smoothing source electron numbers only works for DESI mocks
    if specsim_config_file != "eboss" and source_contribution_smoothing > 0:
        log.info("Smoothing source contribution to noise estimates by {} A.".format(source_contribution_smoothing))
        _smooth_source_variance(sim.camera_output, source_contribution_smoothing, dwave_out)
    scale=1e17
    specdata = None
    # NOTE(review): np.tile below is computed even when save_resolution is
    # False and then immediately overwritten by the untiled matrix.
    resolution={}
    for camera in sim.instrument.cameras:
        R = Resolution(camera.get_output_resolution_matrix())
        resolution[camera.name] = np.tile(R.to_fits_array(), [nspec, 1, 1])
        if not save_resolution :
            resolution[camera.name] = R.to_fits_array()
    # Random fractional sky-subtraction residual per fiber
    skyscale = skyerr * random_state.normal(size=sim.num_fibers)
    if fullsim :
        for table in sim.camera_output :
            band = table.meta['name'].strip()[0]
            table_filename=spectra_filename.replace(".fits","-fullsim-{}.fits".format(band))
            table.write(table_filename,format="fits",overwrite=True)
            print("wrote",table_filename)
    # Assemble the output Spectra object, one band at a time
    if specsim_config_file == "eboss":
        for table in sim._eboss_camera_output:
            wave = table['wavelength'].astype(float)
            flux = (table['observed_flux']+table['random_noise_electrons']*table['flux_calibration']).T.astype(float)
            if np.any(skyscale):
                flux += ((table['num_sky_electrons']*skyscale)*table['flux_calibration']).T.astype(float)
            ivar = table['flux_inverse_variance'].T.astype(float)
            band = table.meta['name'].strip()[0]
            # Convert to the conventional 1e-17 erg/s/cm2/A flux units
            flux = flux * scale
            ivar = ivar / scale**2
            mask = np.zeros(flux.shape).astype(int)
            spec = Spectra([band], {band : wave}, {band : flux}, {band : ivar},
                        resolution_data=None,
                        mask={band : mask},
                        fibermap=spectra_fibermap,
                        meta=meta,
                        single=True)
            if specdata is None :
                specdata = spec
            else :
                specdata.update(spec)
    else:
        for table in sim.camera_output :
            wave = table['wavelength'].astype(float)
            flux = (table['observed_flux']+table['random_noise_electrons']*table['flux_calibration']).T.astype(float)
            if np.any(skyscale):
                flux += ((table['num_sky_electrons']*skyscale)*table['flux_calibration']).T.astype(float)
            ivar = table['flux_inverse_variance'].T.astype(float)
            band = table.meta['name'].strip()[0]
            # Convert to the conventional 1e-17 erg/s/cm2/A flux units
            flux = flux * scale
            ivar = ivar / scale**2
            mask = np.zeros(flux.shape).astype(int)
            if not save_resolution :
                spec = Spectra([band], {band : wave}, {band : flux}, {band : ivar},
                            resolution_data=None,
                            mask={band : mask},
                            fibermap=spectra_fibermap,
                            meta=meta,
                            single=True)
            else :
                spec = Spectra([band], {band : wave}, {band : flux}, {band : ivar},
                            resolution_data={band : resolution[band]},
                            mask={band : mask},
                            fibermap=spectra_fibermap,
                            meta=meta,
                            single=True)
            if specdata is None :
                specdata = spec
            else :
                specdata.update(spec)
    desispec.io.write_spectra(spectra_filename, specdata)
    log.info('Wrote '+spectra_filename)
    # need to clear the simulation buffers that keeps growing otherwise
    # because of a different number of fibers each time ...
    desisim.specsim._simulators.clear()
    desisim.specsim._simdefaults.clear()
    if not save_resolution :
        return resolution
def parse(options=None):
    """Build the quickspectra argument parser and parse `options`
    (defaults to sys.argv when None)."""
    p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                description="""Fast simulation of spectra into the final DESI format (Spectra class) that can be directly used as
an input to the redshift fitter (redrock). The input file is an ASCII file with first column the wavelength in A (in vacuum, redshifted), the other columns are treated as spectral flux densities in units of 10^-17 ergs/s/cm2/A.""")
    #- Required
    p.add_argument('-i','--input', type=str, required=True, help="Input spectra, ASCII or fits")
    p.add_argument('-o','--out-spectra', type=str, required=True, help="Output spectra")
    #- Optional
    p.add_argument('--repeat', type=int, default=1, help="Duplicate the input spectra to have several random realizations")
    #- Optional observing conditions to override program defaults
    p.add_argument('--program', type=str, default="DARK", help="Program (DARK, GRAY or BRIGHT)")
    p.add_argument('--seeing', type=float, default=None, help="Seeing FWHM [arcsec]")
    p.add_argument('--airmass', type=float, default=None, help="Airmass")
    p.add_argument('--exptime', type=float, default=None, help="Exposure time [sec]")
    p.add_argument('--moonfrac', type=float, default=None, help="Moon illumination fraction; 1=full")
    p.add_argument('--moonalt', type=float, default=None, help="Moon altitude [degrees]")
    p.add_argument('--moonsep', type=float, default=None, help="Moon separation to tile [degrees]")
    p.add_argument('--seed', type=int, default=0, help="Random seed")
    p.add_argument('--source-type', type=str, default=None, help="Source type (for fiber loss), among sky,elg,lrg,qso,bgs,star")
    p.add_argument('--skyerr', type=float, default=0.0, help="Fractional sky subtraction error")
    p.add_argument('--fullsim',action='store_true',help="write full simulation data in extra file per camera, for debugging")
    # argparse treats None the same as an absent argument list (sys.argv)
    return p.parse_args(options)
def main(args=None):
    """Simulate DESI spectra from an input ASCII or FITS file.

    Reads a wavelength grid and one or more flux columns from ``args.input``,
    assembles the observing conditions for the requested program, then calls
    ``sim_spectra`` to write the simulated spectra to ``args.out_spectra``.

    Args:
        args: A parsed argparse.Namespace, or a list/tuple of option strings
            (or None) which will be parsed by :func:`parse`.

    Raises:
        SystemExit: with exit code 12 on invalid inputs.
    """
    log = get_logger()
    # Accept either pre-parsed arguments or raw option strings / None.
    # (The original code performed this check twice; once is sufficient.)
    if isinstance(args, (list, tuple, type(None))):
        args = parse(args)
    if args.source_type is not None:
        allowed = ["sky", "elg", "lrg", "qso", "bgs", "star"]
        if args.source_type not in allowed:
            log.error("source type has to be among {}".format(allowed))
            sys.exit(12)
    exptime = args.exptime
    if exptime is None:
        exptime = 1000.  # sec
    # - Generate obsconditions with args.program, then override as needed
    obsconditions = desisim.simexp.reference_conditions[args.program.upper()]
    if args.airmass is not None:
        obsconditions['AIRMASS'] = args.airmass
    if args.seeing is not None:
        obsconditions['SEEING'] = args.seeing
    # exptime always has a value at this point (default applied above).
    obsconditions['EXPTIME'] = exptime
    if args.moonfrac is not None:
        obsconditions['MOONFRAC'] = args.moonfrac
    if args.moonalt is not None:
        obsconditions['MOONALT'] = args.moonalt
    if args.moonsep is not None:
        obsconditions['MOONSEP'] = args.moonsep
    # Try FITS first; fall back to ASCII if the file cannot be opened as FITS.
    isfits = False
    hdulist = None
    try:
        hdulist = pyfits.open(args.input)
        isfits = True
    except (IOError, OSError):
        pass
    if isfits:
        log.info("Reading an input FITS file")
        if 'WAVELENGTH' in hdulist:
            input_wave = hdulist["WAVELENGTH"].data
        elif "WAVE" in hdulist:
            input_wave = hdulist["WAVE"].data
        else:
            log.error("need an HDU with EXTNAME='WAVELENGTH' with a 1D array/image of wavelength in A in vacuum")
            sys.exit(12)
        if "FLUX" not in hdulist:
            log.error("need an HDU with EXTNAME='FLUX' with a 1D or 2D array/image of flux in units of 10^-17 ergs/s/cm2/A")
            sys.exit(12)
        input_flux = hdulist["FLUX"].data
        # The last axis of FLUX must match the wavelength grid. Using
        # shape[-1] supports both 1D and 2D flux arrays (the original
        # shape[1] raised IndexError for 1D data), and a mismatch is a
        # fatal input error rather than something to silently ignore.
        if input_wave.size != input_flux.shape[-1]:
            log.error("WAVELENGTH size {} != FLUX shape[-1] = {} (NAXIS1 in fits)".format(
                input_wave.size, input_flux.shape[-1]))
            sys.exit(12)
        hdulist.close()
    else:
        # read as ASCII
        try:
            tmp = np.loadtxt(args.input).T
        except (ValueError, TypeError):
            log.error("could not read ASCII file, need at least two columns, separated by ' ', the first one for wavelength in A in vacuum, the other ones for flux in units of 10^-17 ergs/s/cm2/A, one column per spectrum.")
            log.error("error message : {}".format(sys.exc_info()))
            sys.exit(12)
        # A single-column file loads as a 1D array; require a 2D array with
        # a wavelength column plus at least one flux column.
        if tmp.ndim != 2 or tmp.shape[0] < 2:
            log.error("need at least two columns in ASCII file (one for wavelength in A in vacuum, one for flux in units of 10^-17 ergs/s/cm2/A")
            sys.exit(12)
        input_wave = tmp[0]
        input_flux = tmp[1:]
    if args.repeat > 1:
        # Duplicate the spectra so several random realizations are simulated.
        input_flux = np.tile(input_flux, (args.repeat, 1))
        log.info("input flux shape (after repeat) = {}".format(input_flux.shape))
    else:
        log.info("input flux shape = {}".format(input_flux.shape))
    sourcetype = args.source_type
    # Broadcast a scalar source type to one entry per spectrum.
    if sourcetype is not None and len(input_flux.shape) > 1:
        nspec = input_flux.shape[0]
        sourcetype = np.array([sourcetype for i in range(nspec)])
    sim_spectra(input_wave, input_flux, args.program, obsconditions=obsconditions,
                spectra_filename=args.out_spectra, seed=args.seed, sourcetype=sourcetype,
                skyerr=args.skyerr, fullsim=args.fullsim)
| bsd-3-clause | 909df042763fd3826673ea85f96a6c7a | 42.408889 | 266 | 0.618818 | 3.63153 | false | false | false | false |
metoppv/improver | improver/regrid/grid.py | 3 | 16102 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Grid handling for regridding
"""
from typing import List, Tuple, Union
import iris
import numpy as np
from iris.cube import Cube
from numpy import ndarray
from numpy.ma.core import MaskedArray
from scipy.interpolate import RegularGridInterpolator
from improver.utilities.cube_manipulation import sort_coord_in_cube
from improver.utilities.spatial import calculate_grid_spacing, lat_lon_determine
def ensure_ascending_coord(cube: Cube) -> Cube:
    """
    Ensure the spatial coordinates of a cube are in ascending order.
    Any x or y coordinate found in descending order is sorted in place
    by returning a re-sorted copy of the cube.
    Args:
        cube:
            Input source cube.
    Returns:
        Cube with ascending x and y coordinates.
    """
    for axis_name in ("x", "y"):
        axis_points = cube.coord(axis=axis_name).points
        # A first point larger than the last indicates a descending axis.
        if axis_points[0] > axis_points[-1]:
            cube = sort_coord_in_cube(cube, cube.coord(axis=axis_name).standard_name)
    return cube
def calculate_input_grid_spacing(cube_in: Cube) -> Tuple[float, float]:
    """
    Determine the latitude and longitude grid spacing of a source cube.
    The source grid must be an evenly-spaced, ascending latitude/longitude
    system.
    Args:
        cube_in:
            Input source cube.
    Returns:
        - Grid spacing in latitude, in degrees.
        - Grid spacing in longitude, in degrees.
    Raises:
        ValueError:
            If the input grid is not on a latitude/longitude system, or if
            its coordinates are not in ascending order.
    """
    # A non-None return from lat_lon_determine indicates a projected
    # (non lat/lon) coordinate system.
    if lat_lon_determine(cube_in) is not None:
        raise ValueError("Input grid is not on a latitude/longitude system")
    # These calls raise if the spacing is not regular within tolerance.
    spacing_lon = calculate_grid_spacing(cube_in, "degree", axis="x", rtol=4.0e-5)
    spacing_lat = calculate_grid_spacing(cube_in, "degree", axis="y", rtol=4.0e-5)
    points_y = cube_in.coord(axis="y").points
    points_x = cube_in.coord(axis="x").points
    if points_x[-1] < points_x[0] or points_y[-1] < points_y[0]:
        raise ValueError("Input grid coordinates are not ascending.")
    return spacing_lat, spacing_lon
def get_cube_coord_names(cube: Cube) -> List[str]:
    """
    Collect the standard names of all dimension coordinates of a cube.
    Args:
        cube:
            Input cube.
    Returns:
        List of dimension coordinate names, in dimension order.
    """
    names = []
    for dim_coord in cube.dim_coords:
        names.append(dim_coord.standard_name)
    return names
def latlon_names(cube: Cube) -> Tuple[str, str]:
    """
    Look up the names of the latitude and longitude dimensions of a cube.
    Args:
        cube:
            Input cube.
    Returns:
        - Name of the cube's latitude (y-axis) dimension.
        - Name of the cube's longitude (x-axis) dimension.
    """
    return (
        cube.coord(axis="y").standard_name,
        cube.coord(axis="x").standard_name,
    )
def latlon_from_cube(cube: Cube) -> ndarray:
    """
    Build the full set of latitude-longitude coordinate pairs of a cube's
    grid points, in row-major (latitude-outer) order.
    Args:
        cube:
            Cube with spatial coords.
    Returns:
        Latitude-longitude pairs (N x 2).
    """
    lat_name, lon_name = latlon_names(cube)
    lat_points = cube.coord(lat_name).points
    lon_points = cube.coord(lon_name).points
    # "ij" indexing keeps latitude as the leading (slow-varying) axis.
    lat_grid, lon_grid = np.meshgrid(lat_points, lon_points, indexing="ij")
    return np.column_stack((lat_grid.ravel(), lon_grid.ravel()))
def unflatten_spatial_dimensions(
    regrid_result: ndarray,
    cube_out_mask: Cube,
    in_values: ndarray,
    lats_index: int,
    lons_index: int,
) -> Union[ndarray, MaskedArray]:
    """
    Reshape a flattened regridding result from (lat*lon, ...) back to
    (..., lat, lon), or from (projy*projx, ...) back to (..., projy, projx).
    Args:
        regrid_result:
            Array of regridded result in (lat*lon,....) or (projy*projx,...).
        cube_out_mask:
            Target grid cube (used only for its grid dimension lengths).
        in_values:
            Reshaped source data (as produced by flatten_spatial_dimensions),
            used for the trailing non-spatial dimension sizes.
        lats_index:
            Index of the lats/projy coord in the reshaped array.
        lons_index:
            Index of the lons/projx coord in the reshaped array.
    Returns:
        Reshaped data array.
    """
    n_y = cube_out_mask.coord(axis="y").shape[0]
    n_x = cube_out_mask.coord(axis="x").shape[0]
    target_shape = [n_y, n_x] + list(in_values.shape[1:])
    reshaped = np.reshape(regrid_result, target_shape)
    # Undo the axis swaps applied when flattening: x back first, then y.
    reshaped = np.swapaxes(reshaped, 1, lons_index)
    reshaped = np.swapaxes(reshaped, 0, lats_index)
    return reshaped
def flatten_spatial_dimensions(
    cube: Cube,
) -> Tuple[Union[ndarray, MaskedArray], int, int]:
    """
    Reshape a data cube from (..., lat, lon) into a 2D-leading form
    (lat*lon, ...), recording where the spatial axes originally were.
    Args:
        cube:
            Original data cube.
    Returns:
        - Reshaped data array.
        - Index of the latitude coordinate dimension in the cube.
        - Index of the longitude coordinate dimension in the cube.
    """
    lat_name, lon_name = latlon_names(cube)
    lat_dim = cube.coord_dims(lat_name)[0]
    lon_dim = cube.coord_dims(lon_name)[0]
    # Move latitude to axis 0 and longitude to axis 1 before flattening.
    data = np.swapaxes(cube.data, 0, lat_dim)
    data = np.swapaxes(data, 1, lon_dim)
    n_lat = int(data.shape[0])
    n_lon = int(data.shape[1])
    flat_shape = [n_lat * n_lon] + list(data.shape[2:])
    return np.reshape(data, flat_shape), lat_dim, lon_dim
def classify_output_surface_type(cube_out_mask: Cube) -> ndarray:
    """
    Flatten a binary land/sea mask for the target grid into a 1D
    classification, one value per target grid point.
    Args:
        cube_out_mask:
            land_sea mask information cube for target grid (land=1)
    Returns:
        1D land-sea mask information for 1D-ordered target grid points
    """
    # cube y-axis => latitude or projection-y
    n_y = cube_out_mask.coord(axis="y").shape[0]
    n_x = cube_out_mask.coord(axis="x").shape[0]
    return cube_out_mask.data.reshape(n_y * n_x)
def classify_input_surface_type(
    cube_in_mask: Cube, classify_latlons: ndarray
) -> ndarray:
    """
    Classify source grid points as land or sea from a binary land mask.
    The mask cube's grid may differ from the source data grid; each
    requested point is classified by nearest-neighbour lookup in the mask.
    Args:
        cube_in_mask:
            Land_sea mask information cube for input source grid (land=1)
            which should be in GeogCS's lats/lons coordinate system.
        classify_latlons:
            Latitude and longitude source grid points to classify (N x 2).
    Returns:
        Boolean classifications (N) for 1D-ordered source grid points.
    """
    mask_data = cube_in_mask.data
    mask_lat_name, mask_lon_name = latlon_names(cube_in_mask)
    mask_lats = cube_in_mask.coord(mask_lat_name).points
    mask_lons = cube_in_mask.coord(mask_lon_name).points
    # Nearest-neighbour lookup of the mask at each requested lat/lon;
    # points outside the mask's domain are treated as sea (fill_value=0).
    nearest_lookup = RegularGridInterpolator(
        (mask_lats, mask_lons),
        mask_data,
        method="nearest",
        bounds_error=False,
        fill_value=0.0,
    )
    return np.bool_(nearest_lookup(classify_latlons))
def similar_surface_classify(
    in_is_land: ndarray, out_is_land: ndarray, nearest_in_indexes: ndarray
) -> ndarray:
    """
    Determine whether each candidate source point shares the surface type
    of the target point it may serve.
    Args:
        in_is_land:
            Source point classifications (N).
        out_is_land:
            Target point classifications (M).
        nearest_in_indexes:
            Indexes of input points nearby output points (M x K).
    Return:
        Boolean array (M x K), True where a candidate source point has the
        same surface type as its target point.
    """
    # Classification of every candidate source point, per target (M x K).
    candidate_types = in_is_land[nearest_in_indexes]
    # Each target's own classification, broadcast along the K candidates.
    target_types = out_is_land[:, np.newaxis]
    # XOR is True where the types differ; negate to flag matches.
    return np.logical_not(np.logical_xor(candidate_types, target_types))
def slice_cube_by_domain(
    cube_in: Cube, output_domain: Tuple[float, float, float, float]
) -> Cube:
    """
    Cut down the source cube to the given latitude/longitude domain, with
    a margin of two grid lengths on every side.
    Args:
        cube_in:
            Input data cube to be sliced.
        output_domain:
            Lat_max, lon_max, lat_min, lon_min.
    Returns:
        Data cube after slicing.
    """
    lat_max, lon_max, lat_min, lon_min = output_domain
    lat_spacing, lon_spacing = calculate_input_grid_spacing(cube_in)
    # Pad by two grid lengths so interpolation near the domain edge still
    # has source points available on all sides.
    constraint = iris.Constraint(
        latitude=lambda val: lat_min - 2.0 * lat_spacing
        < val
        < lat_max + 2.0 * lat_spacing
    ) & iris.Constraint(
        longitude=lambda val: lon_min - 2.0 * lon_spacing
        < val
        < lon_max + 2.0 * lon_spacing
    )
    return cube_in.extract(constraint)
def slice_mask_cube_by_domain(
    cube_in: Cube, cube_in_mask: Cube, output_domain: Tuple[float, float, float, float]
) -> Tuple[Cube, Cube]:
    """
    Cut down both the source cube and its land mask cube to the given
    latitude/longitude domain, padded by two grid lengths of the coarser
    of the two grids.
    Args:
        cube_in:
            Input data cube to be sliced.
        cube_in_mask:
            Input mask cube to be sliced.
        output_domain:
            Lat_max, lon_max, lat_min, lon_min.
    Returns:
        - Data cube after slicing.
        - Mask cube after slicing.
    """
    lat_max, lon_max, lat_min, lon_min = output_domain
    data_lat_d, data_lon_d = calculate_input_grid_spacing(cube_in)
    mask_lat_d, mask_lon_d = calculate_input_grid_spacing(cube_in_mask)
    # Use the coarser spacing of the two grids so the margin is wide enough
    # for both cubes.
    lat_d = max(data_lat_d, mask_lat_d)
    lon_d = max(data_lon_d, mask_lon_d)
    constraint = iris.Constraint(
        latitude=lambda val: lat_min - 2.0 * lat_d < val < lat_max + 2.0 * lat_d
    ) & iris.Constraint(
        longitude=lambda val: lon_min - 2.0 * lon_d < val < lon_max + 2.0 * lon_d
    )
    return cube_in.extract(constraint), cube_in_mask.extract(constraint)
def create_regrid_cube(cube_array: ndarray, cube_in: Cube, cube_out: Cube) -> Cube:
    """
    Create a regridded cube from regridded value(numpy array).
    Source cube_in must be in regular latitude/longitude coordinates.
    Target cube_out can be either regular latitude/longitude grid or equal area.
    Args:
        cube_array:
            regridded value (multidimensional)
        cube_in:
            source cube (for value's non-grid dimensions and attributes)
        cube_out:
            target cube (for target grid information)
    Returns:
        Regridded result cube
    """
    # generate a cube based on new data and cube_in
    cube_v = Cube(cube_array, units=cube_in.units, attributes=cube_in.attributes)
    cube_v.rename(cube_in.standard_name or cube_in.long_name)
    cube_v.var_name = cube_in.var_name
    # use dim_coord from cube_in except lat/lon
    cube_coord_names = get_cube_coord_names(cube_in)
    lats_name, lons_name = latlon_names(cube_in)
    cube_coord_names.remove(lats_name)
    cube_coord_names.remove(lons_name)
    # Non-spatial dimension coords keep their leading positions from cube_in.
    ndim = len(cube_coord_names)
    for i, val in enumerate(cube_coord_names):
        cube_v.add_dim_coord(cube_in.coord(val), i)
    # Put in suitable spatial coord from cube_out into cube_in
    # (the target grid's spatial coords become the two trailing dimensions).
    cord_1, cord_2 = latlon_names(cube_out)
    cube_v.add_dim_coord(cube_out.coord(cord_1), ndim)
    cube_v.add_dim_coord(cube_out.coord(cord_2), ndim + 1)
    # add all aux_coords from cube_in
    # NOTE(review): coord_dims are taken from cube_in; this assumes aux
    # coords do not span the (replaced) spatial dimensions — confirm.
    for coord in cube_in.aux_coords:
        cube_v.add_aux_coord(coord.copy(), cube_in.coord_dims(coord))
    return cube_v
def group_target_points_with_source_domain(
    cube_in: Cube, out_latlons: ndarray
) -> Tuple[ndarray, ndarray]:
    """
    Split the target grid points into those outside and those inside the
    source cube's spatial domain.
    Args:
        cube_in:
            Source cube.
        out_latlons:
            Target points's latitude-longitudes.
    Returns:
        - Index array of target points outside input domain.
        - Index array of target points inside input domain.
    """
    # Bounding box of the source grid.
    y_points = cube_in.coord(axis="y").points
    x_points = cube_in.coord(axis="x").points
    lat_min, lat_max = np.min(y_points), np.max(y_points)
    lon_min, lon_max = np.min(x_points), np.max(x_points)
    target_lats = out_latlons[:, 0]
    target_lons = out_latlons[:, 1]
    # A target point is inside when both its latitude and longitude fall
    # within the source bounding box (inclusive).
    inside = np.logical_and(
        np.logical_and(target_lats >= lat_min, target_lats <= lat_max),
        np.logical_and(target_lons >= lon_min, target_lons <= lon_max),
    )
    outside_index = np.where(np.logical_not(inside))[0]
    inside_index = np.where(inside)[0]
    return outside_index, inside_index
def mask_target_points_outside_source_domain(
    total_out_point_num: int,
    outside_input_domain_index: ndarray,
    inside_input_domain_index: ndarray,
    regrid_result: Union[ndarray, MaskedArray],
) -> Union[ndarray, MaskedArray]:
    """
    Expand the regridded result to cover every target point, masking (or
    NaN-filling) those target points that fall outside the source domain.
    Args:
        total_out_point_num:
            Total number of target points
        outside_input_domain_index:
            Index array of target points outside input domain.
        inside_input_domain_index:
            Index array of target points inside input domain.
        regrid_result:
            Array of regridded result in (lat*lon,....) or (projy*projx,...).
    Returns:
        Array of regridded result in (lat*lon,....) or (projy*projx,...).
    """
    expanded_shape = [total_out_point_num] + list(regrid_result.shape[1:])
    if isinstance(regrid_result, np.ma.MaskedArray):
        # Start fully masked; unmask only the points inside the domain.
        expanded = np.ma.zeros(expanded_shape, dtype=np.float32)
        expanded.mask = np.full(expanded_shape, True, dtype=bool)
        expanded.mask[inside_input_domain_index] = regrid_result.mask
        expanded.data[inside_input_domain_index] = regrid_result.data
        return expanded
    # Plain ndarray: mark out-of-domain points with NaN.
    expanded = np.zeros(expanded_shape, dtype=np.float32)
    expanded[inside_input_domain_index] = regrid_result
    expanded[outside_input_domain_index] = np.nan
    return expanded
| bsd-3-clause | 0cdf44168a3942ff91d37a48fe4135b0 | 32.476091 | 88 | 0.649174 | 3.462796 | false | false | false | false |
metoppv/improver | improver_tests/acceptance/test_feels_like_temp.py | 3 | 2563 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Tests for the feels-like-temp CLI
"""
import pytest
from . import acceptance as acc
# Mark all tests in this module as acceptance tests, and skip them when the
# known-good-output (KGO) data is not available.
pytestmark = [pytest.mark.acc, acc.skip_if_kgo_missing]
# Derive the CLI name from this test module's filename and build its runner.
CLI = acc.cli_name_with_dashes(__file__)
run_cli = acc.run_cli(CLI)
def test_basic(tmp_path):
    """Test basic feels like temperature processing"""
    kgo_dir = acc.kgo_root() / "feels_like_temp/ukvx"
    kgo_path = kgo_dir / "kgo.nc"
    diagnostics = [
        "temperature_at_screen_level",
        "wind_speed_at_10m",
        "relative_humidity_at_screen_level",
        "pressure_at_mean_sea_level",
    ]
    # One input file per required diagnostic, all from the same cycle.
    input_paths = [
        kgo_dir / f"20181121T1200Z-PT0012H00M-{diagnostic}.nc"
        for diagnostic in diagnostics
    ]
    output_path = tmp_path / "output.nc"
    args = [*input_paths]
    args += ["--output", output_path]
    args += ["--model-id-attr", "mosg__model_configuration"]
    run_cli(args)
    acc.compare(output_path, kgo_path)
| bsd-3-clause | 9cb9169a9ec1f312ecab4b9fd28c4ab2 | 37.833333 | 79 | 0.691377 | 3.949153 | false | true | false | false |
metoppv/improver | improver/cli/neighbour_finding.py | 3 | 9856 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to create neighbour cubes for extracting spot data."""
from improver import cli
@cli.clizefy
@cli.with_output
def process(
    orography: cli.inputcube,
    land_sea_mask: cli.inputcube,
    site_list: cli.inputjson,
    *,
    all_methods=False,
    land_constraint=False,
    similar_altitude=False,
    search_radius: float = None,
    node_limit: int = None,
    site_coordinate_system=None,
    site_coordinate_options=None,
    site_x_coordinate=None,
    site_y_coordinate=None,
    unique_site_id_key=None,
):
    """Create neighbour cubes for extracting spot data.
    Determine grid point coordinates within the provided cubes that neighbour
    spot data sites defined within the provided JSON/Dictionary.
    If no options are set the returned cube will contain the nearest neighbour
    found for each site. Other constrained neighbour finding methods can be
    set with options below.
    1. Nearest neighbour.
    2. Nearest land point neighbour.
    3. Nearest neighbour with minimum height difference.
    4. Nearest land point neighbour with minimum height difference.
    Args:
        orography (iris.cube.Cube):
            Cube of model orography for the model grid on which neighbours are
            being found.
        land_sea_mask (iris.cube.Cube):
            Cube of model land mask for the model grid on which neighbours are
            being found, with land points set to one and sea points set to
            zero.
        site_list (dict):
            Dictionary that contains the spot sites for which neighbouring grid
            points are to be found.
        all_methods (bool):
            If True, this will return a cube containing the nearest grid point
            neighbours to spot sites as defined by each possible combination
            of constraints.
        land_constraint (bool):
            If True, this will return a cube containing the nearest grid point
            neighbours to spot sites that are also land points. May be used
            with the similar_altitude option.
        similar_altitude (bool):
            If True, this will return a cube containing the nearest grid point
            neighbour to each spot site that is found, within a given search
            radius, to minimise the height difference between the two. May be
            used with the land_constraint option.
        search_radius (float):
            The radius in metres about a spot site within which to search for
            a grid point neighbour that is land or which has a smaller height
            difference than the nearest.
        node_limit (int):
            When searching within the defined search_radius for suitable
            neighbours, a KDTree is constructed. This node_limit prevents the
            tree from becoming too large for large search radii. A default of
            36 will be set, which is to say the nearest 36 grid points will be
            considered. If the search radius is likely to contain more than
            36 points, this value should be increased to ensure all point
            are considered.
        site_coordinate_system (cartopy coordinate system):
            The coordinate system in which the site coordinates are provided
            within the site list. This must be provided as the name of a
            cartopy coordinate system. The Default will become PlateCarree.
        site_coordinate_options (str):
            JSON formatted string of options passed to the cartopy coordinate
            system given in site_coordinate_system. "globe" is handled as a
            special case to construct a cartopy Globe object.
        site_x_coordinate (str):
            The key that identifies site x coordinates in the provided site
            dictionary. Defaults to longitude.
        site_y_coordinate (str):
            The key that identifies site y coordinates in the provided site
            dictionary. Defaults to latitude.
        unique_site_id_key (str):
            Key in the provided site list that corresponds to a unique numerical
            ID for every site (up to 8 digits). If this optional key is provided
            such an identifier must exist for every site. This key will be used
            as the name for an additional coordinate on the returned neighbour
            cube. Values in this coordinate will be recorded as strings, with
            all numbers padded to 8-digits, e.g. "00012345".
    Returns:
        iris.cube.Cube:
            The processed Cube.
    Raises:
        ValueError:
            If all_methods is used with land_constraint or similar_altitude.
    """
    import json
    import cartopy.crs as ccrs
    import iris
    import numpy as np
    from improver.spotdata.neighbour_finding import NeighbourSelection
    from improver.utilities.cube_manipulation import (
        MergeCubes,
        enforce_coordinate_ordering,
    )
    # Names of cartopy coordinate reference system classes accepted for
    # the site_coordinate_system option.
    PROJECTION_LIST = [
        "AlbersEqualArea",
        "AzimuthalEquidistant",
        "EuroPP",
        "Geocentric",
        "Geodetic",
        "Geostationary",
        "Globe",
        "Gnomonic",
        "LambertAzimuthalEqualArea",
        "LambertConformal",
        "LambertCylindrical",
        "Mercator",
        "Miller",
        "Mollweide",
        "NearsidePerspective",
        "NorthPolarStereo",
        "OSGB",
        "OSNI",
        "Orthographic",
        "PlateCarree",
        "Projection",
        "Robinson",
        "RotatedGeodetic",
        "RotatedPole",
        "Sinusoidal",
        "SouthPolarStereo",
        "Stereographic",
        "TransverseMercator",
        "UTM",
    ]
    # Check valid options have been selected.
    if all_methods is True and (land_constraint or similar_altitude):
        raise ValueError("Cannot use all_methods option with other constraints.")
    # Filter kwargs for those expected by plugin and which are set.
    # This preserves the plugin defaults for unset options.
    args = {
        "land_constraint": land_constraint,
        "minimum_dz": similar_altitude,
        "search_radius": search_radius,
        "site_coordinate_system": site_coordinate_system,
        "site_coordinate_options": site_coordinate_options,
        "site_x_coordinate": site_x_coordinate,
        "node_limit": node_limit,
        "site_y_coordinate": site_y_coordinate,
        "unique_site_id_key": unique_site_id_key,
    }
    fargs = (site_list, orography, land_sea_mask)
    # Note: False values for the boolean options survive this filter; only
    # genuinely unset (None) options fall back to the plugin defaults.
    kwargs = {k: v for (k, v) in args.items() if v is not None}
    # Deal with coordinate systems for sites other than PlateCarree.
    if "site_coordinate_system" in kwargs.keys():
        scrs = kwargs["site_coordinate_system"]
        if scrs not in PROJECTION_LIST:
            raise ValueError("invalid projection {}".format(scrs))
        site_crs = getattr(ccrs, scrs)
        scrs_opts = json.loads(kwargs.pop("site_coordinate_options", "{}"))
        if "globe" in scrs_opts:
            # The Globe is built separately; all remaining options are passed
            # to the projection constructor itself.
            crs_globe = ccrs.Globe(**scrs_opts["globe"])
            del scrs_opts["globe"]
        else:
            crs_globe = ccrs.Globe()
        kwargs["site_coordinate_system"] = site_crs(globe=crs_globe, **scrs_opts)
    # Call plugin to generate neighbour cubes
    if all_methods:
        # Run the plugin once per constraint combination, then merge the
        # results along the neighbour_selection_method coordinate.
        methods = [
            {**kwargs, "land_constraint": False, "minimum_dz": False},
            {**kwargs, "land_constraint": True, "minimum_dz": False},
            {**kwargs, "land_constraint": False, "minimum_dz": True},
            {**kwargs, "land_constraint": True, "minimum_dz": True},
        ]
        all_methods = iris.cube.CubeList([])
        for method in methods:
            all_methods.append(NeighbourSelection(**method)(*fargs))
        # Renumber the method coordinate and squeeze out length-one dims so
        # the cubes can be merged.
        squeezed_cubes = iris.cube.CubeList([])
        for index, cube in enumerate(all_methods):
            cube.coord("neighbour_selection_method").points = np.int32(index)
            squeezed_cubes.append(iris.util.squeeze(cube))
        result = MergeCubes()(squeezed_cubes)
    else:
        result = NeighbourSelection(**kwargs)(*fargs)
    enforce_coordinate_ordering(
        result, ["neighbour_selection_method", "grid_attributes", "spot_index"]
    )
    return result
| bsd-3-clause | 1ff782ff2e97ef73f12d124b8b290576 | 40.586498 | 81 | 0.654322 | 4.292683 | false | false | false | false |
metoppv/improver | doc/source/conf.py | 3 | 13226 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# IMPROVER documentation build configuration file, created by
# sphinx-quickstart on Fri May 19 13:27:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# pylint: skip-file
import os
import sys
# Absolute path to the repository root (two levels above this file), added to
# sys.path so that sphinx autodoc can import the improver package without it
# being installed.
SOURCE_DIR = os.path.abspath(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
)
sys.path.insert(0, SOURCE_DIR)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules: autodoc pulls API documentation from docstrings;
# napoleon parses Google/NumPy style docstrings; the remainder add doctest
# support, cross-project linking, coverage checks, source links, MathJax
# maths rendering and type-hint aware signatures.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.viewcode",
    "sphinx.ext.mathjax",
    "sphinx_autodoc_typehints",
]
# ``autodoc_default_flags`` was deprecated in Sphinx 1.8 and removed in
# Sphinx 4.0; ``autodoc_default_options`` is the supported replacement and
# keeps the same behaviour (document members and private members).
autodoc_default_options = {
    "members": True,
    "private-members": True,
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "IMPROVER"
copyright = "- British Crown copyright. The Met Office"
author = "Met Office"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ""
# The full version, including alpha/beta/rc tags.
release = ""
# NOTE(review): version/release are intentionally left blank, so the built
# documentation is unversioned — confirm this is still the desired behaviour.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["modules.rst", "extended_documentation"]
# Mock "numba" during autodoc import so building the docs does not require
# numba to be installed.
autodoc_mock_imports = ["numba"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'IMPROVER'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = "../files/improver_logo_small.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["../_static"]
# Added to apply correction css from _static that ensures text in tables
# wraps. This prevents tables from becoming very wide which necessitates
# scroll bars.
html_css_files = ["theme_overrides.css"] # override wide tables in RTD theme
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "Improverdoc"
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX settings are left at their Sphinx defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "Improver.tex", "IMPROVER Documentation", "Met Office", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "improver", "IMPROVER Documentation", [author], 1)]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE(review): the description below is still the sphinx-quickstart
# placeholder text — consider replacing it with a real one-line summary.
texinfo_documents = [
    (
        master_doc,
        "IMPROVER",
        "IMPROVER Documentation",
        author,
        "IMPROVER",
        "One line description of project.",
        "Miscellaneous",
    ),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# Sphinx has deprecated the positional ``{url: None}`` form of this mapping;
# the named form below is equivalent and also lets roles disambiguate targets
# with a ``name:`` prefix.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3/", None),
    "iris": ("https://scitools-iris.readthedocs.io/en/latest/", None),
    "cartopy": ("https://scitools.org.uk/cartopy/docs/latest/", None),
    "cf_units": ("https://cf-units.readthedocs.io/en/stable/", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy-1.6.2/reference/", None),
    "pandas": ("https://pandas.pydata.org/pandas-docs/dev/", None),
}
# Get napoleon to document constructor methods (when __init__ has a docstring).
napoleon_include_init_with_doc = True
def run_apidoc(_):
    """Allow automatic running of sphinx-apidoc at build start.

    Adapted from: https://github.com/rtfd/readthedocs.org/issues/1139
    """
    from sphinx.ext.apidoc import main

    # Generated .rst files are written next to this configuration file.
    docs_dir = os.path.dirname(os.path.abspath(__file__))
    # Paths excluded from API documentation generation.
    excluded = [
        os.path.join(SOURCE_DIR, "improver_tests"),
        os.path.join(SOURCE_DIR, "setup.py"),
    ]
    main(["-e", "-P", "-f", "-o", docs_dir, SOURCE_DIR] + excluded)
def setup(app):
    """Sphinx entry point: hook apidoc generation into the build start."""
    app.connect("builder-inited", run_apidoc)
| bsd-3-clause | bf1b9056d92e41dde3b90dfea8f76ba4 | 30.267139 | 83 | 0.696507 | 3.663712 | false | false | false | false |
metoppv/improver | improver_tests/acceptance/test_weighted_blending.py | 3 | 16754 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Tests for the weighted-blending CLI
"""
import pytest
from . import acceptance as acc
# Every test in this module is an acceptance test and is skipped when the
# known-good-output (KGO) data set is not available.
pytestmark = [pytest.mark.acc, acc.skip_if_kgo_missing]
# Diagnostic name embedded in the nowcast/UKV input file names used below.
PRECIP = "lwe_precipitation_rate"
CLI = acc.cli_name_with_dashes(__file__)
run_cli = acc.run_cli(CLI)
# Shared JSON configuration files used by the model-blending tests.
ATTRIBUTES_PATH = acc.kgo_root() / "weighted_blending/attributes.json"
BLEND_WEIGHTS_PATH = acc.kgo_root() / "weighted_blending/blending_weights.json"
@pytest.mark.slow
def test_basic_nonlin(tmp_path):
    """Blend over forecast_reference_time using non-linear weights."""
    kgo_dir = acc.kgo_root() / "weighted_blending/basic_nonlin"
    # Inputs are shared with the linear-weights test.
    inputs = sorted((kgo_dir / "../basic_lin").glob("multiple_probabilities_rain_*H.nc"))
    result = tmp_path / "output.nc"
    options = [
        "--coordinate",
        "forecast_reference_time",
        "--cycletime",
        "20170601T0200Z",
        "--weighting-method",
        "nonlinear",
        "--cval",
        "0.85",
    ]
    run_cli(options + inputs + ["--output", result])
    acc.compare(result, kgo_dir / "kgo.nc")
@pytest.mark.slow
def test_basic_lin(tmp_path):
    """Test cycle blending with basic linear weights (y0val/ynval)"""
    kgo_dir = acc.kgo_root() / "weighted_blending/basic_lin"
    kgo_path = kgo_dir / "kgo.nc"
    input_paths = sorted((kgo_dir.glob("multiple_probabilities_rain_*H.nc")))
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "forecast_reference_time",
        "--cycletime",
        "20170601T0200Z",
        "--y0val",
        "20.0",
        "--ynval",
        "2.0",
        *input_paths,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
def test_bothoptions_fail(tmp_path):
    """An unrecognised combined weighting method raises ValueError."""
    input_dir = acc.kgo_root() / "weighted_blending/basic_lin"
    inputs = sorted(input_dir.glob("multiple_probabilities_rain_*H.nc"))
    result = tmp_path / "output.nc"
    cli_args = ["--coordinate", "time", "--weighting-method", "linear nonlinear"]
    cli_args += inputs
    cli_args += ["--output", result]
    with pytest.raises(ValueError):
        run_cli(cli_args)
def test_invalid_lin_nonlin(tmp_path):
    """Test that mixing a linear option (ynval) with a non-linear option
    (cval) fails with a RuntimeError"""
    kgo_dir = acc.kgo_root() / "weighted_blending/basic_lin"
    input_paths = sorted((kgo_dir.glob("multiple_probabilities_rain_*H.nc")))
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "time",
        "--ynval",
        "1",
        "--cval",
        "0.5",
        *input_paths,
        "--output",
        output_path,
    ]
    with pytest.raises(RuntimeError):
        run_cli(args)
def test_invalid_nonlin_lin(tmp_path):
    """Test that the nonlinear weighting method rejects the linear
    y0val/ynval options with a RuntimeError"""
    kgo_dir = acc.kgo_root() / "weighted_blending/basic_lin"
    input_paths = sorted((kgo_dir.glob("multiple_probabilities_rain_*H.nc")))
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "time",
        "--weighting-method",
        "nonlinear",
        "--ynval",
        "1",
        "--y0val",
        "0",
        *input_paths,
        "--output",
        output_path,
    ]
    with pytest.raises(RuntimeError):
        run_cli(args)
@pytest.mark.xfail(run=False, reason="takes ~5 minutes to run")
def test_percentile(tmp_path):
    """Test cycle blending of percentile data with non-linear weights"""
    kgo_dir = acc.kgo_root() / "weighted_blending/percentiles"
    kgo_path = kgo_dir / "kgo.nc"
    input_path = kgo_dir / "input.nc"
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "forecast_reference_time",
        "--cycletime",
        "20170601T1000Z",
        "--weighting-method",
        "nonlinear",
        "--cval",
        "1.0",
        input_path,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
def test_cycletime_with_specified_frt(tmp_path):
    """Test cycletime blending where a forecast reference time for the
    returned cube is user specified."""
    kgo_dir = acc.kgo_root() / "weighted_blending/cycletime"
    kgo_path = kgo_dir / "kgo_specified_frt.nc"
    input_paths = sorted((kgo_dir.glob("input_temperature*.nc")))
    # Discard the last file in sorted order so that only the remaining
    # inputs contribute to the blend.
    input_paths.pop(-1)
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "forecast_reference_time",
        "--y0val",
        "1.0",
        "--ynval",
        "4.0",
        "--cycletime",
        "20200218T0600Z",
        *input_paths,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
def test_cycletime_with_specified_frt_single_input(tmp_path):
    """Test cycletime blending where a forecast reference time for the
    returned cube is user specified. In this case the input is a single cube
    with a different forecast reference time. This is a slightly different
    route through the code to the preceding test as no blending actually
    occurs."""
    kgo_dir = acc.kgo_root() / "weighted_blending/cycletime"
    kgo_path = kgo_dir / "kgo_single_input.nc"
    input_path = kgo_dir / "input_temperature_0.nc"
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "forecast_reference_time",
        "--y0val",
        "1.0",
        "--ynval",
        "4.0",
        "--cycletime",
        "20200218T0600Z",
        # With a single input no blending occurs; only the forecast
        # reference time of the output is updated.
        input_path,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
def test_model(tmp_path):
    """Blend two model inputs (ukv, enuk) over the model_configuration
    coordinate, recording model identity and run attributes."""
    model_dir = acc.kgo_root() / "weighted_blending/model"
    result = tmp_path / "output.nc"
    run_cli(
        [
            "--coordinate",
            "model_configuration",
            "--cycletime",
            "20171208T0400Z",
            "--ynval",
            "1",
            "--y0val",
            "1",
            "--model-id-attr",
            "mosg__model_configuration",
            "--record-run-attr",
            "mosg__model_run",
            "--attributes-config",
            ATTRIBUTES_PATH,
            model_dir / "ukv_input.nc",
            model_dir / "enuk_input.nc",
            "--output",
            result,
        ]
    )
    acc.compare(result, model_dir / "kgo.nc")
def test_fails_no_model_id(tmp_path):
    """Test multi-model blending fails if model_id_attr is not specified"""
    kgo_dir = acc.kgo_root() / "weighted_blending/model"
    ukv_path = kgo_dir / "ukv_input.nc"
    enuk_path = kgo_dir / "enuk_input.nc"
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "model_configuration",
        "--cycletime",
        "20171208T0400Z",
        "--ynval",
        "1",
        "--y0val",
        "1",
        ukv_path,
        enuk_path,
        "--output",
        output_path,
    ]
    # Blending over model_configuration requires --model-id-attr; omitting
    # it is rejected.
    with pytest.raises(RuntimeError):
        run_cli(args)
@pytest.mark.slow
def test_realization_collapse(tmp_path):
    """Collapse the realization coordinate using equal weights."""
    data_dir = acc.kgo_root() / "weighted_blending/realizations"
    result = tmp_path / "output.nc"
    run_cli(
        [
            "--coordinate",
            "realization",
            "--ynval=1",
            "--y0val=1",
            data_dir / "input.nc",
            "--output",
            result,
        ]
    )
    acc.compare(result, data_dir / "kgo.nc")
def test_weights_dict(tmp_path):
    """Test model blending with weights read from a configuration
    dictionary, varying along forecast_period"""
    kgo_dir = acc.kgo_root() / "weighted_blending/weights_from_dict"
    kgo_path = kgo_dir / "kgo.nc"
    ukv_path = kgo_dir / "../model/ukv_input.nc"
    enuk_path = kgo_dir / "../model/enuk_input.nc"
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "model_configuration",
        "--cycletime",
        "20171208T0400Z",
        "--weighting-method",
        "dict",
        "--weighting-config",
        BLEND_WEIGHTS_PATH,
        "--weighting-coord",
        "forecast_period",
        "--model-id-attr",
        "mosg__model_configuration",
        "--record-run-attr",
        "mosg__model_run",
        "--attributes-config",
        ATTRIBUTES_PATH,
        ukv_path,
        enuk_path,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
@pytest.mark.xfail(run=False, reason="takes ~5 minutes to run")
def test_percentile_weights_dict(tmp_path):
    """Test percentile model blending with weights from a configuration
    dictionary"""
    kgo_dir = acc.kgo_root() / "weighted_blending/percentile_weights_from_dict"
    kgo_path = kgo_dir / "kgo.nc"
    ukv_path = kgo_dir / "ukv_input.nc"
    enuk_path = kgo_dir / "enuk_input.nc"
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "model_configuration",
        "--cycletime",
        "20170601T1000Z",
        "--weighting-method",
        "dict",
        "--weighting-config",
        BLEND_WEIGHTS_PATH,
        "--weighting-coord",
        "forecast_period",
        "--model-id-attr",
        "mosg__model_configuration",
        "--attributes-config",
        ATTRIBUTES_PATH,
        ukv_path,
        enuk_path,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
def test_accum_cycle_blend(tmp_path):
    """Test blending accumulation across cycle times"""
    kgo_dir = acc.kgo_root() / "weighted_blending/accum_cycle_blend"
    kgo_path = kgo_dir / "kgo.nc"
    input_paths = sorted(kgo_dir.glob("ukv_prob_accum_PT?H.nc"))
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "forecast_reference_time",
        "--cycletime",
        "20181020T2200Z",
        "--y0val=1",
        "--ynval=1",
        *input_paths,
        "--output",
        output_path,
    ]
    run_cli(args)
    # rtol=0.0: blended accumulations must match the KGO exactly.
    acc.compare(output_path, kgo_path, rtol=0.0)
def test_non_mo_model(tmp_path):
    """Test blending non-Met Office models"""
    kgo_dir = acc.kgo_root() / "weighted_blending/non_mo_model"
    kgo_path = kgo_dir / "kgo.nc"
    det_path = kgo_dir / "non_mo_det.nc"
    ens_path = kgo_dir / "non_mo_ens.nc"
    attr_path = kgo_dir / "../non_mo_attributes.json"
    output_path = tmp_path / "output.nc"
    # Uses a non-default model identity attribute and attributes config.
    args = [
        "--coordinate",
        "model_configuration",
        "--cycletime",
        "20171208T0400Z",
        "--y0val",
        "1",
        "--ynval",
        "1",
        det_path,
        ens_path,
        "--output",
        output_path,
        "--model-id-attr",
        "non_mo_model_config",
        "--attributes-config",
        attr_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
@pytest.mark.slow
def test_nowcast_cycle_blending(tmp_path):
    """Test blending nowcast cycles with spatial weights from a mask"""
    kgo_dir = acc.kgo_root() / "weighted_blending/spatial_weights"
    kgo_path = kgo_dir / "kgo/cycle.nc"
    # Lead times PT0002H to PT0004H of the 20181129T1000Z nowcast.
    input_files = [
        kgo_dir / f"nowcast_data/20181129T1000Z-PT{hr:04}H00M-{PRECIP}.nc"
        for hr in range(2, 5)
    ]
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "forecast_reference_time",
        "--cycletime",
        "20181129T0800Z",
        "--y0val",
        "1",
        "--ynval",
        "1",
        "--spatial-weights-from-mask",
        *input_files,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
@pytest.mark.slow
def test_spatial_model_blending(tmp_path):
    """Test model blending with spatial weights from a mask"""
    kgo_dir = acc.kgo_root() / "weighted_blending/spatial_weights"
    kgo_path = kgo_dir / "kgo/model.nc"
    # One file per model (nowcast, ukvx) at the same validity/lead time.
    input_files = [
        (kgo_dir / f"{t}_data/20181129T1000Z-PT0002H00M-{PRECIP}.nc")
        for t in ("nowcast", "ukvx")
    ]
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "model_configuration",
        "--cycletime",
        "20181129T0800Z",
        "--y0val",
        "1",
        "--ynval",
        "1",
        "--spatial-weights-from-mask",
        "--model-id-attr",
        "mosg__model_configuration",
        "--record-run-attr",
        "mosg__model_run",
        "--attributes-config",
        ATTRIBUTES_PATH,
        *input_files,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
@pytest.mark.slow
def test_nowcast_cycle_no_fuzzy(tmp_path):
    """Test blending nowcast cycles with spatial weights and the fuzzy
    length reduced to 1 (effectively no fuzziness)"""
    kgo_dir = acc.kgo_root() / "weighted_blending/spatial_weights"
    kgo_path = kgo_dir / "kgo/cycle_no_fuzzy.nc"
    input_files = [
        kgo_dir / f"nowcast_data/20181129T1000Z-PT{hr:04}H00M-{PRECIP}.nc"
        for hr in range(2, 5)
    ]
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "forecast_reference_time",
        "--cycletime",
        "20181129T0800Z",
        "--y0val",
        "1",
        "--ynval",
        "1",
        "--spatial-weights-from-mask",
        "--fuzzy-length",
        "1",
        *input_files,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
@pytest.mark.slow
def test_spatial_model_no_fuzzy(tmp_path):
    """Test spatial model blending with the fuzzy length reduced to 1
    (effectively no fuzziness)"""
    kgo_dir = acc.kgo_root() / "weighted_blending/spatial_weights"
    kgo_path = kgo_dir / "kgo/model_no_fuzzy.nc"
    input_files = [
        (kgo_dir / f"{t}_data/20181129T1000Z-PT0002H00M-{PRECIP}.nc")
        for t in ("nowcast", "ukvx")
    ]
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "model_configuration",
        "--cycletime",
        "20181129T0800Z",
        "--y0val",
        "1",
        "--ynval",
        "1",
        "--spatial-weights-from-mask",
        "--fuzzy-length",
        "1",
        "--model-id-attr",
        "mosg__model_configuration",
        "--record-run-attr",
        "mosg__model_run",
        "--attributes-config",
        ATTRIBUTES_PATH,
        *input_files,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
@pytest.mark.slow
def test_three_model_blending(tmp_path):
    """Test blending three models (enukxhrly, nc, ukvx) with spatial weights
    from a mask and weights from a configuration dictionary."""
    kgo_dir = acc.kgo_root() / "weighted_blending/three_models"
    kgo_path = kgo_dir / "kgo.nc"
    # Each model contributes one file at a different lead time. The loop
    # variable is named "lead" rather than the ambiguous single letter "l"
    # (flake8 E741); the generated file names are unchanged.
    input_files = [
        (kgo_dir / f"{model}/20190101T0400Z-PT{lead:04}H00M-precip_rate.nc")
        for model, lead in (("enukxhrly", 4), ("nc", 1), ("ukvx", 2))
    ]
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate",
        "model_configuration",
        "--spatial-weights-from-mask",
        "--weighting-method",
        "dict",
        "--weighting-coord",
        "forecast_period",
        "--cycletime",
        "20190101T0300Z",
        "--model-id-attr",
        "mosg__model_configuration",
        "--record-run-attr",
        "mosg__model_run",
        "--attributes-config",
        ATTRIBUTES_PATH,
        "--weighting-config",
        BLEND_WEIGHTS_PATH,
        *input_files,
        "--output",
        output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
| bsd-3-clause | b07ec7838409f9a9e5fb489edbadd5a0 | 27.590444 | 79 | 0.568521 | 3.290259 | false | true | false | false |
bread-and-pepper/django-userena | userena/middleware.py | 12 | 1228 | from django.utils import translation
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from userena import settings as userena_settings
from userena.compat import SiteProfileNotAvailable
from userena.utils import get_user_profile
class UserenaLocaleMiddleware(object):
    """
    Activate the language stored on the user's profile.

    The language cookie set by Django takes precedence, so a user can still
    switch languages; the profile language only applies when no such cookie
    value is present in the session.
    """
    def process_request(self, request):
        # Respect an explicit language choice already stored in the session.
        if request.session.get(settings.LANGUAGE_COOKIE_NAME):
            return
        if not request.user.is_authenticated():
            return
        try:
            profile = get_user_profile(user=request.user)
        except (ObjectDoesNotExist, SiteProfileNotAvailable):
            return
        if not profile:
            return
        try:
            lang = getattr(profile, userena_settings.USERENA_LANGUAGE_FIELD)
            translation.activate(lang)
            request.LANGUAGE_CODE = translation.get_language()
        except AttributeError:
            # Profile lacks the configured language field; leave the
            # request language untouched.
            pass
bread-and-pepper/django-userena | userena/backends.py | 5 | 1771 | import django.core.validators
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
class UserenaAuthenticationBackend(ModelBackend):
    """
    Custom backend that lets the login form supply either an ``email`` or a
    ``username`` as identification.
    """
    def authenticate(self, identification, password=None, check_password=True):
        """
        Authenticate a user through the combination email/username with
        password.

        :param identification:
            A string containing the username or e-mail of the user that is
            trying to authenticate.

        :password:
            Optional string containing the password for the user.

        :param check_password:
            Boolean that defines if the password should be checked for this
            user. Always keep this ``True``. This is only used by userena at
            activation when a user opens a page with a secret hash.

        :return: The signed in :class:`User`, or ``None`` on failure.
        """
        User = get_user_model()
        # Decide whether the identification is an e-mail address, then look
        # up the matching field case-insensitively.
        try:
            django.core.validators.validate_email(identification)
            lookup = {'email__iexact': identification}
        except django.core.validators.ValidationError:
            lookup = {'username__iexact': identification}
        try:
            user = User.objects.get(**lookup)
        except User.DoesNotExist:
            return None
        if not check_password:
            return user
        return user if user.check_password(password) else None

    def get_user(self, user_id):
        """Return the user with primary key ``user_id`` or ``None``."""
        User = get_user_model()
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
bread-and-pepper/django-userena | userena/utils.py | 4 | 6483 | from django.conf import settings
from django.utils.encoding import smart_bytes
from django.utils.functional import allow_lazy
from django.utils.http import urlencode
from django.utils.six import text_type
from django.utils.text import Truncator
from userena import settings as userena_settings
from userena.compat import SiteProfileNotAvailable, get_model
from hashlib import sha1, md5
import random, datetime
import warnings
def truncate_words(s, num, end_text='...'):
    """Truncate ``s`` to at most ``num`` words.

    Thin, lazy-capable wrapper around :class:`django.utils.text.Truncator`.

    :param s: Text (or lazy string) to truncate.
    :param num: Maximum number of words to keep.
    :param end_text: Suffix appended when truncation occurs; a falsy value
        disables the suffix.
    :return: The (possibly truncated) string.
    """
    # The original ``end_text and ' %s' % end_text or ''`` relied on the
    # fragile and/or-ternary idiom; it only worked because the formatted
    # suffix always starts with a space and is therefore truthy. A real
    # conditional expression states the intent directly.
    truncate = ' %s' % end_text if end_text else ''
    return Truncator(s).words(num, truncate=truncate)
truncate_words = allow_lazy(truncate_words, text_type)
def get_gravatar(email, size=80, default='identicon'):
    """Build the Gravatar URI for an e-mail address.

    :param email:
        The e-mail address to look up, hashed with MD5 as Gravatar requires.

    :param size:
        The size in pixels of one side of the Gravatar's square image.
        Optional, if not supplied will default to ``80``.

    :param default:
        Defines what should be displayed if no image is found for this user.
        Optional argument which defaults to ``identicon``. The argument can
        be a URI to an image or one of ``404``, ``mm``, ``identicon``,
        ``monsterid`` or ``wavatar`` (see the Gravatar documentation for
        their meaning).

    :return: The URI pointing to the Gravatar.
    """
    if userena_settings.USERENA_MUGSHOT_GRAVATAR_SECURE:
        base_url = 'https://secure.gravatar.com/avatar/'
    else:
        base_url = '//www.gravatar.com/avatar/'

    email_hash = md5(email.lower().encode('utf-8')).hexdigest()
    query = urlencode({
        's': str(size),
        'd': default
    })
    return '%s%s?%s' % (base_url, email_hash, query)
def signin_redirect(redirect=None, user=None):
    """
    Choose the URI to send a user to after a successful sign in.

    Preference order: the explicitly requested ``redirect`` (normally the
    ``next`` form field), then the user's own account page, and finally the
    standard Django ``LOGIN_REDIRECT_URL`` setting.

    :param redirect:
        A value normally supplied by the ``next`` form field.

    :param user:
        A ``User`` object specifying the user who has just signed in.

    :return: String containing the URI to redirect to.
    """
    if redirect:
        return redirect
    if user is not None:
        return userena_settings.USERENA_SIGNIN_REDIRECT_URL % \
            {'username': user.username}
    return settings.LOGIN_REDIRECT_URL
def generate_sha1(string, salt=None):
    """
    Create a salted SHA1 hex digest for ``string``.

    Doesn't need to be cryptographically strong: it is not used for password
    checking (Django handles that), so a short pseudo-random salt suffices.

    :param string:
        The value to hash; non-string input is converted via ``str`` first.

    :param salt:
        Optional salt. When omitted (or empty) a random five character salt
        is generated.

    :return: Tuple of ``(salt, hexdigest)``.
    """
    if not isinstance(string, (str, text_type)):
        string = str(string)
    if not salt:
        salt = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
    digest = sha1(smart_bytes(salt) + smart_bytes(string)).hexdigest()
    return salt, digest
def get_profile_model():
    """
    Return the model class for the currently-active user profile model, as
    defined by the ``AUTH_PROFILE_MODULE`` setting.

    :raises SiteProfileNotAvailable:
        When the setting is missing, empty, or does not resolve to a model.

    :return: The model that is used as profile.
    """
    module_path = getattr(settings, 'AUTH_PROFILE_MODULE', None)
    if not module_path:
        raise SiteProfileNotAvailable
    try:
        profile_mod = get_model(*module_path.rsplit('.', 1))
    except LookupError:
        profile_mod = None
    if profile_mod is None:
        raise SiteProfileNotAvailable
    return profile_mod
def get_user_profile(user):
    """Return the profile for ``user``, creating one if none exists yet."""
    model = get_profile_model()
    try:
        profile = user.get_profile()
    except AttributeError:
        # ``User.get_profile`` is not available here (presumably a newer
        # Django); fall back to the reverse relation declared by the
        # profile model's ``user`` field.
        reverse_name = model._meta.get_field('user').related_query_name()
        profile = getattr(user, reverse_name, None)
    except model.DoesNotExist:
        profile = None
    return profile if profile else model.objects.create(user=user)
def get_protocol():
    """
    Return the current protocol as a string.

    Yields ``'https'`` when the ``USERENA_USE_HTTPS`` setting is enabled,
    ``'http'`` otherwise.
    """
    use_https = getattr(settings, 'USERENA_USE_HTTPS',
                        userena_settings.DEFAULT_USERENA_USE_HTTPS)
    return 'https' if use_https else 'http'
def get_datetime_now():
    """
    Return a datetime for the current point in time.

    When Django's ``django.utils.timezone`` is importable, its ``now()`` is
    used, which returns an aware or naive datetime depending on the
    project's ``USE_TZ`` setting. Without Django installed it falls back to
    ``datetime.datetime.now()``.
    """
    try:
        from django.utils import timezone
    except ImportError:  # pragma: no cover
        return datetime.datetime.now()
    return timezone.now()  # pragma: no cover
# Django 1.5 compatibility utilities, providing support for custom User models.
# Since get_user_model() causes a circular import if called when app models are
# being loaded, the user_model_label should be used when possible, with calls
# to get_user_model deferred to execution time
user_model_label = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def get_user_model():
    """Deprecated shim around ``django.contrib.auth.get_user_model``."""
    warnings.warn("Use Django's django.contrib.auth.get_user_model directly. "
                  "This function will be removed in future versions of "
                  "django-userena.", DeprecationWarning)
    # Imported lazily to avoid app-loading circular imports (see note above
    # the module-level ``user_model_label``).
    from django.contrib.auth import get_user_model as django_get_user_model
    return django_get_user_model()
| bsd-3-clause | bb0acf704cac49c7237d28e55268504b | 32.076531 | 90 | 0.658954 | 4.103165 | false | false | false | false |
bread-and-pepper/django-userena | userena/tests/tests_utils.py | 6 | 3258 | import sys, re, six
from django.test import TestCase
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.six.moves.urllib_parse import urlparse, parse_qs
from userena.utils import (get_gravatar, signin_redirect, get_profile_model,
get_protocol, generate_sha1)
from userena import settings as userena_settings
from userena.compat import SiteProfileNotAvailable
class UtilsTests(TestCase):
    """Test the extra utils methods (hashing, gravatar URLs, redirects, profiles)."""
    # Loads the ``users`` fixture so pk=1 exists for the redirect test below.
    fixtures = ['users']
    def test_generate_sha(self):
        """``generate_sha1`` must produce valid hex SHA1 digests for non-ASCII input."""
        s1 = six.u('\xc5se')
        s2 = six.u('\xd8ystein')
        s3 = six.u('\xc6gir')
        h1 = generate_sha1(s1)
        h2 = generate_sha1(s2)
        h3 = generate_sha1(s3)
        # Check valid SHA1 activation key: element [1] is the hex digest.
        self.assertTrue(re.match('^[a-f0-9]{40}$', h1[1]))
        self.assertTrue(re.match('^[a-f0-9]{40}$', h2[1]))
        self.assertTrue(re.match('^[a-f0-9]{40}$', h3[1]))
    def test_get_gravatar(self):
        """Gravatar URLs must carry the requested size and default-image type."""
        template = 's=%(size)s&d=%(type)s'
        # Check the defaults. Query strings are compared as parsed dicts so
        # parameter ordering does not matter.
        parsed = urlparse(get_gravatar('alice@example.com'))
        self.assertEqual(
            parse_qs(parsed.query),
            parse_qs(template % {'size': 80, 'type': 'identicon'})
        )
        # Check different size
        parsed = urlparse(get_gravatar('alice@example.com', size=200))
        self.assertEqual(
            parse_qs(parsed.query),
            parse_qs(template % {'size': 200, 'type': 'identicon'})
        )
        # Check different default
        parsed = urlparse(get_gravatar('alice@example.com', default='404'))
        self.assertEqual(
            parse_qs(parsed.query),
            parse_qs(template % {'size': 80, 'type': '404'})
        )
    def test_signin_redirect(self):
        """
        Test redirect function which should redirect the user after a
        successful signin.
        """
        # Test with a requested redirect
        self.assertEqual(signin_redirect(redirect='/accounts/'), '/accounts/')
        # Test with only the user specified
        user = get_user_model().objects.get(pk=1)
        self.assertEqual(signin_redirect(user=user),
                         '/accounts/%s/' % user.username)
        # The ultimate fallback, probably never used
        self.assertEqual(signin_redirect(), settings.LOGIN_REDIRECT_URL)
    def test_get_profile_model(self):
        """
        Test if the correct profile model is returned when
        ``get_profile_model()`` is called.
        """
        # A non existent model should also raise ``SiteProfileNotAvailable``
        # error.
        with self.settings(AUTH_PROFILE_MODULE='userena.FakeProfile'):
            self.assertRaises(SiteProfileNotAvailable, get_profile_model)
        # An error should be raised when there is no ``AUTH_PROFILE_MODULE``
        # supplied.
        with self.settings(AUTH_PROFILE_MODULE=None):
            self.assertRaises(SiteProfileNotAvailable, get_profile_model)
    def test_get_protocol(self):
        """ Test if the correct protocol is returned """
        self.assertEqual(get_protocol(), 'http')
        with self.settings(USERENA_USE_HTTPS=True):
            self.assertEqual(get_protocol(), 'https')
| bsd-3-clause | 5fae2b07fd3fee16ab6e4a7587a2b8a7 | 34.413043 | 78 | 0.609576 | 3.878571 | false | true | false | false |
pvlib/pvlib-python | docs/examples/iv-modeling/plot_singlediode.py | 3 | 4250 | """
Calculating a module's IV curves
================================
Examples of modeling IV curves using a single-diode circuit equivalent model.
"""
# %%
# Calculating a module IV curve for certain operating conditions is a two-step
# process. Multiple methods exist for both parts of the process. Here we use
# the De Soto model [1]_ to calculate the electrical parameters for an IV
# curve at a certain irradiance and temperature using the module's
# base characteristics at reference conditions. Those parameters are then used
# to calculate the module's IV curve by solving the single-diode equation using
# the Lambert W method.
#
# The single-diode equation is a circuit-equivalent model of a PV
# cell and has five electrical parameters that depend on the operating
# conditions. For more details on the single-diode equation and the five
# parameters, see the `PVPMC single diode page
# <https://pvpmc.sandia.gov/modeling-steps/2-dc-module-iv/diode-equivalent-circuit-models/>`_.
#
# References
# ----------
# .. [1] W. De Soto et al., "Improvement and validation of a model for
# photovoltaic array performance", Solar Energy, vol 80, pp. 78-88, 2006.
#
# Calculating IV Curves
# -----------------------
# This example uses :py:meth:`pvlib.pvsystem.calcparams_desoto` to calculate
# the 5 electrical parameters needed to solve the single-diode equation.
# :py:meth:`pvlib.pvsystem.singlediode` is then used to generate the IV curves.
from pvlib import pvsystem
import pandas as pd
import matplotlib.pyplot as plt
# Example module parameters for the Canadian Solar CS5P-220M:
# (single-diode / CEC-style parameters; the *_ref values are the module's
# characteristics at reference conditions, per the De Soto model cited in
# the header.)
parameters = {
    'Name': 'Canadian Solar CS5P-220M',
    'BIPV': 'N',
    'Date': '10/5/2009',
    'T_NOCT': 42.4,
    'A_c': 1.7,
    'N_s': 96,
    'I_sc_ref': 5.1,
    'V_oc_ref': 59.4,
    'I_mp_ref': 4.69,
    'V_mp_ref': 46.9,
    'alpha_sc': 0.004539,
    'beta_oc': -0.22216,
    'a_ref': 2.6373,
    'I_L_ref': 5.114,
    'I_o_ref': 8.196e-10,
    'R_s': 1.065,
    'R_sh_ref': 381.68,
    'Adjust': 8.7,
    'gamma_r': -0.476,
    'Version': 'MM106',
    'PTC': 200.1,
    'Technology': 'Mono-c-Si',
}
# Operating conditions to model: (effective irradiance, cell temperature);
# these become the Geff [W/m^2] and Tcell [deg C] columns used below.
cases = [
    (1000, 55),
    (800, 55),
    (600, 55),
    (400, 25),
    (400, 40),
    (400, 55)
]
conditions = pd.DataFrame(cases, columns=['Geff', 'Tcell'])
# adjust the reference parameters according to the operating
# conditions using the De Soto model:
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
    conditions['Geff'],
    conditions['Tcell'],
    alpha_sc=parameters['alpha_sc'],
    a_ref=parameters['a_ref'],
    I_L_ref=parameters['I_L_ref'],
    I_o_ref=parameters['I_o_ref'],
    R_sh_ref=parameters['R_sh_ref'],
    R_s=parameters['R_s'],
    # band-gap energy parameters; values commonly used for silicon cells
    EgRef=1.121,
    dEgdT=-0.0002677
)
# plug the parameters into the SDE and solve for IV curves:
# (one 100-point curve per row of ``conditions``)
curve_info = pvsystem.singlediode(
    photocurrent=IL,
    saturation_current=I0,
    resistance_series=Rs,
    resistance_shunt=Rsh,
    nNsVth=nNsVth,
    ivcurve_pnts=100,
    method='lambertw'
)
# plot the calculated curves:
plt.figure()
for i, case in conditions.iterrows():
    label = (
        "$G_{eff}$ " + f"{case['Geff']} $W/m^2$\n"
        "$T_{cell}$ " + f"{case['Tcell']} $\\degree C$"
    )
    plt.plot(curve_info['v'][i], curve_info['i'][i], label=label)
    v_mp = curve_info['v_mp'][i]
    i_mp = curve_info['i_mp'][i]
    # mark the MPP
    plt.plot([v_mp], [i_mp], ls='', marker='o', c='k')
plt.legend(loc=(1.0, 0))
plt.xlabel('Module voltage [V]')
plt.ylabel('Module current [A]')
plt.title(parameters['Name'])
plt.gcf().set_tight_layout(True)
# draw trend arrows
def draw_arrow(ax, label, x0, y0, rotation, size, direction):
    """Annotate ``ax`` with an arrow-shaped text box.

    ``direction`` selects the fancy-box style ('l' -> 'larrow',
    'r' -> 'rarrow'); ``rotation`` is the text angle in degrees and
    ``size`` the font size.
    """
    style = direction + 'arrow'
    bbox_props = dict(boxstyle=style, fc=(0.8, 0.9, 0.9), ec="b", lw=1)
    t = ax.text(x0, y0, label, ha="left", va="bottom", rotation=rotation,
                size=size, bbox=bbox_props, zorder=-1)
    bb = t.get_bbox_patch()
    bb.set_boxstyle(style, pad=0.6)
ax = plt.gca()
draw_arrow(ax, 'Irradiance', 20, 2.5, 90, 15, 'r')
draw_arrow(ax, 'Temperature', 35, 1, 0, 15, 'l')
print(pd.DataFrame({
    'i_sc': curve_info['i_sc'],
    'v_oc': curve_info['v_oc'],
    'i_mp': curve_info['i_mp'],
    'v_mp': curve_info['v_mp'],
    'p_mp': curve_info['p_mp'],
}))
# Show the figure only after ALL annotations are drawn. Previously
# plt.show() ran before set_tight_layout() and before the trend arrows,
# so in a blocking (non-interactive) run the displayed figure was missing
# the arrows and the layout fix.
plt.show()
| bsd-3-clause | ab65f6de83f934c853a5e86e1f34aad1 | 28.72028 | 94 | 0.626824 | 2.825798 | false | false | false | false |
pytorch/vision | references/detection/group_by_aspect_ratio.py | 1 | 7143 | import bisect
import copy
import math
from collections import defaultdict
from itertools import chain, repeat
import numpy as np
import torch
import torch.utils.data
import torchvision
from PIL import Image
from torch.utils.data.sampler import BatchSampler, Sampler
from torch.utils.model_zoo import tqdm
def _repeat_to_at_least(iterable, n):
repeat_times = math.ceil(n / len(iterable))
repeated = chain.from_iterable(repeat(iterable, repeat_times))
return list(repeated)
class GroupedBatchSampler(BatchSampler):
    """
    Wraps another sampler to yield a mini-batch of indices.
    It enforces that the batch only contain elements from the same group.
    It also tries to provide mini-batches which follows an ordering which is
    as close as possible to the ordering from the original sampler.
    Args:
        sampler (Sampler): Base sampler.
        group_ids (list[int]): If the sampler produces indices in range [0, N),
            `group_ids` must be a list of `N` ints which contains the group id of each sample.
            The group ids must be a continuous set of integers starting from
            0, i.e. they must be in the range [0, num_groups).
        batch_size (int): Size of mini-batch.
    """
    def __init__(self, sampler, group_ids, batch_size):
        if not isinstance(sampler, Sampler):
            raise ValueError(f"sampler should be an instance of torch.utils.data.Sampler, but got sampler={sampler}")
        self.sampler = sampler
        self.group_ids = group_ids
        self.batch_size = batch_size
    def __iter__(self):
        # indices accumulated for the *current* (incomplete) batch of each group
        buffer_per_group = defaultdict(list)
        # every index ever seen per group; used below to pad short batches
        samples_per_group = defaultdict(list)
        num_batches = 0
        for idx in self.sampler:
            group_id = self.group_ids[idx]
            buffer_per_group[group_id].append(idx)
            samples_per_group[group_id].append(idx)
            # flush a batch as soon as one group's buffer is full, preserving
            # the base sampler's ordering as closely as possible
            if len(buffer_per_group[group_id]) == self.batch_size:
                yield buffer_per_group[group_id]
                num_batches += 1
                del buffer_per_group[group_id]
            assert len(buffer_per_group[group_id]) < self.batch_size
        # now we have run out of elements that satisfy
        # the group criteria, let's return the remaining
        # elements so that the size of the sampler is
        # deterministic
        expected_num_batches = len(self)
        num_remaining = expected_num_batches - num_batches
        if num_remaining > 0:
            # for the remaining batches, take first the buffers with largest number
            # of elements
            for group_id, _ in sorted(buffer_per_group.items(), key=lambda x: len(x[1]), reverse=True):
                remaining = self.batch_size - len(buffer_per_group[group_id])
                # pad the short batch by re-using earlier samples from the same group
                samples_from_group_id = _repeat_to_at_least(samples_per_group[group_id], remaining)
                buffer_per_group[group_id].extend(samples_from_group_id[:remaining])
                assert len(buffer_per_group[group_id]) == self.batch_size
                yield buffer_per_group[group_id]
                num_remaining -= 1
                if num_remaining == 0:
                    break
        assert num_remaining == 0
    def __len__(self):
        # number of *full* batches; leftovers are handled in __iter__ above
        return len(self.sampler) // self.batch_size
def _compute_aspect_ratios_slow(dataset, indices=None):
    """Fallback: compute width/height ratios by actually loading every image."""
    print(
        "Your dataset doesn't support the fast path for "
        "computing the aspect ratios, so will iterate over "
        "the full dataset and load every image instead. "
        "This might take some time..."
    )
    if indices is None:
        indices = range(len(dataset))
    # Minimal sampler yielding exactly the requested indices, in order.
    class SubsetSampler(Sampler):
        def __init__(self, indices):
            self.indices = indices
        def __iter__(self):
            return iter(self.indices)
        def __len__(self):
            return len(self.indices)
    sampler = SubsetSampler(indices)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        sampler=sampler,
        num_workers=14,  # you might want to increase it for faster processing
        # identity collate: keep the raw (img, target) sample, no batching dim
        collate_fn=lambda x: x[0],
    )
    aspect_ratios = []
    with tqdm(total=len(dataset)) as pbar:
        for _i, (img, _) in enumerate(data_loader):
            pbar.update(1)
            # assumes img is a tensor laid out as [..., H, W] — the usual
            # torchvision convention
            height, width = img.shape[-2:]
            aspect_ratio = float(width) / float(height)
            aspect_ratios.append(aspect_ratio)
    return aspect_ratios
def _compute_aspect_ratios_custom_dataset(dataset, indices=None):
if indices is None:
indices = range(len(dataset))
aspect_ratios = []
for i in indices:
height, width = dataset.get_height_and_width(i)
aspect_ratio = float(width) / float(height)
aspect_ratios.append(aspect_ratio)
return aspect_ratios
def _compute_aspect_ratios_coco_dataset(dataset, indices=None):
if indices is None:
indices = range(len(dataset))
aspect_ratios = []
for i in indices:
img_info = dataset.coco.imgs[dataset.ids[i]]
aspect_ratio = float(img_info["width"]) / float(img_info["height"])
aspect_ratios.append(aspect_ratio)
return aspect_ratios
def _compute_aspect_ratios_voc_dataset(dataset, indices=None):
    """Compute width/height ratios for a VOC dataset by reading only image headers.

    PIL parses just the header when ``.size`` is accessed, so pixel data is
    never decoded. Fix: the original left each ``Image.open`` handle open
    until garbage collection (ResourceWarning / fd leak); the context manager
    closes it deterministically.
    """
    if indices is None:
        indices = range(len(dataset))
    aspect_ratios = []
    for i in indices:
        # this doesn't load the pixel data into memory, because PIL is lazy
        with Image.open(dataset.images[i]) as img:
            width, height = img.size
        aspect_ratios.append(float(width) / float(height))
    return aspect_ratios
def _compute_aspect_ratios_subset_dataset(dataset, indices=None):
    """Translate subset indices into the wrapped dataset's indices and delegate."""
    if indices is None:
        indices = range(len(dataset))
    inner_indices = [dataset.indices[idx] for idx in indices]
    return compute_aspect_ratios(dataset.dataset, inner_indices)
def compute_aspect_ratios(dataset, indices=None):
    """Pick the fastest aspect-ratio strategy available for ``dataset`` and run it."""
    if hasattr(dataset, "get_height_and_width"):
        strategy = _compute_aspect_ratios_custom_dataset
    elif isinstance(dataset, torchvision.datasets.CocoDetection):
        strategy = _compute_aspect_ratios_coco_dataset
    elif isinstance(dataset, torchvision.datasets.VOCDetection):
        strategy = _compute_aspect_ratios_voc_dataset
    elif isinstance(dataset, torch.utils.data.Subset):
        strategy = _compute_aspect_ratios_subset_dataset
    else:
        # slow path: load every image
        strategy = _compute_aspect_ratios_slow
    return strategy(dataset, indices)
def _quantize(x, bins):
bins = copy.deepcopy(bins)
bins = sorted(bins)
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized
def create_aspect_ratio_groups(dataset, k=0):
    """Assign every sample to an aspect-ratio bucket; ``k`` yields 2k+1 log-spaced bin edges."""
    aspect_ratios = compute_aspect_ratios(dataset)
    if k > 0:
        # log-spaced edges spanning ratios from 1/2 to 2
        bins = (2 ** np.linspace(-1, 1, 2 * k + 1)).tolist()
    else:
        bins = [1.0]
    groups = _quantize(aspect_ratios, bins)
    # count number of elements per group
    counts = np.unique(groups, return_counts=True)[1]
    fbins = [0] + bins + [np.inf]
    print(f"Using {fbins} as bins for aspect ratio quantization")
    print(f"Count of instances per bin: {counts}")
    return groups
| bsd-3-clause | bb391a395047258be81d86b435f9183d | 35.443878 | 117 | 0.642867 | 3.8096 | false | false | false | false |
pytorch/vision | .circleci/unittest/linux/scripts/run-clang-format.py | 2 | 10667 | #!/usr/bin/env python
"""
MIT License
Copyright (c) 2017 Guillaume Papin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
import argparse
import difflib
import fnmatch
import multiprocessing
import os
import signal
import subprocess
import sys
import traceback
from functools import partial
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, "wb")
DEFAULT_EXTENSIONS = "c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu"
class ExitStatus:
    # Process exit codes, mirroring the classic ``diff`` convention:
    # 0 = clean, 1 = differences found, 2 = operational failure.
    SUCCESS = 0
    DIFF = 1
    TROUBLE = 2
def list_files(files, recursive=False, extensions=None, exclude=None):
    """Expand ``files`` into the concrete list of paths to check.

    Non-directory entries pass through unchanged. When ``recursive`` is set,
    directories are walked; walked files are kept only when their extension
    is in ``extensions`` and they match none of the ``exclude`` glob patterns.
    """
    if extensions is None:
        extensions = []
    if exclude is None:
        exclude = []
    collected = []
    for entry in files:
        if not (recursive and os.path.isdir(entry)):
            collected.append(entry)
            continue
        for dirpath, dnames, fnames in os.walk(entry):
            candidates = [os.path.join(dirpath, name) for name in fnames]
            for pattern in exclude:
                # Trimming ``dnames`` in place prunes os.walk's descent
                # into excluded directories.
                dnames[:] = [d for d in dnames
                             if not fnmatch.fnmatch(os.path.join(dirpath, d), pattern)]
                candidates = [c for c in candidates if not fnmatch.fnmatch(c, pattern)]
            for path in candidates:
                ext = os.path.splitext(path)[1][1:]
                if ext in extensions:
                    collected.append(path)
    return collected
def make_diff(file, original, reformatted):
    """Return the unified diff (as a list of lines) between original and reformatted content."""
    diff_lines = difflib.unified_diff(
        original,
        reformatted,
        fromfile=f"{file}\t(original)",
        tofile=f"{file}\t(reformatted)",
        n=3,
    )
    return list(diff_lines)
class DiffError(Exception):
    """Raised when running clang-format on a file fails in an expected way."""

    def __init__(self, message, errs=None):
        super().__init__(message)
        # stderr lines captured from the failed invocation, if any
        self.errs = errs if errs else []
class UnexpectedError(Exception):
    """Raised for failures that look like bugs rather than bad input files."""

    def __init__(self, message, exc=None):
        super().__init__(message)
        self.exc = exc
        # Snapshot the active traceback at construction time so it can be
        # reported later (e.g. after crossing a multiprocessing boundary).
        self.formatted_traceback = traceback.format_exc()
def run_clang_format_diff_wrapper(args, file):
    """Run the per-file diff, normalizing any unknown failure to UnexpectedError."""
    try:
        return run_clang_format_diff(args, file)
    except DiffError:
        # expected failure mode: let it propagate untouched
        raise
    except Exception as e:
        raise UnexpectedError(f"{file}: {e.__class__.__name__}: {e}", e)
def run_clang_format_diff(args, file):
    """Run clang-format on ``file`` and return ``(diff_lines, stderr_lines)``.

    Raises DiffError when the file cannot be read, clang-format cannot be
    started, or clang-format exits non-zero.
    """
    try:
        with open(file, encoding="utf-8") as f:
            original = f.readlines()
    except OSError as exc:
        raise DiffError(str(exc))
    # clang-format without -i prints the reformatted file to stdout
    invocation = [args.clang_format_executable, file]
    # Use of utf-8 to decode the process output.
    #
    # Hopefully, this is the correct thing to do.
    #
    # It's done due to the following assumptions (which may be incorrect):
    # - clang-format will returns the bytes read from the files as-is,
    #   without conversion, and it is already assumed that the files use utf-8.
    # - if the diagnostics were internationalized, they would use utf-8:
    #   > Adding Translations to Clang
    #   >
    #   > Not possible yet!
    #   > Diagnostic strings should be written in UTF-8,
    #   > the client can translate to the relevant code page if needed.
    #   > Each translation completely replaces the format string
    #   > for the diagnostic.
    #   > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation
    try:
        proc = subprocess.Popen(
            invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, encoding="utf-8"
        )
    except OSError as exc:
        raise DiffError(f"Command '{subprocess.list2cmdline(invocation)}' failed to start: {exc}")
    proc_stdout = proc.stdout
    proc_stderr = proc.stderr
    # hopefully the stderr pipe won't get full and block the process
    # (stdout is drained first; a pathologically chatty stderr could deadlock)
    outs = list(proc_stdout.readlines())
    errs = list(proc_stderr.readlines())
    proc.wait()
    if proc.returncode:
        # pass the captured stderr along so the caller can surface it
        raise DiffError(
            "Command '{}' returned non-zero exit status {}".format(
                subprocess.list2cmdline(invocation), proc.returncode
            ),
            errs,
        )
    return make_diff(file, original, outs), errs
def bold_red(s):
    """Wrap ``s`` in ANSI escape codes for bold red terminal text."""
    return "".join(("\x1b[1m", "\x1b[31m", s, "\x1b[0m"))
def colorize(diff_lines):
    """Yield diff lines wrapped in ANSI colors.

    File headers are bold, hunk headers cyan, additions green, deletions red;
    context lines pass through unchanged.
    """
    def wrap(code, text):
        return "\x1b[" + code + "m" + text + "\x1b[0m"

    for line in diff_lines:
        if line.startswith("--- ") or line.startswith("+++ "):
            yield wrap("1", line)      # bold
        elif line.startswith("@@ "):
            yield wrap("36", line)     # cyan
        elif line.startswith("+"):
            yield wrap("32", line)     # green
        elif line.startswith("-"):
            yield wrap("31", line)     # red
        else:
            yield line
def print_diff(diff_lines, use_color):
    """Write diff lines to stdout, colorizing them when requested."""
    lines = colorize(diff_lines) if use_color else diff_lines
    sys.stdout.writelines(lines)
def print_trouble(prog, message, use_colors):
    """Report an error on stderr in compiler style: ``prog: error: message``."""
    label = bold_red("error:") if use_colors else "error:"
    print(f"{prog}: {label} {message}", file=sys.stderr)
def main():
    """CLI entry point: lint files with clang-format, print diffs, return an ExitStatus code."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--clang-format-executable",
        metavar="EXECUTABLE",
        help="path to the clang-format executable",
        default="clang-format",
    )
    parser.add_argument(
        "--extensions",
        help=f"comma separated list of file extensions (default: {DEFAULT_EXTENSIONS})",
        default=DEFAULT_EXTENSIONS,
    )
    parser.add_argument("-r", "--recursive", action="store_true", help="run recursively over directories")
    parser.add_argument("files", metavar="file", nargs="+")
    parser.add_argument("-q", "--quiet", action="store_true")
    parser.add_argument(
        "-j",
        metavar="N",
        type=int,
        default=0,
        help="run N clang-format jobs in parallel (default number of cpus + 1)",
    )
    parser.add_argument(
        "--color", default="auto", choices=["auto", "always", "never"], help="show colored diff (default: auto)"
    )
    parser.add_argument(
        "-e",
        "--exclude",
        metavar="PATTERN",
        action="append",
        default=[],
        help="exclude paths matching the given glob-like pattern(s) from recursive search",
    )
    args = parser.parse_args()
    # use default signal handling, like diff return SIGINT value on ^C
    # https://bugs.python.org/issue14229#msg156446
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    try:
        signal.SIGPIPE
    except AttributeError:
        # compatibility, SIGPIPE does not exist on Windows
        pass
    else:
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    # decide coloring separately for stdout (diffs) and stderr (errors)
    colored_stdout = False
    colored_stderr = False
    if args.color == "always":
        colored_stdout = True
        colored_stderr = True
    elif args.color == "auto":
        colored_stdout = sys.stdout.isatty()
        colored_stderr = sys.stderr.isatty()
    # fail fast (and clearly) if the clang-format binary is unusable
    version_invocation = [args.clang_format_executable, "--version"]
    try:
        subprocess.check_call(version_invocation, stdout=DEVNULL)
    except subprocess.CalledProcessError as e:
        print_trouble(parser.prog, str(e), use_colors=colored_stderr)
        return ExitStatus.TROUBLE
    except OSError as e:
        print_trouble(
            parser.prog,
            f"Command '{subprocess.list2cmdline(version_invocation)}' failed to start: {e}",
            use_colors=colored_stderr,
        )
        return ExitStatus.TROUBLE
    retcode = ExitStatus.SUCCESS
    files = list_files(
        args.files, recursive=args.recursive, exclude=args.exclude, extensions=args.extensions.split(",")
    )
    if not files:
        # nothing to check; implicit return None is treated as success by sys.exit
        return
    njobs = args.j
    if njobs == 0:
        njobs = multiprocessing.cpu_count() + 1
    njobs = min(len(files), njobs)
    if njobs == 1:
        # execute directly instead of in a pool,
        # less overhead, simpler stacktraces
        it = (run_clang_format_diff_wrapper(args, file) for file in files)
        pool = None
    else:
        pool = multiprocessing.Pool(njobs)
        it = pool.imap_unordered(partial(run_clang_format_diff_wrapper, args), files)
    # drain results; DiffError keeps going, UnexpectedError aborts the run
    while True:
        try:
            outs, errs = next(it)
        except StopIteration:
            break
        except DiffError as e:
            print_trouble(parser.prog, str(e), use_colors=colored_stderr)
            retcode = ExitStatus.TROUBLE
            sys.stderr.writelines(e.errs)
        except UnexpectedError as e:
            print_trouble(parser.prog, str(e), use_colors=colored_stderr)
            sys.stderr.write(e.formatted_traceback)
            retcode = ExitStatus.TROUBLE
            # stop at the first unexpected error,
            # something could be very wrong,
            # don't process all files unnecessarily
            if pool:
                pool.terminate()
            break
        else:
            sys.stderr.writelines(errs)
            if outs == []:
                continue
            if not args.quiet:
                print_diff(outs, use_color=colored_stdout)
            if retcode == ExitStatus.SUCCESS:
                retcode = ExitStatus.DIFF
    return retcode
if __name__ == "__main__":
    # propagate main()'s ExitStatus return value as the process exit code
    sys.exit(main())
| bsd-3-clause | 8875c6a942426de8bc8c6079b568130b | 31.226586 | 113 | 0.622106 | 3.949278 | false | false | false | false |
pytorch/vision | references/depth/stereo/train.py | 1 | 33426 | import argparse
import os
import warnings
from pathlib import Path
from typing import List, Union
import numpy as np
import torch
import torch.distributed as dist
import torchvision.models.optical_flow
import torchvision.prototype.models.depth.stereo
import utils
import vizualization
from parsing import make_dataset, make_eval_transform, make_train_transform, VALID_DATASETS
from torch import nn
from torchvision.transforms.functional import get_dimensions, InterpolationMode, resize
from utils.metrics import AVAILABLE_METRICS
from utils.norm import freeze_batch_norm
def make_stereo_flow(flow: Union[torch.Tensor, List[torch.Tensor]], model_out_channels: int) -> torch.Tensor:
    """Helper function to make stereo flow from a given model output.

    Single-channel disparity maps are padded with a zero Y-flow channel when
    the model expects 2-channel (X-Y) flow; lists are converted element-wise.
    """
    if isinstance(flow, list):
        return [make_stereo_flow(item, model_out_channels) for item in flow]
    # unpacking enforces the expected 4-D (B, C, H, W) layout
    batch, channels, height, width = flow.shape
    if channels == 1 and model_out_channels == 2:
        # by convention the flow is X-Y axis, so the synthetic Y flow goes last
        flow = torch.cat([flow, torch.zeros_like(flow)], dim=1)
    return flow
def make_lr_schedule(args: argparse.Namespace, optimizer: torch.optim.Optimizer) -> torch.optim.lr_scheduler.SequentialLR:
    """Helper function to return a learning rate scheduler for CRE-stereo.

    The schedule has up to three phases stitched together by ``SequentialLR``:
    an optional warm-up, an optional constant ("flat") phase, and a decay
    phase (cosine / linear / exponential).

    Raises:
        ValueError: if ``decay_after_steps`` < ``warmup_steps`` or an unknown
            warm-up/decay method name is supplied.
    """
    if args.decay_after_steps < args.warmup_steps:
        # Bug fix: this previously interpolated the non-existent
        # ``args.function``, which raised AttributeError instead of the
        # intended ValueError.
        raise ValueError(
            f"decay_after_steps: {args.decay_after_steps} must be greater than warmup_steps: {args.warmup_steps}"
        )
    warmup_steps = args.warmup_steps if args.warmup_steps else 0
    flat_lr_steps = args.decay_after_steps - warmup_steps if args.decay_after_steps else 0
    decay_lr_steps = args.total_iterations - flat_lr_steps
    max_lr = args.lr
    min_lr = args.min_lr
    schedulers = []
    milestones = []
    if warmup_steps > 0:
        if args.lr_warmup_method == "linear":
            warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
                optimizer, start_factor=args.lr_warmup_factor, total_iters=warmup_steps
            )
        elif args.lr_warmup_method == "constant":
            warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
                optimizer, factor=args.lr_warmup_factor, total_iters=warmup_steps
            )
        else:
            raise ValueError(f"Unknown lr warmup method {args.lr_warmup_method}")
        schedulers.append(warmup_lr_scheduler)
        milestones.append(warmup_steps)
    if flat_lr_steps > 0:
        flat_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer, factor=max_lr, total_iters=flat_lr_steps)
        schedulers.append(flat_lr_scheduler)
        milestones.append(flat_lr_steps + warmup_steps)
    if decay_lr_steps > 0:
        if args.lr_decay_method == "cosine":
            decay_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                optimizer, T_max=decay_lr_steps, eta_min=min_lr
            )
        elif args.lr_decay_method == "linear":
            decay_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
                optimizer, start_factor=max_lr, end_factor=min_lr, total_iters=decay_lr_steps
            )
        elif args.lr_decay_method == "exponential":
            decay_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
                optimizer, gamma=args.lr_decay_gamma, last_epoch=-1
            )
        else:
            raise ValueError(f"Unknown lr decay method {args.lr_decay_method}")
        schedulers.append(decay_lr_scheduler)
    scheduler = torch.optim.lr_scheduler.SequentialLR(optimizer, schedulers, milestones=milestones)
    return scheduler
def shuffle_dataset(dataset):
    """Return a randomly permuted, index-based view over ``dataset`` (samples are not copied)."""
    order = torch.randperm(len(dataset))
    return torch.utils.data.Subset(dataset, order)
def resize_dataset_to_n_steps(
    dataset: torch.utils.data.Dataset, dataset_steps: int, samples_per_step: int, args: argparse.Namespace
) -> torch.utils.data.Dataset:
    """Tile ``dataset`` (optionally shuffling each copy) so it spans ``dataset_steps`` steps."""
    original_size = len(dataset)
    # "steps are epochs" mode: one step consumes a full pass over the data
    if args.steps_is_epochs:
        samples_per_step = original_size
    target_size = dataset_steps * samples_per_step

    full_copies, remainder = divmod(target_size, original_size)
    copies = [dataset for _ in range(full_copies)]
    if remainder > 0:
        # partial copy covering just the leftover prefix
        copies.append(torch.utils.data.Subset(dataset, list(range(remainder))))
    if args.dataset_shuffle:
        copies = [shuffle_dataset(copy) for copy in copies]
    return torch.utils.data.ConcatDataset(copies)
def get_train_dataset(dataset_root: str, args: argparse.Namespace) -> torch.utils.data.Dataset:
    """Assemble the training set: load each configured source, tile it to its step budget, concat."""
    train_sets = []
    for name in args.train_datasets:
        transform = make_train_transform(args)
        train_sets.append(make_dataset(name, dataset_root, transform))

    if not train_sets:
        raise ValueError("No datasets specified for training")

    samples_per_step = args.world_size * args.batch_size
    # resize only the zip-prefix, in place, mirroring the per-dataset step budgets
    for idx, (train_set, steps) in enumerate(zip(train_sets, args.dataset_steps)):
        train_sets[idx] = resize_dataset_to_n_steps(train_set, steps, samples_per_step, args)

    dataset = torch.utils.data.ConcatDataset(train_sets)
    if args.dataset_order_shuffle:
        dataset = shuffle_dataset(dataset)

    print(f"Training dataset: {len(dataset)} samples")
    return dataset
@torch.inference_mode()
def _evaluate(
    model,
    args,
    val_loader,
    *,
    padder_mode,
    print_freq=10,
    writter=None,
    step=None,
    iterations=None,
    batch_size=None,
    header=None,
):
    """Helper function to compute various metrics (epe, etc.) for a model on a given dataset.

    Runs the model over ``val_loader`` under autocast, accumulates the meters
    named in ``args.metrics`` (plus ``fl-all``), and optionally logs averages
    to a TensorBoard-style ``writter`` on rank 0.
    """
    model.eval()
    header = header or "Test:"
    device = torch.device(args.device)
    metric_logger = utils.MetricLogger(delimiter=" ")
    iterations = iterations or args.recurrent_updates
    logger = utils.MetricLogger()
    for meter_name in args.metrics:
        logger.add_meter(meter_name, fmt="{global_avg:.4f}")
    if "fl-all" not in args.metrics:
        logger.add_meter("fl-all", fmt="{global_avg:.4f}")
    num_processed_samples = 0
    with torch.cuda.amp.autocast(enabled=args.mixed_precision, dtype=torch.float16):
        for blob in metric_logger.log_every(val_loader, print_freq, header):
            image_left, image_right, disp_gt, valid_disp_mask = (x.to(device) for x in blob)
            # pad inputs to a size the model accepts; predictions are unpadded below
            padder = utils.InputPadder(image_left.shape, mode=padder_mode)
            image_left, image_right = padder.pad(image_left, image_right)
            disp_predictions = model(image_left, image_right, flow_init=None, num_iters=iterations)
            # keep only the final refinement's disparity (X) channel
            disp_pred = disp_predictions[-1][:, :1, :, :]
            disp_pred = padder.unpad(disp_pred)
            metrics, _ = utils.compute_metrics(disp_pred, disp_gt, valid_disp_mask, metrics=logger.meters.keys())
            num_processed_samples += image_left.shape[0]
            for name in metrics:
                logger.meters[name].update(metrics[name], n=1)
    num_processed_samples = utils.reduce_across_processes(num_processed_samples)
    print("Num_processed_samples: ", num_processed_samples)
    # NOTE(review): torch.distributed.get_rank() here assumes the process
    # group is initialized; in a purely single-process run this check would
    # raise — confirm against the launcher.
    if (
        hasattr(val_loader.dataset, "__len__")
        and len(val_loader.dataset) != num_processed_samples
        and torch.distributed.get_rank() == 0
    ):
        warnings.warn(
            f"Number of processed samples {num_processed_samples} is different"
            f"from the dataset size {len(val_loader.dataset)}. This may happen if"
            "the dataset is not divisible by the batch size. Try lowering the batch size or GPU number for more accurate results."
        )
    if writter is not None and args.rank == 0:
        for meter_name, meter_value in logger.meters.items():
            scalar_name = f"{meter_name} {header}"
            writter.add_scalar(scalar_name, meter_value.avg, step)
    logger.synchronize_between_processes()
    print(header, logger)
def make_eval_loader(dataset_name: str, args: argparse.Namespace) -> torch.utils.data.DataLoader:
    """Build a (possibly distributed) evaluation DataLoader for ``dataset_name``.

    When ``args.weights`` is set, images go through the weights' bundled
    transforms and the disparity / validity targets are rescaled to match the
    transformed image size; otherwise the project's eval transform is used.
    """
    if args.weights:
        weights = torchvision.models.get_weight(args.weights)
        trans = weights.transforms()

        def preprocessing(image_left, image_right, disp, valid_disp_mask):
            C_o, H_o, W_o = get_dimensions(image_left)
            image_left, image_right = trans(image_left, image_right)
            C_t, H_t, W_t = get_dimensions(image_left)
            # disparity values scale with horizontal resolution
            scale_factor = W_t / W_o
            if disp is not None and not isinstance(disp, torch.Tensor):
                disp = torch.from_numpy(disp)
                if W_t != W_o:
                    # Bug fix: torchvision's ``resize`` takes ``interpolation=``,
                    # not ``mode=`` (that keyword belongs to F.interpolate), so
                    # this branch previously raised TypeError.
                    disp = resize(disp, (H_t, W_t), interpolation=InterpolationMode.BILINEAR) * scale_factor
            if valid_disp_mask is not None and not isinstance(valid_disp_mask, torch.Tensor):
                valid_disp_mask = torch.from_numpy(valid_disp_mask)
                if W_t != W_o:
                    # nearest-neighbour keeps the mask binary
                    valid_disp_mask = resize(valid_disp_mask, (H_t, W_t), interpolation=InterpolationMode.NEAREST)
            return image_left, image_right, disp, valid_disp_mask

    else:
        preprocessing = make_eval_transform(args)
    val_dataset = make_dataset(dataset_name, args.dataset_root, transforms=preprocessing)
    if args.distributed:
        sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, drop_last=False)
    else:
        sampler = torch.utils.data.SequentialSampler(val_dataset)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        sampler=sampler,
        batch_size=args.batch_size,
        pin_memory=True,
        num_workers=args.workers,
    )
    return val_loader
def evaluate(model, loaders, args, writter=None, step=None):
    """Run ``_evaluate`` once per named validation loader."""
    shared_kwargs = dict(
        iterations=args.recurrent_updates,
        padder_mode=args.padder_type,
        batch_size=args.batch_size,
        writter=writter,
        step=step,
    )
    for loader_name, loader in loaders.items():
        _evaluate(model, args, loader, header=f"{loader_name} evaluation", **shared_kwargs)
def run(model, optimizer, scheduler, train_loader, val_loaders, logger, writer, scaler, args):
    """Main optimization loop.

    Iterates ``args.total_iterations`` steps over ``train_loader``, combining the
    flow sequence loss with optional consistency / PSNR / photometric / smoothness
    terms, and periodically logs to tensorboard, saves checkpoints and evaluates.
    ``scaler`` is a ``GradScaler`` when mixed precision is enabled, else ``None``.
    """
    device = torch.device(args.device)
    # wrap the loader in a logger
    loader = iter(logger.log_every(train_loader))
    # output channels (1 = disparity only, 2 = flow-like output)
    model_out_channels = model.module.output_channels if args.distributed else model.output_channels

    torch.set_num_threads(args.threads)

    sequence_criterion = utils.SequenceLoss(
        gamma=args.gamma,
        max_flow=args.max_disparity,
        exclude_large_flows=args.flow_loss_exclude_large,
    ).to(device)

    # auxiliary criteria are only instantiated when their weight is non-zero
    if args.consistency_weight:
        consistency_criterion = utils.FlowSequenceConsistencyLoss(
            args.gamma,
            resize_factor=0.25,
            rescale_factor=0.25,
            rescale_mode="bilinear",
        ).to(device)
    else:
        consistency_criterion = None

    if args.psnr_weight:
        psnr_criterion = utils.PSNRLoss().to(device)
    else:
        psnr_criterion = None

    if args.smoothness_weight:
        smoothness_criterion = utils.SmoothnessLoss().to(device)
    else:
        smoothness_criterion = None

    if args.photometric_weight:
        photometric_criterion = utils.FlowPhotoMetricLoss(
            ssim_weight=args.photometric_ssim_weight,
            max_displacement_ratio=args.photometric_max_displacement_ratio,
            ssim_use_padding=False,
        ).to(device)
    else:
        photometric_criterion = None

    for step in range(args.start_step + 1, args.total_iterations + 1):
        data_blob = next(loader)
        optimizer.zero_grad()

        # unpack the data blob
        image_left, image_right, disp_mask, valid_disp_mask = (x.to(device) for x in data_blob)
        with torch.cuda.amp.autocast(enabled=args.mixed_precision, dtype=torch.float16):
            disp_predictions = model(image_left, image_right, flow_init=None, num_iters=args.recurrent_updates)
            # different models have different outputs, make sure we get the right ones for this task
            disp_predictions = make_stereo_flow(disp_predictions, model_out_channels)
            # should the architecture or training loop require it, we have to adjust the disparity mask
            # target to possibly look like an optical flow mask
            disp_mask = make_stereo_flow(disp_mask, model_out_channels)
            # sequence loss on top of the model outputs
            loss = sequence_criterion(disp_predictions, disp_mask, valid_disp_mask) * args.flow_loss_weight

            if args.consistency_weight > 0:
                loss_consistency = consistency_criterion(disp_predictions)
                loss += loss_consistency * args.consistency_weight

            if args.psnr_weight > 0:
                loss_psnr = 0.0
                for pred in disp_predictions:
                    # predictions might have 2 channels
                    loss_psnr += psnr_criterion(
                        pred * valid_disp_mask.unsqueeze(1),
                        disp_mask * valid_disp_mask.unsqueeze(1),
                    ).mean()  # mean the psnr loss over the batch
                loss += loss_psnr / len(disp_predictions) * args.psnr_weight

            if args.photometric_weight > 0:
                loss_photometric = 0.0
                for pred in disp_predictions:
                    # predictions might have 1 channel, therefore we need to inpute 0s for the second channel
                    if model_out_channels == 1:
                        pred = torch.cat([pred, torch.zeros_like(pred)], dim=1)
                    loss_photometric += photometric_criterion(
                        image_left, image_right, pred, valid_disp_mask
                    )  # photometric loss already comes out meaned over the batch
                loss += loss_photometric / len(disp_predictions) * args.photometric_weight

            if args.smoothness_weight > 0:
                loss_smoothness = 0.0
                for pred in disp_predictions:
                    # predictions might have 2 channels
                    loss_smoothness += smoothness_criterion(
                        image_left, pred[:, :1, :, :]
                    ).mean()  # mean the smoothness loss over the batch
                loss += loss_smoothness / len(disp_predictions) * args.smoothness_weight

        # metrics are computed on the final refinement only, without tracking gradients
        with torch.no_grad():
            metrics, _ = utils.compute_metrics(
                disp_predictions[-1][:, :1, :, :],  # predictions might have 2 channels
                disp_mask[:, :1, :, :],  # so does the ground truth
                valid_disp_mask,
                args.metrics,
            )

        metrics.pop("fl-all", None)
        logger.update(loss=loss, **metrics)

        if scaler is not None:
            # mixed-precision path: scale, unscale before clipping, then step
            scaler.scale(loss).backward()
            scaler.unscale_(optimizer)
            if args.clip_grad_norm:
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=args.clip_grad_norm)
            scaler.step(optimizer)
            scaler.update()
        else:
            loss.backward()
            if args.clip_grad_norm:
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=args.clip_grad_norm)
            optimizer.step()

        scheduler.step()

        # only the main process writes tensorboard summaries
        if not dist.is_initialized() or dist.get_rank() == 0:
            if writer is not None and step % args.tensorboard_log_frequency == 0:
                # log the loss and metrics to tensorboard
                writer.add_scalar("loss", loss, step)
                for name, value in logger.meters.items():
                    writer.add_scalar(name, value.avg, step)
                # log the images to tensorboard
                pred_grid = vizualization.make_training_sample_grid(
                    image_left, image_right, disp_mask, valid_disp_mask, disp_predictions
                )
                writer.add_image("predictions", pred_grid, step, dataformats="HWC")
                # second thing we want to see is how relevant the iterative refinement is
                pred_sequence_grid = vizualization.make_disparity_sequence_grid(disp_predictions, disp_mask)
                writer.add_image("sequence", pred_sequence_grid, step, dataformats="HWC")

        if step % args.save_frequency == 0:
            if not args.distributed or args.rank == 0:
                model_without_ddp = (
                    model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model
                )
                checkpoint = {
                    "model": model_without_ddp.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "scheduler": scheduler.state_dict(),
                    "step": step,
                    "args": args,
                }
                os.makedirs(args.checkpoint_dir, exist_ok=True)
                # keep both a step-tagged snapshot and a rolling "latest" checkpoint
                torch.save(checkpoint, Path(args.checkpoint_dir) / f"{args.name}_{step}.pth")
                torch.save(checkpoint, Path(args.checkpoint_dir) / f"{args.name}.pth")

        if step % args.valid_frequency == 0:
            evaluate(model, val_loaders, args, writer, step)
            # evaluation switched the model to eval mode; restore training state
            model.train()
            if args.freeze_batch_norm:
                if isinstance(model, nn.parallel.DistributedDataParallel):
                    freeze_batch_norm(model.module)
                else:
                    freeze_batch_norm(model)

    # one final save at the end
    if not args.distributed or args.rank == 0:
        model_without_ddp = model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model
        checkpoint = {
            "model": model_without_ddp.state_dict(),
            "optimizer": optimizer.state_dict(),
            "scheduler": scheduler.state_dict(),
            "step": step,
            "args": args,
        }
        os.makedirs(args.checkpoint_dir, exist_ok=True)
        torch.save(checkpoint, Path(args.checkpoint_dir) / f"{args.name}_{step}.pth")
        torch.save(checkpoint, Path(args.checkpoint_dir) / f"{args.name}.pth")
def main(args):
    """Entry point: set up DDP, build model/data/optimizer, then train or evaluate.

    If ``args.train_datasets`` is None the script runs evaluation only.
    """
    args.total_iterations = sum(args.dataset_steps)

    # initialize DDP setting
    utils.setup_ddp(args)
    print(args)

    args.test_only = args.train_datasets is None

    # set the appropriate devices
    if args.distributed and args.device == "cpu":
        raise ValueError("The device must be cuda if we want to run in distributed mode using torchrun")
    device = torch.device(args.device)

    # select model architecture
    model = torchvision.prototype.models.depth.stereo.__dict__[args.model](weights=args.weights)

    # convert to DDP if need be
    if args.distributed:
        model = model.to(args.gpu)
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    else:
        model.to(device)
        model_without_ddp = model

    os.makedirs(args.checkpoint_dir, exist_ok=True)

    val_loaders = {name: make_eval_loader(name, args) for name in args.test_datasets}

    # EVAL ONLY configurations
    if args.test_only:
        evaluate(model, val_loaders, args)
        return

    # Sanity check for the parameter count
    print(f"Parameter Count: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")

    # Compose the training dataset
    train_dataset = get_train_dataset(args.dataset_root, args)

    # initialize the optimizer
    if args.optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=0.9)
    else:
        raise ValueError(f"Unknown optimizer {args.optimizer}. Please choose between adam and sgd")

    # initialize the learning rate schedule
    scheduler = make_lr_schedule(args, optimizer)

    # load them from checkpoint if needed
    args.start_step = 0
    if args.resume_path is not None:
        checkpoint = torch.load(args.resume_path, map_location="cpu")
        if "model" in checkpoint:
            # this means the user requested to resume from a training checkpoint
            model_without_ddp.load_state_dict(checkpoint["model"])
            # this means the user wants to continue training from where it was left off
            if args.resume_schedule:
                optimizer.load_state_dict(checkpoint["optimizer"])
                scheduler.load_state_dict(checkpoint["scheduler"])
                args.start_step = checkpoint["step"] + 1
                # modify starting point of the dataset consumption: skip the samples
                # that have already been seen by previous steps across all ranks
                sample_start_step = args.start_step * args.batch_size * args.world_size
                train_dataset = train_dataset[sample_start_step:]
        else:
            # this means the user wants to finetune on top of a model state dict
            # and that no other changes are required
            model_without_ddp.load_state_dict(checkpoint)

    torch.backends.cudnn.benchmark = True

    # enable training mode
    model.train()
    if args.freeze_batch_norm:
        freeze_batch_norm(model_without_ddp)

    # put dataloader on top of the dataset
    # make sure to disable shuffling since the dataset is already shuffled
    # in order to guarantee quasi randomness whilst retaining a deterministic
    # dataset consumption order
    if args.distributed:
        # the train dataset is preshuffled in order to respect the iteration order
        sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, drop_last=True)
    else:
        # the train dataset is already shuffled so we can use a simple SequentialSampler
        sampler = torch.utils.data.SequentialSampler(train_dataset)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        sampler=sampler,
        batch_size=args.batch_size,
        pin_memory=True,
        num_workers=args.workers,
    )

    # initialize the logger
    if args.tensorboard_summaries:
        from torch.utils.tensorboard import SummaryWriter

        tensorboard_path = Path(args.checkpoint_dir) / "tensorboard"
        os.makedirs(tensorboard_path, exist_ok=True)

        tensorboard_run = tensorboard_path / f"{args.name}"
        writer = SummaryWriter(tensorboard_run)
    else:
        writer = None

    logger = utils.MetricLogger(delimiter="  ")

    scaler = torch.cuda.amp.GradScaler() if args.mixed_precision else None

    # run the training loop
    # this will perform optimization, respectively logging and saving checkpoints
    # when need be
    run(
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
        train_loader=train_loader,
        val_loaders=val_loaders,
        logger=logger,
        writer=writer,
        scaler=scaler,
        args=args,
    )
def get_args_parser(add_help=True):
    """Build the CLI argument parser for stereo-matching training / evaluation."""
    import argparse

    parser = argparse.ArgumentParser(description="PyTorch Stereo Matching Training", add_help=add_help)
    # checkpointing
    parser.add_argument("--name", default="crestereo", help="name of the experiment")
    # NOTE(review): checkpoint resume is driven by --resume-path below; --resume looks
    # unused in this file — confirm before removing (removal would change the CLI).
    parser.add_argument("--resume", type=str, default=None, help="from which checkpoint to resume")
    parser.add_argument("--checkpoint-dir", type=str, default="checkpoints", help="path to the checkpoint directory")

    # dataset
    parser.add_argument("--dataset-root", type=str, default="", help="path to the dataset root directory")
    parser.add_argument(
        "--train-datasets",
        type=str,
        nargs="+",
        default=["crestereo"],
        help="dataset(s) to train on",
        choices=list(VALID_DATASETS.keys()),
    )
    parser.add_argument(
        "--dataset-steps", type=int, nargs="+", default=[300_000], help="number of steps for each dataset"
    )
    parser.add_argument(
        "--steps-is-epochs", action="store_true", help="if set, dataset-steps are interpreted as epochs"
    )
    parser.add_argument(
        "--test-datasets",
        type=str,
        nargs="+",
        default=["middlebury2014-train"],
        help="dataset(s) to test on",
        choices=["middlebury2014-train"],
    )
    # NOTE(review): argparse's type=bool does NOT parse "False"/"0" as False — any
    # non-empty string is truthy. These two flags effectively can only be True from
    # the command line; a str-to-bool converter would fix this but changes CLI semantics.
    parser.add_argument("--dataset-shuffle", type=bool, help="shuffle the dataset", default=True)
    parser.add_argument("--dataset-order-shuffle", type=bool, help="shuffle the dataset order", default=True)
    parser.add_argument("--batch-size", type=int, default=2, help="batch size per GPU")
    parser.add_argument("--workers", type=int, default=4, help="number of workers per GPU")
    parser.add_argument(
        "--threads",
        type=int,
        default=16,
        help="number of CPU threads per GPU. This can be changed around to speed-up transforms if needed. This can lead to worker thread contention so use with care.",
    )

    # model architecture
    parser.add_argument(
        "--model",
        type=str,
        default="crestereo_base",
        help="model architecture",
        choices=["crestereo_base", "raft_stereo"],
    )
    parser.add_argument("--recurrent-updates", type=int, default=10, help="number of recurrent updates")
    parser.add_argument("--freeze-batch-norm", action="store_true", help="freeze batch norm parameters")

    # loss parameters
    parser.add_argument("--gamma", type=float, default=0.8, help="gamma parameter for the flow sequence loss")
    parser.add_argument("--flow-loss-weight", type=float, default=1.0, help="weight for the flow loss")
    parser.add_argument(
        "--flow-loss-exclude-large",
        action="store_true",
        help="exclude large flow values from the loss. A large value is defined as a value greater than the ground truth flow norm",
        default=False,
    )
    parser.add_argument("--consistency-weight", type=float, default=0.0, help="consistency loss weight")
    parser.add_argument(
        "--consistency-resize-factor",
        type=float,
        default=0.25,
        help="consistency loss resize factor to account for the fact that the flow is computed on a downsampled image",
    )
    parser.add_argument("--psnr-weight", type=float, default=0.0, help="psnr loss weight")
    parser.add_argument("--smoothness-weight", type=float, default=0.0, help="smoothness loss weight")
    parser.add_argument("--photometric-weight", type=float, default=0.0, help="photometric loss weight")
    parser.add_argument(
        "--photometric-max-displacement-ratio",
        type=float,
        default=0.15,
        help="Only pixels with a displacement smaller than this ratio of the image width will be considered for the photometric loss",
    )
    parser.add_argument("--photometric-ssim-weight", type=float, default=0.85, help="photometric ssim loss weight")

    # transforms parameters
    parser.add_argument("--gpu-transforms", action="store_true", help="use GPU transforms")
    parser.add_argument(
        "--eval-size", type=int, nargs="+", default=[384, 512], help="size of the images for evaluation"
    )
    parser.add_argument("--resize-size", type=int, nargs=2, default=None, help="resize size")
    parser.add_argument("--crop-size", type=int, nargs=2, default=[384, 512], help="crop size")
    parser.add_argument("--scale-range", type=float, nargs=2, default=[0.6, 1.0], help="random scale range")
    parser.add_argument("--rescale-prob", type=float, default=1.0, help="probability of resizing the image")
    parser.add_argument(
        "--scaling-type", type=str, default="linear", help="scaling type", choices=["exponential", "linear"]
    )
    parser.add_argument("--flip-prob", type=float, default=0.5, help="probability of flipping the image")
    parser.add_argument(
        "--norm-mean", type=float, nargs="+", default=[0.5, 0.5, 0.5], help="mean for image normalization"
    )
    parser.add_argument(
        "--norm-std", type=float, nargs="+", default=[0.5, 0.5, 0.5], help="std for image normalization"
    )
    parser.add_argument(
        "--use-grayscale", action="store_true", help="use grayscale images instead of RGB", default=False
    )
    parser.add_argument("--max-disparity", type=float, default=None, help="maximum disparity")
    parser.add_argument(
        "--interpolation-strategy",
        type=str,
        default="bilinear",
        help="interpolation strategy",
        choices=["bilinear", "bicubic", "mixed"],
    )
    parser.add_argument("--spatial-shift-prob", type=float, default=1.0, help="probability of shifting the image")
    parser.add_argument(
        "--spatial-shift-max-angle", type=float, default=0.1, help="maximum angle for the spatial shift"
    )
    parser.add_argument(
        "--spatial-shift-max-displacement", type=float, default=2.0, help="maximum displacement for the spatial shift"
    )
    parser.add_argument("--gamma-range", type=float, nargs="+", default=[0.8, 1.2], help="range for gamma correction")
    parser.add_argument(
        "--brightness-range", type=float, nargs="+", default=[0.8, 1.2], help="range for brightness correction"
    )
    parser.add_argument(
        "--contrast-range", type=float, nargs="+", default=[0.8, 1.2], help="range for contrast correction"
    )
    parser.add_argument(
        "--saturation-range", type=float, nargs="+", default=0.0, help="range for saturation correction"
    )
    parser.add_argument("--hue-range", type=float, nargs="+", default=0.0, help="range for hue correction")
    parser.add_argument(
        "--asymmetric-jitter-prob",
        type=float,
        default=1.0,
        help="probability of using asymmetric jitter instead of symmetric jitter",
    )
    parser.add_argument("--occlusion-prob", type=float, default=0.5, help="probability of occluding the rightimage")
    parser.add_argument(
        "--occlusion-px-range", type=int, nargs="+", default=[50, 100], help="range for the number of occluded pixels"
    )
    parser.add_argument("--erase-prob", type=float, default=0.0, help="probability of erasing in both images")
    parser.add_argument(
        "--erase-px-range", type=int, nargs="+", default=[50, 100], help="range for the number of erased pixels"
    )
    parser.add_argument(
        "--erase-num-repeats", type=int, default=1, help="number of times to repeat the erase operation"
    )

    # optimizer parameters
    parser.add_argument("--optimizer", type=str, default="adam", help="optimizer", choices=["adam", "sgd"])
    parser.add_argument("--lr", type=float, default=4e-4, help="learning rate")
    parser.add_argument("--weight-decay", type=float, default=0.0, help="weight decay")
    parser.add_argument("--clip-grad-norm", type=float, default=0.0, help="clip grad norm")

    # lr_scheduler parameters
    parser.add_argument("--min-lr", type=float, default=2e-5, help="minimum learning rate")
    parser.add_argument("--warmup-steps", type=int, default=6_000, help="number of warmup steps")
    parser.add_argument(
        "--decay-after-steps", type=int, default=180_000, help="number of steps after which to start decay the lr"
    )
    parser.add_argument(
        "--lr-warmup-method", type=str, default="linear", help="warmup method", choices=["linear", "cosine"]
    )
    parser.add_argument("--lr-warmup-factor", type=float, default=0.02, help="warmup factor for the learning rate")
    parser.add_argument(
        "--lr-decay-method",
        type=str,
        default="linear",
        help="decay method",
        choices=["linear", "cosine", "exponential"],
    )
    parser.add_argument("--lr-decay-gamma", type=float, default=0.8, help="decay factor for the learning rate")

    # deterministic behaviour
    parser.add_argument("--seed", type=int, default=42, help="seed for random number generators")

    # mixed precision training
    parser.add_argument("--mixed-precision", action="store_true", help="use mixed precision training")

    # logging
    parser.add_argument("--tensorboard-summaries", action="store_true", help="log to tensorboard")
    parser.add_argument("--tensorboard-log-frequency", type=int, default=100, help="log frequency")
    parser.add_argument("--save-frequency", type=int, default=1_000, help="save frequency")
    parser.add_argument("--valid-frequency", type=int, default=1_000, help="validation frequency")
    parser.add_argument(
        "--metrics",
        type=str,
        nargs="+",
        default=["mae", "rmse", "1px", "3px", "5px", "relepe"],
        help="metrics to log",
        choices=AVAILABLE_METRICS,
    )

    # distributed parameters
    parser.add_argument("--world-size", type=int, default=8, help="number of distributed processes")
    parser.add_argument("--dist-url", type=str, default="env://", help="url used to set up distributed training")
    parser.add_argument("--device", type=str, default="cuda", help="device to use for training")

    # weights API
    parser.add_argument("--weights", type=str, default=None, help="weights API url")
    parser.add_argument(
        "--resume-path", type=str, default=None, help="a path from which to resume or start fine-tuning"
    )
    parser.add_argument("--resume-schedule", action="store_true", help="resume optimizer state")

    # padder parameters
    parser.add_argument("--padder-type", type=str, default="kitti", help="padder type", choices=["kitti", "sintel"])

    return parser
if __name__ == "__main__":
    # Parse the CLI arguments and hand them straight to the training entry point.
    main(get_args_parser().parse_args())
| bsd-3-clause | 14dba50a8d2ac5bf038bde41c497c8c3 | 41.418782 | 167 | 0.641088 | 3.845163 | false | false | false | false |
pytorch/vision | torchvision/prototype/datasets/_api.py | 1 | 1763 | import pathlib
from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union
from torchvision.prototype.datasets import home
from torchvision.prototype.datasets.utils import Dataset
from torchvision.prototype.utils._internal import add_suggestion
T = TypeVar("T")
D = TypeVar("D", bound=Type[Dataset])
BUILTIN_INFOS: Dict[str, Dict[str, Any]] = {}
def register_info(name: str) -> Callable[[Callable[[], Dict[str, Any]]], Callable[[], Dict[str, Any]]]:
    """Decorator factory: store the dict produced by an info factory under ``name``."""

    def decorator(info_factory: Callable[[], Dict[str, Any]]) -> Callable[[], Dict[str, Any]]:
        # Evaluate the factory once at registration time; hand the factory back untouched.
        BUILTIN_INFOS[name] = info_factory()
        return info_factory

    return decorator
BUILTIN_DATASETS = {}
def register_dataset(name: str) -> Callable[[D], D]:
    """Decorator factory: register a ``Dataset`` subclass under ``name``."""

    def decorator(cls: D) -> D:
        # Record the class in the registry and return it unchanged so the
        # decorated definition stays usable as-is.
        BUILTIN_DATASETS[name] = cls
        return cls

    return decorator
def list_datasets() -> List[str]:
    """Return the names of all registered built-in datasets, alphabetically sorted."""
    # Iterating a dict yields its keys, so sorting the dict directly is equivalent
    # to sorting d.keys().
    return sorted(BUILTIN_DATASETS)
def find(dct: Dict[str, T], name: str) -> T:
    """Look up ``name`` (case-insensitively) in ``dct``.

    Raises:
        ValueError: with a close-match suggestion when ``name`` is unknown,
            chained from the underlying ``KeyError``.
    """
    key = name.lower()
    try:
        return dct[key]
    except KeyError as missing:
        hint = add_suggestion(
            f"Unknown dataset '{key}'.",
            word=key,
            possibilities=dct.keys(),
            alternative_hint=lambda _: (
                "You can use torchvision.datasets.list_datasets() to get a list of all available datasets."
            ),
        )
        raise ValueError(hint) from missing
def info(name: str) -> Dict[str, Any]:
    """Return the static info dictionary registered for dataset ``name`` (case-insensitive)."""
    return find(BUILTIN_INFOS, name)
def load(name: str, *, root: Optional[Union[str, pathlib.Path]] = None, **config: Any) -> Dataset:
    """Instantiate the dataset registered under ``name``.

    Args:
        name: registered dataset name (case-insensitive).
        root: dataset root directory; defaults to ``<home>/<name>``.
        **config: forwarded verbatim to the dataset constructor.
    """
    dataset_cls = find(BUILTIN_DATASETS, name)
    dataset_root = pathlib.Path(home()) / name if root is None else root
    return dataset_cls(dataset_root, **config)
| bsd-3-clause | 25d234f805333e58c656072cc9b644ba | 26.123077 | 111 | 0.613159 | 3.642562 | false | false | false | false |
pytorch/vision | torchvision/datasets/sun397.py | 1 | 2748 | from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
    """`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.

    The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
    397 categories with 108'754 images.

    Args:
        root (string): Root directory of the dataset.
        transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed
            version. E.g, ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    _DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
    _DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"

    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self._data_dir = Path(self.root) / "SUN397"

        if download:
            self._download()

        if not self._check_exists():
            raise RuntimeError("Dataset not found. You can use download=True to download it")

        # ClassName.txt lists one class path per line; keep everything after the
        # first three characters of each line (the leading "/<letter>/" prefix).
        with open(self._data_dir / "ClassName.txt") as f:
            self.classes = [c[3:].strip() for c in f]

        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
        self._image_files = list(self._data_dir.rglob("sun_*.jpg"))

        # Derive each label from the image's directory path relative to the data dir,
        # skipping the first component and the filename.
        self._labels = [
            self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files
        ]

    def __len__(self) -> int:
        return len(self._image_files)

    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
        # Returns (image, label); the image is always loaded as RGB.
        image_file, label = self._image_files[idx], self._labels[idx]
        image = PIL.Image.open(image_file).convert("RGB")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            label = self.target_transform(label)

        return image, label

    def _check_exists(self) -> bool:
        # Presence of the extracted directory is treated as "downloaded".
        return self._data_dir.is_dir()

    def _download(self) -> None:
        if self._check_exists():
            return
        download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
| bsd-3-clause | 70d6f56f46dd121e916b9bea0d864b72 | 35.157895 | 115 | 0.633916 | 3.774725 | false | false | false | false |
pytorch/vision | test/test_datasets_samplers.py | 1 | 3785 | import pytest
import torch
from common_utils import assert_equal, get_list_of_videos
from torchvision import io
from torchvision.datasets.samplers import DistributedSampler, RandomClipSampler, UniformClipSampler
from torchvision.datasets.video_utils import VideoClips
@pytest.mark.skipif(not io.video._av_available(), reason="this test requires av")
class TestDatasetsSamplers:
    """Tests for the clip samplers over synthetic videos.

    All fixtures use clip_length=5 with frames_between_clips=5, so a video of
    25 frames yields exactly 5 clips and clip index // 5 recovers the video index.
    """

    def test_random_clip_sampler(self, tmpdir):
        video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25])
        video_clips = VideoClips(video_list, 5, 5)
        sampler = RandomClipSampler(video_clips, 3)
        assert len(sampler) == 3 * 3
        indices = torch.tensor(list(iter(sampler)))
        # map each sampled clip index back to its source video
        videos = torch.div(indices, 5, rounding_mode="floor")
        v_idxs, count = torch.unique(videos, return_counts=True)
        # every video is represented, with exactly 3 clips each
        assert_equal(v_idxs, torch.tensor([0, 1, 2]))
        assert_equal(count, torch.tensor([3, 3, 3]))

    def test_random_clip_sampler_unequal(self, tmpdir):
        # first video has only 10 frames -> only 2 clips available
        video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[10, 25, 25])
        video_clips = VideoClips(video_list, 5, 5)
        sampler = RandomClipSampler(video_clips, 3)
        assert len(sampler) == 2 + 3 + 3
        indices = list(iter(sampler))
        assert 0 in indices
        assert 1 in indices
        # remove elements of the first video, to simplify testing
        indices.remove(0)
        indices.remove(1)
        indices = torch.tensor(indices) - 2
        videos = torch.div(indices, 5, rounding_mode="floor")
        v_idxs, count = torch.unique(videos, return_counts=True)
        assert_equal(v_idxs, torch.tensor([0, 1]))
        assert_equal(count, torch.tensor([3, 3]))

    def test_uniform_clip_sampler(self, tmpdir):
        video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25])
        video_clips = VideoClips(video_list, 5, 5)
        sampler = UniformClipSampler(video_clips, 3)
        assert len(sampler) == 3 * 3
        indices = torch.tensor(list(iter(sampler)))
        videos = torch.div(indices, 5, rounding_mode="floor")
        v_idxs, count = torch.unique(videos, return_counts=True)
        assert_equal(v_idxs, torch.tensor([0, 1, 2]))
        assert_equal(count, torch.tensor([3, 3, 3]))
        # uniform sampling is deterministic: evenly spaced clips per video
        assert_equal(indices, torch.tensor([0, 2, 4, 5, 7, 9, 10, 12, 14]))

    def test_uniform_clip_sampler_insufficient_clips(self, tmpdir):
        # first video only has 2 clips but 3 are requested -> clips get repeated
        video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[10, 25, 25])
        video_clips = VideoClips(video_list, 5, 5)
        sampler = UniformClipSampler(video_clips, 3)
        assert len(sampler) == 3 * 3
        indices = torch.tensor(list(iter(sampler)))
        assert_equal(indices, torch.tensor([0, 0, 1, 2, 4, 6, 7, 9, 11]))

    def test_distributed_sampler_and_uniform_clip_sampler(self, tmpdir):
        video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[25, 25, 25])
        video_clips = VideoClips(video_list, 5, 5)
        clip_sampler = UniformClipSampler(video_clips, 3)

        # rank 0 of 2 replicas takes the even-positioned groups of 3 clips
        distributed_sampler_rank0 = DistributedSampler(
            clip_sampler,
            num_replicas=2,
            rank=0,
            group_size=3,
        )
        indices = torch.tensor(list(iter(distributed_sampler_rank0)))
        assert len(distributed_sampler_rank0) == 6
        assert_equal(indices, torch.tensor([0, 2, 4, 10, 12, 14]))

        # rank 1 takes the remaining group and wraps around to pad to equal length
        distributed_sampler_rank1 = DistributedSampler(
            clip_sampler,
            num_replicas=2,
            rank=1,
            group_size=3,
        )
        indices = torch.tensor(list(iter(distributed_sampler_rank1)))
        assert len(distributed_sampler_rank1) == 6
        assert_equal(indices, torch.tensor([5, 7, 9, 0, 2, 4]))
if __name__ == "__main__":
    # Allow running this test module directly, outside of a full pytest session.
    pytest.main([__file__])
| bsd-3-clause | dc38f668d6bea90a7c3c036b8d9b3292 | 43.011628 | 99 | 0.628005 | 3.440909 | false | true | false | false |
pytorch/vision | test/test_architecture_ops.py | 1 | 1275 | import unittest
import pytest
import torch
from torchvision.models.maxvit import SwapAxes, WindowDepartition, WindowPartition
class MaxvitTester(unittest.TestCase):
    """Round-trip tests for MaxViT's window/grid partition helper modules."""

    def test_maxvit_window_partition(self):
        # partition followed by departition must reconstruct the input exactly
        input_shape = (1, 3, 224, 224)
        partition_size = 7
        n_partitions = input_shape[3] // partition_size

        x = torch.randn(input_shape)

        partition = WindowPartition()
        departition = WindowDepartition()

        x_hat = partition(x, partition_size)
        x_hat = departition(x_hat, partition_size, n_partitions, n_partitions)

        torch.testing.assert_close(x, x_hat)

    def test_maxvit_grid_partition(self):
        # grid partition uses the same modules with swapped roles of
        # partition_size / n_partitions, plus axis swaps around the attention step
        input_shape = (1, 3, 224, 224)
        partition_size = 7
        n_partitions = input_shape[3] // partition_size

        x = torch.randn(input_shape)
        pre_swap = SwapAxes(-2, -3)
        post_swap = SwapAxes(-2, -3)

        partition = WindowPartition()
        departition = WindowDepartition()

        x_hat = partition(x, n_partitions)
        # applying the same axis swap twice restores the original layout,
        # mirroring the swap/attention/swap-back structure of the real pipeline
        x_hat = pre_swap(x_hat)
        x_hat = post_swap(x_hat)
        x_hat = departition(x_hat, n_partitions, partition_size, partition_size)

        torch.testing.assert_close(x, x_hat)
if __name__ == "__main__":
    # Allow running this test module directly, outside of a full pytest session.
    pytest.main([__file__])
| bsd-3-clause | 46e7b8c17f4bf1e5618b41e1cf83afbe | 26.717391 | 82 | 0.621961 | 3.455285 | false | true | false | false |
pytorch/vision | torchvision/datasets/omniglot.py | 1 | 4091 | from os.path import join
from typing import Any, Callable, List, Optional, Tuple
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
    """`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory
            ``omniglot-py`` exists.
        background (bool, optional): If True, creates dataset from the "background" set, otherwise
            creates from the "evaluation" set. This terminology is defined by the authors.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts it in root directory. If the zip files are already downloaded, they are not
            downloaded again.
    """

    folder = "omniglot-py"
    download_url_prefix = "https://raw.githubusercontent.com/brendenlake/omniglot/master/python"
    # md5 checksums of the two downloadable archives, keyed by archive stem
    zips_md5 = {
        "images_background": "68d2efa1b9178cc56df9314c21c6e718",
        "images_evaluation": "6b91aef0f799c5bb55b94e3f2daec811",
    }

    def __init__(
        self,
        root: str,
        background: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform)
        self.background = background

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        self.target_folder = join(self.root, self._get_target_folder())
        self._alphabets = list_dir(self.target_folder)
        # one entry per character, as "<alphabet>/<character>" relative paths
        self._characters: List[str] = sum(
            ([join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets), []
        )
        # per-character lists of (filename, class index) pairs
        self._character_images = [
            [(image, idx) for image in list_files(join(self.target_folder, character), ".png")]
            for idx, character in enumerate(self._characters)
        ]
        # flattened view used for indexing in __getitem__
        self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, [])

    def __len__(self) -> int:
        return len(self._flat_character_images)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target character class.
        """
        image_name, character_class = self._flat_character_images[index]
        image_path = join(self.target_folder, self._characters[character_class], image_name)
        # images are grayscale strokes, hence the "L" conversion
        image = Image.open(image_path, mode="r").convert("L")

        if self.transform:
            image = self.transform(image)

        if self.target_transform:
            character_class = self.target_transform(character_class)

        return image, character_class

    def _check_integrity(self) -> bool:
        # integrity is verified against the zip archive of the selected split
        zip_filename = self._get_target_folder()
        if not check_integrity(join(self.root, zip_filename + ".zip"), self.zips_md5[zip_filename]):
            return False
        return True

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return

        filename = self._get_target_folder()
        zip_filename = filename + ".zip"
        url = self.download_url_prefix + "/" + zip_filename
        download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])

    def _get_target_folder(self) -> str:
        # the split selected at construction time determines the folder name
        return "images_background" if self.background else "images_evaluation"
| bsd-3-clause | 2aa22d94a90f05f550fdeab351ced9b2 | 39.107843 | 106 | 0.635297 | 3.93744 | false | false | false | false |
pytorch/vision | torchvision/ops/_register_onnx_ops.py | 1 | 4157 | import sys
import warnings
import torch
# ONNX opset versions under which the custom torchvision symbolics below are registered.
_onnx_opset_version_11 = 11
_onnx_opset_version_16 = 16
# Lowest opset version supported by the custom-op exporters.
base_onnx_opset_version = _onnx_opset_version_11
def _register_custom_op():
    """Register ONNX symbolic functions for torchvision's custom ops (nms, roi_align, roi_pool)."""
    from torch.onnx.symbolic_helper import parse_args
    from torch.onnx.symbolic_opset11 import select, squeeze, unsqueeze
    from torch.onnx.symbolic_opset9 import _cast_Long
    @parse_args("v", "v", "f")
    def symbolic_multi_label_nms(g, boxes, scores, iou_threshold):
        # ONNX NonMaxSuppression expects batched inputs: boxes (1, N, 4) and scores (1, 1, N).
        boxes = unsqueeze(g, boxes, 0)
        scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
        max_output_per_class = g.op("Constant", value_t=torch.tensor([sys.maxsize], dtype=torch.long))
        iou_threshold = g.op("Constant", value_t=torch.tensor([iou_threshold], dtype=torch.float))
        nms_out = g.op("NonMaxSuppression", boxes, scores, max_output_per_class, iou_threshold)
        # NonMaxSuppression emits (num_selected, 3) triples; column 2 holds the selected box indices.
        return squeeze(g, select(g, nms_out, 1, g.op("Constant", value_t=torch.tensor([2], dtype=torch.long))), 1)
    def _process_batch_indices_for_roi_align(g, rois):
        # Column 0 of each ROI row is the batch index; ONNX RoiAlign wants it as a separate int64 tensor.
        return _cast_Long(
            g, squeeze(g, select(g, rois, 1, g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))), 1), False
        )
    def _process_rois_for_roi_align(g, rois):
        # Columns 1-4 of each ROI row are the box coordinates.
        return select(g, rois, 1, g.op("Constant", value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
    def _process_sampling_ratio_for_roi_align(g, sampling_ratio: int):
        # Adaptive sampling (negative ratio) has no ONNX equivalent; fall back to 0 with a warning.
        if sampling_ratio < 0:
            warnings.warn(
                "ONNX export for RoIAlign with a non-zero sampling_ratio is not supported. "
                "The model will be exported with a sampling_ratio of 0."
            )
            sampling_ratio = 0
        return sampling_ratio
    @parse_args("v", "v", "f", "i", "i", "i", "i")
    def roi_align_opset11(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
        batch_indices = _process_batch_indices_for_roi_align(g, rois)
        rois = _process_rois_for_roi_align(g, rois)
        if aligned:
            # Opset 11 RoiAlign has no coordinate_transformation_mode, so aligned=True cannot be expressed.
            warnings.warn(
                "ROIAlign with aligned=True is only supported in opset >= 16. "
                "Please export with opset 16 or higher, or use aligned=False."
            )
        sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
        return g.op(
            "RoiAlign",
            input,
            rois,
            batch_indices,
            spatial_scale_f=spatial_scale,
            output_height_i=pooled_height,
            output_width_i=pooled_width,
            sampling_ratio_i=sampling_ratio,
        )
    @parse_args("v", "v", "f", "i", "i", "i", "i")
    def roi_align_opset16(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
        batch_indices = _process_batch_indices_for_roi_align(g, rois)
        rois = _process_rois_for_roi_align(g, rois)
        # Opset 16 added coordinate_transformation_mode, which maps directly onto `aligned`.
        coordinate_transformation_mode = "half_pixel" if aligned else "output_half_pixel"
        sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
        return g.op(
            "RoiAlign",
            input,
            rois,
            batch_indices,
            coordinate_transformation_mode_s=coordinate_transformation_mode,
            spatial_scale_f=spatial_scale,
            output_height_i=pooled_height,
            output_width_i=pooled_width,
            sampling_ratio_i=sampling_ratio,
        )
    @parse_args("v", "v", "f", "i", "i")
    def roi_pool(g, input, rois, spatial_scale, pooled_height, pooled_width):
        roi_pool = g.op(
            "MaxRoiPool", input, rois, pooled_shape_i=(pooled_height, pooled_width), spatial_scale_f=spatial_scale
        )
        # Second element mirrors the (output, argmax) tuple of the eager op; argmax is not exported.
        return roi_pool, None
    from torch.onnx import register_custom_op_symbolic
    register_custom_op_symbolic("torchvision::nms", symbolic_multi_label_nms, _onnx_opset_version_11)
    register_custom_op_symbolic("torchvision::roi_align", roi_align_opset11, _onnx_opset_version_11)
    register_custom_op_symbolic("torchvision::roi_align", roi_align_opset16, _onnx_opset_version_16)
    register_custom_op_symbolic("torchvision::roi_pool", roi_pool, _onnx_opset_version_11)
| bsd-3-clause | 54daf5d4984231ceae02d9c01b776ad4 | 43.698925 | 118 | 0.622564 | 3.24259 | false | false | false | false |
pytorch/vision | torchvision/models/detection/rpn.py | 1 | 15735 | from typing import Dict, List, Optional, Tuple
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torchvision.ops import boxes as box_ops, Conv2dNormActivation
from . import _utils as det_utils
# Import AnchorGenerator to keep compatibility.
from .anchor_utils import AnchorGenerator # noqa: 401
from .image_list import ImageList
class RPNHead(nn.Module):
    """
    Adds a simple RPN Head with classification and regression heads.

    Args:
        in_channels (int): number of channels of the input feature
        num_anchors (int): number of anchors to be predicted
        conv_depth (int, optional): number of 3x3 convolutions in the shared stem
    """

    _version = 2

    def __init__(self, in_channels: int, num_anchors: int, conv_depth=1) -> None:
        super().__init__()
        convs = []
        for _ in range(conv_depth):
            convs.append(Conv2dNormActivation(in_channels, in_channels, kernel_size=3, norm_layer=None))
        self.conv = nn.Sequential(*convs)
        # One objectness logit and four box-regression deltas per anchor, per spatial location.
        self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
        self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1, stride=1)
        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                torch.nn.init.normal_(layer.weight, std=0.01)  # type: ignore[arg-type]
                if layer.bias is not None:
                    torch.nn.init.constant_(layer.bias, 0)  # type: ignore[arg-type]

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        version = local_metadata.get("version", None)
        if version is None or version < 2:
            # Version 1 checkpoints stored the stem as a single conv layer; remap its
            # parameters to the nn.Sequential(Conv2dNormActivation) layout of version 2.
            for param_type in ["weight", "bias"]:  # renamed from `type`, which shadowed the builtin
                old_key = f"{prefix}conv.{param_type}"
                new_key = f"{prefix}conv.0.0.{param_type}"
                if old_key in state_dict:
                    state_dict[new_key] = state_dict.pop(old_key)
        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )

    def forward(self, x: List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:
        """Compute objectness logits and box deltas for each feature map in ``x``.

        Returns one tensor of each kind per feature level.
        """
        logits = []
        bbox_reg = []
        for feature in x:
            t = self.conv(feature)
            logits.append(self.cls_logits(t))
            bbox_reg.append(self.bbox_pred(t))
        return logits, bbox_reg
def permute_and_flatten(layer: Tensor, N: int, A: int, C: int, H: int, W: int) -> Tensor:
    """Reshape a (N, A*C, H, W) head output into (N, H*W*A, C).

    The spatial positions become the fastest-varying grouping so that predictions
    line up with the per-location target layout.
    """
    per_anchor = layer.view(N, -1, C, H, W).permute(0, 3, 4, 1, 2)
    return per_anchor.reshape(N, -1, C)
def concat_box_prediction_layers(box_cls: List[Tensor], box_regression: List[Tensor]) -> Tuple[Tensor, Tensor]:
    """Flatten per-level objectness and box-delta maps and concatenate them across levels.

    Each level is permuted so that predictions are ordered per spatial location,
    matching the layout in which the training targets are concatenated.
    """
    flattened_cls = []
    flattened_reg = []
    for cls_level, reg_level in zip(box_cls, box_regression):
        n, axc = cls_level.shape[0], cls_level.shape[1]
        h, w = cls_level.shape[2], cls_level.shape[3]
        # The regression map carries 4 deltas per anchor, which fixes A and hence C.
        a = reg_level.shape[1] // 4
        c = axc // a
        cls_flat = cls_level.view(n, a, c, h, w).permute(0, 3, 4, 1, 2).reshape(n, -1, c)
        flattened_cls.append(cls_flat)
        reg_flat = reg_level.view(n, a, 4, h, w).permute(0, 3, 4, 1, 2).reshape(n, -1, 4)
        flattened_reg.append(reg_flat)
    # Join along the anchor dimension first (all levels of one image stay contiguous),
    # then collapse batch and anchors, mirroring how the labels were generated.
    box_cls = torch.cat(flattened_cls, dim=1).flatten(0, -2)
    box_regression = torch.cat(flattened_reg, dim=1).reshape(-1, 4)
    return box_cls, box_regression
class RegionProposalNetwork(torch.nn.Module):
    """
    Implements Region Proposal Network (RPN).
    Args:
        anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
            maps.
        head (nn.Module): module that computes the objectness and regression deltas
        fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
            considered as positive during training of the RPN.
        bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
            considered as negative during training of the RPN.
        batch_size_per_image (int): number of anchors that are sampled during training of the RPN
            for computing the loss
        positive_fraction (float): proportion of positive anchors in a mini-batch during training
            of the RPN
        pre_nms_top_n (Dict[str, int]): number of proposals to keep before applying NMS. It should
            contain two fields: training and testing, to allow for different values depending
            on training or evaluation
        post_nms_top_n (Dict[str, int]): number of proposals to keep after applying NMS. It should
            contain two fields: training and testing, to allow for different values depending
            on training or evaluation
        nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
    """
    # Explicit attribute types for the non-Module members (needed by TorchScript).
    __annotations__ = {
        "box_coder": det_utils.BoxCoder,
        "proposal_matcher": det_utils.Matcher,
        "fg_bg_sampler": det_utils.BalancedPositiveNegativeSampler,
    }
    def __init__(
        self,
        anchor_generator: AnchorGenerator,
        head: nn.Module,
        # Faster-RCNN Training
        fg_iou_thresh: float,
        bg_iou_thresh: float,
        batch_size_per_image: int,
        positive_fraction: float,
        # Faster-RCNN Inference
        pre_nms_top_n: Dict[str, int],
        post_nms_top_n: Dict[str, int],
        nms_thresh: float,
        score_thresh: float = 0.0,
    ) -> None:
        super().__init__()
        self.anchor_generator = anchor_generator
        self.head = head
        self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
        # used during training
        self.box_similarity = box_ops.box_iou
        self.proposal_matcher = det_utils.Matcher(
            fg_iou_thresh,
            bg_iou_thresh,
            allow_low_quality_matches=True,
        )
        self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(batch_size_per_image, positive_fraction)
        # used during testing
        self._pre_nms_top_n = pre_nms_top_n
        self._post_nms_top_n = post_nms_top_n
        self.nms_thresh = nms_thresh
        self.score_thresh = score_thresh
        # Proposals with a side shorter than this are discarded in filter_proposals.
        self.min_size = 1e-3
    def pre_nms_top_n(self) -> int:
        # Number of proposals kept before NMS, depending on train/eval mode.
        if self.training:
            return self._pre_nms_top_n["training"]
        return self._pre_nms_top_n["testing"]
    def post_nms_top_n(self) -> int:
        # Number of proposals kept after NMS, depending on train/eval mode.
        if self.training:
            return self._post_nms_top_n["training"]
        return self._post_nms_top_n["testing"]
    def assign_targets_to_anchors(
        self, anchors: List[Tensor], targets: List[Dict[str, Tensor]]
    ) -> Tuple[List[Tensor], List[Tensor]]:
        """Label each anchor (1.0 = foreground, 0.0 = background, -1.0 = ignore) and
        pick the ground-truth box each anchor is matched to, per image."""
        labels = []
        matched_gt_boxes = []
        for anchors_per_image, targets_per_image in zip(anchors, targets):
            gt_boxes = targets_per_image["boxes"]
            if gt_boxes.numel() == 0:
                # Background image (negative example)
                device = anchors_per_image.device
                matched_gt_boxes_per_image = torch.zeros(anchors_per_image.shape, dtype=torch.float32, device=device)
                labels_per_image = torch.zeros((anchors_per_image.shape[0],), dtype=torch.float32, device=device)
            else:
                match_quality_matrix = self.box_similarity(gt_boxes, anchors_per_image)
                matched_idxs = self.proposal_matcher(match_quality_matrix)
                # get the targets corresponding GT for each proposal
                # NB: need to clamp the indices because we can have a single
                # GT in the image, and matched_idxs can be -2, which goes
                # out of bounds
                matched_gt_boxes_per_image = gt_boxes[matched_idxs.clamp(min=0)]
                labels_per_image = matched_idxs >= 0
                labels_per_image = labels_per_image.to(dtype=torch.float32)
                # Background (negative examples)
                bg_indices = matched_idxs == self.proposal_matcher.BELOW_LOW_THRESHOLD
                labels_per_image[bg_indices] = 0.0
                # discard indices that are between thresholds
                inds_to_discard = matched_idxs == self.proposal_matcher.BETWEEN_THRESHOLDS
                labels_per_image[inds_to_discard] = -1.0
            labels.append(labels_per_image)
            matched_gt_boxes.append(matched_gt_boxes_per_image)
        return labels, matched_gt_boxes
    def _get_top_n_idx(self, objectness: Tensor, num_anchors_per_level: List[int]) -> Tensor:
        """Select indices of the top-scoring anchors independently for each feature level,
        expressed over the concatenated-levels axis."""
        r = []
        offset = 0
        for ob in objectness.split(num_anchors_per_level, 1):
            num_anchors = ob.shape[1]
            pre_nms_top_n = det_utils._topk_min(ob, self.pre_nms_top_n(), 1)
            _, top_n_idx = ob.topk(pre_nms_top_n, dim=1)
            # `offset` turns per-level indices into indices over all levels concatenated.
            r.append(top_n_idx + offset)
            offset += num_anchors
        return torch.cat(r, dim=1)
    def filter_proposals(
        self,
        proposals: Tensor,
        objectness: Tensor,
        image_shapes: List[Tuple[int, int]],
        num_anchors_per_level: List[int],
    ) -> Tuple[List[Tensor], List[Tensor]]:
        """Clip, prune and NMS the decoded proposals, keeping at most
        post_nms_top_n boxes (with their scores) per image."""
        num_images = proposals.shape[0]
        device = proposals.device
        # do not backprop through objectness
        objectness = objectness.detach()
        objectness = objectness.reshape(num_images, -1)
        levels = [
            torch.full((n,), idx, dtype=torch.int64, device=device) for idx, n in enumerate(num_anchors_per_level)
        ]
        levels = torch.cat(levels, 0)
        levels = levels.reshape(1, -1).expand_as(objectness)
        # select top_n boxes independently per level before applying nms
        top_n_idx = self._get_top_n_idx(objectness, num_anchors_per_level)
        image_range = torch.arange(num_images, device=device)
        batch_idx = image_range[:, None]
        objectness = objectness[batch_idx, top_n_idx]
        levels = levels[batch_idx, top_n_idx]
        proposals = proposals[batch_idx, top_n_idx]
        objectness_prob = torch.sigmoid(objectness)
        final_boxes = []
        final_scores = []
        for boxes, scores, lvl, img_shape in zip(proposals, objectness_prob, levels, image_shapes):
            boxes = box_ops.clip_boxes_to_image(boxes, img_shape)
            # remove small boxes
            keep = box_ops.remove_small_boxes(boxes, self.min_size)
            boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]
            # remove low scoring boxes
            # use >= for Backwards compatibility
            keep = torch.where(scores >= self.score_thresh)[0]
            boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]
            # non-maximum suppression, independently done per level
            keep = box_ops.batched_nms(boxes, scores, lvl, self.nms_thresh)
            # keep only topk scoring predictions
            keep = keep[: self.post_nms_top_n()]
            boxes, scores = boxes[keep], scores[keep]
            final_boxes.append(boxes)
            final_scores.append(scores)
        return final_boxes, final_scores
    def compute_loss(
        self, objectness: Tensor, pred_bbox_deltas: Tensor, labels: List[Tensor], regression_targets: List[Tensor]
    ) -> Tuple[Tensor, Tensor]:
        """
        Args:
            objectness (Tensor)
            pred_bbox_deltas (Tensor)
            labels (List[Tensor])
            regression_targets (List[Tensor])
        Returns:
            objectness_loss (Tensor)
            box_loss (Tensor)
        """
        # Sample a balanced subset of positive/negative anchors; ignored anchors (-1) are skipped.
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
        sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]
        sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]
        sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
        objectness = objectness.flatten()
        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)
        # Box regression only counts positive anchors but is normalized by all sampled anchors.
        box_loss = F.smooth_l1_loss(
            pred_bbox_deltas[sampled_pos_inds],
            regression_targets[sampled_pos_inds],
            beta=1 / 9,
            reduction="sum",
        ) / (sampled_inds.numel())
        objectness_loss = F.binary_cross_entropy_with_logits(objectness[sampled_inds], labels[sampled_inds])
        return objectness_loss, box_loss
    def forward(
        self,
        images: ImageList,
        features: Dict[str, Tensor],
        targets: Optional[List[Dict[str, Tensor]]] = None,
    ) -> Tuple[List[Tensor], Dict[str, Tensor]]:
        """
        Args:
            images (ImageList): images for which we want to compute the predictions
            features (Dict[str, Tensor]): features computed from the images that are
                used for computing the predictions. Each tensor in the list
                correspond to different feature levels
            targets (List[Dict[str, Tensor]]): ground-truth boxes present in the image (optional).
                If provided, each element in the dict should contain a field `boxes`,
                with the locations of the ground-truth boxes.
        Returns:
            boxes (List[Tensor]): the predicted boxes from the RPN, one Tensor per
                image.
            losses (Dict[str, Tensor]): the losses for the model during training. During
                testing, it is an empty dict.
        """
        # RPN uses all feature maps that are available
        features = list(features.values())
        objectness, pred_bbox_deltas = self.head(features)
        anchors = self.anchor_generator(images, features)
        num_images = len(anchors)
        num_anchors_per_level_shape_tensors = [o[0].shape for o in objectness]
        num_anchors_per_level = [s[0] * s[1] * s[2] for s in num_anchors_per_level_shape_tensors]
        objectness, pred_bbox_deltas = concat_box_prediction_layers(objectness, pred_bbox_deltas)
        # apply pred_bbox_deltas to anchors to obtain the decoded proposals
        # note that we detach the deltas because Faster R-CNN do not backprop through
        # the proposals
        proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)
        proposals = proposals.view(num_images, -1, 4)
        boxes, scores = self.filter_proposals(proposals, objectness, images.image_sizes, num_anchors_per_level)
        losses = {}
        if self.training:
            if targets is None:
                raise ValueError("targets should not be None")
            labels, matched_gt_boxes = self.assign_targets_to_anchors(anchors, targets)
            regression_targets = self.box_coder.encode(matched_gt_boxes, anchors)
            loss_objectness, loss_rpn_box_reg = self.compute_loss(
                objectness, pred_bbox_deltas, labels, regression_targets
            )
            losses = {
                "loss_objectness": loss_objectness,
                "loss_rpn_box_reg": loss_rpn_box_reg,
            }
        return boxes, losses
| bsd-3-clause | 8852d38f148c15f92933f506caff50ac | 39.658915 | 117 | 0.607372 | 3.762554 | false | false | false | false |
pytorch/vision | torchvision/models/detection/anchor_utils.py | 1 | 11858 | import math
from typing import List, Optional
import torch
from torch import nn, Tensor
from .image_list import ImageList
class AnchorGenerator(nn.Module):
    """
    Generates anchor boxes for a set of feature maps and image sizes.

    Anchors are produced at every spatial location of every feature map, for all
    combinations of the sizes and aspect ratios configured for that map. This
    module assumes aspect ratio = height / width for each anchor.

    ``sizes`` and ``aspect_ratios`` must have one entry per feature map;
    ``sizes[i]`` and ``aspect_ratios[i]`` may each contain an arbitrary number
    of values, yielding ``len(sizes[i]) * len(aspect_ratios[i])`` anchors per
    spatial location of feature map ``i``.

    Args:
        sizes (Tuple[Tuple[int]]):
        aspect_ratios (Tuple[Tuple[float]]):
    """

    __annotations__ = {
        "cell_anchors": List[torch.Tensor],
    }

    def __init__(
        self,
        sizes=((128, 256, 512),),
        aspect_ratios=((0.5, 1.0, 2.0),),
    ):
        super().__init__()
        # Normalize flat inputs into the nested tuple-of-tuples layout.
        if not isinstance(sizes[0], (list, tuple)):
            # TODO change this
            sizes = tuple((s,) for s in sizes)
        if not isinstance(aspect_ratios[0], (list, tuple)):
            aspect_ratios = (aspect_ratios,) * len(sizes)
        self.sizes = sizes
        self.aspect_ratios = aspect_ratios
        self.cell_anchors = [self.generate_anchors(s, a) for s, a in zip(sizes, aspect_ratios)]

    # TODO: https://github.com/pytorch/pytorch/issues/26792
    def generate_anchors(
        self,
        scales: List[int],
        aspect_ratios: List[float],
        dtype: torch.dtype = torch.float32,
        device: torch.device = torch.device("cpu"),
    ) -> Tensor:
        """Return one zero-centered (x1, y1, x2, y2) anchor per (scale, aspect_ratio) pair."""
        scale_t = torch.as_tensor(scales, dtype=dtype, device=device)
        ratio_t = torch.as_tensor(aspect_ratios, dtype=dtype, device=device)
        # ratio = h / w, so h scales by sqrt(ratio) and w by 1 / sqrt(ratio).
        h_ratios = torch.sqrt(ratio_t)
        w_ratios = 1 / h_ratios
        ws = (w_ratios[:, None] * scale_t[None, :]).view(-1)
        hs = (h_ratios[:, None] * scale_t[None, :]).view(-1)
        half_extents = torch.stack([-ws, -hs, ws, hs], dim=1) / 2
        return half_extents.round()

    def set_cell_anchors(self, dtype: torch.dtype, device: torch.device):
        """Move the cached base anchors to the given dtype and device."""
        self.cell_anchors = [anchor.to(dtype=dtype, device=device) for anchor in self.cell_anchors]

    def num_anchors_per_location(self) -> List[int]:
        """Number of anchors emitted per spatial location, for each feature map."""
        return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)]

    def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]:
        """Tile the zero-centered base anchors over every cell of each feature-map grid."""
        anchors = []
        cell_anchors = self.cell_anchors
        torch._assert(cell_anchors is not None, "cell_anchors should not be None")
        torch._assert(
            len(grid_sizes) == len(strides) == len(cell_anchors),
            "Anchors should be Tuple[Tuple[int]] because each feature "
            "map could potentially have different sizes and aspect ratios. "
            "There needs to be a match between the number of "
            "feature maps passed and the number of sizes / aspect ratios specified.",
        )
        for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
            grid_height, grid_width = size
            stride_height, stride_width = stride
            device = base_anchors.device
            # Anchor centers along each axis, in input-image coordinates.
            xs = torch.arange(0, grid_width, dtype=torch.int32, device=device) * stride_width
            ys = torch.arange(0, grid_height, dtype=torch.int32, device=device) * stride_height
            grid_y, grid_x = torch.meshgrid(ys, xs, indexing="ij")
            flat_x = grid_x.reshape(-1)
            flat_y = grid_y.reshape(-1)
            shifts = torch.stack((flat_x, flat_y, flat_x, flat_y), dim=1)
            # Offset every zero-centered base anchor by every cell center.
            anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))
        return anchors

    def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]:
        """Return, for each image in the batch, all anchors over all feature maps concatenated."""
        grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
        image_size = image_list.tensors.shape[-2:]
        dtype, device = feature_maps[0].dtype, feature_maps[0].device
        strides = []
        for g in grid_sizes:
            # Integer stride between anchor centers on this feature map, kept as 0-dim tensors.
            stride_y = torch.empty((), dtype=torch.int64, device=device).fill_(image_size[0] // g[0])
            stride_x = torch.empty((), dtype=torch.int64, device=device).fill_(image_size[1] // g[1])
            strides.append([stride_y, stride_x])
        self.set_cell_anchors(dtype, device)
        anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides)
        anchors: List[List[torch.Tensor]] = []
        for _ in range(len(image_list.image_sizes)):
            # The same anchors apply to every image in the batch.
            anchors.append([per_level for per_level in anchors_over_all_feature_maps])
        return [torch.cat(anchors_per_image) for anchors_per_image in anchors]
class DefaultBoxGenerator(nn.Module):
    # NOTE: the docstring is now a raw string; previously `\text` contained the `\t`
    # escape and rendered with a literal tab, corrupting the :math: expressions.
    r"""
    This module generates the default boxes of SSD for a set of feature maps and image sizes.

    Args:
        aspect_ratios (List[List[int]]): A list with all the aspect ratios used in each feature map.
        min_ratio (float): The minimum scale :math:`\text{s}_{\text{min}}` of the default boxes used in the estimation
            of the scales of each feature map. It is used only if the ``scales`` parameter is not provided.
        max_ratio (float): The maximum scale :math:`\text{s}_{\text{max}}` of the default boxes used in the estimation
            of the scales of each feature map. It is used only if the ``scales`` parameter is not provided.
        scales (List[float], optional): The scales of the default boxes. If not provided it will be estimated using
            the ``min_ratio`` and ``max_ratio`` parameters.
        steps (List[int], optional): It's a hyper-parameter that affects the tiling of default boxes. If not provided
            it will be estimated from the data.
        clip (bool): Whether the standardized values of default boxes should be clipped between 0 and 1. The clipping
            is applied while the boxes are encoded in format ``(cx, cy, w, h)``.
    """

    def __init__(
        self,
        aspect_ratios: List[List[int]],
        min_ratio: float = 0.15,
        max_ratio: float = 0.9,
        scales: Optional[List[float]] = None,
        steps: Optional[List[int]] = None,
        clip: bool = True,
    ):
        super().__init__()
        if steps is not None and len(aspect_ratios) != len(steps):
            raise ValueError("aspect_ratios and steps should have the same length")
        self.aspect_ratios = aspect_ratios
        self.steps = steps
        self.clip = clip
        num_outputs = len(aspect_ratios)
        # Estimation of default boxes scales
        if scales is None:
            if num_outputs > 1:
                # Linearly interpolate num_outputs scales between min_ratio and max_ratio.
                range_ratio = max_ratio - min_ratio
                self.scales = [min_ratio + range_ratio * k / (num_outputs - 1.0) for k in range(num_outputs)]
                self.scales.append(1.0)
            else:
                self.scales = [min_ratio, max_ratio]
        else:
            self.scales = scales
        self._wh_pairs = self._generate_wh_pairs(num_outputs)

    def _generate_wh_pairs(
        self, num_outputs: int, dtype: torch.dtype = torch.float32, device: torch.device = torch.device("cpu")
    ) -> List[Tensor]:
        """Precompute the (width, height) pairs of the default boxes for each feature map."""
        _wh_pairs: List[Tensor] = []
        for k in range(num_outputs):
            # Adding the 2 default width-height pairs for aspect ratio 1 and scale s'k
            s_k = self.scales[k]
            s_prime_k = math.sqrt(self.scales[k] * self.scales[k + 1])
            wh_pairs = [[s_k, s_k], [s_prime_k, s_prime_k]]
            # Adding 2 pairs for each aspect ratio of the feature map k
            for ar in self.aspect_ratios[k]:
                sq_ar = math.sqrt(ar)
                w = self.scales[k] * sq_ar
                h = self.scales[k] / sq_ar
                wh_pairs.extend([[w, h], [h, w]])
            _wh_pairs.append(torch.as_tensor(wh_pairs, dtype=dtype, device=device))
        return _wh_pairs

    def num_anchors_per_location(self) -> List[int]:
        # Estimate num of anchors based on aspect ratios: 2 default boxes + 2 * ratios of feature map.
        return [2 + 2 * len(r) for r in self.aspect_ratios]

    # Default Boxes calculation based on page 6 of SSD paper
    def _grid_default_boxes(
        self, grid_sizes: List[List[int]], image_size: List[int], dtype: torch.dtype = torch.float32
    ) -> Tensor:
        """Tile the precomputed (w, h) pairs over every cell of each grid, in (cx, cy, w, h) format."""
        default_boxes = []
        for k, f_k in enumerate(grid_sizes):
            # Now add the default boxes for each width-height pair
            if self.steps is not None:
                x_f_k = image_size[1] / self.steps[k]
                y_f_k = image_size[0] / self.steps[k]
            else:
                y_f_k, x_f_k = f_k
            shifts_x = ((torch.arange(0, f_k[1]) + 0.5) / x_f_k).to(dtype=dtype)
            shifts_y = ((torch.arange(0, f_k[0]) + 0.5) / y_f_k).to(dtype=dtype)
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij")
            shift_x = shift_x.reshape(-1)
            shift_y = shift_y.reshape(-1)
            shifts = torch.stack((shift_x, shift_y) * len(self._wh_pairs[k]), dim=-1).reshape(-1, 2)
            # Clipping the default boxes while the boxes are encoded in format (cx, cy, w, h)
            _wh_pair = self._wh_pairs[k].clamp(min=0, max=1) if self.clip else self._wh_pairs[k]
            wh_pairs = _wh_pair.repeat((f_k[0] * f_k[1]), 1)
            default_box = torch.cat((shifts, wh_pairs), dim=1)
            default_boxes.append(default_box)
        return torch.cat(default_boxes, dim=0)

    def __repr__(self) -> str:
        s = (
            f"{self.__class__.__name__}("
            f"aspect_ratios={self.aspect_ratios}"
            f", clip={self.clip}"
            f", scales={self.scales}"
            f", steps={self.steps}"
            ")"
        )
        return s

    def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]:
        """Return, per image, the default boxes in absolute (x1, y1, x2, y2) coordinates."""
        grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
        image_size = image_list.tensors.shape[-2:]
        dtype, device = feature_maps[0].dtype, feature_maps[0].device
        default_boxes = self._grid_default_boxes(grid_sizes, image_size, dtype=dtype)
        default_boxes = default_boxes.to(device)
        dboxes = []
        x_y_size = torch.tensor([image_size[1], image_size[0]], device=default_boxes.device)
        for _ in image_list.image_sizes:
            dboxes_in_image = default_boxes
            # Convert (cx, cy, w, h) in relative units to (x1, y1, x2, y2) in pixels.
            dboxes_in_image = torch.cat(
                [
                    (dboxes_in_image[:, :2] - 0.5 * dboxes_in_image[:, 2:]) * x_y_size,
                    (dboxes_in_image[:, :2] + 0.5 * dboxes_in_image[:, 2:]) * x_y_size,
                ],
                -1,
            )
            dboxes.append(dboxes_in_image)
        return dboxes
| bsd-3-clause | f2566e71a5835594fbd9cab6400222d6 | 43.246269 | 119 | 0.590993 | 3.598786 | false | false | false | false |
pytorch/vision | test/test_prototype_datasets_utils.py | 1 | 10326 | import gzip
import pathlib
import sys
import numpy as np
import pytest
import torch
from datasets_utils import make_fake_flo_file, make_tar
from torchdata.datapipes.iter import FileOpener, TarArchiveLoader
from torchvision.datasets._optical_flow import _read_flo as read_flo_ref
from torchvision.datasets.utils import _decompress
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import fromfile, read_flo
@pytest.mark.filterwarnings("error:The given NumPy array is not writeable:UserWarning")
@pytest.mark.parametrize(
    ("np_dtype", "torch_dtype", "byte_order"),
    [
        (">f4", torch.float32, "big"),
        ("<f8", torch.float64, "little"),
        ("<i4", torch.int32, "little"),
        (">i8", torch.int64, "big"),
        ("|u1", torch.uint8, sys.byteorder),
    ],
)
@pytest.mark.parametrize("count", (-1, 2))
@pytest.mark.parametrize("mode", ("rb", "r+b"))
def test_fromfile(tmpdir, np_dtype, torch_dtype, byte_order, count, mode):
    # Write a small binary blob with a known dtype/byte order and check that
    # fromfile() decodes it exactly like np.fromfile does.
    data_file = tmpdir / "data.bin"
    num_elements = 5 if count == -1 else count + 1
    np.random.RandomState(0).randn(num_elements).astype(np_dtype).tofile(data_file)
    for read_count in (-1, count // 2):
        reference = np.fromfile(data_file, dtype=np_dtype, count=read_count).astype(np_dtype[1:])
        expected = torch.from_numpy(reference)
        with open(data_file, mode) as fh:
            actual = fromfile(fh, dtype=torch_dtype, byte_order=byte_order, count=read_count)
        torch.testing.assert_close(actual, expected)
def test_read_flo(tmpdir):
    # read_flo() should agree with the reference numpy implementation.
    flo_path = tmpdir / "test.flo"
    make_fake_flo_file(3, 4, flo_path)
    with open(flo_path, "rb") as fh:
        decoded = read_flo(fh)
    reference = torch.from_numpy(read_flo_ref(flo_path).astype("f4", copy=False))
    torch.testing.assert_close(decoded, reference)
class TestOnlineResource:
class DummyResource(OnlineResource):
def __init__(self, download_fn=None, **kwargs):
super().__init__(**kwargs)
self._download_fn = download_fn
def _download(self, root):
if self._download_fn is None:
raise pytest.UsageError(
"`_download()` was called, but `DummyResource(...)` was constructed without `download_fn`."
)
return self._download_fn(self, root)
def _make_file(self, root, *, content, name="file.txt"):
file = root / name
with open(file, "w") as fh:
fh.write(content)
return file
def _make_folder(self, root, *, name="folder"):
folder = root / name
subfolder = folder / "subfolder"
subfolder.mkdir(parents=True)
files = {}
for idx, root in enumerate([folder, folder, subfolder]):
content = f"sentinel{idx}"
file = self._make_file(root, name=f"file{idx}.txt", content=content)
files[str(file)] = content
return folder, files
def _make_tar(self, root, *, name="archive.tar", remove=True):
folder, files = self._make_folder(root, name=name.split(".")[0])
archive = make_tar(root, name, folder, remove=remove)
files = {str(archive / pathlib.Path(file).relative_to(root)): content for file, content in files.items()}
return archive, files
def test_load_file(self, tmp_path):
content = "sentinel"
file = self._make_file(tmp_path, content=content)
resource = self.DummyResource(file_name=file.name)
dp = resource.load(tmp_path)
assert isinstance(dp, FileOpener)
data = list(dp)
assert len(data) == 1
path, buffer = data[0]
assert path == str(file)
assert buffer.read().decode() == content
def test_load_folder(self, tmp_path):
folder, files = self._make_folder(tmp_path)
resource = self.DummyResource(file_name=folder.name)
dp = resource.load(tmp_path)
assert isinstance(dp, FileOpener)
assert {path: buffer.read().decode() for path, buffer in dp} == files
def test_load_archive(self, tmp_path):
archive, files = self._make_tar(tmp_path)
resource = self.DummyResource(file_name=archive.name)
dp = resource.load(tmp_path)
assert isinstance(dp, TarArchiveLoader)
assert {path: buffer.read().decode() for path, buffer in dp} == files
def test_priority_decompressed_gt_raw(self, tmp_path):
# We don't need to actually compress here. Adding the suffix is sufficient
self._make_file(tmp_path, content="raw_sentinel", name="file.txt.gz")
file = self._make_file(tmp_path, content="decompressed_sentinel", name="file.txt")
resource = self.DummyResource(file_name=file.name)
dp = resource.load(tmp_path)
path, buffer = next(iter(dp))
assert path == str(file)
assert buffer.read().decode() == "decompressed_sentinel"
def test_priority_extracted_gt_decompressed(self, tmp_path):
archive, _ = self._make_tar(tmp_path, remove=False)
resource = self.DummyResource(file_name=archive.name)
dp = resource.load(tmp_path)
# If the archive had been selected, this would be a `TarArchiveReader`
assert isinstance(dp, FileOpener)
def test_download(self, tmp_path):
download_fn_was_called = False
def download_fn(resource, root):
nonlocal download_fn_was_called
download_fn_was_called = True
return self._make_file(root, content="_", name=resource.file_name)
resource = self.DummyResource(
file_name="file.txt",
download_fn=download_fn,
)
resource.load(tmp_path)
assert download_fn_was_called, "`download_fn()` was never called"
    # This tests the `"decompress"` literal as well as a custom callable
    @pytest.mark.parametrize(
        "preprocess",
        [
            "decompress",
            lambda path: _decompress(str(path), remove_finished=True),
        ],
    )
    def test_preprocess_decompress(self, tmp_path, preprocess):
        """A decompressing preprocess should run after download and strip the `.gz` suffix."""
        file_name = "file.txt.gz"
        content = "sentinel"
        def download_fn(resource, root):
            file = root / resource.file_name
            with gzip.open(file, "wb") as fh:
                fh.write(content.encode())
            return file
        resource = self.DummyResource(file_name=file_name, preprocess=preprocess, download_fn=download_fn)
        dp = resource.load(tmp_path)
        data = list(dp)
        assert len(data) == 1
        path, buffer = data[0]
        # The yielded path must point to the decompressed file, i.e. without ".gz".
        assert path == str(tmp_path / file_name).replace(".gz", "")
        assert buffer.read().decode() == content
    def test_preprocess_extract(self, tmp_path):
        """The `"extract"` preprocess should unpack a downloaded archive into a folder."""
        files = None
        def download_fn(resource, root):
            nonlocal files
            archive, files = self._make_tar(root, name=resource.file_name)
            return archive
        resource = self.DummyResource(file_name="folder.tar", preprocess="extract", download_fn=download_fn)
        dp = resource.load(tmp_path)
        assert files is not None, "`download_fn()` was never called"
        assert isinstance(dp, FileOpener)
        actual = {path: buffer.read().decode() for path, buffer in dp}
        # Extraction replaces the archive name ("folder.tar") in each path with
        # its stem ("folder").
        expected = {
            path.replace(resource.file_name, resource.file_name.split(".")[0]): content
            for path, content in files.items()
        }
        assert actual == expected
    def test_preprocess_only_after_download(self, tmp_path):
        """`preprocess` must only run after a download, never on an already-present file."""
        file = self._make_file(tmp_path, content="_")
        def preprocess(path):
            raise AssertionError("`preprocess` was called although the file was already present.")
        resource = self.DummyResource(
            file_name=file.name,
            preprocess=preprocess,
        )
        # Must not raise: the file exists, so `preprocess` is skipped entirely.
        resource.load(tmp_path)
class TestHttpResource:
    """Tests for `HttpResource.resolve()` redirect handling."""
    def test_resolve_to_http(self, mocker):
        """A redirect to another plain HTTP(S) URL should resolve to a new `HttpResource`
        that keeps the file name, checksum, and preprocess callable of the original."""
        file_name = "data.tar"
        original_url = f"http://downloads.pytorch.org/{file_name}"
        redirected_url = original_url.replace("http", "https")
        sha256_sentinel = "sha256_sentinel"
        def preprocess_sentinel(path):
            return path
        original_resource = HttpResource(
            original_url,
            sha256=sha256_sentinel,
            preprocess=preprocess_sentinel,
        )
        mocker.patch("torchvision.prototype.datasets.utils._resource._get_redirect_url", return_value=redirected_url)
        redirected_resource = original_resource.resolve()
        assert isinstance(redirected_resource, HttpResource)
        assert redirected_resource.url == redirected_url
        assert redirected_resource.file_name == file_name
        assert redirected_resource.sha256 == sha256_sentinel
        # Identity check: the very same callable must be carried over.
        assert redirected_resource._preprocess is preprocess_sentinel
    def test_resolve_to_gdrive(self, mocker):
        """A redirect to a Google Drive URL should resolve to a `GDriveResource`
        with the file id parsed out of the URL."""
        file_name = "data.tar"
        original_url = f"http://downloads.pytorch.org/{file_name}"
        id_sentinel = "id-sentinel"
        redirected_url = f"https://drive.google.com/file/d/{id_sentinel}/view"
        sha256_sentinel = "sha256_sentinel"
        def preprocess_sentinel(path):
            return path
        original_resource = HttpResource(
            original_url,
            sha256=sha256_sentinel,
            preprocess=preprocess_sentinel,
        )
        mocker.patch("torchvision.prototype.datasets.utils._resource._get_redirect_url", return_value=redirected_url)
        redirected_resource = original_resource.resolve()
        assert isinstance(redirected_resource, GDriveResource)
        assert redirected_resource.id == id_sentinel
        assert redirected_resource.file_name == file_name
        assert redirected_resource.sha256 == sha256_sentinel
        assert redirected_resource._preprocess is preprocess_sentinel
def test_missing_dependency_error():
    """Instantiating a dataset whose declared third-party dependency is not
    installed should raise a `ModuleNotFoundError` naming that dependency."""
    class DummyDataset(Dataset):
        def __init__(self):
            super().__init__(root="root", dependencies=("fake_dependency",))
        def _resources(self):
            pass
        def _datapipe(self, resource_dps):
            pass
        def __len__(self):
            pass
    with pytest.raises(ModuleNotFoundError, match="depends on the third-party package 'fake_dependency'"):
        DummyDataset()
| bsd-3-clause | 3c6cc8c5de959e13b992b269a79a165d | 33.192053 | 117 | 0.624056 | 3.932216 | false | true | false | false |
pytorch/vision | torchvision/models/quantization/resnet.py | 1 | 18400 | from functools import partial
from typing import Any, List, Optional, Type, Union
import torch
import torch.nn as nn
from torch import Tensor
from torchvision.models.resnet import (
BasicBlock,
Bottleneck,
ResNet,
ResNet18_Weights,
ResNet50_Weights,
ResNeXt101_32X8D_Weights,
ResNeXt101_64X4D_Weights,
)
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model
# Public interface of this module: the quantizable model class, the quantized
# weight enums, and the model builder functions.
__all__ = [
    "QuantizableResNet",
    "ResNet18_QuantizedWeights",
    "ResNet50_QuantizedWeights",
    "ResNeXt101_32X8D_QuantizedWeights",
    "ResNeXt101_64X4D_QuantizedWeights",
    "resnet18",
    "resnet50",
    "resnext101_32x8d",
    "resnext101_64x4d",
]
class QuantizableBasicBlock(BasicBlock):
    """`BasicBlock` variant whose residual add + ReLU goes through
    `FloatFunctional`, making the op observable for eager-mode quantization."""
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # FloatFunctional replaces the plain `+` so observers can be attached.
        self.add_relu = torch.nn.quantized.FloatFunctional()
    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        # Fused residual addition + ReLU instead of `out += identity; relu(out)`.
        out = self.add_relu.add_relu(out, identity)
        return out
    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        """Fuse conv+bn(+relu) submodules in place to prepare for quantization."""
        _fuse_modules(self, [["conv1", "bn1", "relu"], ["conv2", "bn2"]], is_qat, inplace=True)
        if self.downsample:
            _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)
class QuantizableBottleneck(Bottleneck):
    """`Bottleneck` variant prepared for eager-mode quantization: the skip
    connection uses `FloatFunctional`, and the shared ReLU is split into two
    non-inplace instances so each activation can be observed separately."""
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.skip_add_relu = nn.quantized.FloatFunctional()
        # Non-inplace so quantization observers see distinct activations.
        self.relu1 = nn.ReLU(inplace=False)
        self.relu2 = nn.ReLU(inplace=False)
    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        # Fused residual addition + ReLU on the skip connection.
        out = self.skip_add_relu.add_relu(out, identity)
        return out
    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        """Fuse conv+bn(+relu) triplets in place to prepare for quantization."""
        _fuse_modules(
            self, [["conv1", "bn1", "relu1"], ["conv2", "bn2", "relu2"], ["conv3", "bn3"]], is_qat, inplace=True
        )
        if self.downsample:
            _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)
class QuantizableResNet(ResNet):
    """`ResNet` wrapped with quant/dequant stubs so the whole forward pass can
    run in the quantized domain."""
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()
    def forward(self, x: Tensor) -> Tensor:
        # Quantize the input, run the float-defined network, dequantize the output.
        x = self.quant(x)
        # Ensure scriptability
        # super(QuantizableResNet,self).forward(x)
        # is not scriptable
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x
    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in resnet models
        Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization.
        Model is modified in place. Note that this operation does not change numerics
        and the model after modification is in floating point
        """
        _fuse_modules(self, ["conv1", "bn1", "relu"], is_qat, inplace=True)
        # Exact type check (not isinstance) so only our quantizable blocks are fused.
        for m in self.modules():
            if type(m) is QuantizableBottleneck or type(m) is QuantizableBasicBlock:
                m.fuse_model(is_qat)
def _resnet(
    block: Type[Union[QuantizableBasicBlock, QuantizableBottleneck]],
    layers: List[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    quantize: bool,
    **kwargs: Any,
) -> QuantizableResNet:
    """Shared builder for the quantizable ResNet/ResNeXt variants.

    Constructs a ``QuantizableResNet`` from ``block``/``layers``, optionally
    quantizes it, and optionally loads pretrained ``weights``. The backend is
    taken from the weights metadata (or ``kwargs``), defaulting to "fbgemm".
    """
    has_weights = weights is not None
    if has_weights:
        meta = weights.meta
        # Pretrained weights dictate the classifier size and, if present, the backend.
        _ovewrite_named_param(kwargs, "num_classes", len(meta["categories"]))
        if "backend" in meta:
            _ovewrite_named_param(kwargs, "backend", meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")
    model = QuantizableResNet(block, layers, **kwargs)
    # ReLUs must be made non-inplace before (optional) quantization.
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)
    # State dict is loaded last so it matches the (possibly quantized) model.
    if has_weights:
        model.load_state_dict(weights.get_state_dict(progress=progress))
    return model
_COMMON_META = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"backend": "fbgemm",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
"_docs": """
These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
weights listed below.
""",
}
class ResNet18_QuantizedWeights(WeightsEnum):
    """Post-training quantized (fbgemm) weights for ResNet-18."""
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 11689512,
            "unquantized": ResNet18_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 69.494,
                    "acc@5": 88.882,
                }
            },
            "_ops": 1.814,
            "_weight_size": 11.238,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
class ResNet50_QuantizedWeights(WeightsEnum):
    """Post-training quantized (fbgemm) weights for ResNet-50."""
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "unquantized": ResNet50_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 75.920,
                    "acc@5": 92.814,
                }
            },
            "_ops": 4.089,
            "_weight_size": 24.759,
        },
    )
    IMAGENET1K_FBGEMM_V2 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "unquantized": ResNet50_Weights.IMAGENET1K_V2,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.282,
                    "acc@5": 94.976,
                }
            },
            "_ops": 4.089,
            "_weight_size": 24.953,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V2
class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
    """Post-training quantized (fbgemm) weights for ResNeXt-101 32x8d."""
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.986,
                    "acc@5": 94.480,
                }
            },
            "_ops": 16.414,
            "_weight_size": 86.034,
        },
    )
    IMAGENET1K_FBGEMM_V2 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.574,
                    "acc@5": 96.132,
                }
            },
            "_ops": 16.414,
            "_weight_size": 86.645,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V2
class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum):
    """Post-training quantized (fbgemm) weights for ResNeXt-101 64x4d."""
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_64x4d_fbgemm-605a1cb3.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 83455272,
            "recipe": "https://github.com/pytorch/vision/pull/5935",
            "unquantized": ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.898,
                    "acc@5": 96.326,
                }
            },
            "_ops": 15.46,
            "_weight_size": 81.556,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
@register_model(name="quantized_resnet18")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ResNet18_Weights.IMAGENET1K_V1,
)
)
def resnet18(
*,
weights: Optional[Union[ResNet18_QuantizedWeights, ResNet18_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
"""ResNet-18 model from
`Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` or :class:`~torchvision.models.ResNet18_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool, optional): If True, return a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ResNet18_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ResNet18_Weights
:members:
:noindex:
"""
weights = (ResNet18_QuantizedWeights if quantize else ResNet18_Weights).verify(weights)
return _resnet(QuantizableBasicBlock, [2, 2, 2, 2], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnet50")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ResNet50_Weights.IMAGENET1K_V1,
)
)
def resnet50(
*,
weights: Optional[Union[ResNet50_QuantizedWeights, ResNet50_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
"""ResNet-50 model from
`Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` or :class:`~torchvision.models.ResNet50_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool, optional): If True, return a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ResNet50_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ResNet50_Weights
:members:
:noindex:
"""
weights = (ResNet50_QuantizedWeights if quantize else ResNet50_Weights).verify(weights)
return _resnet(QuantizableBottleneck, [3, 4, 6, 3], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnext101_32x8d")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
)
)
def resnext101_32x8d(
*,
weights: Optional[Union[ResNeXt101_32X8D_QuantizedWeights, ResNeXt101_32X8D_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
"""ResNeXt-101 32x8d model from
`Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.ResNet101_32X8D_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool, optional): If True, return a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
:members:
:noindex:
"""
weights = (ResNeXt101_32X8D_QuantizedWeights if quantize else ResNeXt101_32X8D_Weights).verify(weights)
_ovewrite_named_param(kwargs, "groups", 32)
_ovewrite_named_param(kwargs, "width_per_group", 8)
return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnext101_64x4d")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: ResNeXt101_64X4D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
)
)
def resnext101_64x4d(
*,
weights: Optional[Union[ResNeXt101_64X4D_QuantizedWeights, ResNeXt101_64X4D_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableResNet:
"""ResNeXt-101 64x4d model from
`Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.ResNet101_64X4D_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool, optional): If True, return a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights
:members:
.. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
:members:
:noindex:
"""
weights = (ResNeXt101_64X4D_QuantizedWeights if quantize else ResNeXt101_64X4D_Weights).verify(weights)
_ovewrite_named_param(kwargs, "groups", 64)
_ovewrite_named_param(kwargs, "width_per_group", 4)
return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
from ..resnet import model_urls  # noqa: F401
# Legacy name -> URL mapping kept only for backward compatibility.
quant_model_urls = _ModelURLs(
    {
        "resnet18_fbgemm": ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
        "resnet50_fbgemm": ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
        "resnext101_32x8d_fbgemm": ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
    }
)
| bsd-3-clause | dd195079e21ac887c1721a1fcc8917f6 | 35.947791 | 165 | 0.629022 | 3.389206 | false | false | false | false |
pytorch/vision | torchvision/prototype/features/_mask.py | 1 | 4657 | from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import torch
from torchvision.transforms import InterpolationMode
from ._feature import _Feature, FillTypeJIT
class Mask(_Feature):
    """Tensor subclass for segmentation masks.

    Each geometric method delegates to the corresponding ``*_mask`` kernel on
    ``self._F`` and re-wraps the plain tensor result as a ``Mask``.
    Color/interpolation-related parameters (``interpolation``, ``antialias``)
    are accepted for API symmetry with the other features but are not
    forwarded to the mask kernels.
    """
    @classmethod
    def _wrap(cls, tensor: torch.Tensor) -> Mask:
        # Re-view a plain tensor as a Mask without copying.
        return tensor.as_subclass(cls)
    def __new__(
        cls,
        data: Any,
        *,
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[torch.device, str, int]] = None,
        requires_grad: bool = False,
    ) -> Mask:
        tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        return cls._wrap(tensor)
    @classmethod
    def wrap_like(
        cls,
        other: Mask,
        tensor: torch.Tensor,
    ) -> Mask:
        # Masks carry no extra metadata, so `other` is unused here; the
        # parameter exists for interface parity with the other features.
        return cls._wrap(tensor)
    @property
    def spatial_size(self) -> Tuple[int, int]:
        # (height, width) taken from the two trailing dimensions.
        return tuple(self.shape[-2:])  # type: ignore[return-value]
    def horizontal_flip(self) -> Mask:
        output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
        return Mask.wrap_like(self, output)
    def vertical_flip(self) -> Mask:
        output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
        return Mask.wrap_like(self, output)
    def resize(  # type: ignore[override]
        self,
        size: List[int],
        interpolation: InterpolationMode = InterpolationMode.NEAREST,
        max_size: Optional[int] = None,
        antialias: Optional[bool] = None,
    ) -> Mask:
        # `interpolation` and `antialias` are not forwarded: masks are resized
        # by the kernel's own (nearest-style) logic.
        output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
        return Mask.wrap_like(self, output)
    def crop(self, top: int, left: int, height: int, width: int) -> Mask:
        output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
        return Mask.wrap_like(self, output)
    def center_crop(self, output_size: List[int]) -> Mask:
        output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
        return Mask.wrap_like(self, output)
    def resized_crop(
        self,
        top: int,
        left: int,
        height: int,
        width: int,
        size: List[int],
        interpolation: InterpolationMode = InterpolationMode.NEAREST,
        antialias: Optional[bool] = None,
    ) -> Mask:
        output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
        return Mask.wrap_like(self, output)
    def pad(
        self,
        padding: Union[int, List[int]],
        fill: FillTypeJIT = None,
        padding_mode: str = "constant",
    ) -> Mask:
        output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
        return Mask.wrap_like(self, output)
    def rotate(
        self,
        angle: float,
        interpolation: InterpolationMode = InterpolationMode.NEAREST,
        expand: bool = False,
        center: Optional[List[float]] = None,
        fill: FillTypeJIT = None,
    ) -> Mask:
        output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
        return Mask.wrap_like(self, output)
    def affine(
        self,
        angle: Union[int, float],
        translate: List[float],
        scale: float,
        shear: List[float],
        interpolation: InterpolationMode = InterpolationMode.NEAREST,
        fill: FillTypeJIT = None,
        center: Optional[List[float]] = None,
    ) -> Mask:
        output = self._F.affine_mask(
            self.as_subclass(torch.Tensor),
            angle,
            translate=translate,
            scale=scale,
            shear=shear,
            fill=fill,
            center=center,
        )
        return Mask.wrap_like(self, output)
    def perspective(
        self,
        startpoints: Optional[List[List[int]]],
        endpoints: Optional[List[List[int]]],
        interpolation: InterpolationMode = InterpolationMode.NEAREST,
        fill: FillTypeJIT = None,
        coefficients: Optional[List[float]] = None,
    ) -> Mask:
        output = self._F.perspective_mask(
            self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
        )
        return Mask.wrap_like(self, output)
    def elastic(
        self,
        displacement: torch.Tensor,
        interpolation: InterpolationMode = InterpolationMode.NEAREST,
        fill: FillTypeJIT = None,
    ) -> Mask:
        output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
        return Mask.wrap_like(self, output)
| bsd-3-clause | a0fae8c3e7c2521b13fe974d51bcdf44 | 32.503597 | 116 | 0.60919 | 3.728583 | false | false | false | false |
automl/auto-sklearn | autosklearn/pipeline/components/data_preprocessing/rescaling/__init__.py | 1 | 3397 | from typing import Dict, Optional
import os
from collections import OrderedDict
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter
from sklearn.base import BaseEstimator
from autosklearn.askl_typing import FEAT_TYPE_TYPE
from autosklearn.pipeline.base import DATASET_PROPERTIES_TYPE, PIPELINE_DATA_DTYPE
from autosklearn.pipeline.components.data_preprocessing.rescaling.abstract_rescaling import ( # noqa: E501
Rescaling,
)
from ...base import (
AutoSklearnChoice,
AutoSklearnPreprocessingAlgorithm,
ThirdPartyComponents,
_addons,
find_components,
)
# Discover the built-in rescaling components that live next to this file.
rescaling_directory = os.path.split(__file__)[0]
_rescalers = find_components(
    __package__, rescaling_directory, AutoSklearnPreprocessingAlgorithm
)
# Registry for rescaling components contributed by third parties.
additional_components = ThirdPartyComponents(AutoSklearnPreprocessingAlgorithm)
_addons["data_preprocessing.rescaling"] = additional_components
def add_rescaler(rescaler: Rescaling) -> None:
    """Register a third-party rescaling component so it becomes available as a choice."""
    additional_components.add_component(rescaler)
class RescalingChoice(AutoSklearnChoice):
    """Choice node that selects one rescaling component and exposes the joint
    hyperparameter search space of all available components."""
    @classmethod
    def get_components(cls: BaseEstimator) -> Dict[str, BaseEstimator]:
        """Return built-in and registered third-party rescalers, keyed by name."""
        components: Dict[str, BaseEstimator] = OrderedDict()
        components.update(_rescalers)
        components.update(additional_components.components)
        return components
    def get_hyperparameter_search_space(
        self,
        feat_type: Optional[FEAT_TYPE_TYPE] = None,
        dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
        default: Optional[str] = None,
        include: Optional[Dict[str, str]] = None,
        exclude: Optional[Dict[str, str]] = None,
    ) -> ConfigurationSpace:
        """Build a ConfigurationSpace with a `__choice__` hyperparameter and one
        conditional sub-space per available rescaling component.

        Raises:
            ValueError: if `include`/`exclude` leave no component available.
        """
        cs = ConfigurationSpace()
        if dataset_properties is None:
            dataset_properties = {}
        # Compile a list of legal preprocessors for this problem
        available_preprocessors = self.get_available_components(
            dataset_properties=dataset_properties, include=include, exclude=exclude
        )
        if len(available_preprocessors) == 0:
            raise ValueError("No rescalers found, please add any rescaling component.")
        if default is None:
            # Preference order for the default choice; first one available wins.
            defaults = ["standardize", "none", "minmax", "normalize"]
            for default_ in defaults:
                if default_ in available_preprocessors:
                    default = default_
                    break
        preprocessor = CategoricalHyperparameter(
            "__choice__", list(available_preprocessors.keys()), default_value=default
        )
        cs.add_hyperparameter(preprocessor)
        # Attach each component's sub-space, active only when that component is chosen.
        for name in available_preprocessors:
            preprocessor_configuration_space = available_preprocessors[
                name
            ].get_hyperparameter_search_space(
                feat_type=feat_type, dataset_properties=dataset_properties
            )
            parent_hyperparameter = {"parent": preprocessor, "value": name}
            cs.add_configuration_space(
                name,
                preprocessor_configuration_space,
                parent_hyperparameter=parent_hyperparameter,
            )
        self.configuration_space = cs
        self.dataset_properties = dataset_properties
        return cs
    def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
        """Delegate to the currently selected rescaling component."""
        return self.choice.transform(X)
| bsd-3-clause | 9460340caf23d12c8e2019b17206b986 | 35.138298 | 107 | 0.678834 | 4.366324 | false | true | false | false |
mozilla/kitsune | kitsune/kpi/tests/test_surveygizmo_utils.py | 1 | 3646 | from datetime import datetime
from unittest.mock import patch
from kitsune.kpi.surveygizmo_utils import (
add_email_to_campaign,
get_email_addresses,
get_exit_survey_results,
)
from kitsune.sumo.tests import TestCase
class GetEmailAddressesTests(TestCase):
    """Tests for `get_email_addresses`."""
    @patch("kitsune.kpi.surveygizmo_utils.requests")
    def test_early_exit(self, mock_requests):
        """
        If there are no creds, don't hit the API and return an empty
        list.
        """
        with self.settings(SURVEYGIZMO_API_TOKEN=None, SURVEYGIZMO_API_TOKEN_SECRET=None):
            emails = get_email_addresses("general", datetime(2016, 1, 1), datetime(2016, 1, 2))
        self.assertEqual(emails, [])
        assert not mock_requests.get.called
    @patch("kitsune.kpi.surveygizmo_utils.requests")
    def test_creds(self, mock_requests):
        """Ensure the token and secret are passed correctly."""
        # A canned empty response keeps the result parsing happy.
        mock_requests.get.return_value.content = SURVEY_GIZMO_EMPTY_RESPONSE
        with self.settings(
            SURVEYGIZMO_API_TOKEN="mytoken", SURVEYGIZMO_API_TOKEN_SECRET="mysecret"
        ):
            get_email_addresses(
                "general", datetime(2016, 1, 1, 12, 0), datetime(2016, 1, 2, 13, 0)
            )
        url = mock_requests.get.call_args[0][0]
        assert "api_token=mytoken" in url
        assert "api_token_secret=mysecret" in url
class AddEmailToCampaignTests(TestCase):
    """Tests for `add_email_to_campaign`."""
    @patch("kitsune.kpi.surveygizmo_utils.requests")
    def test_early_exit(self, mock_requests):
        """
        If there are no creds, don't hit the API and return an empty
        list.
        """
        with self.settings(SURVEYGIZMO_API_TOKEN=None, SURVEYGIZMO_API_TOKEN_SECRET=None):
            add_email_to_campaign("general", "a@example.com")
        assert not mock_requests.put.called
    @patch("kitsune.kpi.surveygizmo_utils.requests")
    def test_creds(self, mock_requests):
        """Ensure the token and secret are passed correctly."""
        with self.settings(
            SURVEYGIZMO_API_TOKEN="mytoken", SURVEYGIZMO_API_TOKEN_SECRET="mysecret"
        ):
            add_email_to_campaign("general", "a@example.com")
        url = mock_requests.put.call_args[0][0]
        assert "api_token=mytoken" in url
        assert "api_token_secret=mysecret" in url
class GetExitSurveyResults(TestCase):
    """Tests for `get_exit_survey_results`."""

    @patch("kitsune.kpi.surveygizmo_utils.requests")
    def test_early_exit(self, mock_requests):
        """
        If there are no creds, don't hit the API and return zeroed
        counts.
        """
        with self.settings(SURVEYGIZMO_API_TOKEN=None, SURVEYGIZMO_API_TOKEN_SECRET=None):
            summary = get_exit_survey_results("general", datetime(2016, 1, 1))
        self.assertEqual(summary, {"yes": 0, "no": 0, "dont-know": 0})
        # The results are fetched with a GET request (see test_creds below), so
        # GET is the call that must not have happened. The previous assertion
        # on `put` was a copy/paste leftover and was vacuously true.
        assert not mock_requests.get.called

    @patch("kitsune.kpi.surveygizmo_utils.requests")
    def test_creds(self, mock_requests):
        """Ensure the token and secret are passed correctly."""
        # A canned empty response keeps the result parsing happy.
        mock_requests.get.return_value.content = SURVEY_GIZMO_EMPTY_RESPONSE
        with self.settings(
            SURVEYGIZMO_API_TOKEN="mytoken", SURVEYGIZMO_API_TOKEN_SECRET="mysecret"
        ):
            get_exit_survey_results("general", datetime(2016, 1, 1))
        url = mock_requests.get.call_args[0][0]
        assert "api_token=mytoken" in url
        assert "api_token_secret=mysecret" in url
SURVEY_GIZMO_EMPTY_RESPONSE = """
{
"total_count": "0",
"total_pages": 1,
"results_per_page": "500",
"result_ok": true,
"data": [],
"page": "1"
}
"""
| bsd-3-clause | 3ade033e5eb62c48792290034d8b6064 | 34.745098 | 95 | 0.626166 | 3.344954 | false | true | false | false |
mozilla/kitsune | kitsune/kpi/models.py | 1 | 3930 | from django.db.models import CharField, DateField, ForeignKey, PositiveIntegerField, CASCADE
from kitsune.sumo.models import ModelBase
VISITORS_METRIC_CODE = "general keymetrics:visitors"
L10N_METRIC_CODE = "general l10n:coverage"
SUPPORT_FORUM_CONTRIBUTORS_METRIC_CODE = "general supportforum:contributors"
KB_ENUS_CONTRIBUTORS_METRIC_CODE = "general kb:en-US:contributors"
KB_L10N_CONTRIBUTORS_METRIC_CODE = "general kb:l10n:contributors"
SEARCH_SEARCHES_METRIC_CODE = "search clickthroughs:elastic:searches"
SEARCH_CLICKS_METRIC_CODE = "search clickthroughs:elastic:clicks"
EXIT_SURVEY_YES_CODE = "exit-survey:yes"
EXIT_SURVEY_NO_CODE = "exit-survey:no"
EXIT_SURVEY_DONT_KNOW_CODE = "exit-survey:dont-know"
CONTRIBUTORS_CSAT_METRIC_CODE = "csat contributors"
SUPPORT_FORUM_CONTRIBUTORS_CSAT_METRIC_CODE = "csat contributors:supportforum"
KB_ENUS_CONTRIBUTORS_CSAT_METRIC_CODE = "csat contributors:kb:en-US"
KB_L10N_CONTRIBUTORS_CSAT_METRIC_CODE = "csat contributors:kb-l10n"
CONTRIBUTOR_COHORT_CODE = "contributor"
KB_ENUS_CONTRIBUTOR_COHORT_CODE = "contributor:kb:en-US"
KB_L10N_CONTRIBUTOR_COHORT_CODE = "contributor:kb:l10n"
SUPPORT_FORUM_HELPER_COHORT_CODE = "contributor:supportforum:helper"
class MetricKind(ModelBase):
    """A programmer-readable identifier of a metric, like 'clicks: search'"""
    # One of the *_METRIC_CODE constants defined above.
    code = CharField(max_length=255, unique=True)
    def __str__(self):
        return self.code
class Metric(ModelBase):
    """A single numeric measurement aggregated over a span of time.
    For example, the number of hits to a page during a specific week.
    """
    # If we need to (and I would prefer to avoid this, because it wrecks the
    # consistent semantics of rows--some will be aggregations and others will
    # not), we can lift the unique constraint on kind/start/end for things that
    # are collected in realtime and can't be immediately bucketed. However, in
    # such cases it would probably be nicer to our future selves to put them in
    # a separate store (table or whatever) until bucketing.
    # In the design of this table, we trade off constraints for generality.
    # There's no way to have the DB prove, for example, that both halves of a
    # clickthrough rate ratio will always exist, but the app can make sure it's
    # true upon inserting them.
    kind = ForeignKey(MetricKind, on_delete=CASCADE)
    # Start of the measured period.
    start = DateField()
    # Not useful yet. Present metrics have spans of known length.
    end = DateField()
    # Ints should be good enough for all the currently wish-listed metrics.
    # Percents can be (even better) represented by 2 separate metrics: one for
    # numerator, one for denominator.
    value = PositiveIntegerField()
    class Meta(object):
        unique_together = [("kind", "start", "end")]
    def __str__(self):
        return "%s (%s thru %s): %s" % (self.kind, self.start, self.end, self.value)
class CohortKind(ModelBase):
    """A programmer-readable identifier of a cohort, like 'contributor'"""
    # Unique code string used to look up a kind (see the *_COHORT_CODE constants above).
    code = CharField(max_length=255, unique=True)
    def __str__(self):
        return self.code
class Cohort(ModelBase):
    """A group of users who have shared a particular activity in a given time frame"""

    kind = ForeignKey(CohortKind, on_delete=CASCADE)
    start = DateField()
    end = DateField()
    # Number of users in the cohort.
    size = PositiveIntegerField(default=0)

    class Meta:
        unique_together = [("kind", "start", "end")]

    def __str__(self):
        return f"{self.kind} ({self.start} thru {self.end}): {self.size}"
class RetentionMetric(ModelBase):
    """A measurement of the number of active users from a cohort during a given time period."""

    cohort = ForeignKey(Cohort, on_delete=CASCADE, related_name="retention_metrics")
    start = DateField()
    end = DateField()
    # Number of cohort members active during [start, end].
    size = PositiveIntegerField(default=0)

    class Meta(object):
        unique_together = [("cohort", "start", "end")]

    def __str__(self):
        # Added for consistency with Metric/Cohort so retention rows are
        # readable in logs and the admin.
        return "%s (%s thru %s): %s" % (self.cohort, self.start, self.end, self.size)
| bsd-3-clause | c892293255a579f6b4a297e1a6b7d19d | 35.728972 | 95 | 0.711959 | 3.534173 | false | false | false | false |
mozilla/kitsune | kitsune/questions/badges.py | 1 | 1181 | from django.db.models.signals import post_save
from kitsune.questions.models import Answer
# Heads up! These are year-agnostic badge templates which code uses
# to get-or-create the actual Badge instances. These strings should
# not be l10n-ized here--the badge title and description strings get
# l10n-ized elsewhere. Peace!
# Badge template key -> kwargs for get-or-creating the Badge. The "{year}"
# placeholders are filled with a concrete year elsewhere (presumably in
# maybe_award_badge -- confirm against kitsune.questions.tasks).
QUESTIONS_BADGES = {
    "answer-badge": {
        "slug": "{year}-support-forum-badge",
        "title": "{year} Support Forum Badge",
        "description": "This badge is awarded to contributors with 30 "
        "support forum replies during {year}.",
    },
}
def on_reply_save(sender, instance, created, **kwargs):
    """Handle the Answer post_save signal.

    When a new answer is created (not edited), its creator might qualify
    for the year's Support Forum badge, awarded at 30 answers.

    :param sender: model class sending the signal (Answer)
    :param instance: the Answer that was saved
    :param created: True when the row was inserted, False on updates
    """
    if not created:
        # Edits can't change the answer count. Returning early also avoids
        # the related-object lookup below (``creator`` is a FK access and
        # may hit the database) on every edit save.
        return
    answer = instance
    from kitsune.questions.tasks import maybe_award_badge

    maybe_award_badge.delay(
        QUESTIONS_BADGES["answer-badge"], answer.created.year, answer.creator.id
    )
def register_signals():
    """Connect the Answer post_save handler; intended to run once at app setup."""
    post_save.connect(on_reply_save, sender=Answer)
| bsd-3-clause | 93a1b50f8d43c94bda203baf2e4129e0 | 30.918919 | 83 | 0.685859 | 3.656347 | false | false | false | false |
mozilla/kitsune | kitsune/kbforums/urls.py | 1 | 2181 | from django.conf import settings
from django.urls import re_path
from kitsune.flagit import views as flagit_views
from kitsune.kbforums import views
from kitsune.kbforums.feeds import PostsFeed, ThreadsFeed
from kitsune.kbforums.models import Post
from kitsune.sumo.views import handle404
if settings.DISABLE_FEEDS:
    # Feeds are disabled site-wide: serve 404s in place of the feed views.
    threads_feed_view = handle404
    posts_feed_view = handle404
else:
    threads_feed_view = ThreadsFeed()
    posts_feed_view = PostsFeed()
# These patterns inherit from /document/discuss
urlpatterns = [
    # Forum-level views: thread listing, feed, creation, watching.
    re_path(r"^$", views.threads, name="wiki.discuss.threads"),
    re_path(r"^/feed", threads_feed_view, name="wiki.discuss.threads.feed"),
    re_path(r"^/new", views.new_thread, name="wiki.discuss.new_thread"),
    re_path(r"^/watch", views.watch_forum, name="wiki.discuss.watch_forum"),
    re_path(
        r"^/post-preview-async$", views.post_preview_async, name="wiki.discuss.post_preview_async"
    ),
    # Per-thread views and moderation actions.
    re_path(r"^/(?P<thread_id>\d+)$", views.posts, name="wiki.discuss.posts"),
    re_path(r"^/(?P<thread_id>\d+)/feed$", posts_feed_view, name="wiki.discuss.posts.feed"),
    re_path(r"^/(?P<thread_id>\d+)/watch$", views.watch_thread, name="wiki.discuss.watch_thread"),
    re_path(r"^/(?P<thread_id>\d+)/reply$", views.reply, name="wiki.discuss.reply"),
    re_path(
        r"^/(?P<thread_id>\d+)/sticky$", views.sticky_thread, name="wiki.discuss.sticky_thread"
    ),
    re_path(r"^/(?P<thread_id>\d+)/lock$", views.lock_thread, name="wiki.discuss.lock_thread"),
    re_path(r"^/(?P<thread_id>\d+)/edit$", views.edit_thread, name="wiki.discuss.edit_thread"),
    re_path(
        r"^/(?P<thread_id>\d+)/delete$", views.delete_thread, name="wiki.discuss.delete_thread"
    ),
    # Per-post edit/delete.
    re_path(
        r"^/(?P<thread_id>\d+)/(?P<post_id>\d+)/edit",
        views.edit_post,
        name="wiki.discuss.edit_post",
    ),
    re_path(
        r"^/(?P<thread_id>\d+)/(?P<post_id>\d+)/delete",
        views.delete_post,
        name="wiki.discuss.delete_post",
    ),
    # Flag discussion posts
    re_path(
        r"^/(?P<object_id>\d+)/flag$",
        flagit_views.flag,
        {"model": Post},
        name="wiki.discuss.flag_post",
    ),
]
| bsd-3-clause | bd97a152b8b2857f3fb94a4d72891c83 | 38.654545 | 98 | 0.634113 | 3.004132 | false | false | true | false |
mozilla/kitsune | kitsune/search/fields.py | 1 | 1617 | from functools import partial
from django.conf import settings
from elasticsearch_dsl.field import Keyword
from elasticsearch_dsl.field import Object as DSLObject
from elasticsearch_dsl.field import Text
from kitsune.search.es7_utils import es_analyzer_for_locale
# Copy so we can mutate the list without touching settings.SUMO_LANGUAGES.
SUPPORTED_LANGUAGES = list(settings.SUMO_LANGUAGES)
# "xx" is a test-only locale - no need to add it to ES
SUPPORTED_LANGUAGES.remove("xx")
def _get_fields(field, locales, **params):
    """Build the per-locale sub-fields of a locale-aware multi-field.

    Text fields are given locale-specific index/search analyzers; any other
    field type is constructed from ``params`` alone.
    """

    def _build(locale):
        # One-line helper: construct the field object for a single locale.
        if field is not Text:
            return field(**params)
        index_analyzer = es_analyzer_for_locale(locale)
        return field(
            analyzer=index_analyzer,
            search_analyzer=es_analyzer_for_locale(locale, search_analyzer=True),
            search_quote_analyzer=index_analyzer,
            **params,
        )

    return {locale: _build(locale) for locale in locales}
def construct_locale_field(field, locales, **params):
    """Return a DSL Object whose properties are one ``field`` per locale."""
    return DSLObject(properties=_get_fields(field=field, locales=locales, **params))
# Factories pre-bound to a field type; still need ``locales=...`` at call time.
SumoTextField = partial(construct_locale_field, field=Text)
SumoKeywordField = partial(construct_locale_field, field=Keyword)
# Locale-aware variants covering every supported locale: each produces an
# Object in the form of {'en-US': Text(<analyzer for the specific locale>), ...}
SumoLocaleAwareTextField = partial(SumoTextField, locales=SUPPORTED_LANGUAGES)
SumoLocaleAwareKeywordField = partial(SumoKeywordField, locales=SUPPORTED_LANGUAGES)
| bsd-3-clause | e479f70e3a66268c990c1b58c8f72008 | 33.404255 | 84 | 0.699443 | 3.934307 | false | false | false | false |
mozilla/kitsune | kitsune/upload/storage.py | 1 | 1281 | import hashlib
import itertools
import os
import time
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from storages.backends.s3boto3 import S3Boto3Storage
# Store uploads on S3 when AWS credentials are configured; otherwise local disk.
DjangoStorage = S3Boto3Storage if settings.AWS_ACCESS_KEY_ID else FileSystemStorage
class RenameFileStorage(DjangoStorage):
    """Storage backend that renames uploads per our file-naming conventions."""

    def get_available_name(self, name, max_length=None):
        # NOTE(review): ``max_length`` is accepted for API compatibility but
        # ignored here -- confirm generated names always fit the column.
        directory, filename = os.path.split(name)
        root, extension = os.path.splitext(filename)
        # Replace the root with something we like: clean and all ASCII --
        # a local timestamp plus a short md5 digest of the original root.
        digest = hashlib.md5(root.encode("utf8")).hexdigest()[0:6]
        root = time.strftime("%Y-%m-%d-%H-%M-%S-", time.localtime()) + digest
        name = os.path.join(directory, root + extension)
        # If the name is taken, append _1, _2, ... before the extension
        # until we find one that is free.
        counter = itertools.count(1)
        while self.exists(name):
            name = os.path.join(directory, "%s_%s%s" % (root, next(counter), extension))
        return name
| bsd-3-clause | 40a418f3350a5faf4402fdf970b5dfc8 | 34.583333 | 89 | 0.676034 | 3.670487 | false | false | false | false |
mozilla/kitsune | kitsune/journal/models.py | 1 | 1300 | from datetime import datetime
from django.db import models
# Log levels stored in Record.level.
RECORD_INFO = "info"
RECORD_ERROR = "error"
class RecordManager(models.Manager):
    """Manager providing logging-style helpers that create Record rows."""

    def log(self, level, src, msg, **kwargs):
        """Create and return a Record at ``level``.

        ``msg`` is a str.format template interpolated with ``kwargs``.
        """
        # Bug fix: ``level`` was previously ignored and every record was
        # written as RECORD_INFO, so error() entries were logged as info.
        # Also keep ``msg`` as str: encoding to bytes here would make the
        # CharField persist the bytes repr ("b'...'") under Python 3.
        msg = msg.format(**kwargs)
        return Record.objects.create(level=level, src=src, msg=msg)

    def info(self, src, msg, **kwargs):
        """Record an informational entry."""
        self.log(RECORD_INFO, src, msg, **kwargs)

    def error(self, src, msg, **kwargs):
        """Record an error entry."""
        self.log(RECORD_ERROR, src, msg, **kwargs)
class Record(models.Model):
    """An audit record of something that happened in translations."""

    TYPE_CHOICES = [
        (RECORD_INFO, RECORD_INFO),
        (RECORD_ERROR, RECORD_ERROR),
    ]

    # Severity of this message (e.g. "info", "error", ...).
    level = models.CharField(choices=TYPE_CHOICES, max_length=20)
    # Component that was running (e.g. "sumo.ratelimit", "questions.aaq").
    src = models.CharField(max_length=50)
    # Message details (e.g. "user bob hit the ratelimit for questions.ask").
    msg = models.CharField(max_length=255)
    # When this log entry was created. NOTE(review): naive datetime.now --
    # confirm the project runs without USE_TZ.
    created = models.DateTimeField(default=datetime.now)

    objects = RecordManager()

    def __str__(self):
        return f"<Record {self.src} {self.msg}>"
| bsd-3-clause | 02a475bdca3360890cba9d2d4c7bebde | 27.888889 | 80 | 0.650769 | 3.542234 | false | false | false | false |
mozilla/kitsune | kitsune/wiki/diff.py | 1 | 1779 | import difflib
class BetterHtmlDiff(difflib.HtmlDiff):
    """Modified version of HtmlDiff.

    * Replaces the hard-coded nowrap="nowrap" style with CSS classes
    * Only replaces every other consecutive space with a &nbsp; to allow
      for line wrapping.

    For producing HTML side by side comparison with change highlights.

    This class can be used to create an HTML table (or a complete HTML file
    containing the table) showing a side by side, line by line comparison
    of text with inter-line and intra-line change highlights. The table can
    be generated in either full or contextual difference mode.

    The following methods are provided for HTML generation:

    make_table -- generates HTML for a single side by side table
    make_file -- generates complete HTML file with a single side by side table

    See tools/scripts/diff.py for an example usage of this class.
    """

    def _format_line(self, side, flag, linenum, text):
        """Returns HTML markup of "from" / "to" text lines

        side -- 0 or 1 indicating "from" or "to" text
        flag -- indicates if difference on line
        linenum -- line number (used for line number column)
        text -- line text to be marked up
        """
        try:
            linenum = "%d" % linenum
            id = ' id="%s%s"' % (self._prefix[side], linenum)
        except TypeError:
            # handle blank lines where linenum is '>' or ''
            id = ""
        # Escape characters that would be confused with HTML markup.
        # (Bug fix: these replacements had degenerated into no-ops like
        # replace("&", "&"), emitting broken/unescaped HTML.)
        text = text.replace("&", "&amp;").replace(">", "&gt;")
        text = text.replace("<", "&lt;")
        # Replace every other consecutive space with &nbsp; so runs of
        # spaces survive HTML whitespace collapsing while lines still wrap.
        text = text.replace("  ", "&nbsp; ").rstrip()
        return '<td class="diff_header"%s>%s</td><td class="text">%s</td>' % (id, linenum, text)
| bsd-3-clause | 6de14504f426f8e30903582c9e6c19cd | 37.673913 | 96 | 0.633502 | 4.266187 | false | false | false | false |
mozilla/kitsune | kitsune/kbforums/tests/test_views.py | 1 | 6805 | from kitsune.kbforums.events import NewPostEvent, NewThreadEvent
from kitsune.kbforums.models import Thread
from kitsune.kbforums.tests import KBForumTestCase, ThreadFactory
from kitsune.sumo.tests import get, post
from kitsune.users.tests import UserFactory, add_permission
from kitsune.wiki.tests import ApprovedRevisionFactory, DocumentFactory
class ThreadTests(KBForumTestCase):
    """Test thread views."""
    # Integration tests: drive the real kbforums views through the Django
    # test client and check side effects on watches and thread data.
    def test_watch_forum(self):
        """Watch then unwatch a forum."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        d = ApprovedRevisionFactory().document
        post(self.client, "wiki.discuss.watch_forum", {"watch": "yes"}, args=[d.slug])
        assert NewThreadEvent.is_notifying(u, d)
        # NewPostEvent is not notifying.
        t = ThreadFactory(document=d)
        p = t.new_post(creator=t.creator, content="test")
        assert not NewPostEvent.is_notifying(u, p)
        post(self.client, "wiki.discuss.watch_forum", {"watch": "no"}, args=[d.slug])
        assert not NewThreadEvent.is_notifying(u, d)
    def test_watch_thread(self):
        """Watch then unwatch a thread."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        t = ThreadFactory()
        post(
            self.client,
            "wiki.discuss.watch_thread",
            {"watch": "yes"},
            args=[t.document.slug, t.id],
        )
        assert NewPostEvent.is_notifying(u, t)
        # NewThreadEvent is not notifying.
        assert not NewThreadEvent.is_notifying(u, t.document)
        post(
            self.client, "wiki.discuss.watch_thread", {"watch": "no"}, args=[t.document.slug, t.id]
        )
        assert not NewPostEvent.is_notifying(u, t)
    def test_edit_thread(self):
        """Changing thread title works."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        d = DocumentFactory()
        t = ThreadFactory(title="Sticky Thread", document=d, creator=u)
        post(
            self.client, "wiki.discuss.edit_thread", {"title": "A new title"}, args=[d.slug, t.id]
        )
        edited_t = d.thread_set.get(pk=t.id)
        # The in-memory object is stale; the refetched row carries the new title.
        self.assertEqual("Sticky Thread", t.title)
        self.assertEqual("A new title", edited_t.title)
    def test_edit_thread_moderator(self):
        """Editing post as a moderator works."""
        u = UserFactory()
        add_permission(u, Thread, "change_thread")
        t = ThreadFactory(title="Sticky Thread")
        d = t.document
        self.client.login(username=u.username, password="testpass")
        self.assertEqual("Sticky Thread", t.title)
        r = post(
            self.client, "wiki.discuss.edit_thread", {"title": "new title"}, args=[d.slug, t.id]
        )
        self.assertEqual(200, r.status_code)
        edited_t = Thread.objects.get(pk=t.id)
        self.assertEqual("new title", edited_t.title)
    def test_disallowed_404(self):
        """If document.allow_discussion is false, should return 404."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        doc = ApprovedRevisionFactory(document__allow_discussion=False).document
        # Every discussion endpoint should 404 for this document.
        def check(url):
            response = get(self.client, url, args=[doc.slug])
            st = response.status_code
            self.assertEqual(404, st, "%s was %s, not 404" % (url, st))
        check("wiki.discuss.threads")
        check("wiki.discuss.new_thread")
        check("wiki.discuss.threads.feed")
class ThreadPermissionsTests(KBForumTestCase):
    """Permission checks for kbforum thread/post moderation views.

    Each test logs in as a user *without* the relevant permission and
    verifies the view refuses with 403 (or 405 for GETs on POST-only views).
    """

    def setUp(self):
        # Modernized to py3 zero-argument super(); behavior unchanged.
        super().setUp()
        self.doc = DocumentFactory()
        self.u = UserFactory()
        self.thread = ThreadFactory(document=self.doc, creator=self.u)
        self.post = self.thread.new_post(creator=self.thread.creator, content="foo")
        # Login for testing 403s
        u2 = UserFactory()
        self.client.login(username=u2.username, password="testpass")

    def tearDown(self):
        self.client.logout()
        super().tearDown()

    def test_edit_thread_403(self):
        """Editing a thread without permissions returns 403."""
        response = get(
            self.client, "wiki.discuss.edit_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(403, response.status_code)

    def test_edit_locked_thread_403(self):
        """Editing a locked thread returns 403."""
        t = ThreadFactory(document=self.doc, creator=self.u, is_locked=True)
        response = get(self.client, "wiki.discuss.edit_thread", args=[self.doc.slug, t.id])
        self.assertEqual(403, response.status_code)

    def test_delete_thread_403(self):
        """Deleting a thread without permissions returns 403."""
        response = get(
            self.client, "wiki.discuss.delete_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(403, response.status_code)

    def test_sticky_thread_405(self):
        """Marking a thread sticky with a HTTP GET returns 405."""
        response = get(
            self.client, "wiki.discuss.sticky_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(405, response.status_code)

    def test_sticky_thread_403(self):
        """Marking a thread sticky without permissions returns 403."""
        response = post(
            self.client, "wiki.discuss.sticky_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(403, response.status_code)

    def test_locked_thread_403(self):
        """Marking a thread locked without permissions returns 403."""
        response = post(
            self.client, "wiki.discuss.lock_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(403, response.status_code)

    def test_locked_thread_405(self):
        """Marking a thread locked via a GET instead of a POST request."""
        response = get(
            self.client, "wiki.discuss.lock_thread", args=[self.doc.slug, self.thread.id]
        )
        self.assertEqual(405, response.status_code)

    def test_post_edit_403(self):
        """Editing a post without permissions returns 403."""
        response = get(
            self.client,
            "wiki.discuss.edit_post",
            args=[self.doc.slug, self.thread.id, self.post.id],
        )
        self.assertEqual(403, response.status_code)

    def test_post_delete_403(self):
        """Deleting a post without permissions returns 403."""
        response = get(
            self.client,
            "wiki.discuss.delete_post",
            args=[self.doc.slug, self.thread.id, self.post.id],
        )
        self.assertEqual(403, response.status_code)
| bsd-3-clause | 88ba7f045f22813877eb701edd315965 | 37.446328 | 99 | 0.623071 | 3.751378 | false | true | false | false |
mozilla/kitsune | kitsune/customercare/migrations/0001_initial.py | 1 | 2658 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import kitsune.search.models
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated initial schema for the customercare app.
    # NOTE(review): applied migrations are history -- avoid hand-editing.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('twitter_username', models.CharField(max_length=20)),
                ('tweet_id', models.BigIntegerField()),
                ('raw_json', models.TextField()),
                ('locale', models.CharField(max_length=20)),
                ('created', models.DateTimeField(default=datetime.datetime.now, db_index=True)),
                ('reply_to_tweet_id', models.BigIntegerField()),
                ('user', models.ForeignKey(on_delete=models.CASCADE, related_name='tweet_replies', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model, kitsune.search.models.SearchMixin),
        ),
        migrations.CreateModel(
            name='Tweet',
            fields=[
                ('tweet_id', models.BigIntegerField(serialize=False, primary_key=True)),
                ('raw_json', models.TextField()),
                ('locale', models.CharField(max_length=20, db_index=True)),
                ('created', models.DateTimeField(default=datetime.datetime.now, db_index=True)),
                ('hidden', models.BooleanField(default=False, db_index=True)),
                ('reply_to', models.ForeignKey(on_delete=models.CASCADE, related_name='replies', to='customercare.Tweet', null=True)),
            ],
            options={
                'ordering': ('-tweet_id',),
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TwitterAccount',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('username', models.CharField(max_length=15)),
                ('banned', models.BooleanField(default=False, db_index=True)),
                ('ignored', models.BooleanField(default=False, db_index=True)),
            ],
            options={
                'permissions': (('ban_account', 'Can ban twitter accounts'), ('ignore_account', 'Can tag accounts to ignore')),
            },
            bases=(models.Model,),
        ),
    ]
| bsd-3-clause | 2888b9d596fc681bbb2d2476b1de6e4d | 41.870968 | 152 | 0.559819 | 4.489865 | false | false | false | false |
flask-restful/flask-restful | flask_restful/__init__.py | 1 | 27859 | from __future__ import absolute_import
from functools import wraps, partial
from flask import request, url_for, current_app
from flask import abort as original_flask_abort
from flask import make_response as original_flask_make_response
from flask.views import MethodView
from flask.signals import got_request_exception
from werkzeug.datastructures import Headers
from werkzeug.exceptions import HTTPException, MethodNotAllowed, NotFound, NotAcceptable, InternalServerError
from werkzeug.wrappers import Response as ResponseBase
from flask_restful.utils import http_status_message, unpack, OrderedDict
from flask_restful.representations.json import output_json
import sys
from types import MethodType
import operator
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
__all__ = ('Api', 'Resource', 'marshal', 'marshal_with', 'marshal_with_field', 'abort')
def abort(http_status_code, **kwargs):
    """Raise a :class:`HTTPException` for the given ``http_status_code``.

    Any keyword arguments are attached to the exception as ``data`` for
    later processing (see :meth:`Api.handle_error`).

    :param http_status_code: the HTTP status to abort with
    :param kwargs: extra payload stored on the exception (e.g. ``message``)
    """
    #noinspection PyUnresolvedReferences
    try:
        original_flask_abort(http_status_code)
    except HTTPException as e:
        # Idiom fix: truthiness test instead of len(kwargs).
        if kwargs:
            e.data = kwargs
        raise
# Default mediatype -> renderer mapping used to seed every new Api instance.
DEFAULT_REPRESENTATIONS = [('application/json', output_json)]
class Api(object):
"""
The main entry point for the application.
You need to initialize it with a Flask Application: ::
>>> app = Flask(__name__)
>>> api = restful.Api(app)
Alternatively, you can use :meth:`init_app` to set the Flask application
after it has been constructed.
:param app: the Flask application object
:type app: flask.Flask or flask.Blueprint
:param prefix: Prefix all routes with a value, eg v1 or 2010-04-01
:type prefix: str
:param default_mediatype: The default media type to return
:type default_mediatype: str
:param decorators: Decorators to attach to every resource
:type decorators: list
:param catch_all_404s: Use :meth:`handle_error`
to handle 404 errors throughout your app
:param serve_challenge_on_401: Whether to serve a challenge response to
clients on receiving 401. This usually leads to a username/password
popup in web browsers.
:param url_part_order: A string that controls the order that the pieces
of the url are concatenated when the full url is constructed. 'b'
is the blueprint (or blueprint registration) prefix, 'a' is the api
prefix, and 'e' is the path component the endpoint is added with
:type catch_all_404s: bool
:param errors: A dictionary to define a custom response for each
exception or error raised during a request
:type errors: dict
"""
def __init__(self, app=None, prefix='',
default_mediatype='application/json', decorators=None,
catch_all_404s=False, serve_challenge_on_401=False,
url_part_order='bae', errors=None):
self.representations = OrderedDict(DEFAULT_REPRESENTATIONS)
self.urls = {}
self.prefix = prefix
self.default_mediatype = default_mediatype
self.decorators = decorators if decorators else []
self.catch_all_404s = catch_all_404s
self.serve_challenge_on_401 = serve_challenge_on_401
self.url_part_order = url_part_order
self.errors = errors or {}
self.blueprint_setup = None
self.endpoints = set()
self.resources = []
self.app = None
self.blueprint = None
if app is not None:
self.app = app
self.init_app(app)
    def init_app(self, app):
        """Initialize this class with the given :class:`flask.Flask`
        application or :class:`flask.Blueprint` object.

        :param app: the Flask application or blueprint object
        :type app: flask.Flask
        :type app: flask.Blueprint

        Examples::

            api = Api()
            api.add_resource(...)
            api.init_app(app)
        """
        # If app is a blueprint, defer the initialization (EAFP: probe for
        # the blueprint-only ``record`` hook rather than type-checking).
        try:
            app.record(self._deferred_blueprint_init)
        # Flask.Blueprint has a 'record' attribute; a plain Flask app does not
        except AttributeError:
            self._init_app(app)
        else:
            self.blueprint = app
def _complete_url(self, url_part, registration_prefix):
"""This method is used to defer the construction of the final url in
the case that the Api is created with a Blueprint.
:param url_part: The part of the url the endpoint is registered with
:param registration_prefix: The part of the url contributed by the
blueprint. Generally speaking, BlueprintSetupState.url_prefix
"""
parts = {
'b': registration_prefix,
'a': self.prefix,
'e': url_part
}
return ''.join(parts[key] for key in self.url_part_order if parts[key])
    @staticmethod
    def _blueprint_setup_add_url_rule_patch(blueprint_setup, rule, endpoint=None, view_func=None, **options):
        """Method used to patch BlueprintSetupState.add_url_rule for setup
        state instance corresponding to this Api instance. Exists primarily
        to enable _complete_url's function.

        :param blueprint_setup: The BlueprintSetupState instance (self)
        :param rule: A string or callable that takes a string and returns a
            string(_complete_url) that is the url rule for the endpoint
            being registered
        :param endpoint: See BlueprintSetupState.add_url_rule
        :param view_func: See BlueprintSetupState.add_url_rule
        :param **options: See BlueprintSetupState.add_url_rule
        """
        if callable(rule):
            # Deferred rule (see _complete_url): resolve it now that the
            # blueprint's url_prefix is known.
            rule = rule(blueprint_setup.url_prefix)
        elif blueprint_setup.url_prefix:
            rule = blueprint_setup.url_prefix + rule
        options.setdefault('subdomain', blueprint_setup.subdomain)
        if endpoint is None:
            endpoint = view_func.__name__
        defaults = blueprint_setup.url_defaults
        if 'defaults' in options:
            defaults = dict(defaults, **options.pop('defaults'))
        blueprint_setup.app.add_url_rule(rule, '%s.%s' % (blueprint_setup.blueprint.name, endpoint),
                                         view_func, defaults=defaults, **options)
    def _deferred_blueprint_init(self, setup_state):
        """Synchronize prefix between blueprint/api and registration options, then
        perform initialization with setup_state.app :class:`flask.Flask` object.

        When a :class:`flask_restful.Api` object is initialized with a blueprint,
        this method is recorded on the blueprint to be run when the blueprint is later
        registered to a :class:`flask.Flask` object. This method also monkeypatches
        BlueprintSetupState.add_url_rule with _blueprint_setup_add_url_rule_patch.

        :param setup_state: The setup state object passed to deferred functions
            during blueprint registration
        :type setup_state: flask.blueprints.BlueprintSetupState
        """
        self.blueprint_setup = setup_state
        # Patch only once: the __name__ check guards against re-wrapping if
        # several Apis share a setup state.
        if setup_state.add_url_rule.__name__ != '_blueprint_setup_add_url_rule_patch':
            setup_state._original_add_url_rule = setup_state.add_url_rule
            setup_state.add_url_rule = MethodType(Api._blueprint_setup_add_url_rule_patch,
                                                  setup_state)
        if not setup_state.first_registration:
            raise ValueError('flask-restful blueprints can only be registered once.')
        self._init_app(setup_state.app)
def _init_app(self, app):
"""Perform initialization actions with the given :class:`flask.Flask`
object.
:param app: The flask application object
:type app: flask.Flask
"""
app.handle_exception = partial(self.error_router, app.handle_exception)
app.handle_user_exception = partial(self.error_router, app.handle_user_exception)
if len(self.resources) > 0:
for resource, urls, kwargs in self.resources:
self._register_view(app, resource, *urls, **kwargs)
def owns_endpoint(self, endpoint):
"""Tests if an endpoint name (not path) belongs to this Api. Takes
in to account the Blueprint name part of the endpoint name.
:param endpoint: The name of the endpoint being checked
:return: bool
"""
if self.blueprint:
if endpoint.startswith(self.blueprint.name):
endpoint = endpoint.split(self.blueprint.name + '.', 1)[-1]
else:
return False
return endpoint in self.endpoints
    def _should_use_fr_error_handler(self):
        """ Determine if error should be handled with FR or default Flask

        The goal is to return Flask error handlers for non-FR-related routes,
        and FR errors (with the correct media type) for FR endpoints. This
        method currently handles 404 and 405 errors.

        :return: bool
        """
        adapter = current_app.create_url_adapter(request)
        try:
            adapter.match()
        except MethodNotAllowed as e:
            # Check if the other HTTP methods at this url would hit the Api
            valid_route_method = e.valid_methods[0]
            rule, _ = adapter.match(method=valid_route_method, return_rule=True)
            return self.owns_endpoint(rule.endpoint)
        except NotFound:
            return self.catch_all_404s
        except:
            # Werkzeug throws other kinds of exceptions, such as Redirect
            # NOTE(review): bare except also swallows non-Exception errors,
            # and on a successful match (or here) the method implicitly
            # returns None, which callers treat as falsy.
            pass
def _has_fr_route(self):
"""Encapsulating the rules for whether the request was to a Flask endpoint"""
# 404's, 405's, which might not have a url_rule
if self._should_use_fr_error_handler():
return True
# for all other errors, just check if FR dispatched the route
if not request.url_rule:
return False
return self.owns_endpoint(request.url_rule.endpoint)
def error_router(self, original_handler, e):
"""This function decides whether the error occured in a flask-restful
endpoint or not. If it happened in a flask-restful endpoint, our
handler will be dispatched. If it happened in an unrelated view, the
app's original error handler will be dispatched.
In the event that the error occurred in a flask-restful endpoint but
the local handler can't resolve the situation, the router will fall
back onto the original_handler as last resort.
:param original_handler: the original Flask error handler for the app
:type original_handler: function
:param e: the exception raised while handling the request
:type e: Exception
"""
if self._has_fr_route():
try:
return self.handle_error(e)
except Exception:
pass # Fall through to original handler
return original_handler(e)
    def handle_error(self, e):
        """Error handler for the API transforms a raised exception into a Flask
        response, with the appropriate HTTP status code and body.

        :param e: the raised Exception object
        :type e: Exception
        """
        got_request_exception.send(current_app._get_current_object(), exception=e)
        # With propagate_exceptions on, non-HTTP errors bubble up to Flask's
        # debugger/reraising machinery instead of being rendered here.
        if not isinstance(e, HTTPException) and current_app.propagate_exceptions:
            exc_type, exc_value, tb = sys.exc_info()
            if exc_value is e:
                raise
            else:
                raise e
        headers = Headers()
        if isinstance(e, HTTPException):
            if e.response is not None:
                # If HTTPException is initialized with a response, then return e.get_response().
                # This prevents specified error response from being overridden.
                # eg. HTTPException(response=Response("Hello World"))
                resp = e.get_response()
                return resp
            code = e.code
            default_data = {
                'message': getattr(e, 'description', http_status_message(code))
            }
            headers = e.get_response().headers
        else:
            # Anything non-HTTP is reported as a generic 500.
            code = 500
            default_data = {
                'message': http_status_message(code),
            }
        # Werkzeug exceptions generate a content-length header which is added
        # to the response in addition to the actual content-length header
        # https://github.com/flask-restful/flask-restful/issues/534
        remove_headers = ('Content-Length',)
        for header in remove_headers:
            headers.pop(header, None)
        # Payload attached via abort(..., **kwargs) wins over the default body.
        data = getattr(e, 'data', default_data)
        if code and code >= 500:
            exc_info = sys.exc_info()
            if exc_info[1] is None:
                exc_info = None
            current_app.log_exception(exc_info)
        # User-supplied per-exception-class overrides (Api(errors={...})).
        error_cls_name = type(e).__name__
        if error_cls_name in self.errors:
            custom_data = self.errors.get(error_cls_name, {})
            code = custom_data.get('status', 500)
            data.update(custom_data)
        if code == 406 and self.default_mediatype is None:
            # if we are handling NotAcceptable (406), make sure that
            # make_response uses a representation we support as the
            # default mediatype (so that make_response doesn't throw
            # another NotAcceptable error).
            supported_mediatypes = list(self.representations.keys())
            fallback_mediatype = supported_mediatypes[0] if supported_mediatypes else "text/plain"
            resp = self.make_response(
                data,
                code,
                headers,
                fallback_mediatype = fallback_mediatype
            )
        else:
            resp = self.make_response(data, code, headers)
        if code == 401:
            # May attach a WWW-Authenticate challenge (see serve_challenge_on_401).
            resp = self.unauthorized(resp)
        return resp
def mediatypes_method(self):
"""Return a method that returns a list of mediatypes
"""
return lambda resource_cls: self.mediatypes() + [self.default_mediatype]
    def add_resource(self, resource, *urls, **kwargs):
        """Adds a resource to the api.

        :param resource: the class name of your resource
        :type resource: :class:`Type[Resource]`

        :param urls: one or more url routes to match for the resource, standard
            flask routing rules apply. Any url variables will be
            passed to the resource method as args.
        :type urls: str

        :param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower`
            Can be used to reference this route in :class:`fields.Url` fields
        :type endpoint: str

        :param resource_class_args: args to be forwarded to the constructor of
            the resource.
        :type resource_class_args: tuple

        :param resource_class_kwargs: kwargs to be forwarded to the constructor
            of the resource.
        :type resource_class_kwargs: dict

        Additional keyword arguments not specified above will be passed as-is
        to :meth:`flask.Flask.add_url_rule`.

        Examples::

            api.add_resource(HelloWorld, '/', '/hello')
            api.add_resource(Foo, '/foo', endpoint="foo")
            api.add_resource(FooSpecial, '/special/foo', endpoint="foo")
        """
        if self.app is not None:
            self._register_view(self.app, resource, *urls, **kwargs)
        else:
            # Not bound to an app yet: queue it; _init_app registers later.
            self.resources.append((resource, urls, kwargs))
def resource(self, *urls, **kwargs):
"""Wraps a :class:`~flask_restful.Resource` class, adding it to the
api. Parameters are the same as :meth:`~flask_restful.Api.add_resource`.
Example::
app = Flask(__name__)
api = restful.Api(app)
@api.resource('/foo')
class Foo(Resource):
def get(self):
return 'Hello, World!'
"""
def decorator(cls):
self.add_resource(cls, *urls, **kwargs)
return cls
return decorator
def _register_view(self, app, resource, *urls, **kwargs):
endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()
self.endpoints.add(endpoint)
resource_class_args = kwargs.pop('resource_class_args', ())
resource_class_kwargs = kwargs.pop('resource_class_kwargs', {})
# NOTE: 'view_functions' is cleaned up from Blueprint class in Flask 1.0
if endpoint in getattr(app, 'view_functions', {}):
previous_view_class = app.view_functions[endpoint].__dict__['view_class']
# if you override the endpoint with a different class, avoid the collision by raising an exception
if previous_view_class != resource:
raise ValueError('This endpoint (%s) is already set to the class %s.' % (endpoint, previous_view_class.__name__))
resource.mediatypes = self.mediatypes_method() # Hacky
resource.endpoint = endpoint
resource_func = self.output(resource.as_view(endpoint, *resource_class_args,
**resource_class_kwargs))
for decorator in self.decorators:
resource_func = decorator(resource_func)
for url in urls:
# If this Api has a blueprint
if self.blueprint:
# And this Api has been setup
if self.blueprint_setup:
# Set the rule to a string directly, as the blueprint is already
# set up.
self.blueprint_setup.add_url_rule(url, view_func=resource_func, **kwargs)
continue
else:
# Set the rule to a function that expects the blueprint prefix
# to construct the final url. Allows deferment of url finalization
# in the case that the associated Blueprint has not yet been
# registered to an application, so we can wait for the registration
# prefix
rule = partial(self._complete_url, url)
else:
# If we've got no Blueprint, just build a url with no prefix
rule = self._complete_url(url, '')
# Add the url to the application or blueprint
app.add_url_rule(rule, view_func=resource_func, **kwargs)
def output(self, resource):
"""Wraps a resource (as a flask view function), for cases where the
resource does not directly return a response object
:param resource: The resource as a flask view function
"""
@wraps(resource)
def wrapper(*args, **kwargs):
resp = resource(*args, **kwargs)
if isinstance(resp, ResponseBase): # There may be a better way to test
return resp
data, code, headers = unpack(resp)
return self.make_response(data, code, headers=headers)
return wrapper
def url_for(self, resource, **values):
"""Generates a URL to the given resource.
Works like :func:`flask.url_for`."""
endpoint = resource.endpoint
if self.blueprint:
endpoint = '{0}.{1}'.format(self.blueprint.name, endpoint)
return url_for(endpoint, **values)
def make_response(self, data, *args, **kwargs):
"""Looks up the representation transformer for the requested media
type, invoking the transformer to create a response object. This
defaults to default_mediatype if no transformer is found for the
requested mediatype. If default_mediatype is None, a 406 Not
Acceptable response will be sent as per RFC 2616 section 14.1
:param data: Python object containing response data to be transformed
"""
default_mediatype = kwargs.pop('fallback_mediatype', None) or self.default_mediatype
mediatype = request.accept_mimetypes.best_match(
self.representations,
default=default_mediatype,
)
if mediatype is None:
raise NotAcceptable()
if mediatype in self.representations:
resp = self.representations[mediatype](data, *args, **kwargs)
resp.headers['Content-Type'] = mediatype
return resp
elif mediatype == 'text/plain':
resp = original_flask_make_response(str(data), *args, **kwargs)
resp.headers['Content-Type'] = 'text/plain'
return resp
else:
raise InternalServerError()
def mediatypes(self):
"""Returns a list of requested mediatypes sent in the Accept header"""
return [h for h, q in sorted(request.accept_mimetypes,
key=operator.itemgetter(1), reverse=True)]
def representation(self, mediatype):
"""Allows additional representation transformers to be declared for the
api. Transformers are functions that must be decorated with this
method, passing the mediatype the transformer represents. Three
arguments are passed to the transformer:
* The data to be represented in the response body
* The http status code
* A dictionary of headers
The transformer should convert the data appropriately for the mediatype
and return a Flask response object.
Ex::
@api.representation('application/xml')
def xml(data, code, headers):
resp = make_response(convert_data_to_xml(data), code)
resp.headers.extend(headers)
return resp
"""
def wrapper(func):
self.representations[mediatype] = func
return func
return wrapper
def unauthorized(self, response):
""" Given a response, change it to ask for credentials """
if self.serve_challenge_on_401:
realm = current_app.config.get("HTTP_BASIC_AUTH_REALM", "flask-restful")
challenge = u"{0} realm=\"{1}\"".format("Basic", realm)
response.headers['WWW-Authenticate'] = challenge
return response
class Resource(MethodView):
"""
Represents an abstract RESTful resource. Concrete resources should
extend from this class and expose methods for each supported HTTP
method. If a resource is invoked with an unsupported HTTP method,
the API will return a response with status 405 Method Not Allowed.
Otherwise the appropriate method is called and passed all arguments
from the url rule used when adding the resource to an Api instance. See
:meth:`~flask_restful.Api.add_resource` for details.
"""
representations = None
method_decorators = []
def dispatch_request(self, *args, **kwargs):
# Taken from flask
#noinspection PyUnresolvedReferences
meth = getattr(self, request.method.lower(), None)
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Unimplemented method %r' % request.method
if isinstance(self.method_decorators, Mapping):
decorators = self.method_decorators.get(request.method.lower(), [])
else:
decorators = self.method_decorators
for decorator in decorators:
meth = decorator(meth)
resp = meth(*args, **kwargs)
if isinstance(resp, ResponseBase): # There may be a better way to test
return resp
representations = self.representations or OrderedDict()
#noinspection PyUnresolvedReferences
mediatype = request.accept_mimetypes.best_match(representations, default=None)
if mediatype in representations:
data, code, headers = unpack(resp)
resp = representations[mediatype](data, code, headers)
resp.headers['Content-Type'] = mediatype
return resp
return resp
def marshal(data, fields, envelope=None):
"""Takes raw data (in the form of a dict, list, object) and a dict of
fields to output and filters the data based on those fields.
:param data: the actual object(s) from which the fields are taken from
:param fields: a dict of whose keys will make up the final serialized
response output
:param envelope: optional key that will be used to envelop the serialized
response
>>> from flask_restful import fields, marshal
>>> data = { 'a': 100, 'b': 'foo' }
>>> mfields = { 'a': fields.Raw }
>>> marshal(data, mfields)
OrderedDict([('a', 100)])
>>> marshal(data, mfields, envelope='data')
OrderedDict([('data', OrderedDict([('a', 100)]))])
"""
def make(cls):
if isinstance(cls, type):
return cls()
return cls
if isinstance(data, (list, tuple)):
return (OrderedDict([(envelope, [marshal(d, fields) for d in data])])
if envelope else [marshal(d, fields) for d in data])
items = ((k, marshal(data, v) if isinstance(v, dict)
else make(v).output(k, data))
for k, v in fields.items())
return OrderedDict([(envelope, OrderedDict(items))]) if envelope else OrderedDict(items)
class marshal_with(object):
"""A decorator that apply marshalling to the return values of your methods.
>>> from flask_restful import fields, marshal_with
>>> mfields = { 'a': fields.Raw }
>>> @marshal_with(mfields)
... def get():
... return { 'a': 100, 'b': 'foo' }
...
...
>>> get()
OrderedDict([('a', 100)])
>>> @marshal_with(mfields, envelope='data')
... def get():
... return { 'a': 100, 'b': 'foo' }
...
...
>>> get()
OrderedDict([('data', OrderedDict([('a', 100)]))])
see :meth:`flask_restful.marshal`
"""
def __init__(self, fields, envelope=None):
"""
:param fields: a dict of whose keys will make up the final
serialized response output
:param envelope: optional key that will be used to envelop the serialized
response
"""
self.fields = fields
self.envelope = envelope
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
resp = f(*args, **kwargs)
if isinstance(resp, tuple):
data, code, headers = unpack(resp)
return marshal(data, self.fields, self.envelope), code, headers
else:
return marshal(resp, self.fields, self.envelope)
return wrapper
class marshal_with_field(object):
"""
A decorator that formats the return values of your methods with a single field.
>>> from flask_restful import marshal_with_field, fields
>>> @marshal_with_field(fields.List(fields.Integer))
... def get():
... return ['1', 2, 3.0]
...
>>> get()
[1, 2, 3]
see :meth:`flask_restful.marshal_with`
"""
def __init__(self, field):
"""
:param field: a single field with which to marshal the output.
"""
if isinstance(field, type):
self.field = field()
else:
self.field = field
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
resp = f(*args, **kwargs)
if isinstance(resp, tuple):
data, code, headers = unpack(resp)
return self.field.format(data), code, headers
return self.field.format(resp)
return wrapper
| bsd-3-clause | bd2573593fdc898cdec7dc2f811dee3d | 37.909218 | 129 | 0.613231 | 4.452453 | false | false | false | false |
mozilla/kitsune | kitsune/products/tests/__init__.py | 1 | 1578 | # -*- coding: utf-8 -*-
from django.template.defaultfilters import slugify
import factory
import factory.fuzzy
import factory.django
from kitsune.products.models import Product, Topic, Version
from kitsune.sumo.tests import FuzzyUnicode
class ProductFactory(factory.django.DjangoModelFactory):
class Meta:
model = Product
title = FuzzyUnicode()
slug = factory.LazyAttribute(lambda o: slugify(o.title))
description = FuzzyUnicode()
display_order = factory.fuzzy.FuzzyInteger(10)
visible = True
image = factory.django.ImageField()
image_offset = 0
image_cachebuster = FuzzyUnicode()
sprite_height = 100
class TopicFactory(factory.django.DjangoModelFactory):
class Meta:
model = Topic
title = FuzzyUnicode()
slug = factory.LazyAttribute(lambda o: slugify(o.title))
description = FuzzyUnicode()
image = factory.django.ImageField()
product = factory.SubFactory(ProductFactory)
display_order = factory.fuzzy.FuzzyInteger(10)
visible = True
in_aaq = factory.fuzzy.FuzzyChoice([True, False])
class VersionFactory(factory.django.DjangoModelFactory):
class Meta:
model = Version
min_version = factory.fuzzy.FuzzyDecimal(100)
max_version = factory.LazyAttribute(lambda obj: obj.min_version + 1)
name = factory.LazyAttribute(lambda obj: "Version %d" % obj.min_version)
slug = factory.LazyAttribute(lambda obj: "v%d" % obj.min_version)
visible = True
default = factory.fuzzy.FuzzyChoice([False, True])
product = factory.SubFactory(ProductFactory)
| bsd-3-clause | e689ea708795d0424298bb2816d80bac | 29.346154 | 76 | 0.718631 | 3.839416 | false | false | false | false |
mozilla/kitsune | kitsune/questions/tests/test_feeds.py | 1 | 4558 | from datetime import datetime, timedelta
from django.core.cache import cache
from pyquery import PyQuery as pq
from kitsune.products.tests import ProductFactory, TopicFactory
from kitsune.questions.feeds import QuestionsFeed, TaggedQuestionsFeed
from kitsune.questions.models import Question
from kitsune.questions.tests import QuestionFactory, TestCaseBase
from kitsune.sumo.templatetags.jinja_helpers import urlparams
from kitsune.sumo.urlresolvers import reverse
from kitsune.tags.tests import TagFactory
from kitsune.users.tests import UserFactory
class ForumTestFeeds(TestCaseBase):
def test_tagged_feed(self):
"""Test the tagged feed."""
t = TagFactory(name="green", slug="green")
q = QuestionFactory()
q.tags.add("green")
items = TaggedQuestionsFeed().items(t)
self.assertEqual(1, len(items))
self.assertEqual(q.id, items[0].id)
cache.clear()
q = QuestionFactory()
q.tags.add("green")
q.updated = datetime.now() + timedelta(days=1)
q.save()
items = TaggedQuestionsFeed().items(t)
self.assertEqual(2, len(items))
self.assertEqual(q.id, items[0].id)
def test_tagged_feed_link(self):
"""Make sure the tagged feed is discoverable on the questions page."""
TagFactory(name="green", slug="green")
url = urlparams(reverse("questions.list", args=["all"]), tagged="green")
response = self.client.get(url)
self.assertEqual(200, response.status_code)
doc = pq(response.content)
feed_links = doc('link[type="application/atom+xml"]')
self.assertEqual(2, len(feed_links))
self.assertEqual("Recently updated questions", feed_links[0].attrib["title"])
self.assertEqual("/en-US/questions/feed?product=all", feed_links[0].attrib["href"])
self.assertEqual("Recently updated questions tagged green", feed_links[1].attrib["title"])
self.assertEqual("/en-US/questions/tagged/green/feed", feed_links[1].attrib["href"])
def test_no_inactive_users(self):
"""Ensure that inactive users' questions don't appear in the feed."""
u = UserFactory(is_active=False)
q = Question(title="Test Question", content="Lorem Ipsum Dolor", creator_id=u.id)
q.save()
assert q.id not in [x.id for x in QuestionsFeed().items({})]
def test_question_feed_with_product(self):
"""Test that questions feeds with products work."""
p = ProductFactory()
url = reverse("questions.list", args=[p.slug])
res = self.client.get(url)
self.assertEqual(200, res.status_code)
doc = pq(res.content)
feed_links = doc('link[type="application/atom+xml"]')
feed = feed_links[0]
self.assertEqual(1, len(feed_links))
self.assertEqual("Recently updated questions", feed.attrib["title"])
self.assertEqual("/en-US/questions/feed?product=" + p.slug, feed.attrib["href"])
self.assertEqual(200, self.client.get(feed.attrib["href"]).status_code)
def test_question_feed_with_product_and_topic(self):
"""Test that questions feeds with products and topics work."""
p = ProductFactory()
t = TopicFactory(product=p)
url = urlparams(reverse("questions.list", args=[p.slug]), topic=t.slug)
res = self.client.get(url)
self.assertEqual(200, res.status_code)
doc = pq(res.content)
feed_links = doc('link[type="application/atom+xml"]')
feed = feed_links[0]
self.assertEqual(1, len(feed_links))
self.assertEqual("Recently updated questions", feed.attrib["title"])
self.assertEqual(
urlparams("/en-US/questions/feed", product=p.slug, topic=t.slug), feed.attrib["href"]
)
self.assertEqual(200, self.client.get(feed.attrib["href"]).status_code)
def test_question_feed_with_locale(self):
"""Test that questions feeds with products and topics work."""
url = reverse("questions.list", args=["all"], locale="pt-BR")
res = self.client.get(url)
self.assertEqual(200, res.status_code)
doc = pq(res.content)
feed_links = doc('link[type="application/atom+xml"]')
feed = feed_links[0]
self.assertEqual(1, len(feed_links))
self.assertEqual("Recently updated questions", feed.attrib["title"])
self.assertEqual(urlparams("/pt-BR/questions/feed?product=all"), feed.attrib["href"])
self.assertEqual(200, self.client.get(feed.attrib["href"]).status_code)
| bsd-3-clause | f43f9dc1a9ec7785e8abf33a609df575 | 43.252427 | 98 | 0.654892 | 3.760726 | false | true | false | false |
mozilla/kitsune | kitsune/wiki/events.py | 1 | 14820 | import difflib
import logging
from bleach import clean
from django.conf import settings
from django.contrib.sites.models import Site
from django.urls import reverse as django_reverse
from django.utils.translation import gettext_lazy as _lazy
from django.utils.translation import ugettext as _
from wikimarkup.parser import ALLOWED_ATTRIBUTES, ALLOWED_TAGS
from kitsune.sumo import email_utils
from kitsune.sumo.templatetags.jinja_helpers import add_utm
from kitsune.sumo.urlresolvers import reverse
from kitsune.tidings.events import Event, EventUnion, InstanceEvent
from kitsune.tidings.utils import hash_to_unsigned
from kitsune.wiki.models import Document
log = logging.getLogger("k.wiki.events")
def get_diff_for(doc, old_rev, new_rev):
fromfile = "[%s] %s #%s" % (doc.locale, doc.title, old_rev.id)
tofile = "[%s] %s #%s" % (doc.locale, doc.title, new_rev.id)
# Get diff
diff_parts = difflib.unified_diff(
old_rev.content.splitlines(1),
new_rev.content.splitlines(1),
fromfile=fromfile,
tofile=tofile,
)
# Join diff parts
# XXX this is super goofy
acc = ""
for d in diff_parts:
if isinstance(d, str):
acc = acc + d
else:
acc = acc + d.decode("utf8")
# Clean output
return clean(acc, ALLOWED_TAGS, ALLOWED_ATTRIBUTES)
def context_dict(revision, ready_for_l10n=False, revision_approved=False):
"""Return a dict that fills in the blanks in KB notification templates."""
diff = ""
l10n = revision.document.revisions.filter(is_ready_for_localization=True)
approved = revision.document.revisions.filter(is_approved=True)
if ready_for_l10n and l10n.count() > 1:
old_rev = l10n.order_by("-created")[1]
diff = get_diff_for(revision.document, old_rev, revision)
elif revision_approved and approved.count() > 1:
old_rev = approved.order_by("-created")[1]
diff = get_diff_for(revision.document, old_rev, revision)
elif revision.document.current_revision is not None:
old_rev = revision.document.current_revision
diff = get_diff_for(revision.document, old_rev, revision)
return {
"document_title": revision.document.title,
"creator": revision.creator,
"host": Site.objects.get_current().domain,
"diff": diff,
"summary": clean(revision.summary, ALLOWED_TAGS, ALLOWED_ATTRIBUTES),
"fulltext": clean(revision.content, ALLOWED_TAGS, ALLOWED_ATTRIBUTES),
}
class EditDocumentEvent(InstanceEvent):
"""Event fired when a certain document is edited"""
event_type = "wiki edit document"
content_type = Document
def __init__(self, revision):
super(EditDocumentEvent, self).__init__(revision.document)
self.revision = revision
def _mails(self, users_and_watches):
revision = self.revision
document = revision.document
log.debug("Sending edited notification email for document (id=%s)" % document.id)
subject = _lazy("{title} was edited by {creator}")
url = reverse("wiki.document_revisions", locale=document.locale, args=[document.slug])
context = context_dict(revision)
context["revisions_url"] = add_utm(url, "wiki-edit")
context["locale"] = document.locale
context["title"] = document.title
context["creator"] = revision.creator
context["comment"] = revision.comment
return email_utils.emails_with_users_and_watches(
subject=subject,
text_template="wiki/email/edited.ltxt",
html_template="wiki/email/edited.html",
context_vars=context,
users_and_watches=users_and_watches,
default_locale=document.locale,
)
def serialize(self):
"""
Serialize this event into a JSON-friendly dictionary.
"""
return {
"event": {"module": "kitsune.wiki.events", "class": "EditDocumentEvent"},
"instance": {
"module": "kitsune.wiki.models",
"class": "Revision",
"id": self.revision.id,
},
}
class _RevisionConstructor(object):
"""An event that receives a revision when constructed"""
def __init__(self, revision):
super(_RevisionConstructor, self).__init__()
self.revision = revision
class _BaseProductFilter(object):
"""A base class for product filters.
It adds a _filter_by_product method that filters down a list of
(user, watches) to only the users watching the products for the
revision.
"""
def _filter_by_product(self, all_watchers):
products = self.revision.document.get_products()
product_hashes = [hash_to_unsigned(s.slug) for s in products]
watchers_and_watches = []
# Weed out the users that have a product filter that isn't one of the
# document's products.
for user, watches in all_watchers:
for watch in watches:
# Get the product filters for the watch, if any.
prods = watch.filters.filter(name="product").values_list("value", flat=True)
# If there are no product filters, they are watching them all.
if len(prods) == 0:
watchers_and_watches.append((user, watches))
break
# Otherwise, check if they are watching any of the document's
# products.
for prod in prods:
if prod in product_hashes:
watchers_and_watches.append((user, watches))
break
return watchers_and_watches
class _ProductFilter(_BaseProductFilter):
"""An event that receives a revision when constructed and filters according
to that revision's document's products"""
filters = {"product"}
# notify(), stop_notifying(), and is_notifying() take...
# (user_or_email, product=optional_product)
def _users_watching(self, **kwargs):
# Get the users watching any or all products.
users = list(self._users_watching_by_filter(**kwargs))
# Weed out the users that have a product filter that isn't one of the
# document's products.
return self._filter_by_product(users)
class _LocaleAndProductFilter(_BaseProductFilter):
"""An event that receives a revision when constructed and filters according
to that revision's document's locale and products."""
filters = {"locale", "product"}
# notify(), stop_notifying(), and is_notifying() take...
# (user_or_email, locale=some_locale, product=optional_product)
def _users_watching(self, **kwargs):
locale = self.revision.document.locale
# Get the users just subscribed to the locale (any and all products).
users = list(self._users_watching_by_filter(locale=locale, **kwargs))
# Weed out the users that have a product filter that isn't one of the
# document's products.
return self._filter_by_product(users)
class ReviewableRevisionInLocaleEvent(_RevisionConstructor, _LocaleAndProductFilter, Event):
"""Event fired when any revision in a certain locale is ready for review"""
# Our event_type suffices to limit our scope, so we don't bother
# setting content_type.
event_type = "reviewable wiki in locale"
def _mails(self, users_and_watches):
revision = self.revision
document = revision.document
log.debug("Sending ready for review email for revision (id=%s)" % revision.id)
subject = _lazy("{title} is ready for review ({creator})")
url = reverse(
"wiki.review_revision",
locale=document.locale,
args=[document.slug, revision.id],
)
context = context_dict(revision)
context["revision_url"] = add_utm(url, "wiki-ready-review")
context["locale"] = document.locale
context["title"] = document.title
context["creator"] = revision.creator
context["comment"] = revision.comment
users = []
for u, w in users_and_watches:
if document.allows(u, "review_revision"):
users.append((u, w))
return email_utils.emails_with_users_and_watches(
subject=subject,
text_template="wiki/email/ready_for_review.ltxt",
html_template="wiki/email/ready_for_review.html",
context_vars=context,
users_and_watches=users,
default_locale=document.locale,
)
def serialize(self):
"""
Serialize this event into a JSON-friendly dictionary.
"""
return {
"event": {"module": "kitsune.wiki.events", "class": "ReviewableRevisionInLocaleEvent"},
"instance": {
"module": "kitsune.wiki.models",
"class": "Revision",
"id": self.revision.id,
},
}
class ReadyRevisionEvent(_RevisionConstructor, _ProductFilter, Event):
"""Event fired when a revision becomes ready for l10n."""
event_type = "ready wiki"
def _mails(self, users_and_watches):
"""Send readiness mails."""
revision = self.revision
document = revision.document
log.debug("Sending ready notifications for revision (id=%s)" % revision.id)
subject = _lazy("{title} has a revision ready for localization")
url = django_reverse("wiki.translate", args=[document.slug])
context = context_dict(revision, ready_for_l10n=True)
context["l10n_url"] = add_utm(url, "wiki-ready-l10n")
context["title"] = document.title
return email_utils.emails_with_users_and_watches(
subject=subject,
text_template="wiki/email/ready_for_l10n.ltxt",
html_template="wiki/email/ready_for_l10n.html",
context_vars=context,
users_and_watches=users_and_watches,
default_locale=document.locale,
)
def serialize(self):
"""
Serialize this event into a JSON-friendly dictionary.
"""
return {
"event": {"module": "kitsune.wiki.events", "class": "ReadyRevisionEvent"},
"instance": {
"module": "kitsune.wiki.models",
"class": "Revision",
"id": self.revision.id,
},
}
class ApproveRevisionInLocaleEvent(_RevisionConstructor, _LocaleAndProductFilter, Event):
"""Event fed to a union when any revision in a certain locale is approved
Not intended to be fired individually
"""
# No other content types have a concept of approval, so we don't bother
# setting content_type.
event_type = "approved wiki in locale"
def serialize(self):
"""
Serialize this event into a JSON-friendly dictionary.
"""
return {
"event": {"module": "kitsune.wiki.events", "class": "ApproveRevisionInLocaleEvent"},
"instance": {
"module": "kitsune.wiki.models",
"class": "Revision",
"id": self.revision.id,
},
}
class ApprovedOrReadyUnion(EventUnion):
"""
Event union fired when a revision is approved and also possibly ready for localization.
"""
def __init__(self, revision):
self.revision = revision
events = [ApproveRevisionInLocaleEvent(revision)]
if revision.is_ready_for_localization:
events.append(ReadyRevisionEvent(revision))
super(ApprovedOrReadyUnion, self).__init__(*events)
def _mails(self, users_and_watches):
"""Send approval or readiness mails, as appropriate.
If a given user is watching the Ready event and the revision
is in fact ready, say so. Otherwise, just send the Approval
email.
"""
revision = self.revision
document = revision.document
is_ready = revision.is_ready_for_localization
log.debug("Sending approved/ready notifications for revision (id=%s)" % revision.id)
# Localize the subject and message with the appropriate
# context. If there is an error, fall back to English.
@email_utils.safe_translation
def _make_mail(locale, user, watches):
if is_ready and ReadyRevisionEvent.event_type in (w.event_type for w in watches):
c = context_dict(revision, ready_for_l10n=True)
# TODO: Expose all watches
c["watch"] = watches[0]
url = reverse("wiki.translate", args=[document.slug], locale=locale)
c["l10n_url"] = add_utm(url, "wiki-ready-l10n")
subject = _("{title} has a revision ready for localization")
text_template = "wiki/email/ready_for_l10n.ltxt"
html_template = "wiki/email/ready_for_l10n.html"
else:
c = context_dict(revision, revision_approved=True)
approved_url = reverse(
"wiki.document", locale=document.locale, args=[document.slug]
)
c["document_url"] = add_utm(approved_url, "wiki-approved")
# TODO: Expose all watches.
c["watch"] = watches[0]
c["reviewer"] = revision.reviewer
subject = _("{title} ({locale}) has a new approved revision ({reviewer})")
text_template = "wiki/email/approved.ltxt"
html_template = "wiki/email/approved.html"
subject = subject.format(
title=document.title,
reviewer=revision.reviewer.username,
locale=document.locale,
)
mail = email_utils.make_mail(
subject=subject,
text_template=text_template,
html_template=html_template,
context_vars=c,
from_email=settings.TIDINGS_FROM_ADDRESS,
to_email=user.email,
)
return mail
for user, watches in users_and_watches:
# Figure out the locale to use for l10n.
if hasattr(user, "profile"):
locale = user.profile.locale
else:
locale = document.locale
yield _make_mail(locale, user, watches)
def serialize(self):
"""
Serialize this event into a JSON-friendly dictionary.
"""
return {
"event": {"module": "kitsune.wiki.events", "class": "ApprovedOrReadyUnion"},
"instance": {
"module": "kitsune.wiki.models",
"class": "Revision",
"id": self.revision.id,
},
}
| bsd-3-clause | 3f8313d49d76c77452aabb0057bfb80d | 34.625 | 99 | 0.603509 | 4.109817 | false | false | false | false |
mozilla/kitsune | kitsune/flagit/migrations/0001_initial.py | 1 | 2059 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FlaggedObject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('status', models.IntegerField(default=0, db_index=True, choices=[(0, 'Pending'), (1, 'Accepted and Fixed'), (2, 'Rejected')])),
('reason', models.CharField(max_length=64, choices=[(b'spam', 'Spam or other unrelated content'), (b'language', 'Inappropriate language/dialog'), (b'bug_support', 'Misplaced bug report or support request'), (b'abuse', 'Abusive content'), (b'other', 'Other (please specify)')])),
('notes', models.TextField(default=b'', blank=True)),
('created', models.DateTimeField(default=datetime.datetime.now, db_index=True)),
('handled', models.DateTimeField(default=datetime.datetime.now, db_index=True)),
('content_type', models.ForeignKey(on_delete=models.CASCADE, to='contenttypes.ContentType')),
('creator', models.ForeignKey(on_delete=models.CASCADE, related_name='flags', to=settings.AUTH_USER_MODEL)),
('handled_by', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['created'],
'permissions': (('can_moderate', 'Can moderate flagged objects'),),
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='flaggedobject',
unique_together={('content_type', 'object_id', 'creator')},
),
]
| bsd-3-clause | 25a2e83ce9909f6e571886bcaefc21f9 | 49.219512 | 294 | 0.603691 | 4.210634 | false | false | false | false |
mozilla/kitsune | kitsune/kpi/api.py | 1 | 15594 | from collections import defaultdict
from datetime import date, timedelta
from operator import itemgetter
from django.conf import settings
from django.core.cache import cache
from django.db import connections, router
from django.db.models import Count, F
import django_filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, serializers, viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from kitsune.kpi.models import (
Cohort,
Metric,
MetricKind,
RetentionMetric,
KB_ENUS_CONTRIBUTORS_METRIC_CODE,
KB_L10N_CONTRIBUTORS_METRIC_CODE,
L10N_METRIC_CODE,
SUPPORT_FORUM_CONTRIBUTORS_METRIC_CODE,
VISITORS_METRIC_CODE,
EXIT_SURVEY_YES_CODE,
EXIT_SURVEY_NO_CODE,
EXIT_SURVEY_DONT_KNOW_CODE,
)
from kitsune.questions.models import Question, Answer, AnswerVote
from kitsune.wiki.models import HelpfulVote
from functools import reduce
class CachedAPIView(APIView):
    """An APIView that caches the objects to be returned.

    Subclasses must implement the get_objects() method.
    """

    def _cache_key(self, request):
        """Build a cache key unique to this view class and the request's
        query string (order-insensitive, since params are sorted)."""
        params = ["%s=%s" % (key, value) for key, value in request.GET.items()]
        return "{viewname}:{params}".format(
            viewname=self.__class__.__name__, params=":".join(sorted(params))
        )

    def get(self, request):
        """Return the cached objects, computing and caching them on a miss."""
        cache_key = self._cache_key(request)
        objs = cache.get(cache_key)
        if objs is None:
            objs = self.get_objects(request)
            # cache.add() is a no-op if a concurrent request already cached
            # this key, so the first computed value wins.
            cache.add(cache_key, objs, settings.CACHE_MEDIUM_TIMEOUT)
        return Response({"objects": objs})

    def get_objects(self, request):
        """Returns a list of dicts the API view will return."""
        raise NotImplementedError("Must be overridden in subclass")
class SearchClickthroughMetricList(CachedAPIView):
    """The API list view for search click-through rate metrics."""

    engine = "elastic"

    @property
    def searches_kind(self):
        """Metric-kind code for the searches (denominator) series."""
        return "search clickthroughs:%s:searches" % self.engine

    @property
    def clicks_kind(self):
        """Metric-kind code for the clicks (numerator) series."""
        return "search clickthroughs:%s:clicks" % self.engine

    def get_objects(self, request):
        """Return all the ratios.

        Or, if a ``min_start`` query param is present, return the (potentially
        limited) ratios later than or equal to that. ``min_start`` should be
        something like ``2001-07-30``.

        If, somehow, half a ratio is missing, that ratio is not returned.
        """
        # Validate min_start from the query string; discard it if malformed.
        min_start = request.GET.get("min_start")
        if min_start:
            try:
                _parse_date(min_start)
            except (ValueError, TypeError):
                min_start = None

        # I'm not sure you can join a table to itself with the ORM.
        cursor = _cursor()
        # n for numerator, d for denominator
        query = """
            SELECT n.start, n.value, d.value
            FROM kpi_metric n
            INNER JOIN kpi_metric d ON n.start=d.start
            WHERE n.kind_id=(SELECT id FROM kpi_metrickind WHERE code=%s)
            AND d.kind_id=(SELECT id FROM kpi_metrickind WHERE code=%s)
            """
        args = [self.clicks_kind, self.searches_kind]
        if min_start:
            query += "AND n.start>=%s"
            args.append(min_start)
        cursor.execute(query, args)

        return [
            {"start": start, "clicks": clicks, "searches": searches}
            for start, clicks, searches in reversed(cursor.fetchall())
        ]
class QuestionsMetricList(CachedAPIView):
    """The API list view for support forum metrics.

    * Number of questions asked
    * Number of questions responded to within 24 hours
    * Number of questions responded to within 72 hours
    * Number of questions solved
    """

    def get_objects(self, request):
        """Return merged daily counts of asked/solved/responded questions."""
        # Optional query-param filters.
        locale = request.GET.get("locale")
        product = request.GET.get("product")
        # Set up the query for the data we need.
        qs = _daily_qs_for(Question)
        # Don't count locked questions
        qs = qs.exclude(is_locked=True)
        # Don't count spam questions
        qs = qs.exclude(is_spam=True)
        if locale:
            qs = qs.filter(locale=locale)
        if product:
            qs = qs.filter(product__slug=product)
        # All answers that were created within 3 days of the question.
        aq_72 = Answer.objects.filter(created__lt=F("question__created") + timedelta(days=3))
        # Questions of said answers.
        rs_72 = qs.filter(id__in=aq_72.values_list("question"))
        # All answers that were created within 24 hours of the question.
        aq_24 = Answer.objects.filter(created__lt=F("question__created") + timedelta(hours=24))
        # Questions of said answers.
        rs_24 = qs.filter(id__in=aq_24.values_list("question"))
        # Questions with a solution.
        qs_with_solutions = qs.exclude(solution_id=None)
        return merge_results(
            questions=qs, solved=qs_with_solutions, responded_72=rs_72, responded_24=rs_24
        )
class VoteMetricList(CachedAPIView):
    """The API list view for vote metrics."""

    def get_objects(self, request):
        """Return merged helpful/total vote counts for KB articles and answers."""
        kb_votes = _qs_for(HelpfulVote)
        answer_votes = _qs_for(AnswerVote)
        return merge_results(
            kb_votes=kb_votes,
            kb_helpful=kb_votes.filter(helpful=True),
            ans_votes=answer_votes,
            ans_helpful=answer_votes.filter(helpful=True),
        )
class KBVoteMetricList(CachedAPIView):
    """The API list view for KB vote metrics."""

    def get_objects(self, request):
        """Return daily helpful/total KB vote counts, optionally filtered."""
        # Optional query-param filters.
        locale = request.GET.get("locale")
        product = request.GET.get("product")
        qs_kb_votes = HelpfulVote.objects.filter(created__gte=date(2011, 1, 1))
        if locale:
            qs_kb_votes = qs_kb_votes.filter(revision__document__locale=locale)
        # "null" presumably arrives as a literal string meaning "no
        # product" -- treated the same as absent.  TODO confirm callers.
        if product and product != "null":
            qs_kb_votes = qs_kb_votes.filter(revision__document__products__slug=product)  # WHOA
        # Group by calendar day via raw extract() SQL; one count per day.
        qs_kb_votes = (
            qs_kb_votes.extra(
                select={
                    "day": "extract( day from wiki_helpfulvote.created )",
                    "month": "extract( month from wiki_helpfulvote.created )",
                    "year": "extract( year from wiki_helpfulvote.created )",
                }
            )
            .values("year", "month", "day")
            .annotate(count=Count("created"))
        )
        # Filter on helpful
        qs_kb_helpful_votes = qs_kb_votes.filter(helpful=True)
        return merge_results(kb_votes=qs_kb_votes, kb_helpful=qs_kb_helpful_votes)
class ContributorsMetricList(CachedAPIView):
    """The API list view for active contributor metrics.

    * en-US KB contributors
    * non-en-US contributors
    * Support Forum contributors
    """

    def get_objects(self, request):
        """Return one dict per metric end-date with the three contributor counts."""
        def metrics_for(code):
            # Metrics of the given kind, newest first.
            return Metric.objects.filter(
                kind=MetricKind.objects.get(code=code)
            ).order_by("-start")

        # Collate the three series into {end_date: {label: value}}.
        by_date = {}

        def collate(metrics_qs, label):
            for metric in metrics_qs:
                by_date.setdefault(metric.end, {})[label] = metric.value

        collate(metrics_for(KB_ENUS_CONTRIBUTORS_METRIC_CODE), "en_us")
        collate(metrics_for(KB_L10N_CONTRIBUTORS_METRIC_CODE), "non_en_us")
        collate(metrics_for(SUPPORT_FORUM_CONTRIBUTORS_METRIC_CODE), "support_forum")

        # Flatten to [{date, en_us, non_en_us, support_forum}, ...], newest first.
        rows = [dict(date=end, **values) for end, values in list(by_date.items())]
        return [dict(**row) for row in sorted(rows, key=itemgetter("date"), reverse=True)]
class VisitorsMetricList(CachedAPIView):
    """The API list view for visitor metrics."""

    def get_objects(self, request):
        """Return [{date, visitors}] rows, newest first."""
        visitor_kind = MetricKind.objects.get(code=VISITORS_METRIC_CODE)
        metrics = Metric.objects.filter(kind=visitor_kind).order_by("-start")
        return [{"date": metric.start, "visitors": metric.value} for metric in metrics]
class L10nCoverageMetricList(CachedAPIView):
    """The API list view for L10n coverage metrics."""

    def get_objects(self, request):
        """Return [{date, coverage}] rows, newest first."""
        coverage_kind = MetricKind.objects.get(code=L10N_METRIC_CODE)
        metrics = Metric.objects.filter(kind=coverage_kind).order_by("-start")
        return [{"date": metric.start, "coverage": metric.value} for metric in metrics]
class ExitSurveyMetricList(CachedAPIView):
    """The API list view for exit survey metrics."""

    def get_objects(self, request):
        """Return one dict per metric end-date with yes/no/dont_know counts."""
        def metrics_for(code):
            # Metrics of the given kind, newest first.
            return Metric.objects.filter(
                kind=MetricKind.objects.get(code=code)
            ).order_by("-start")

        # Collate the three series into {end_date: {label: value}}.
        by_date = {}

        def collate(metrics_qs, label):
            for metric in metrics_qs:
                by_date.setdefault(metric.end, {})[label] = metric.value

        collate(metrics_for(EXIT_SURVEY_YES_CODE), "yes")
        collate(metrics_for(EXIT_SURVEY_NO_CODE), "no")
        collate(metrics_for(EXIT_SURVEY_DONT_KNOW_CODE), "dont_know")

        # Flatten to [{date, yes, no, dont_know}, ...], newest first.
        rows = [dict(date=end, **values) for end, values in list(by_date.items())]
        return [dict(**row) for row in sorted(rows, key=itemgetter("date"), reverse=True)]
class CSATMetricList(CachedAPIView):
    """The API list view for contributor CSAT metrics."""

    # Subclasses set this to the MetricKind code they report on.
    code = None

    def get_objects(self, request):
        """Return the last 30 days of CSAT values, newest first."""
        kind = MetricKind.objects.get(code=self.code)
        cutoff = date.today() - timedelta(days=30)
        recent = Metric.objects.filter(start__gte=cutoff, kind=kind).order_by("-start")
        return [{"date": metric.start, "csat": metric.value} for metric in recent]
def _daily_qs_for(model_cls):
    """Return the daily grouped queryset we need for model_cls."""
    # Limit to newer than 2011/1/1 and active creators.
    # The extract(...) expressions are raw SQL pushed through .extra();
    # grouping on the (year, month, day) triple yields one count per
    # calendar day.
    return (
        model_cls.objects.filter(created__gte=date(2011, 1, 1), creator__is_active=1)
        .extra(
            select={
                "day": "extract( day from created )",
                "month": "extract( month from created )",
                "year": "extract( year from created )",
            }
        )
        .values("year", "month", "day")
        .annotate(count=Count("created"))
    )
def _qs_for(model_cls):
    """Return the monthly grouped queryset we need for model_cls."""
    # NOTE(review): despite the docstring, the grouping below is per-day
    # (year, month, day) -- identical to _daily_qs_for minus the
    # creator__is_active filter.  Confirm whether "monthly" refers to how
    # the result is consumed downstream.
    return (
        model_cls.objects.filter(created__gte=date(2011, 1, 1))
        .extra(
            select={
                "day": "extract( day from created )",
                "month": "extract( month from created )",
                "year": "extract( year from created )",
            }
        )
        .values("year", "month", "day")
        .annotate(count=Count("created"))
    )
def _start_date():
"""The date from which we start querying monthly data."""
# Lets start on the first day of the month a year ago
year_ago = date.today() - timedelta(days=365)
return date(year_ago.year, year_ago.month, 1)
def _remap_date_counts(**kwargs):
"""Remap the query result.
kwargs = {
<label>=[
{'count': 45, 'month': 2L, 'year': 2010L},
{'count': 6, 'month': 2L, 'year': 2010L}, # Note duplicate date
{'count': 12, 'month': 1L, 'year': 2010L},
{'count': 1, 'month': 12L, 'year': 2009L},
...
],
<label>=[{...},...],
}
returns [
{
datetime.date(2009, 12, 1): {'<label>': 1},
datetime.date(2010, 1, 1): {'<label>': 12},
datetime.date(2010, 2, 1): {'<label>': 51} # Note summed counts
...
},
...]
"""
for label, qs in kwargs.items():
res = defaultdict(lambda: {label: 0})
# For each date mentioned in qs, sum up the counts for that day
# Note: days may be duplicated
for x in qs:
key = date(x["year"], x["month"], x.get("day", 1))
res[key][label] += x["count"]
yield res
def merge_results(**kwargs):
    """Merge per-label date-count querysets into one sorted list of dicts.

    Each keyword maps a label to a date-grouped queryset; the result is
    ``[{date, <label>: count, ...}, ...]`` ordered newest first.
    """
    merged = reduce(_merge_results, _remap_date_counts(**kwargs))
    rows = [dict(date=day, **counts) for day, counts in list(merged.items())]
    rows.sort(key=itemgetter("date"), reverse=True)
    return [dict(**row) for row in rows]
def _merge_results(x, y):
"""Merge query results arrays into one array.
From:
[{"date": "2011-10-01", "votes": 3},...]
and
[{"date": "2011-10-01", "helpful": 7},...]
To:
[{"date": "2011-10-01", "votes": 3, "helpful": 7},...]
"""
return dict(
(s, dict(list(x.get(s, {}).items()) + list(y.get(s, {}).items())))
for s in set(list(x.keys()) + list(y.keys()))
)
def _cursor():
    """Return a DB cursor for reading."""
    # Route through whichever connection the router picks for reads of
    # the Metric model (e.g. a read replica).
    return connections[router.db_for_read(Metric)].cursor()
def _parse_date(text):
"""Parse a text date like ``"2004-08-30`` into a triple of numbers.
May fling ValueErrors or TypeErrors around if the input or date is invalid.
It should at least be a string--I mean, come on.
"""
return tuple(int(i) for i in text.split("-"))
class RetentionMetricSerializer(serializers.ModelSerializer):
    """DRF serializer for a single retention data point."""

    # Explicit field declarations mirroring the model fields.
    start = serializers.DateField()
    end = serializers.DateField()
    size = serializers.IntegerField()

    class Meta:
        model = RetentionMetric
        fields = (
            "start",
            "end",
            "size",
        )
class CohortSerializer(serializers.ModelSerializer):
    """DRF serializer for a cohort plus its nested retention metrics."""

    # Render the kind by its code string rather than its primary key.
    kind = serializers.SlugRelatedField(slug_field="code", read_only=True)
    start = serializers.DateField()
    end = serializers.DateField()
    size = serializers.IntegerField()
    # Nested list of this cohort's retention data points.
    retention_metrics = RetentionMetricSerializer(many=True)

    class Meta:
        model = Cohort
        fields = (
            "kind",
            "start",
            "end",
            "size",
            "retention_metrics",
        )
class CohortFilter(django_filters.FilterSet):
    """Query-param filters for the cohort endpoint."""

    # ?kind=<code> matches on the related kind's code field.
    kind = django_filters.CharFilter(field_name="kind__code")
    # ?start=YYYY-MM-DD keeps cohorts starting on/after that date...
    start = django_filters.DateFilter(lookup_expr="gte")
    # ...and ?end=YYYY-MM-DD keeps those ending on/before it.
    end = django_filters.DateFilter(lookup_expr="lte")

    class Meta:
        model = Cohort
        fields = (
            "kind",
            "start",
            "end",
        )
class CohortViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list/detail endpoint for cohorts."""

    queryset = Cohort.objects.all()
    serializer_class = CohortSerializer
    filterset_class = CohortFilter
    filter_backends = [
        DjangoFilterBackend,
        filters.OrderingFilter,
    ]
    # Only ``start`` may be used with ?ordering=; ascending by default.
    ordering_fields = [
        "start",
    ]
    ordering = ("start",)
| bsd-3-clause | 9e92692610072995c03e8452a373cd62 | 31.352697 | 96 | 0.602796 | 3.727055 | false | false | false | false |
mozilla/kitsune | kitsune/wiki/badges.py | 1 | 1631 | from django.conf import settings
from django.db.models.signals import post_save
from kitsune.wiki.models import Revision
# Year-agnostic badge templates: code .format()s the current year into
# these to get-or-create the actual Badge instances.  These strings must
# not be l10n-ized here -- the badge title and description strings get
# l10n-ized elsewhere.
WIKI_BADGES = {
    "kb-badge": {
        "slug": "{year}-kb-badge",
        "title": "{year} KB Badge",
        "description": "This badge is awarded to contributors with 10 "
        "approved English edits during {year}.",
    },
    "l10n-badge": {
        "slug": "{year}-l10n-badge",
        "title": "{year} L10n Badge",
        "description": "This badge is awarded to contributors with 10 "
        "approved translations edits during {year}.",
    },
}
def on_revision_save(sender, instance, **kwargs):
    """Handle the revision save signal.

    * We award the KB badge on 10 approved en-US edits.
    * We award the L10n badge on 10 approved translation edits.
    """
    rev = instance
    year = rev.created.year
    creator = rev.creator
    # We only care about approved revisions.
    if not rev.is_approved:
        return
    # The badge to be awarded depends on the locale: default-language
    # revisions count toward the KB badge, everything else toward L10n.
    if rev.document.locale == settings.WIKI_DEFAULT_LANGUAGE:
        badge_template = WIKI_BADGES["kb-badge"]
    else:
        badge_template = WIKI_BADGES["l10n-badge"]
    # Imported here rather than at module scope -- presumably to avoid a
    # circular import between wiki and its tasks module; TODO confirm.
    from kitsune.wiki.tasks import maybe_award_badge
    # Hand off asynchronously; the task decides whether to actually award.
    maybe_award_badge.delay(badge_template, year, creator.id)
def register_signals():
    """Connect the revision post-save hook so badge awarding runs on save."""
    post_save.connect(on_revision_save, sender=Revision)
| bsd-3-clause | 9aea6f577e81d544c8b1bf6a4ae474d1 | 30.365385 | 71 | 0.664623 | 3.568928 | false | false | false | false |
web2py/pydal | pydal/dialects/teradata.py | 1 | 3400 | from .._compat import basestring
from ..adapters.teradata import Teradata
from . import dialects, sqltype_for
from .base import SQLDialect
@dialects.register_for(Teradata)
class TeradataDialect(SQLDialect):
    """pyDAL SQL dialect for Teradata: type mappings plus the
    Teradata-specific LEFT JOIN, SELECT (``TOP n``) and truncate forms."""

    @sqltype_for("integer")
    def type_integer(self):
        return "INT"

    @sqltype_for("text")
    def type_text(self):
        return "VARCHAR(2000)"

    @sqltype_for("json")
    def type_json(self):
        return "VARCHAR(4000)"

    @sqltype_for("float")
    def type_float(self):
        return "REAL"

    # list:* types are stored serialized, reusing the json column type.
    @sqltype_for("list:integer")
    def type_list_integer(self):
        return self.types["json"]

    @sqltype_for("list:string")
    def type_list_string(self):
        return self.types["json"]

    @sqltype_for("list:reference")
    def type_list_reference(self):
        return self.types["json"]

    @sqltype_for("bigint")
    def type_bigint(self):
        return "BIGINT"

    @sqltype_for("id")
    def type_id(self):
        return "INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL"

    @sqltype_for("big-id")
    def type_big_id(self):
        return "BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL"

    @sqltype_for("reference")
    def type_reference(self):
        return "INT"

    @sqltype_for("big-reference")
    def type_big_reference(self):
        return "BIGINT"

    @sqltype_for("geometry")
    def type_geometry(self):
        return "ST_GEOMETRY"

    @sqltype_for("reference FK")
    def type_reference_fk(self):
        return " REFERENCES %(foreign_key)s "

    @sqltype_for("reference TFK")
    def type_reference_tfk(self):
        return (
            " FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s"
            + " (%(foreign_key)s)"
        )

    def left_join(self, val, query_env={}):
        # Left join must always have an ON clause.
        # NOTE: the mutable default ``query_env={}`` is only read and
        # forwarded, never mutated, so it is benign here.
        if not isinstance(val, basestring):
            val = self.expand(val, query_env=query_env)
        return "LEFT OUTER JOIN %s" % val

    def select(
        self,
        fields,
        tables,
        where=None,
        groupby=None,
        having=None,
        orderby=None,
        limitby=None,
        distinct=False,
        for_update=False,
        with_cte=None,
    ):
        """Render a SELECT statement.

        Row limiting uses ``TOP lmax``; the lower bound of ``limitby``
        (``lmin``) is ignored and ``offset`` always stays empty below --
        there is no offset clause emitted by this dialect.
        """
        dst, whr, grp, order, limit, offset, upd = "", "", "", "", "", "", ""
        if distinct is True:
            dst = " DISTINCT"
        elif distinct:
            # A truthy non-True value is treated as a field list.
            dst = " DISTINCT ON (%s)" % distinct
        if where:
            whr = " %s" % self.where(where)
        if groupby:
            grp = " GROUP BY %s" % groupby
        if having:
            grp += " HAVING %s" % having
        if orderby:
            order = " ORDER BY %s" % orderby
        if limitby:
            (lmin, lmax) = limitby
            # Only the upper bound maps onto TOP; lmin is unused.
            limit = " TOP %i" % lmax
        if for_update:
            upd = " FOR UPDATE"
        if with_cte:
            recursive, cte = with_cte
            recursive = " RECURSIVE" if recursive else ""
            with_cte = "WITH%s %s " % (recursive, cte)
        else:
            with_cte = ""
        return "%sSELECT%s%s %s FROM %s%s%s%s%s%s;" % (
            with_cte,
            dst,
            limit,
            fields,
            tables,
            whr,
            grp,
            order,
            offset,
            upd,
        )

    def truncate(self, table, mode=""):
        # Truncation is emulated with an unconditional DELETE; ``mode``
        # is accepted for interface compatibility but ignored.
        return ["DELETE FROM %s ALL;" % table._rname]
| bsd-3-clause | 3620446674fbcbd161b1dde5e3f07445 | 24.954198 | 77 | 0.531471 | 3.667745 | false | false | false | false |
web2py/pydal | pydal/tools/tags.py | 1 | 1946 | import datetime
import functools
from .. import Field
from ..validators import *
class Tags:
    """Hierarchical tag storage for rows of ``table``.

    Tags live in a companion table named ``<table>_tag_<name>`` as
    '/'-wrapped paths (e.g. ``"/a/b/"``) so that prefix queries can match
    a tag together with all of its sub-tags.
    """

    def __init__(self, table, name="default"):
        self.table = table
        db = table._db
        # One row per (record, tag path) pair.
        self.tag_table = db.define_table(
            table._tablename + "_tag_" + name,
            Field("tagpath"),
            Field("record_id", table),
        )
        db.commit()

    def get(self, record_id):
        """Return the list of (unwrapped) tags attached to ``record_id``."""
        tag_table = self.tag_table
        db = tag_table._db
        matches = db(tag_table.record_id == record_id).select(tag_table.tagpath)
        return [match.tagpath.strip("/") for match in matches]

    def add(self, record_id, tags):
        """Attach ``tags`` (one tag or a list) to ``record_id``, skipping duplicates."""
        tag_table = self.tag_table
        db = tag_table._db
        if not isinstance(tags, list):
            tags = [tags]
        for tag in tags:
            wrapped = "/%s/" % tag.strip("/")
            already_there = db(
                (tag_table.record_id == record_id) & (tag_table.tagpath == wrapped)
            ).count()
            if not already_there:
                tag_table.insert(record_id=record_id, tagpath=wrapped)

    def remove(self, record_id, tags):
        """Detach ``tags`` (one tag or a list) from ``record_id``."""
        tag_table = self.tag_table
        db = tag_table._db
        if not isinstance(tags, list):
            tags = [tags]
        wrapped = ["/%s/" % tag.strip("/") for tag in tags]
        db(
            (tag_table.record_id == record_id) & (tag_table.tagpath.belongs(wrapped))
        ).delete()

    def find(self, tags, mode="and"):
        """Build a query selecting records tagged with ``tags``.

        ``mode="and"`` requires all tags; anything else means "any of
        them".  Matching is by path prefix, so a tag also matches its
        sub-tags.
        """
        table = self.table
        tag_table = self.tag_table
        db = tag_table._db
        if not isinstance(tags, list):
            tags = [tags]
        conditions = []
        for tag in tags:
            wrapped = "/%s/" % tag.strip("/")
            tagged_ids = db(tag_table.tagpath.startswith(wrapped))._select(
                tag_table.record_id
            )
            conditions.append(table.id.belongs(tagged_ids))

        def combine(a, b):
            return (a & b) if mode == "and" else (a | b)

        return functools.reduce(combine, conditions)
| bsd-3-clause | d2710f3d15125f91e01f124175e5ef0e | 30.901639 | 83 | 0.517986 | 3.685606 | false | false | false | false |
web2py/pydal | pydal/adapters/sap.py | 1 | 1460 | import re
from .._compat import integer_types, long
from . import adapters
from .base import SQLAdapter
@adapters.register_for("sapdb")
class SAPDB(SQLAdapter):
dbengine = "sapdb"
drivers = ("sapdb",)
REGEX_URI = (
"^(?P<user>[^:@]+)(:(?P<password>[^@]*))?"
r"@(?P<host>[^:/]+|\[[^\]]+\])/(?P<db>[^?]+)$"
)
def _initialize_(self):
super(SAPDB, self)._initialize_()
ruri = self.uri.split("://", 1)[1]
m = re.match(self.REGEX_URI, ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = self.credential_decoder(m.group("user"))
password = self.credential_decoder(m.group("password"))
if password is None:
password = ""
host = m.group("host")
db = m.group("db")
self.driver_args.update(user=user, password=password, database=db, host=host)
def connector(self):
self.driver.connect(**self.driver_args)
def lastrowid(self, table):
self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
return long(self.cursor.fetchone()[0])
def create_sequence_and_triggers(self, query, table, **args):
self.execute("CREATE SEQUENCE %s;" % table._sequence_name)
self.execute(
"ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');"
% (table._rname, table._id._rname, table._sequence_name)
)
self.execute(query)
| bsd-3-clause | 39f672f0ac1f6c902a1353a7452cb7a1 | 31.444444 | 85 | 0.574658 | 3.451537 | false | false | false | false |
openstates/billy | billy/models/utils.py | 4 | 4335 | import collections
from django.core import urlresolvers
def mongoid_2_url(abbr, _id):
    """Resolve a mongo object id to its canonical URL.

    The third character of the id encodes the object type (B=bill,
    C=committee, V=vote, L=legislator).
    """
    viewnames = {
        'B': 'bill',
        'C': 'committee',
        'V': 'vote',
        'L': 'legislator_noslug',
    }
    return urlresolvers.reverse(viewnames[_id[2]], args=[abbr, _id])
## {{{ http://code.activestate.com/recipes/276643/ (r1)
class CachedAttribute(object):
    '''Descriptor that computes an attribute value once and caches it on
    the instance.

    Example:
        class MyClass(object):
            def myMethod(self):
                # ...
            myMethod = CachedAttribute(myMethod)

    Use "del inst.myMethod" to clear the cache and force recomputation.'''

    def __init__(self, method, name=None):
        self.method = method
        self.name = name or method.__name__

    def __get__(self, inst, cls):
        # Class-level access returns the descriptor itself.
        if inst is None:
            return self
        value = self.method(inst)
        # Shadow the descriptor with the computed value so subsequent
        # lookups hit the instance dict directly.
        setattr(inst, self.name, value)
        return value
class CachedClassAttribute(object):
    '''Descriptor that computes an attribute value once and caches it on
    the *class*.

    Example:
        class MyClass(object):
            def myMethod(cls):
                # ...
            myMethod = CachedClassAttribute(myMethod)

    Use "del MyClass.myMethod" to clear the cache.'''

    def __init__(self, method, name=None):
        self.method = method
        self.name = name or method.__name__

    def __get__(self, inst, cls):
        # Compute from the class (not the instance) and overwrite the
        # descriptor on the class, so later lookups are plain attributes.
        value = self.method(cls)
        setattr(cls, self.name, value)
        return value
class ReadAliasAttribute(object):
    '''Read-only alias: unless explicitly assigned on the instance,
    reading this attribute reads the attribute called *name* instead.

    Example:
        class Document(object):
            title='?'
            shortTitle=ReadAliasAttribute('title')'''

    def __init__(self, name):
        self.name = name

    def __get__(self, inst, cls):
        # Class-level access returns the descriptor itself.
        return self if inst is None else getattr(inst, self.name)
class AliasAttribute(ReadAliasAttribute):
    '''This attribute is an alias for other.
    Example:
    class Document(object):
        newAttrName=somevalue
        deprecatedAttrName=AliasAttribute('newAttrName')'''
    # Writing through the alias writes the target attribute.
    def __set__(self, inst, value):
        setattr(inst, self.name, value)
    # Deleting the alias deletes the target attribute.
    def __delete__(self, inst):
        delattr(inst, self.name)
## end of http://code.activestate.com/recipes/276643/ }}}
## {{{ http://code.activestate.com/recipes/576694/ (r7)
KEY, PREV, NEXT = range(3)  # field indices of a linked-list cell


class OrderedSet(collections.abc.MutableSet):
    """Set that remembers insertion order.

    Backed by a dict plus a circular doubly-linked list whose cells are
    ``[key, prev, next]`` triples; ``self.end`` is the sentinel cell.

    FIX: subclass ``collections.abc.MutableSet`` -- the bare
    ``collections.MutableSet`` alias was deprecated since Python 3.3 and
    removed in Python 3.10, so the original base class no longer exists.
    """

    def __init__(self, iterable=None):
        self.end = end = []
        end += [None, end, end]          # sentinel node for doubly linked list
        self.map = {}                    # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        """Append *key* at the end if not already present."""
        if key not in self.map:
            end = self.end
            curr = end[PREV]
            curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]

    def discard(self, key):
        """Remove *key* if present, splicing its cell out of the list."""
        if key in self.map:
            key, prev, next = self.map.pop(key)
            prev[NEXT] = next
            next[PREV] = prev

    def __iter__(self):
        # Walk the linked list forward from the sentinel.
        end = self.end
        curr = end[NEXT]
        while curr is not end:
            yield curr[KEY]
            curr = curr[NEXT]

    def __reversed__(self):
        # Walk the linked list backward from the sentinel.
        end = self.end
        curr = end[PREV]
        while curr is not end:
            yield curr[KEY]
            curr = curr[PREV]

    def pop(self, last=True):
        """Remove and return the last (or, with last=False, the first) element."""
        if not self:
            raise KeyError('set is empty')
        key = next(reversed(self)) if last else next(iter(self))
        self.discard(key)
        return key

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        # Order matters only when comparing two OrderedSets.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)

    def __del__(self):
        self.clear()                     # remove circular references
if __name__ == '__main__':
    # Smoke test: duplicates removed, first-seen order preserved.
    print(OrderedSet('abracadaba'))
    print(OrderedSet('simsalabim'))
## end of http://code.activestate.com/recipes/576694/ }}}
| bsd-3-clause | 57a522aa2968a4c30d97f8f5f641033b | 26.09375 | 78 | 0.551788 | 3.863636 | false | false | false | false |
numpy/numpy | numpy/fft/tests/test_helper.py | 20 | 6148 | """Test functions for fftpack.helper module
Copied from fftpack.helper by Pearu Peterson, October 2005
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy import fft, pi
class TestFFTShift:
    """Tests for fft.fftshift / fft.ifftshift."""

    def test_definition(self):
        # Odd length: fftshift moves the zero-frequency term to the
        # centre; ifftshift undoes it.
        x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
        y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
        assert_array_almost_equal(fft.fftshift(x), y)
        assert_array_almost_equal(fft.ifftshift(y), x)
        # Even length.
        x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
        y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
        assert_array_almost_equal(fft.fftshift(x), y)
        assert_array_almost_equal(fft.ifftshift(y), x)

    def test_inverse(self):
        # ifftshift inverts fftshift exactly for assorted lengths.
        for n in [1, 4, 9, 100, 211]:
            x = np.random.random((n,))
            assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)

    def test_axes_keyword(self):
        freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
        shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
        assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
        # A scalar axis must behave like the 1-tuple of that axis.
        assert_array_almost_equal(fft.fftshift(freqs, axes=0),
                                  fft.fftshift(freqs, axes=(0,)))
        assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
        assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
                                  fft.ifftshift(shifted, axes=(0,)))
        # Default axes=None shifts every axis.
        assert_array_almost_equal(fft.fftshift(freqs), shifted)
        assert_array_almost_equal(fft.ifftshift(shifted), freqs)

    def test_uneven_dims(self):
        """ Test 2D input, which has uneven dimension sizes """
        freqs = [
            [0, 1],
            [2, 3],
            [4, 5]
        ]
        # shift in dimension 0
        shift_dim0 = [
            [4, 5],
            [0, 1],
            [2, 3]
        ]
        assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0)
        assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs)
        # Tuple and list forms of a single axis must agree with the scalar.
        assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0)
        assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs)
        # shift in dimension 1
        shift_dim1 = [
            [1, 0],
            [3, 2],
            [5, 4]
        ]
        assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1)
        assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs)
        # shift in both dimensions
        shift_dim_both = [
            [5, 4],
            [1, 0],
            [3, 2]
        ]
        assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
        assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
        assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
        assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)
        # axes=None (default) shift in all dimensions
        assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both)
        assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs)
        assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both)
        assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs)

    def test_equal_to_original(self):
        """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """
        # NOTE(review): ``numpy.core`` is private in newer numpy releases;
        # confirm these imports still resolve on the supported versions.
        from numpy.core import asarray, concatenate, arange, take

        def original_fftshift(x, axes=None):
            """ How fftshift was implemented in v1.14"""
            tmp = asarray(x)
            ndim = tmp.ndim
            if axes is None:
                axes = list(range(ndim))
            elif isinstance(axes, int):
                axes = (axes,)
            y = tmp
            for k in axes:
                n = tmp.shape[k]
                p2 = (n + 1) // 2
                mylist = concatenate((arange(p2, n), arange(p2)))
                y = take(y, mylist, k)
            return y

        def original_ifftshift(x, axes=None):
            """ How ifftshift was implemented in v1.14 """
            tmp = asarray(x)
            ndim = tmp.ndim
            if axes is None:
                axes = list(range(ndim))
            elif isinstance(axes, int):
                axes = (axes,)
            y = tmp
            for k in axes:
                n = tmp.shape[k]
                p2 = n - (n + 1) // 2
                mylist = concatenate((arange(p2, n), arange(p2)))
                y = take(y, mylist, k)
            return y

        # create possible 2d array combinations and try all possible keywords
        # compare output to original functions
        for i in range(16):
            for j in range(16):
                for axes_keyword in [0, 1, None, (0,), (0, 1)]:
                    inp = np.random.rand(i, j)
                    assert_array_almost_equal(fft.fftshift(inp, axes_keyword),
                                              original_fftshift(inp, axes_keyword))
                    assert_array_almost_equal(fft.ifftshift(inp, axes_keyword),
                                              original_ifftshift(inp, axes_keyword))
class TestFFTFreq:

    def test_definition(self):
        # Multiplying fftfreq(n[, d]) by n (and d) recovers the canonical
        # integer frequency ordering: 0..n//2, then the negative half.
        expected = [0, 1, 2, 3, 4, -4, -3, -2, -1]
        assert_array_almost_equal(fft.fftfreq(9) * 9, expected)
        assert_array_almost_equal(fft.fftfreq(9, pi) * 9 * pi, expected)
        expected = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
        assert_array_almost_equal(fft.fftfreq(10) * 10, expected)
        assert_array_almost_equal(fft.fftfreq(10, pi) * 10 * pi, expected)
class TestRFFTFreq:

    def test_definition(self):
        # rfftfreq only produces the non-negative half of the spectrum;
        # multiplying by n (and d) recovers 0..n//2.
        expected = [0, 1, 2, 3, 4]
        assert_array_almost_equal(fft.rfftfreq(9) * 9, expected)
        assert_array_almost_equal(fft.rfftfreq(9, pi) * 9 * pi, expected)
        expected = [0, 1, 2, 3, 4, 5]
        assert_array_almost_equal(fft.rfftfreq(10) * 10, expected)
        assert_array_almost_equal(fft.rfftfreq(10, pi) * 10 * pi, expected)
class TestIRFFTN:

    def test_not_last_axis_success(self):
        # Regression test: irfftn over an axis that is *not* the last one
        # must complete without raising.
        real, imag = np.random.random((2, 16, 8, 32))
        spectrum = real + 1j * imag
        fft.irfftn(spectrum, axes=(-2,))
| bsd-3-clause | c366c6abce439214112f20a7b6b56f9f | 35.814371 | 106 | 0.530416 | 3.217164 | false | true | false | false |
numpy/numpy | numpy/linalg/lapack_lite/make_lite.py | 12 | 12214 | #!/usr/bin/env python2.7
# WARNING! This a Python 2 script. Read README.rst for rationale.
"""
Usage: make_lite.py <wrapped_routines_file> <lapack_dir>
Typical invocation:
make_lite.py wrapped_routines /tmp/lapack-3.x.x
Requires the following to be on the path:
* f2c
* patch
"""
import sys
import os
import re
import subprocess
import shutil
import fortran
import clapack_scrub
try:
from distutils.spawn import find_executable as which # Python 2
except ImportError:
from shutil import which # Python 3
# Arguments to pass to f2c. You'll always want -A for ANSI C prototypes
# Others of interest: -a to not make variables static by default
# -C to check array subscripts
F2C_ARGS = ['-A', '-Nx800']
# The header to add to the top of the f2c_*.c file. Note that dlamch_() calls
# will be replaced by the macros below by clapack_scrub.scrub_source()
HEADER_BLURB = '''\
/*
* NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
* information on remaking this file.
*/
'''
HEADER = HEADER_BLURB + '''\
#include "f2c.h"
#ifdef HAVE_CONFIG
#include "config.h"
#else
extern doublereal dlamch_(char *);
#define EPSILON dlamch_("Epsilon")
#define SAFEMINIMUM dlamch_("Safe minimum")
#define PRECISION dlamch_("Precision")
#define BASE dlamch_("Base")
#endif
extern doublereal dlapy2_(doublereal *x, doublereal *y);
/*
f2c knows the exact rules for precedence, and so omits parentheses where not
strictly necessary. Since this is generated code, we don't really care if
it's readable, and we know what is written is correct. So don't warn about
them.
*/
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wparentheses"
#endif
'''
class FortranRoutine:
    """Wrapper for a Fortran routine in a file.

    The routine name defaults to the filename without its extension;
    dependencies are scanned lazily and cached.
    """

    type = 'generic'

    def __init__(self, name=None, filename=None):
        self.filename = filename
        if name is None:
            name, _ext = os.path.splitext(filename)
        self.name = name
        self._dependencies = None

    def dependencies(self):
        # Scan once, cache the lower-cased result.
        if self._dependencies is None:
            found = fortran.getDependencies(self.filename)
            self._dependencies = [dep.lower() for dep in found]
        return self._dependencies

    def __repr__(self):
        return "FortranRoutine({!r}, filename={!r})".format(self.name,
                                                            self.filename)
class UnknownFortranRoutine(FortranRoutine):
    """Wrapper for a Fortran routine for which the corresponding file
    is not known.
    """
    type = 'unknown'
    def __init__(self, name):
        # No source file exists; use a placeholder filename.
        FortranRoutine.__init__(self, name=name, filename='<unknown>')
    def dependencies(self):
        # Nothing to scan, so report no dependencies.
        return []
class FortranLibrary:
    """Container for a bunch of Fortran routines.
    """
    def __init__(self, src_dirs):
        # Directories searched, in order, for <routine>.f files.
        self._src_dirs = src_dirs
        self.names_to_routines = {}
    def _findRoutine(self, rname):
        # Search each source dir for <rname>.f; fall back to a stub
        # routine when the file cannot be located.
        rname = rname.lower()
        for s in self._src_dirs:
            ffilename = os.path.join(s, rname + '.f')
            if os.path.exists(ffilename):
                return self._newFortranRoutine(rname, ffilename)
        return UnknownFortranRoutine(rname)
    def _newFortranRoutine(self, rname, filename):
        # Factory hook; subclasses override to classify routines.
        return FortranRoutine(rname, filename)
    def addIgnorableRoutine(self, rname):
        """Add a routine that we don't want to consider when looking at
        dependencies.
        """
        rname = rname.lower()
        routine = UnknownFortranRoutine(rname)
        self.names_to_routines[rname] = routine
    def addRoutine(self, rname):
        """Add a routine to the library.
        """
        self.getRoutine(rname)
    def getRoutine(self, rname):
        """Get a routine from the library. Will add if it's not found.
        """
        # ``unique`` is a sentinel object distinguishing "absent" from
        # any legitimately stored value.
        unique = []
        rname = rname.lower()
        routine = self.names_to_routines.get(rname, unique)
        if routine is unique:
            routine = self._findRoutine(rname)
            self.names_to_routines[rname] = routine
        return routine
    def allRoutineNames(self):
        """Return the names of all the routines.
        """
        return list(self.names_to_routines.keys())
    def allRoutines(self):
        """Return all the routines.
        """
        return list(self.names_to_routines.values())
    def resolveAllDependencies(self):
        """Try to add routines to the library to satisfy all the dependencies
        for each routine in the library.

        Returns a set of routine names that have the dependencies unresolved.
        """
        # Fixpoint loop: keep adding the dependencies of newly-added
        # routines until a pass discovers nothing new.
        done_this = set()
        last_todo = set()
        while True:
            todo = set(self.allRoutineNames()) - done_this
            if todo == last_todo:
                break
            for rn in todo:
                r = self.getRoutine(rn)
                deps = r.dependencies()
                for d in deps:
                    self.addRoutine(d)
                done_this.add(rn)
            last_todo = todo
        return todo
class LapackLibrary(FortranLibrary):
    """FortranLibrary that classifies routines by LAPACK conventions."""

    def _newFortranRoutine(self, rname, filename):
        routine = FortranLibrary._newFortranRoutine(self, rname, filename)
        # Classify by source location first (BLAS/, INSTALL/), then by
        # the LAPACK precision-prefix letter of the routine name.
        if 'blas' in filename.lower():
            routine.type = 'blas'
        elif 'install' in filename.lower():
            routine.type = 'config'
        elif rname.startswith('z'):
            routine.type = 'z_lapack'
        elif rname.startswith('c'):
            routine.type = 'c_lapack'
        elif rname.startswith('s'):
            routine.type = 's_lapack'
        elif rname.startswith('d'):
            routine.type = 'd_lapack'
        else:
            routine.type = 'lapack'
        return routine

    def allRoutinesByType(self, typename):
        """Return the routines of ``typename``, sorted by name."""
        routines = sorted((r.name, r) for r in self.allRoutines() if r.type == typename)
        return [a[1] for a in routines]
def printRoutineNames(desc, routines):
    """Print *desc* followed by one tab-indented routine name per line."""
    print(desc)
    for routine in routines:
        print('\t%s' % routine.name)
def getLapackRoutines(wrapped_routines, ignores, lapack_dir):
    """Build a LapackLibrary for *wrapped_routines* plus their dependencies.

    Locates the BLAS/LAPACK/INSTALL source directories under
    *lapack_dir*, accepting either upper- or lower-case layouts.
    """
    blas_src_dir = os.path.join(lapack_dir, 'BLAS', 'SRC')
    if not os.path.exists(blas_src_dir):
        blas_src_dir = os.path.join(lapack_dir, 'blas', 'src')
    lapack_src_dir = os.path.join(lapack_dir, 'SRC')
    if not os.path.exists(lapack_src_dir):
        lapack_src_dir = os.path.join(lapack_dir, 'src')
    install_src_dir = os.path.join(lapack_dir, 'INSTALL')
    if not os.path.exists(install_src_dir):
        install_src_dir = os.path.join(lapack_dir, 'install')
    library = LapackLibrary([install_src_dir, blas_src_dir, lapack_src_dir])
    # Register ignorables first so dependency resolution skips them.
    for r in ignores:
        library.addIgnorableRoutine(r)
    for w in wrapped_routines:
        library.addRoutine(w)
    # Pull in everything the wrapped routines transitively depend on.
    library.resolveAllDependencies()
    return library
def getWrappedRoutineNames(wrapped_routines_file):
routines = []
ignores = []
with open(wrapped_routines_file) as fo:
for line in fo:
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith('IGNORE:'):
line = line[7:].strip()
ig = line.split()
ignores.extend(ig)
else:
routines.append(line)
return routines, ignores
types = {'blas', 'lapack', 'd_lapack', 's_lapack', 'z_lapack', 'c_lapack', 'config'}
def dumpRoutineNames(library, output_dir):
    """Write a <type>_routines.lst file per category listing name: deps."""
    for typename in {'unknown'} | types:
        listing_path = os.path.join(output_dir, typename + '_routines.lst')
        with open(listing_path, 'w') as fo:
            for routine in library.allRoutinesByType(typename):
                deps = routine.dependencies()
                fo.write('%s: %s\n' % (routine.name, ' '.join(deps)))
def concatenateRoutines(routines, output_file):
    """Concatenate each routine's Fortran source, in order, into one file."""
    with open(output_file, 'w') as out:
        for routine in routines:
            with open(routine.filename, 'r') as src:
                out.write(src.read())
class F2CError(Exception):
    """Raised when the external f2c translator exits with a non-zero status."""
    pass
def runF2C(fortran_filename, output_dir):
    """Translate one Fortran file to C with f2c; raise F2CError on failure."""
    # f2c chokes on backslash path separators, so normalize to forward slashes.
    normalized = fortran_filename.replace('\\', '/')
    cmd = ["f2c"] + F2C_ARGS + ['-d', output_dir, normalized]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        raise F2CError
def scrubF2CSource(c_file):
    """Clean up f2c output in place, prepending the standard header."""
    with open(c_file) as src:
        cleaned = clapack_scrub.scrubSource(src.read(), verbose=True)
    with open(c_file, 'w') as dst:
        dst.write(HEADER)
        dst.write(cleaned)
def ensure_executable(name):
    """Abort the script with a clear message if *name* is not on PATH.

    NOTE(review): relies on the module-level `which` helper raising when the
    program is missing — confirm its failure mode (exception vs. None return).
    """
    try:
        which(name)
    except Exception:
        raise SystemExit(name + ' not found')
def create_name_header(output_dir):
    """Generate lapack_lite_names.h, renaming all exported symbols.

    Scans the generated .f files for subroutine/function definitions and the
    local f2c.h for extern declarations, then emits #define lines so BLAS/
    LAPACK symbols go through BLAS_FUNC() and f2c's own exports get a
    numpy_lapack_lite_ prefix, avoiding dynamic-symbol clashes.

    NOTE(review): 'f2c.h' is opened relative to the current working
    directory — assumes the script is run from its own directory; confirm.
    """
    # Matches Fortran 'subroutine name(' / '... function name(' definitions;
    # group 2 is the routine name.
    routine_re = re.compile(r'^ (subroutine|.* function)\s+(\w+)\(.*$',
                            re.I)
    # Matches 'extern <type> name(' declarations in f2c.h; group 1 is the name.
    extern_re = re.compile(r'^extern [a-z]+ ([a-z0-9_]+)\(.*$')
    # BLAS/LAPACK symbols
    # xerbla is referenced but not defined in the scanned sources, so seed it.
    symbols = set(['xerbla'])
    for fn in os.listdir(output_dir):
        fn = os.path.join(output_dir, fn)
        if not fn.endswith('.f'):
            continue
        with open(fn, 'r') as f:
            for line in f:
                m = routine_re.match(line)
                if m:
                    symbols.add(m.group(2).lower())
    # f2c symbols
    f2c_symbols = set()
    with open('f2c.h', 'r') as f:
        for line in f:
            m = extern_re.match(line)
            if m:
                f2c_symbols.add(m.group(1))
    with open(os.path.join(output_dir, 'lapack_lite_names.h'), 'w') as f:
        f.write(HEADER_BLURB)
        f.write(
            "/*\n"
            " * This file renames all BLAS/LAPACK and f2c symbols to avoid\n"
            " * dynamic symbol name conflicts, in cases where e.g.\n"
            " * integer sizes do not match with 'standard' ABI.\n"
            " */\n")
        # Rename BLAS/LAPACK symbols
        for name in sorted(symbols):
            f.write("#define %s_ BLAS_FUNC(%s)\n" % (name, name))
        # Rename also symbols that f2c exports itself
        f.write("\n"
                "/* Symbols exported by f2c.c */\n")
        for name in sorted(f2c_symbols):
            f.write("#define %s numpy_lapack_lite_%s\n" % (name, name))
def main():
    """Drive the full regeneration: gather routines, run patch/f2c per
    category in a fresh ./build directory, then copy the results back next
    to this script.
    """
    if len(sys.argv) != 3:
        print(__doc__)
        return
    # Make sure that patch and f2c are found on path
    ensure_executable('f2c')
    ensure_executable('patch')
    wrapped_routines_file = sys.argv[1]
    lapack_src_dir = sys.argv[2]
    # All intermediate output goes into a throwaway build directory.
    output_dir = os.path.join(os.path.dirname(__file__), 'build')
    shutil.rmtree(output_dir, ignore_errors=True)
    os.makedirs(output_dir)
    wrapped_routines, ignores = getWrappedRoutineNames(wrapped_routines_file)
    library = getLapackRoutines(wrapped_routines, ignores, lapack_src_dir)
    dumpRoutineNames(library, output_dir)
    for typename in types:
        fortran_file = os.path.join(output_dir, 'f2c_%s.f' % typename)
        c_file = fortran_file[:-2] + '.c'
        print('creating %s ...' % c_file)
        routines = library.allRoutinesByType(typename)
        concatenateRoutines(routines, fortran_file)
        # apply the patch for this Fortran file, if one is checked in
        patch_file = os.path.basename(fortran_file) + '.patch'
        if os.path.exists(patch_file):
            subprocess.check_call(['patch', '-u', fortran_file, patch_file])
            print("Patched {}".format(fortran_file))
        try:
            runF2C(fortran_file, output_dir)
        except F2CError:
            print('f2c failed on %s' % fortran_file)
            break
        scrubF2CSource(c_file)
        # patch any changes needed to the C file
        c_patch_file = c_file + '.patch'
        if os.path.exists(c_patch_file):
            subprocess.check_call(['patch', '-u', c_file, c_patch_file])
        print()
    create_name_header(output_dir)
    # Copy the generated C sources and the renaming header back in place.
    for fname in os.listdir(output_dir):
        if fname.endswith('.c') or fname == 'lapack_lite_names.h':
            print('Copying ' + fname)
            shutil.copy(
                os.path.join(output_dir, fname),
                os.path.abspath(os.path.dirname(__file__)),
            )
# CLI entry point: expects <wrapped_routines_file> <lapack_dir> (see main()).
if __name__ == '__main__':
    main()
| bsd-3-clause | ef5d00ff5bc19cb4934ddce8d6e0085d | 30.317949 | 88 | 0.590552 | 3.613609 | false | false | false | false |
numpy/numpy | numpy/polynomial/tests/test_hermite.py | 24 | 18577 | """Tests for hermite module.
"""
from functools import reduce
import numpy as np
import numpy.polynomial.hermite as herm
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
# Power-basis coefficients (lowest degree first) of the Hermite polynomials
# H_0 .. H_9, used as reference data (see test_herm2poly / test_poly2herm).
H0 = np.array([1])
H1 = np.array([0, 2])
H2 = np.array([-2, 0, 4])
H3 = np.array([0, -12, 0, 8])
H4 = np.array([12, 0, -48, 0, 16])
H5 = np.array([0, 120, 0, -160, 0, 32])
H6 = np.array([-120, 0, 720, 0, -480, 0, 64])
H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128])
H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256])
H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512])
Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9]
def trim(x):
    """Discard trailing Hermite coefficients below the shared 1e-6 tolerance."""
    return herm.hermtrim(x, 1e-6)
class TestConstants:
    """Check the module-level Hermite constant arrays."""

    def _check(self, actual, expected):
        # Shared comparison helper for the constant checks.
        assert_equal(actual, expected)

    def test_hermdomain(self):
        self._check(herm.hermdomain, [-1, 1])

    def test_hermzero(self):
        self._check(herm.hermzero, [0])

    def test_hermone(self):
        self._check(herm.hermone, [1])

    def test_hermx(self):
        self._check(herm.hermx, [0, .5])
class TestArithmetic:
    """Hermite-series arithmetic: add, sub, mulx, mul, div and pow,
    checked against basis-vector inputs and pointwise evaluation."""
    x = np.linspace(-3, 3, 100)
    def test_hermadd(self):
        # basis + basis must give a coefficient vector with two ones
        for i in range(5):
            for j in range(5):
                msg = f"At i={i}, j={j}"
                tgt = np.zeros(max(i, j) + 1)
                tgt[i] += 1
                tgt[j] += 1
                res = herm.hermadd([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_hermsub(self):
        for i in range(5):
            for j in range(5):
                msg = f"At i={i}, j={j}"
                tgt = np.zeros(max(i, j) + 1)
                tgt[i] += 1
                tgt[j] -= 1
                res = herm.hermsub([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_hermmulx(self):
        # x*H_i = i*H_{i-1} + .5*H_{i+1} (recurrence in coefficient form)
        assert_equal(herm.hermmulx([0]), [0])
        assert_equal(herm.hermmulx([1]), [0, .5])
        for i in range(1, 5):
            ser = [0]*i + [1]
            tgt = [0]*(i - 1) + [i, 0, .5]
            assert_equal(herm.hermmulx(ser), tgt)
    def test_hermmul(self):
        # check values of result
        for i in range(5):
            pol1 = [0]*i + [1]
            val1 = herm.hermval(self.x, pol1)
            for j in range(5):
                msg = f"At i={i}, j={j}"
                pol2 = [0]*j + [1]
                val2 = herm.hermval(self.x, pol2)
                pol3 = herm.hermmul(pol1, pol2)
                val3 = herm.hermval(self.x, pol3)
                assert_(len(pol3) == i + j + 1, msg)
                assert_almost_equal(val3, val1*val2, err_msg=msg)
    def test_hermdiv(self):
        # reconstruct the dividend from quotient and remainder
        for i in range(5):
            for j in range(5):
                msg = f"At i={i}, j={j}"
                ci = [0]*i + [1]
                cj = [0]*j + [1]
                tgt = herm.hermadd(ci, cj)
                quo, rem = herm.hermdiv(tgt, ci)
                res = herm.hermadd(herm.hermmul(quo, ci), rem)
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_hermpow(self):
        # compare against repeated hermmul
        for i in range(5):
            for j in range(5):
                msg = f"At i={i}, j={j}"
                c = np.arange(i + 1)
                tgt = reduce(herm.hermmul, [c]*j, np.array([1]))
                res = herm.hermpow(c, j)
                assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation:
    """Evaluation of Hermite series at points (1d/2d/3d) and on grids,
    compared against direct power-basis evaluation of 1 + 2*x + 3*x**2."""
    # coefficients of 1 + 2*x + 3*x**2
    c1d = np.array([2.5, 1., .75])
    c2d = np.einsum('i,j->ij', c1d, c1d)
    c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
    # some random values in [-1, 1)
    x = np.random.random((3, 5))*2 - 1
    y = polyval(x, [1., 2., 3.])
    def test_hermval(self):
        #check empty input
        assert_equal(herm.hermval([], [1]).size, 0)
        #check normal input)
        x = np.linspace(-1, 1)
        y = [polyval(x, c) for c in Hlist]
        for i in range(10):
            msg = f"At i={i}"
            tgt = y[i]
            res = herm.hermval(x, [0]*i + [1])
            assert_almost_equal(res, tgt, err_msg=msg)
        #check that shape is preserved
        for i in range(3):
            dims = [2]*i
            x = np.zeros(dims)
            assert_equal(herm.hermval(x, [1]).shape, dims)
            assert_equal(herm.hermval(x, [1, 0]).shape, dims)
            assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims)
    def test_hermval2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test exceptions
        assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d)
        #test values
        tgt = y1*y2
        res = herm.hermval2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = herm.hermval2d(z, z, self.c2d)
        assert_(res.shape == (2, 3))
    def test_hermval3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test exceptions
        assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d)
        #test values
        tgt = y1*y2*y3
        res = herm.hermval3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = herm.hermval3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3))
    def test_hermgrid2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test values
        tgt = np.einsum('i,j->ij', y1, y2)
        res = herm.hermgrid2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = herm.hermgrid2d(z, z, self.c2d)
        assert_(res.shape == (2, 3)*2)
    def test_hermgrid3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test values
        tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
        res = herm.hermgrid3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2, 3))
        res = herm.hermgrid3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3)*3)
class TestIntegral:
    """hermint: argument validation, integration constants, lower bounds,
    scaling, repeated integration, and the axis keyword."""
    def test_hermint(self):
        # check exceptions
        assert_raises(TypeError, herm.hermint, [0], .5)
        assert_raises(ValueError, herm.hermint, [0], -1)
        assert_raises(ValueError, herm.hermint, [0], 1, [0, 0])
        assert_raises(ValueError, herm.hermint, [0], lbnd=[0])
        assert_raises(ValueError, herm.hermint, [0], scl=[0])
        assert_raises(TypeError, herm.hermint, [0], axis=.5)
        # test integration of zero polynomial
        for i in range(2, 5):
            k = [0]*(i - 2) + [1]
            res = herm.hermint([0], m=i, k=k)
            assert_almost_equal(res, [0, .5])
        # check single integration with integration constant
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [1/scl]
            hermpol = herm.poly2herm(pol)
            hermint = herm.hermint(hermpol, m=1, k=[i])
            res = herm.herm2poly(hermint)
            assert_almost_equal(trim(res), trim(tgt))
        # check single integration with integration constant and lbnd
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            hermpol = herm.poly2herm(pol)
            hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1)
            assert_almost_equal(herm.hermval(-1, hermint), i)
        # check single integration with integration constant and scaling
        for i in range(5):
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [2/scl]
            hermpol = herm.poly2herm(pol)
            hermint = herm.hermint(hermpol, m=1, k=[i], scl=2)
            res = herm.herm2poly(hermint)
            assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with default k
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herm.hermint(tgt, m=1)
                res = herm.hermint(pol, m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with defined k
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herm.hermint(tgt, m=1, k=[k])
                res = herm.hermint(pol, m=j, k=list(range(j)))
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with lbnd
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1)
                res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with scaling
        for i in range(5):
            for j in range(2, 5):
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = herm.hermint(tgt, m=1, k=[k], scl=2)
                res = herm.hermint(pol, m=j, k=list(range(j)), scl=2)
                assert_almost_equal(trim(res), trim(tgt))
    def test_hermint_axis(self):
        # check that axis keyword works
        c2d = np.random.random((3, 4))
        tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T
        res = herm.hermint(c2d, axis=0)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([herm.hermint(c) for c in c2d])
        res = herm.hermint(c2d, axis=1)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([herm.hermint(c, k=3) for c in c2d])
        res = herm.hermint(c2d, k=3, axis=1)
        assert_almost_equal(res, tgt)
class TestDerivative:
    """hermder: argument validation, inversion of hermint, scaling, axis."""
    def test_hermder(self):
        # check exceptions
        assert_raises(TypeError, herm.hermder, [0], .5)
        assert_raises(ValueError, herm.hermder, [0], -1)
        # check that zeroth derivative does nothing
        for i in range(5):
            tgt = [0]*i + [1]
            res = herm.hermder(tgt, m=0)
            assert_equal(trim(res), trim(tgt))
        # check that derivation is the inverse of integration
        for i in range(5):
            for j in range(2, 5):
                tgt = [0]*i + [1]
                res = herm.hermder(herm.hermint(tgt, m=j), m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check derivation with scaling
        for i in range(5):
            for j in range(2, 5):
                tgt = [0]*i + [1]
                res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5)
                assert_almost_equal(trim(res), trim(tgt))
    def test_hermder_axis(self):
        # check that axis keyword works
        c2d = np.random.random((3, 4))
        tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T
        res = herm.hermder(c2d, axis=0)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([herm.hermder(c) for c in c2d])
        res = herm.hermder(c2d, axis=1)
        assert_almost_equal(res, tgt)
class TestVander:
    """Pseudo-Vandermonde matrices in 1, 2 and 3 dimensions: shapes and
    consistency of columns with hermval evaluations."""
    # some random values in [-1, 1)
    x = np.random.random((3, 5))*2 - 1
    def test_hermvander(self):
        # check for 1d x
        x = np.arange(3)
        v = herm.hermvander(x, 3)
        assert_(v.shape == (3, 4))
        for i in range(4):
            coef = [0]*i + [1]
            assert_almost_equal(v[..., i], herm.hermval(x, coef))
        # check for 2d x
        x = np.array([[1, 2], [3, 4], [5, 6]])
        v = herm.hermvander(x, 3)
        assert_(v.shape == (3, 2, 4))
        for i in range(4):
            coef = [0]*i + [1]
            assert_almost_equal(v[..., i], herm.hermval(x, coef))
    def test_hermvander2d(self):
        # also tests hermval2d for non-square coefficient array
        x1, x2, x3 = self.x
        c = np.random.random((2, 3))
        van = herm.hermvander2d(x1, x2, [1, 2])
        tgt = herm.hermval2d(x1, x2, c)
        res = np.dot(van, c.flat)
        assert_almost_equal(res, tgt)
        # check shape
        van = herm.hermvander2d([x1], [x2], [1, 2])
        assert_(van.shape == (1, 5, 6))
    def test_hermvander3d(self):
        # also tests hermval3d for non-square coefficient array
        x1, x2, x3 = self.x
        c = np.random.random((2, 3, 4))
        van = herm.hermvander3d(x1, x2, x3, [1, 2, 3])
        tgt = herm.hermval3d(x1, x2, x3, c)
        res = np.dot(van, c.flat)
        assert_almost_equal(res, tgt)
        # check shape
        van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3])
        assert_(van.shape == (1, 5, 24))
class TestFitting:
    """hermfit: argument validation, exact fits of low-degree polynomials,
    degree lists, multi-column targets, weights, and complex abscissae."""
    def test_hermfit(self):
        def f(x):
            return x*(x - 1)*(x - 2)
        def f2(x):
            return x**4 + x**2 + 1
        # Test exceptions
        assert_raises(ValueError, herm.hermfit, [1], [1], -1)
        assert_raises(TypeError, herm.hermfit, [[1]], [1], 0)
        assert_raises(TypeError, herm.hermfit, [], [1], 0)
        assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0)
        assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0)
        assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0)
        assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]])
        assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1])
        assert_raises(ValueError, herm.hermfit, [1], [1], [-1,])
        assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6])
        assert_raises(TypeError, herm.hermfit, [1], [1], [])
        # Test fit
        x = np.linspace(0, 2)
        y = f(x)
        #
        coef3 = herm.hermfit(x, y, 3)
        assert_equal(len(coef3), 4)
        assert_almost_equal(herm.hermval(x, coef3), y)
        coef3 = herm.hermfit(x, y, [0, 1, 2, 3])
        assert_equal(len(coef3), 4)
        assert_almost_equal(herm.hermval(x, coef3), y)
        #
        coef4 = herm.hermfit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(herm.hermval(x, coef4), y)
        coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4])
        assert_equal(len(coef4), 5)
        assert_almost_equal(herm.hermval(x, coef4), y)
        # check things still work if deg is not in strict increasing
        coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0])
        assert_equal(len(coef4), 5)
        assert_almost_equal(herm.hermval(x, coef4), y)
        #
        coef2d = herm.hermfit(x, np.array([y, y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3])
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        # test weighting
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        y[0::2] = 0
        wcoef3 = herm.hermfit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w)
        assert_almost_equal(wcoef3, coef3)
        #
        wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex values x points whose square
        # is zero when summed.
        x = [1, 1j, -1, -1j]
        assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
        assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])
        # test fitting only even Legendre polynomials
        x = np.linspace(-1, 1)
        y = f2(x)
        coef1 = herm.hermfit(x, y, 4)
        assert_almost_equal(herm.hermval(x, coef1), y)
        coef2 = herm.hermfit(x, y, [0, 2, 4])
        assert_almost_equal(herm.hermval(x, coef2), y)
        assert_almost_equal(coef1, coef2)
class TestCompanion:
    """Tests for the Hermite companion-matrix builder."""

    def test_raises(self):
        # A series of degree < 1 has no companion matrix.
        with assert_raises(ValueError):
            herm.hermcompanion([])
        with assert_raises(ValueError):
            herm.hermcompanion([1])

    def test_dimensions(self):
        # A degree-d basis series yields a d x d matrix.
        for deg in range(1, 5):
            series = [0]*deg + [1]
            assert_(herm.hermcompanion(series).shape == (deg, deg))

    def test_linear_root(self):
        # The single entry for a linear series is the series' root.
        assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
class TestGauss:
    """Sanity checks for the 100-point Gauss-Hermite quadrature rule."""

    def test_100(self):
        nodes, weights = herm.hermgauss(100)
        # Orthogonality check. Normalize the Gram matrix first: the raw
        # values of fast-growing functions like these would otherwise make
        # the comparison against the identity very confusing.
        vander = herm.hermvander(nodes, 99)
        gram = np.dot(vander.T * weights, vander)
        scale = 1/np.sqrt(gram.diagonal())
        gram = scale[:, None] * gram * scale
        assert_almost_equal(gram, np.eye(100))
        # The weights must sum to the integral of 1 (i.e. of exp(-x^2)).
        assert_almost_equal(weights.sum(), np.sqrt(np.pi))
class TestMisc:
    """Remaining helpers: roots round-trips, trimming, line construction,
    basis conversion, and the Hermite weight function."""
    def test_hermfromroots(self):
        res = herm.hermfromroots([])
        assert_almost_equal(trim(res), [1])
        for i in range(1, 5):
            roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
            pol = herm.hermfromroots(roots)
            res = herm.hermval(roots, pol)
            tgt = 0
            assert_(len(pol) == i + 1)
            # monic in the power basis: leading coefficient must be 1
            assert_almost_equal(herm.herm2poly(pol)[-1], 1)
            assert_almost_equal(res, tgt)
    def test_hermroots(self):
        assert_almost_equal(herm.hermroots([1]), [])
        assert_almost_equal(herm.hermroots([1, 1]), [-.5])
        for i in range(2, 5):
            tgt = np.linspace(-1, 1, i)
            res = herm.hermroots(herm.hermfromroots(tgt))
            assert_almost_equal(trim(res), trim(tgt))
    def test_hermtrim(self):
        coef = [2, -1, 1, 0]
        # Test exceptions
        assert_raises(ValueError, herm.hermtrim, coef, -1)
        # Test results
        assert_equal(herm.hermtrim(coef), coef[:-1])
        assert_equal(herm.hermtrim(coef, 1), coef[:-3])
        assert_equal(herm.hermtrim(coef, 2), [0])
    def test_hermline(self):
        assert_equal(herm.hermline(3, 4), [3, 2])
    def test_herm2poly(self):
        for i in range(10):
            assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i])
    def test_poly2herm(self):
        for i in range(10):
            assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1])
    def test_weight(self):
        # weight function is exp(-x**2)
        x = np.linspace(-5, 5, 11)
        tgt = np.exp(-x**2)
        res = herm.hermweight(x)
        assert_almost_equal(res, tgt)
| bsd-3-clause | 29bd1090b96d47479a6e518aa3a01a88 | 32.472072 | 78 | 0.511331 | 2.854487 | false | true | false | false |
numpy/numpy | numpy/lib/histograms.py | 2 | 37698 | """
Histogram-related functions
"""
import contextlib
import functools
import operator
import warnings
import numpy as np
from numpy.core import overrides
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']

# Dispatch decorator bound to the public 'numpy' module name.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
def _ptp(x):
    """Peak-to-peak value of x.

    Goes through _unsigned_subtract so that a span too large for a signed
    integer dtype comes back as the corresponding unsigned value instead of
    overflowing.
    """
    lo, hi = x.min(), x.max()
    return _unsigned_subtract(hi, lo)
def _hist_bin_sqrt(x, range):
    """
    Square root histogram bin estimator.

    Bin width is inversely proportional to the square root of the data
    size; this simple rule is used by many plotting programs.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return _ptp(x) / np.sqrt(x.size)
def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic rule that assumes normally distributed data and
    depends only on the sample size; it performs poorly for large
    non-normal data sets.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return _ptp(x) / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x, range):
    """
    Rice histogram bin estimator.

    No normality assumption; the bin count grows with the cube root of the
    sample size (asymptotically optimal), so it does better than Sturges
    on large data but tends to overestimate the number of bins. Depends
    only on the sample size.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x, range):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
def _hist_bin_stone(x, range):
    """
    Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).

    The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
    The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
    https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule

    This paper by Stone appears to be the origination of this rule.
    http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.
    range : (float, float)
        The lower and upper range of the bins.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    n = x.size
    ptp_x = _ptp(x)
    # Degenerate data (a single point, or no spread) cannot be binned.
    if n <= 1 or ptp_x == 0:
        return 0

    def jhat(nbins):
        # Cross-validation risk estimate for nbins equal-width bins; the
        # candidate minimizing this is taken as optimal.
        hh = ptp_x / nbins
        p_k = np.histogram(x, bins=nbins, range=range)[0] / n
        return (2 - (n + 1) * p_k.dot(p_k)) / hh

    # Exhaustive search over 1..max(100, sqrt(n)) candidate bin counts.
    nbins_upper_bound = max(100, int(np.sqrt(n)))
    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
    if nbins == nbins_upper_bound:
        # Hitting the search ceiling means the true minimum may lie beyond it.
        warnings.warn("The number of bins estimated may be suboptimal.",
                      RuntimeWarning, stacklevel=3)
    return ptp_x / nbins
def _hist_bin_doane(x, range):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    if x.size > 2:
        # sg1: standard error of the sample skewness under normality.
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            # Sturges' width, corrected by a skewness term.
            return _ptp(x) / (1.0 + np.log2(x.size) +
                                    np.log2(1.0 + np.absolute(g1) / sg1))
    # Too few points, or zero variance: no usable width.
    return 0.0
def _hist_bin_fd(x, range):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 0 for the bin width.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x, range):
    """
    Histogram bin estimator that uses the minimum width of the
    Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero,
    and the Sturges estimator otherwise.

    FD is usually the most robust method, but its width estimate tends to
    be too large for small `x` and bad for data with limited variance,
    where Sturges (the R default, good below ~1000 points) does well.
    Taking the smaller non-zero width gives good off-the-shelf behaviour.

    .. versionchanged:: 1.15.0
        With limited variance the IQR, and hence the FD width, can be 0;
        since that is not a valid width, the Sturges estimator (which only
        uses the dataset size) is used instead.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.

    See Also
    --------
    _hist_bin_fd, _hist_bin_sturges
    """
    fd_bw = _hist_bin_fd(x, range)
    sturges_bw = _hist_bin_sturges(x, range)
    del range  # unused
    # fd_bw == 0 means the IQR is zero; fall back to the size-only estimator.
    return min(fd_bw, sturges_bw) if fd_bw else sturges_bw
# Private dict initialized at module load time
# Maps the estimator names accepted as `bins=<str>` by histogram functions
# to the width-estimator implementations above.
_hist_bin_selectors = {'stone': _hist_bin_stone,
                       'auto': _hist_bin_auto,
                       'doane': _hist_bin_doane,
                       'fd': _hist_bin_fd,
                       'rice': _hist_bin_rice,
                       'scott': _hist_bin_scott,
                       'sqrt': _hist_bin_sqrt,
                       'sturges': _hist_bin_sturges}
def _ravel_and_check_weights(a, weights):
""" Check a and weights have matching shapes, and ravel both """
a = np.asarray(a)
# Ensure that the array is a "subtractable" dtype
if a.dtype == np.bool_:
warnings.warn("Converting input from {} to {} for compatibility."
.format(a.dtype, np.uint8),
RuntimeWarning, stacklevel=3)
a = a.astype(np.uint8)
if weights is not None:
weights = np.asarray(weights)
if weights.shape != a.shape:
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
return a, weights
def _get_outer_edges(a, range):
"""
Determine the outer bin edges to use, from either the data or the range
argument
"""
if range is not None:
first_edge, last_edge = range
if first_edge > last_edge:
raise ValueError(
'max must be larger than min in range parameter.')
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
elif a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = a.min(), a.max()
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
# expand empty range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
def _unsigned_subtract(a, b):
"""
Subtract two values where a >= b, and produce an unsigned result
This is needed when finding the difference between the upper and lower
bound of an int16 histogram
"""
# coerce to a single type
signed_to_unsigned = {
np.byte: np.ubyte,
np.short: np.ushort,
np.intc: np.uintc,
np.int_: np.uint,
np.longlong: np.ulonglong
}
dt = np.result_type(a, b)
try:
dt = signed_to_unsigned[dt.type]
except KeyError:
return np.subtract(a, b, dtype=dt)
else:
# we know the inputs are integers, and we are deliberately casting
# signed to unsigned
return np.subtract(a, b, casting='unsafe', dtype=dt)
def _get_bin_edges(a, bins, range, weights):
    """
    Computes the bins used internally by `histogram`.

    Parameters
    ==========
    a : ndarray
        Ravelled data array
    bins, range
        Forwarded arguments from `histogram`.
    weights : ndarray, optional
        Ravelled weights array, or None

    Returns
    =======
    bin_edges : ndarray
        Array of bin edges
    uniform_bins : (Number, Number, int):
        The upper bound, lowerbound, and number of bins, used in the optimized
        implementation of `histogram` that works on uniform bins.
    """
    # parse the overloaded bins argument
    n_equal_bins = None
    bin_edges = None

    if isinstance(bins, str):
        # Named estimator: compute a width, then derive the bin count.
        bin_name = bins
        # if `bins` is a string for an automatic method,
        # this will replace it with the number of bins calculated
        if bin_name not in _hist_bin_selectors:
            raise ValueError(
                "{!r} is not a valid estimator for `bins`".format(bin_name))
        if weights is not None:
            raise TypeError("Automated estimation of the number of "
                            "bins is not supported for weighted data")
        first_edge, last_edge = _get_outer_edges(a, range)
        # truncate the range if needed
        if range is not None:
            keep = (a >= first_edge)
            keep &= (a <= last_edge)
            if not np.logical_and.reduce(keep):
                a = a[keep]
        if a.size == 0:
            n_equal_bins = 1
        else:
            # Do not call selectors on empty arrays
            width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
            if width:
                n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
            else:
                # Width can be zero for some estimators, e.g. FD when
                # the IQR of the data is zero.
                n_equal_bins = 1
    elif np.ndim(bins) == 0:
        # Scalar: an explicit bin count.
        try:
            n_equal_bins = operator.index(bins)
        except TypeError as e:
            raise TypeError(
                '`bins` must be an integer, a string, or an array') from e
        if n_equal_bins < 1:
            raise ValueError('`bins` must be positive, when an integer')
        first_edge, last_edge = _get_outer_edges(a, range)
    elif np.ndim(bins) == 1:
        # 1-d array: explicit (possibly non-uniform) edges, used as-is.
        bin_edges = np.asarray(bins)
        if np.any(bin_edges[:-1] > bin_edges[1:]):
            raise ValueError(
                '`bins` must increase monotonically, when an array')
    else:
        raise ValueError('`bins` must be 1d, when an array')

    if n_equal_bins is not None:
        # gh-10322 means that type resolution rules are dependent on array
        # shapes. To avoid this causing problems, we pick a type now and stick
        # with it throughout.
        bin_type = np.result_type(first_edge, last_edge, a)
        if np.issubdtype(bin_type, np.integer):
            bin_type = np.result_type(bin_type, float)
        # bin edges must be computed
        bin_edges = np.linspace(
            first_edge, last_edge, n_equal_bins + 1,
            endpoint=True, dtype=bin_type)
        return bin_edges, (first_edge, last_edge, n_equal_bins)
    else:
        # Explicit-edges path: no uniform-bin fast path is possible.
        return bin_edges, None
def _search_sorted_inclusive(a, v):
"""
Like `searchsorted`, but where the last item in `v` is placed on the right.
In the context of a histogram, this makes the last bin edge inclusive
"""
return np.concatenate((
a.searchsorted(v[:-1], 'left'),
a.searchsorted(v[-1:], 'right')
))
def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
    # __array_function__ dispatcher: only the arguments that may be
    # array-like participate (`range` never is).
    return (a, bins, weights)
@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
    r"""
    Function to calculate only the edges of the bins used by the `histogram`
    function.
    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines the bin edges, including the rightmost
        edge, allowing for non-uniform bin widths.
        If `bins` is a string from the list below, `histogram_bin_edges` will use
        the method chosen to calculate the optimal bin width and
        consequently the number of bins (see `Notes` for more detail on
        the estimators) from the data that falls within the requested
        range. While the bin width will be optimal for the actual data
        in the range, the number of bins will be computed to fill the
        entire range, including the empty portions. For visualisation,
        using the 'auto' option is suggested. Weighted data is not
        supported for automated bin size selection.
        'auto'
            Maximum of the 'sturges' and 'fd' estimators. Provides good
            all around performance.
        'fd' (Freedman Diaconis Estimator)
            Robust (resilient to outliers) estimator that takes into
            account data variability and data size.
        'doane'
            An improved version of Sturges' estimator that works better
            with non-normal datasets.
        'scott'
            Less robust estimator that takes into account data variability
            and data size.
        'stone'
            Estimator based on leave-one-out cross-validation estimate of
            the integrated squared error. Can be regarded as a generalization
            of Scott's rule.
        'rice'
            Estimator does not take variability into account, only data
            size. Commonly overestimates number of bins required.
        'sturges'
            R's default method, only accounts for data size. Only
            optimal for gaussian data and underestimates number of bins
            for large non-gaussian datasets.
        'sqrt'
            Square root (of data size) estimator, used by Excel and
            other programs for its speed and simplicity.
    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). This is currently not used by any of the bin estimators,
        but may be in the future.
    Returns
    -------
    bin_edges : array of dtype float
        The edges to pass into `histogram`
    See Also
    --------
    histogram
    Notes
    -----
    The methods to estimate the optimal number of bins are well founded
    in literature, and are inspired by the choices R provides for
    histogram visualisation. Note that having the number of bins
    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
    why it appears in most estimators. These are simply plug-in methods
    that give good starting points for number of bins. In the equations
    below, :math:`h` is the binwidth and :math:`n_h` is the number of
    bins. All estimators that compute bin counts are recast to bin width
    using the `ptp` of the data. The final bin count is obtained from
    ``np.round(np.ceil(range / h))``. The final bin width is often less
    than what is returned by the estimators below.
    'auto' (maximum of the 'sturges' and 'fd' estimators)
        A compromise to get a good value. For small datasets the Sturges
        value will usually be chosen, while larger datasets will usually
        default to FD. Avoids the overly conservative behaviour of FD
        and Sturges for small and large datasets respectively.
        Switchover point is usually :math:`a.size \approx 1000`.
    'fd' (Freedman Diaconis Estimator)
        .. math:: h = 2 \frac{IQR}{n^{1/3}}
        The binwidth is proportional to the interquartile range (IQR)
        and inversely proportional to cube root of a.size. Can be too
        conservative for small datasets, but is quite good for large
        datasets. The IQR is very robust to outliers.
    'scott'
        .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}}
        The binwidth is proportional to the standard deviation of the
        data and inversely proportional to cube root of ``x.size``. Can
        be too conservative for small datasets, but is quite good for
        large datasets. The standard deviation is not very robust to
        outliers. Values are very similar to the Freedman-Diaconis
        estimator in the absence of outliers.
    'rice'
        .. math:: n_h = 2n^{1/3}
        The number of bins is only proportional to cube root of
        ``a.size``. It tends to overestimate the number of bins and it
        does not take into account data variability.
    'sturges'
        .. math:: n_h = \log _{2}(n) + 1
        The number of bins is the base 2 log of ``a.size``. This
        estimator assumes normality of data and is too conservative for
        larger, non-normal datasets. This is the default method in R's
        ``hist`` method.
    'doane'
        .. math:: n_h = 1 + \log_{2}(n) +
                        \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right)
            g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right]
            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
        An improved version of Sturges' formula that produces better
        estimates for non-normal datasets. This estimator attempts to
        account for the skew of the data.
    'sqrt'
        .. math:: n_h = \sqrt n
        The simplest and fastest estimator. Only takes into account the
        data size.
    Examples
    --------
    >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
    >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
    array([0. , 0.25, 0.5 , 0.75, 1. ])
    >>> np.histogram_bin_edges(arr, bins=2)
    array([0. , 2.5, 5. ])
    For consistency with histogram, an array of pre-computed bins is
    passed through unmodified:
    >>> np.histogram_bin_edges(arr, [1, 2])
    array([1, 2])
    This function allows one set of bins to be computed, and reused across
    multiple histograms:
    >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
    >>> shared_bins
    array([0., 1., 2., 3., 4., 5.])
    >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
    >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
    >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
    >>> hist_0; hist_1
    array([1, 1, 0, 1, 0])
    array([2, 0, 1, 1, 2])
    Which gives more easily comparable results than using separate bins for
    each histogram:
    >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
    >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
    >>> hist_0; hist_1
    array([1, 1, 1])
    array([2, 1, 1, 2])
    >>> bins_0; bins_1
    array([0., 1., 2., 3.])
    array([0. , 1.25, 2.5 , 3.75, 5. ])
    """
    # Ravel `a` and check `weights` against it (helper defined earlier in
    # this module).
    a, weights = _ravel_and_check_weights(a, weights)
    # Only the edges are needed here; the second return value describes
    # uniform bins and is only used by `histogram`'s fast path.
    bin_edges, _ = _get_bin_edges(a, bins, range, weights)
    return bin_edges
def _histogram_dispatcher(
a, bins=None, range=None, density=None, weights=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, density=None, weights=None):
    r"""
    Compute the histogram of a dataset.
    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines a monotonically increasing array of bin edges,
        including the rightmost edge, allowing for non-uniform bin widths.
        .. versionadded:: 1.11.0
        If `bins` is a string, it defines the method used to calculate the
        optimal bin width, as defined by `histogram_bin_edges`.
    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). If `density` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.
    See Also
    --------
    histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words,
    if `bins` is::
      [1, 2, 3, 4]
    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
    the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
    *includes* 4.
    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))
    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist * np.diff(bin_edges))
    1.0
    .. versionadded:: 1.11.0
    Automated Bin Selection Methods example, using 2 peak random data
    with 2000 points:
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.RandomState(10)  # deterministic random data
    >>> a = np.hstack((rng.normal(size=1000),
    ...                rng.normal(loc=5, scale=2, size=1000)))
    >>> _ = plt.hist(a, bins='auto')  # arguments are passed to np.histogram
    >>> plt.title("Histogram with 'auto' bins")
    Text(0.5, 1.0, "Histogram with 'auto' bins")
    >>> plt.show()
    """
    a, weights = _ravel_and_check_weights(a, weights)
    # uniform_bins is ``(first_edge, last_edge, n_equal_bins)`` when the
    # edges are equally spaced, and None otherwise (see _get_bin_edges).
    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = np.dtype(np.intp)
    else:
        ntype = weights.dtype
    # We set a block size, as this allows us to iterate over chunks when
    # computing histograms, to minimize memory usage.
    BLOCK = 65536
    # The fast path uses bincount, but that only works for certain types
    # of weight
    simple_weights = (
        weights is None or
        np.can_cast(weights.dtype, np.double) or
        np.can_cast(weights.dtype, complex)
    )
    if uniform_bins is not None and simple_weights:
        # Fast algorithm for equal bins
        # We now convert values of a to bin indices, under the assumption of
        # equal bin widths (which is valid here).
        first_edge, last_edge, n_equal_bins = uniform_bins
        # Initialize empty histogram
        n = np.zeros(n_equal_bins, ntype)
        # Pre-compute histogram scaling factor
        norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
        # We iterate over blocks here for two reasons: the first is that for
        # large arrays, it is actually faster (for example for a 10^8 array it
        # is 2x as fast) and it results in a memory footprint 3x lower in the
        # limit of large arrays.
        for i in _range(0, len(a), BLOCK):
            tmp_a = a[i:i+BLOCK]
            if weights is None:
                tmp_w = None
            else:
                tmp_w = weights[i:i + BLOCK]
            # Only include values in the right range
            # (the fancy-index filtering below is skipped when everything
            # already lies inside the range).
            keep = (tmp_a >= first_edge)
            keep &= (tmp_a <= last_edge)
            if not np.logical_and.reduce(keep):
                tmp_a = tmp_a[keep]
                if tmp_w is not None:
                    tmp_w = tmp_w[keep]
            # This cast ensures no type promotions occur below, which gh-10322
            # make unpredictable. Getting it wrong leads to precision errors
            # like gh-8123.
            tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
            # Compute the bin indices, and for values that lie exactly on
            # last_edge we need to subtract one
            f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
            indices = f_indices.astype(np.intp)
            indices[indices == n_equal_bins] -= 1
            # The index computation is not guaranteed to give exactly
            # consistent results within ~1 ULP of the bin edges.
            decrement = tmp_a < bin_edges[indices]
            indices[decrement] -= 1
            # The last bin includes the right edge. The other bins do not.
            increment = ((tmp_a >= bin_edges[indices + 1])
                         & (indices != n_equal_bins - 1))
            indices[increment] += 1
            # We now compute the histogram using bincount
            # (complex weights are accumulated component-wise, since bincount
            # operates on real-valued weight arrays).
            if ntype.kind == 'c':
                n.real += np.bincount(indices, weights=tmp_w.real,
                                      minlength=n_equal_bins)
                n.imag += np.bincount(indices, weights=tmp_w.imag,
                                      minlength=n_equal_bins)
            else:
                n += np.bincount(indices, weights=tmp_w,
                                 minlength=n_equal_bins).astype(ntype)
    else:
        # Compute via cumulative histogram
        cum_n = np.zeros(bin_edges.shape, ntype)
        if weights is None:
            for i in _range(0, len(a), BLOCK):
                sa = np.sort(a[i:i+BLOCK])
                cum_n += _search_sorted_inclusive(sa, bin_edges)
        else:
            zero = np.zeros(1, dtype=ntype)
            for i in _range(0, len(a), BLOCK):
                tmp_a = a[i:i+BLOCK]
                tmp_w = weights[i:i+BLOCK]
                sorting_index = np.argsort(tmp_a)
                sa = tmp_a[sorting_index]
                sw = tmp_w[sorting_index]
                cw = np.concatenate((zero, sw.cumsum()))
                bin_index = _search_sorted_inclusive(sa, bin_edges)
                cum_n += cw[bin_index]
        n = np.diff(cum_n)
    if density:
        # Normalize so the integral over the full range is 1: divide by the
        # bin widths and by the total count.
        db = np.array(np.diff(bin_edges), float)
        return n/db/n.sum(), bin_edges
    return n, bin_edges
def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
weights=None):
if hasattr(sample, 'shape'): # same condition as used in histogramdd
yield sample
else:
yield from sample
with contextlib.suppress(TypeError):
yield from bins
yield weights
@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, density=None, weights=None):
    """
    Compute the multidimensional histogram of some data.
    Parameters
    ----------
    sample : (N, D) array, or (N, D) array_like
        The data to be histogrammed.
        Note the unusual interpretation of sample when an array_like:
        * When an array, each row is a coordinate in a D-dimensional space -
          such as ``histogramdd(np.array([p1, p2, p3]))``.
        * When an array_like, each element is the list of values for single
          coordinate - such as ``histogramdd((X, Y, Z))``.
        The first form should be preferred.
    bins : sequence or int, optional
        The bin specification:
        * A sequence of arrays describing the monotonically increasing bin
          edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).
    range : sequence, optional
        A sequence of length D, each an optional (lower, upper) tuple giving
        the outer bin edges to be used if the edges are not given explicitly in
        `bins`.
        An entry of None in the sequence results in the minimum and maximum
        values being used for the corresponding dimension.
        The default, None, is equivalent to passing a tuple of D None values.
    density : bool, optional
        If False, the default, returns the number of samples in each bin.
        If True, returns the probability *density* function at the bin,
        ``bin_count / sample_count / bin_volume``.
    weights : (N,) array_like, optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if density is True. If density is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.
    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See density and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.
    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram
    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)
    """
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        N, D = sample.shape
    # nbin[i] is filled in below with len(edges[i]) + 1: the bins along
    # axis i plus one outlier bin at each end.
    nbin = np.empty(D, np.intp)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        # Normalize weights to an ndarray for the bincount call below.
        weights = np.asarray(weights)
    try:
        M = len(bins)
        if M != D:
            raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                ' sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]
    # normalize the range argument
    if range is None:
        range = (None,) * D
    elif len(range) != D:
        raise ValueError('range argument must have one entry per dimension')
    # Create edge arrays
    for i in _range(D):
        if np.ndim(bins[i]) == 0:
            if bins[i] < 1:
                raise ValueError(
                    '`bins[{}]` must be positive, when an integer'.format(i))
            smin, smax = _get_outer_edges(sample[:,i], range[i])
            try:
                n = operator.index(bins[i])
            except TypeError as e:
                raise TypeError(
                    "`bins[{}]` must be an integer, when a scalar".format(i)
                ) from e
            edges[i] = np.linspace(smin, smax, n + 1)
        elif np.ndim(bins[i]) == 1:
            edges[i] = np.asarray(bins[i])
            if np.any(edges[i][:-1] > edges[i][1:]):
                raise ValueError(
                    '`bins[{}]` must be monotonically increasing, when an array'
                    .format(i))
        else:
            raise ValueError(
                '`bins[{}]` must be a scalar or 1d array'.format(i))
        nbin[i] = len(edges[i]) + 1  # includes an outlier on each end
        dedges[i] = np.diff(edges[i])
    # Compute the bin number each sample falls into.
    Ncount = tuple(
        # avoid np.digitize to work around gh-11022
        np.searchsorted(edges[i], sample[:, i], side='right')
        for i in _range(D)
    )
    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in _range(D):
        # Find which points are on the rightmost edge.
        on_edge = (sample[:, i] == edges[i][-1])
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1
    # Compute the sample indices in the flattened histogram matrix.
    # This raises an error if the array is too large.
    xy = np.ravel_multi_index(Ncount, nbin)
    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    hist = np.bincount(xy, weights, minlength=nbin.prod())
    # Shape into a proper matrix
    hist = hist.reshape(nbin)
    # This preserves the (bad) behavior observed in gh-7845, for now.
    hist = hist.astype(float, casting='safe')
    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*(slice(1, -1),)
    hist = hist[core]
    if density:
        # calculate the probability density function:
        # divide by the bin width along each axis (broadcast via reshape)
        # and by the total count, so the result integrates to 1.
        s = hist.sum()
        for i in _range(D):
            shape = np.ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s
    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
| bsd-3-clause | 4a14341aff1849a2ff5dc4136c15e95b | 34.231776 | 104 | 0.60104 | 3.865669 | false | false | false | false |
numpy/numpy | numpy/core/overrides.py | 7 | 8306 | """Implementation of __array_function__ overrides from NEP-18."""
import collections
import functools
import os
from numpy.core._multiarray_umath import (
add_docstring, implement_array_function, _get_implementing_args)
from numpy.compat._inspect import getargspec
ARRAY_FUNCTION_ENABLED = bool(
int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
array_function_like_doc = (
"""like : array_like, optional
Reference object to allow the creation of arrays which are not
NumPy arrays. If an array-like passed in as ``like`` supports
the ``__array_function__`` protocol, the result will be defined
by it. In this case, it ensures the creation of an array object
compatible with that passed in via this argument."""
)
def set_array_function_like_doc(public_api):
    """Fill in the ``${ARRAY_FUNCTION_LIKE}`` placeholder in a docstring.

    Replaces the placeholder in ``public_api.__doc__`` (when a docstring is
    present) with the shared `array_function_like_doc` text, and returns the
    function so this can be used as a decorator.
    """
    doc = public_api.__doc__
    if doc is not None:
        public_api.__doc__ = doc.replace(
            "${ARRAY_FUNCTION_LIKE}",
            array_function_like_doc,
        )
    return public_api
add_docstring(
implement_array_function,
"""
Implement a function with checks for __array_function__ overrides.
All arguments are required, and can only be passed by position.
Parameters
----------
implementation : function
Function that implements the operation on NumPy array without
overrides when called like ``implementation(*args, **kwargs)``.
public_api : function
Function exposed by NumPy's public API originally called like
``public_api(*args, **kwargs)`` on which arguments are now being
checked.
relevant_args : iterable
Iterable of arguments to check for __array_function__ methods.
args : tuple
Arbitrary positional arguments originally passed into ``public_api``.
kwargs : dict
Arbitrary keyword arguments originally passed into ``public_api``.
Returns
-------
Result from calling ``implementation()`` or an ``__array_function__``
method, as appropriate.
Raises
------
TypeError : if no implementation is found.
""")
# exposed for testing purposes; used internally by implement_array_function
add_docstring(
_get_implementing_args,
"""
Collect arguments on which to call __array_function__.
Parameters
----------
relevant_args : iterable of array-like
Iterable of possibly array-like arguments to check for
__array_function__ methods.
Returns
-------
Sequence of arguments with __array_function__ methods, in the order in
which they should be called.
""")
ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')


def verify_matching_signatures(implementation, dispatcher):
    """Verify that a dispatcher function has the right signature."""
    impl_spec = ArgSpec(*getargspec(implementation))
    disp_spec = ArgSpec(*getargspec(dispatcher))

    mismatch = (
        impl_spec.args != disp_spec.args
        or impl_spec.varargs != disp_spec.varargs
        or impl_spec.keywords != disp_spec.keywords
        or bool(impl_spec.defaults) != bool(disp_spec.defaults)
        or (impl_spec.defaults is not None
            and len(impl_spec.defaults) != len(disp_spec.defaults))
    )
    if mismatch:
        raise RuntimeError('implementation and dispatcher for %s have '
                           'different function signatures' % implementation)

    if impl_spec.defaults is not None:
        # Dispatchers must default every optional argument to None.
        if disp_spec.defaults != (None,) * len(disp_spec.defaults):
            raise RuntimeError('dispatcher functions can only use None for '
                               'default argument values')
def set_module(module):
    """Decorator that overrides ``__module__`` on a function or class.

    Example usage::

        @set_module('numpy')
        def example():
            pass

        assert example.__module__ == 'numpy'
    """
    def decorator(obj):
        # A module of None means "leave the original __module__ alone".
        if module is not None:
            obj.__module__ = module
        return obj
    return decorator
def array_function_dispatch(dispatcher, module=None, verify=True,
                            docs_from_dispatcher=False):
    """Decorator for adding dispatch with the __array_function__ protocol.
    See NEP-18 for example usage.
    Parameters
    ----------
    dispatcher : callable
        Function that when called like ``dispatcher(*args, **kwargs)`` with
        arguments from the NumPy function call returns an iterable of
        array-like arguments to check for ``__array_function__``.
    module : str, optional
        __module__ attribute to set on new function, e.g., ``module='numpy'``.
        By default, module is copied from the decorated function.
    verify : bool, optional
        If True, verify the that the signature of the dispatcher and decorated
        function signatures match exactly: all required and optional arguments
        should appear in order with the same names, but the default values for
        all optional arguments should be ``None``. Only disable verification
        if the dispatcher's signature needs to deviate for some particular
        reason, e.g., because the function has a signature like
        ``func(*args, **kwargs)``.
    docs_from_dispatcher : bool, optional
        If True, copy docs from the dispatcher function onto the dispatched
        function, rather than from the implementation. This is useful for
        functions defined in C, which otherwise don't have docstrings.
    Returns
    -------
    Function suitable for decorating the implementation of a NumPy function.
    """
    # When dispatch is disabled (see ARRAY_FUNCTION_ENABLED above), return
    # the implementation unchanged apart from the doc/module fixups.
    if not ARRAY_FUNCTION_ENABLED:
        def decorator(implementation):
            if docs_from_dispatcher:
                add_docstring(implementation, dispatcher.__doc__)
            if module is not None:
                implementation.__module__ = module
            return implementation
        return decorator
    def decorator(implementation):
        if verify:
            verify_matching_signatures(implementation, dispatcher)
        if docs_from_dispatcher:
            add_docstring(implementation, dispatcher.__doc__)
        @functools.wraps(implementation)
        def public_api(*args, **kwargs):
            try:
                relevant_args = dispatcher(*args, **kwargs)
            except TypeError as exc:
                # Try to clean up a signature related TypeError. Such an
                # error will be something like:
                #     dispatcher.__name__() got an unexpected keyword argument
                #
                # So replace the dispatcher name in this case. In principle
                # TypeErrors may be raised from _within_ the dispatcher, so
                # we check that the traceback contains a string that starts
                # with the name. (In principle we could also check the
                # traceback length, as it would be deeper.)
                msg = exc.args[0]
                disp_name = dispatcher.__name__
                if not isinstance(msg, str) or not msg.startswith(disp_name):
                    raise
                # Replace with the correct name and re-raise:
                new_msg = msg.replace(disp_name, public_api.__name__)
                raise TypeError(new_msg) from None
            return implement_array_function(
                implementation, public_api, relevant_args, args, kwargs)
        # Rewrite the wrapper's code object so that tracebacks show the
        # implementation's name inside '<__array_function__ internals>'.
        public_api.__code__ = public_api.__code__.replace(
            co_name=implementation.__name__,
            co_filename='<__array_function__ internals>')
        if module is not None:
            public_api.__module__ = module
        # Make the undecorated implementation reachable from the public
        # function.
        public_api._implementation = implementation
        return public_api
    return decorator
def array_function_from_dispatcher(
        implementation, module=None, verify=True, docs_from_dispatcher=True):
    """Like array_function_dispatcher, but with function arguments flipped."""
    def decorator(dispatcher):
        # Build the usual dispatch decorator and apply it to the fixed
        # implementation.
        wrap = array_function_dispatch(
            dispatcher, module, verify=verify,
            docs_from_dispatcher=docs_from_dispatcher)
        return wrap(implementation)
    return decorator
| bsd-3-clause | 138b84d23e00c70eb90eb04885e42146 | 36.080357 | 79 | 0.636769 | 4.938169 | false | false | false | false |
numpy/numpy | doc/postprocess.py | 18 | 1328 | #!/usr/bin/env python3
"""
Post-processes HTML and Latex files output by Sphinx.
"""
import io
def main():
    """Parse command-line arguments and post-process each file in place."""
    import argparse

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('mode', help='file mode', choices=('html', 'tex'))
    parser.add_argument('file', nargs='+', help='input file(s)')
    options = parser.parse_args()

    for filename in options.file:
        # Read the whole file first, then rewrite it in place.
        with io.open(filename, 'r', encoding="utf-8") as handle:
            content = handle.readlines()
        if options.mode == 'html':
            processed = process_html(filename, content)
        elif options.mode == 'tex':
            processed = process_tex(content)
        with io.open(filename, 'w', encoding="utf-8") as handle:
            handle.write("".join(processed))
def process_html(fn, lines):
    """Return *lines* unchanged; HTML output currently needs no fixups."""
    return lines
def process_tex(lines):
    """
    Remove unnecessary section titles from the LaTeX file.

    Drops any line that opens a sectioning command for a ``numpy.`` entry.
    """
    unwanted = (r'\section{numpy.',
                r'\subsection{numpy.',
                r'\subsubsection{numpy.',
                r'\paragraph{numpy.',
                r'\subparagraph{numpy.')
    return [line for line in lines if not line.startswith(unwanted)]
if __name__ == "__main__":
main()
| bsd-3-clause | dacb470f8c2068a447c5dea67ffabb89 | 26.102041 | 74 | 0.561747 | 3.827089 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.