repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
KiranSurath/Audacity-Endeavoru | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# pid -> comm name, learned incrementally from sched events.
# pid 0 is the per-cpu idle task.
threads = { 0 : "idle"}

def thread_name(pid):
    """Return a human-readable "comm:pid" label for *pid*.

    Falls back to "unknown" for pids no sched event has named yet
    (the original raised KeyError on partial traces).
    """
    return "%s:%d" % (threads.get(pid, "unknown"), pid)
class RunqueueEventUnknown:
    """Placeholder runqueue event used when no specific event applies."""

    @staticmethod
    def color():
        # No dedicated color: the rectangle top stays unpainted.
        return None

    def __repr__(self):
        return "unknown"
class RunqueueEventSleep:
    """Event: a task left the runqueue by going to sleep."""

    @staticmethod
    def color():
        # blue
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        # pid of the task that blocked
        self.sleeper = sleeper

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
    """Event: a task was woken up onto this runqueue."""

    @staticmethod
    def color():
        # yellow
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        # pid of the task that was woken
        self.wakee = wakee

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
    """Event: a newly forked task became runnable on this runqueue."""

    @staticmethod
    def color():
        # green
        return (0, 0xff, 0)

    def __init__(self, child):
        # pid of the freshly forked child
        self.child = child

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
    """Event: a task migrated onto this runqueue from another cpu."""

    @staticmethod
    def color():
        # light cyan
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        # pid of the arriving task
        self.new = new

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
    """Event: a task migrated away from this runqueue to another cpu."""

    @staticmethod
    def color():
        # magenta
        return (0xff, 0, 0xff)

    def __init__(self, old):
        # pid of the departing task
        self.old = old

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
    """Snapshot of one cpu's runqueue: the runnable pids plus the event
    that produced this state.

    The update methods return a fresh snapshot, or ``self`` when
    nothing changed, so consecutive time slices can share unchanged
    runqueues.  Note that __migrate_in() updates ``self.event`` in
    place in the already-present case.
    """

    def __init__(self, tasks=(0,), event=RunqueueEventUnknown()):
        # (0,) instead of the original mutable [0] default; tuple()
        # copies the input so the snapshot owns its task list.
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        event = RunqueueEventUnknown()
        # No change: prev stays runnable and both pids already tracked.
        if taskState(prev_state) == "R" and next in self.tasks \
                and prev in self.tasks:
            return self
        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)
        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)
        if next not in next_tasks:
            next_tasks.append(next)
        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Task *old* left this runqueue (migration source side)."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]
        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        if new in self.tasks:
            # Already tracked: just record the event on this snapshot.
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])
        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        """Task *new* arrived on this runqueue (migration target side)."""
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        """Task *new* was woken up onto this runqueue."""
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        """A freshly forked task *new* became runnable here."""
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        # BUGFIX: the original appended self.origin_tostring(), a
        # method that exists nowhere in the file, so repr() always
        # raised AttributeError.
        return "%r (%r)" % (self.tasks, self.event)
class TimeSlice:
    """State of every cpu runqueue inside one [start, end) interval.

    A new TimeSlice is chained from the previous one and starts as a
    copy of its runqueues and total load; event handlers then update
    it and append it to the global slice list.
    """
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev          # previous TimeSlice, None for the first
        self.end = start          # extended later by next()
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Keep the aggregate load in sync after replacing a runqueue.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        """Apply a context switch on *cpu*; records the slice in ts_list
        only when the runqueue actually changed."""
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)
        if old_rq is new_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        """Move task *new* from old_cpu's runqueue to new_cpu's."""
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)
        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)
        ts_list.append(self)
        # Only flag the source cpu when the task was really removed.
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        """Apply a wakeup (fork != 0 means a brand-new task) on *cpu*."""
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)
        if new_rq is old_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        # Close this slice at time t and chain a new one from it.
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered sequence of TimeSlices plus lookup and painting helpers.

    Fixes over the original:
      * mutable default argument ([]) was shared between instances
      * find_time_slice() raised IndexError on an empty list
      * integer division / iteration use ``//`` and ``range``, which
        behave identically on Python 2 but also work on Python 3
    """

    def __init__(self, arg=None):
        # Don't use a mutable default: the one shared [] leaked state
        # between every instance created without an argument.
        self.data = [] if arg is None else arg

    def get_time_slice(self, ts):
        """Close the last slice at *ts* and chain a new one from it
        (or bootstrap the very first slice)."""
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        """Binary-search the index of the slice containing *ts*, -1 if none."""
        if not self.data:
            return -1   # was: IndexError on an empty list
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False
            i = (end + start) // 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue
            if self.data[i].end < ts:
                start = i
            elif self.data[i].start > ts:
                end = i
        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        """GUI callback: summarize the clicked cpu/timestamp pair."""
        idx = self.find_time_slice(t)
        if idx == -1:
            return
        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start // (10 ** 9), (ts.start % (10 ** 9)) // 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) // (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)
        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        """Paint one cpu's band for one slice: whiter = lighter load,
        full red = all of the load; event cpus get a colored top bar."""
        rq = slice.rqs[cpu]
        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0
        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)
        top_color = None
        if cpu in slice.event_cpus:
            top_color = rq.event.color()
        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        """Repaint every slice overlapping [start, end]."""
        i = self.find_time_slice(start)
        if i == -1:
            return
        for i in range(i, len(self.data)):   # was xrange (Python 2 only)
            timeslice = self.data[i]
            if timeslice.start > end:
                return
            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        """(first start, last end) of the recorded trace, (0, 0) if empty."""
        if len(self.data) == 0:
            return (0, 0)
        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        """Highest cpu number present in the final slice."""
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Dispatches raw perf sched events into the TimeSliceList model.

    NOTE: Python 2 code (print statement) — this script targets perf's
    embedded Python 2 interpreter.
    """
    def __init__(self):
        # cpu -> pid last seen running there; -1 = not known yet
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
            next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            # Mismatch means the trace dropped events; warn but keep going.
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
        # Learn/refresh the comm names of both tasks.
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        # Record a cross-cpu task migration.
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        # Failed wakeups don't change any runqueue.
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    """perf hook: called once before the first event; set up the parser."""
    global parser
    parser = SchedEventProxy()
def trace_end():
    """perf hook: called after the last event; start the wx GUI."""
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
# Generated event handlers (perf script -g python) for events the GUI
# does not visualize: accepted and ignored.
def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass
def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, orig_cpu,
        dest_cpu):
    """Forward task-migration events to the global parser."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
    """Forward context-switch events to the global parser."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    """Forward wakeup-of-new-task events (fork flag = 1)."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    """Forward plain wakeup events (fork flag = 0)."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)
# Remaining generated handlers: events not needed by the GUI, plus the
# catch-all for any event type without a dedicated handler.
def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    pass
| gpl-2.0 |
thesuperzapper/tensorflow | tensorflow/python/kernel_tests/resource_variable_ops_test.py | 16 | 10281 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
    """Unit tests for resource-handle based variables: handle ops,
    assign/read ops, the ResourceVariable class, and sharing by name."""

    def testHandleDtypeShapeMatch(self):
        with self.test_session():
            handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
            # Wrong dtype and wrong shape must both be rejected.
            with self.assertRaises(ValueError):
                resource_variable_ops.assign_variable_op(
                    handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
            with self.assertRaises(ValueError):
                resource_variable_ops.assign_variable_op(
                    handle, constant_op.constant([0], dtype=dtypes.int32)).run()
            resource_variable_ops.assign_variable_op(
                handle, constant_op.constant(0, dtype=dtypes.int32)).run()

    def testDtypeSurvivesIdentity(self):
        with self.test_session():
            handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
            id_handle = array_ops.identity(handle)
            resource_variable_ops.assign_variable_op(
                id_handle, constant_op.constant(0, dtype=dtypes.int32)).run()

    def testCreateRead(self):
        with self.test_session():
            handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
            resource_variable_ops.assign_variable_op(
                handle, constant_op.constant(1, dtype=dtypes.int32)).run()
            value = resource_variable_ops.read_variable_op(
                handle, dtype=dtypes.int32).eval()
            self.assertAllEqual(1, value)

    def testManyAssigns(self):
        # Control dependencies must serialize read/write/read correctly.
        with self.test_session() as session:
            handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
            create = resource_variable_ops.assign_variable_op(
                handle, constant_op.constant(1, dtype=dtypes.int32))
            with ops.control_dependencies([create]):
                first_read = resource_variable_ops.read_variable_op(
                    handle, dtype=dtypes.int32)
            with ops.control_dependencies([first_read]):
                write = resource_variable_ops.assign_variable_op(
                    handle, constant_op.constant(2, dtype=dtypes.int32))
            with ops.control_dependencies([write]):
                second_read = resource_variable_ops.read_variable_op(
                    handle, dtype=dtypes.int32)
            f, s = session.run([first_read, second_read])
            self.assertEqual(f, 1)
            self.assertEqual(s, 2)

    def testAssignAdd(self):
        with self.test_session():
            handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
            resource_variable_ops.assign_variable_op(
                handle, constant_op.constant(1, dtype=dtypes.int32)).run()
            resource_variable_ops.assign_add_variable_op(
                handle, constant_op.constant(1, dtype=dtypes.int32)).run()
            read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
            self.assertEqual(read.eval(), 2)

    def testScatterAdd(self):
        with self.test_session():
            handle = resource_variable_ops.var_handle_op(
                dtype=dtypes.int32, shape=[1, 1])
            resource_variable_ops.assign_variable_op(
                handle, constant_op.constant([[1]], dtype=dtypes.int32)).run()
            resource_variable_ops.resource_scatter_add(
                handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)).run()
            read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
            self.assertEqual(read.eval(), [[3]])

    def testGPU(self):
        # Smoke test only: runs on GPU when one is available.
        with self.test_session(use_gpu=True) as sess:
            abc = variable_scope.get_variable(
                "abc",
                shape=[1],
                initializer=init_ops.ones_initializer(),
                use_resource=True)
            sess.run(variables.global_variables_initializer())
            print(sess.run(abc))

    def testInitFn(self):
        with self.test_session():
            v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
                                                       dtype=dtypes.float32)
            # Initializer must be colocated with the variable's handle.
            self.assertEqual(v.handle.op.colocation_groups(),
                             v.initializer.inputs[1].op.colocation_groups())

    def testInitFnDtype(self):
        with self.test_session():
            v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
                                                       dtype=dtypes.float32)
            self.assertEqual(dtypes.float32, v.value().dtype)

    def testInitFnNoDtype(self):
        with self.test_session():
            v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1)
            self.assertEqual(dtypes.int32, v.value().dtype)

    def testInitializeAllVariables(self):
        with self.test_session():
            v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32)
            # Reading before initialization must fail.
            with self.assertRaises(errors.NotFoundError):
                v.value().eval()
            variables.global_variables_initializer().run()
            self.assertEqual(1.0, v.value().eval())

    def testOperatorOverload(self):
        with self.test_session():
            v = resource_variable_ops.ResourceVariable(1.0)
            variables.global_variables_initializer().run()
            self.assertEqual(2.0, (v+v).eval())

    def testAssignMethod(self):
        with self.test_session():
            v = resource_variable_ops.ResourceVariable(1.0)
            variables.global_variables_initializer().run()
            v.assign(2.0).eval()
            self.assertEqual(2.0, v.value().eval())

    def testToFromProto(self):
        with self.test_session():
            v = resource_variable_ops.ResourceVariable(1.0)
            variables.global_variables_initializer().run()
            # Round-trip through the proto representation.
            w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
            self.assertEquals(2, math_ops.add(w, 1).eval())

    def testAssignAddMethod(self):
        with self.test_session():
            v = resource_variable_ops.ResourceVariable(1.0)
            variables.global_variables_initializer().run()
            v.assign_add(1.0).eval()
            self.assertEqual(2.0, v.value().eval())

    def testAssignSubMethod(self):
        with self.test_session():
            v = resource_variable_ops.ResourceVariable(3.0)
            variables.global_variables_initializer().run()
            v.assign_sub(1.0).eval()
            self.assertEqual(2.0, v.value().eval())

    def testDestroyResource(self):
        with self.test_session() as sess:
            v = resource_variable_ops.ResourceVariable(3.0)
            variables.global_variables_initializer().run()
            self.assertEqual(3.0, v.value().eval())
            sess.run(resource_variable_ops.destroy_resource_op(v.handle))
            with self.assertRaises(errors.NotFoundError):
                v.value().eval()
            # Handle to a resource not actually created.
            handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
            # Should raise no exception
            sess.run(resource_variable_ops.destroy_resource_op(
                handle, ignore_lookup_error=True))

    def testAssignDifferentShapes(self):
        with self.test_session() as sess, variable_scope.variable_scope(
                "foo", use_resource=True):
            var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
            placeholder = array_ops.placeholder(dtypes.float32)
            assign = var.assign(placeholder)
            sess.run([assign],
                     feed_dict={placeholder: np.zeros(shape=[2, 2],
                                                      dtype=np.float32)})

    def testDtypeAfterFromProto(self):
        v = resource_variable_ops.ResourceVariable(2.0)
        w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
        self.assertIsInstance(w.dtype, dtypes.DType)
        self.assertEqual(v.dtype, w.dtype)

    def testCachingDevice(self):
        # caching_device must override colocation-derived placement.
        with ops.device("/job:server/task:1"):
            v = resource_variable_ops.ResourceVariable(
                2.0, caching_device="/job:localhost")
            self.assertEqual("/job:localhost", v.value().device)
            with self.assertRaisesRegexp(ValueError, "No attr named '_class'"):
                _ = v.value().op.get_attr("_class")
        with ops.colocate_with(v.op):
            w = resource_variable_ops.ResourceVariable(
                2.0, caching_device="/job:localhost")
            self.assertEqual("/job:localhost", w.value().device)
            with self.assertRaisesRegexp(ValueError, "No attr named '_class'"):
                _ = w.value().op.get_attr("_class")

    def testSharedName(self):
        with self.test_session():
            v = resource_variable_ops.ResourceVariable(300.0, name="var1")
            v.initializer.run()
            # A second handle with the same shared_name sees v's value.
            w = resource_variable_ops.var_handle_op(dtype=v.dtype.base_dtype,
                                                    shape=v.get_shape(),
                                                    shared_name="var1")
            w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
            self.assertEqual(300.0, w_read.eval())
            # A different shared_name must not resolve.
            x = resource_variable_ops.var_handle_op(dtype=v.dtype.base_dtype,
                                                    shape=v.get_shape(),
                                                    shared_name="var1/")
            x_read = resource_variable_ops.read_variable_op(x, v.dtype.base_dtype)
            with self.assertRaisesOpError("Resource .*/var1//.* does not exist"):
                _ = x_read.eval()
if __name__ == "__main__":
    # Run this module's tests under the TensorFlow test runner.
    test.main()
| apache-2.0 |
simongoffin/website_version | openerp/addons/base/ir/ir_default.py | 342 | 1883 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class ir_default(osv.osv):
    """Stored per-user / per-company default values for model fields."""
    _name = 'ir.default'
    _columns = {
        'field_tbl': fields.char('Object'),            # model (table) name
        'field_name': fields.char('Object Field'),
        'value': fields.char('Default Value'),
        'uid': fields.many2one('res.users', 'Users'),  # empty = all users
        'page': fields.char('View'),
        'ref_table': fields.char('Table Ref.'),
        'ref_id': fields.integer('ID Ref.',size=64),
        'company_id': fields.many2one('res.company','Company')
    }

    def _get_company_id(self, cr, uid, context=None):
        # Default company: the current user's company, False if unset.
        res = self.pool.get('res.users').read(cr, uid, [uid], ['company_id'], context=context)
        if res and res[0]['company_id']:
            return res[0]['company_id'][0]
        return False

    _defaults = {
        'company_id': _get_company_id,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
redtocatta/paycoin | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
# Configuration for the spinner animation build.
SRC='img/reload_scaled.png'                    # input image
DST='../../src/qt/res/movies/update_spinner.mng'   # output animation
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'                         # per-frame file pattern
NUMFRAMES=35
FRAMERATE=10.0                                 # imagemagick -delay value
CONVERT='convert'                              # imagemagick binary
CLOCKWISE=True
DSIZE=(16,16)                                  # final thumbnail size

im_src = Image.open(SRC)
if CLOCKWISE:
    # Mirror first so the later negative rotation spins clockwise.
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
    """Return the temporary PNG path for animation frame *frame*."""
    name = TMPNAME % frame
    return path.join(TMPDIR, name)
# Render each rotated, downscaled frame to a temp PNG.
frame_files = []
for frame in xrange(NUMFRAMES):   # NOTE: Python 2 (xrange)
    # Half-frame offset centers each frame's angle within its step.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)
# Assemble the frames into the final MNG with imagemagick.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
aewhatley/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)

# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)

# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
    x1 = val
    x2 = X2[i, j]
    # NOTE(review): passing a bare [x1, x2] relies on the old 1-D
    # sample API; newer scikit-learn expects [[x1, x2]] — confirm
    # the targeted sklearn version.
    p = clf.decision_function([x1, x2])
    Z[i, j] = p[0]
# Contours at -1/0/+1: the margins (dashed) and decision boundary (solid).
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
ddolzhenko/jacis | jacis/plugins/sync.py | 1 | 2555 | # Copyright (c) 2016 Dmitry Dolzhenko
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#-------------------------------------------------------------------------------
"""syncing tool
"""
#-------------------------------------------------------------------------------
__author__ = "Dmitry Dolzhenko"
__email__ = "d.dolzhenko@gmail.com"
#-------------------------------------------------------------------------------
import os
import git
import argparse
from jacis import utils
#-------------------------------------------------------------------------------
class Error(Exception):
    """Base exception for the jacis sync plugin.

    The original defined an __init__ that only forwarded *args to
    Exception.__init__ — redundant, so it was removed; behavior is
    unchanged.
    """
    pass
def jacis_plugin(argv):
    """Plugin entry point: parse `<url> <dir>` from argv and sync."""
    parser = argparse.ArgumentParser()
    parser.add_argument('url', help='git url')
    parser.add_argument('dir', help='local dir')
    args = parser.parse_args(argv)
    sync(args.url, args.dir)
def store(info):
    """Clone the repository described by *info* into ./repo.

    *info* is a mapping with a 'type' key; only 'git' is supported,
    in which case 'url' gives the clone source.

    Raises:
        Error: when info['type'] is not a supported repo kind.
    """
    if info['type'] == 'git':
        git.Repo.clone_from(info['url'], 'repo')
    else:
        # Consistency: raise the module's own Error (was a bare
        # Exception) so callers can catch sync failures specifically;
        # Error subclasses Exception, so existing handlers still work.
        raise Error('not supported:' + info['type'])
def sync(url, local_dir):
    """Clone the git repository at *url* into *local_dir*.

    BUGFIX: the original referenced an undefined name ``info`` and
    ignored both parameters, so every call raised NameError.
    """
    git.Repo.clone_from(url, local_dir)
@utils.strong_typed(str, str, str, str)
def auto_repo(kind, remote_url, remote_dir, local_dir):
    """Instantiate the repo handler class matching *kind*.

    Raises:
        Error: for an unknown repo kind.
    """
    # BUGFIX: this was written as a set literal { 'git', GITRepo },
    # so the handlers[kind] lookup below raised TypeError; it must be
    # a mapping from kind name to handler class.
    handlers = {'git': GITRepo}
    if kind not in handlers:
        raise Error('unknown repo: ', kind)
    Repo = handlers[kind]
    # NOTE(review): GITRepo.__init__ currently accepts a single
    # argument but is called with three here — confirm the intended
    # signature before relying on this path.
    return Repo(remote_url, remote_dir, local_dir)
def git_repo(remote_url, remote_dir, local_dir):
    # NOTE(review): opens the existing repo at local_dir and discards
    # the result; remote_url and remote_dir are unused — this looks
    # like an unfinished stub.
    git.Repo(local_dir)
class GITRepo:
    # NOTE(review): auto_repo() instantiates this with three arguments
    # (remote_url, remote_dir, local_dir) but __init__ only accepts
    # one — one of the two is wrong; confirm the intended signature.
    def __init__(self, arg):
        self.arg = arg
| mit |
steventimberman/masterDebater | venv/lib/python2.7/site-packages/pbr/find_package.py | 101 | 1043 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import setuptools
def smart_find_packages(package_list):
    """Run find_packages the way we intend.

    For every newline-separated package name in *package_list*, keep
    the package itself and add each sub-package discovered under its
    directory, returning the de-duplicated names newline-joined.
    """
    found = set()
    for pkg in package_list.strip().split("\n"):
        found.add(pkg)
        pkg_path = pkg.replace('.', os.path.sep)
        for sub in setuptools.find_packages(pkg_path):
            found.add('%s.%s' % (pkg, sub))
    return "\n".join(found)
| mit |
hellais/libdnet | python/test.py | 14 | 8525 | #!/usr/bin/env python
import sys, unittest
sys.path.insert(0, './build')
import dnet
class AddrTestCase(unittest.TestCase):
    """dnet.addr: ordering, broadcast/network math, family properties."""

    def test_addr_cmp(self):
        for atxt in ('1.2.3.0', '0:d:e:a:d:0', 'fe::ed:fa:ce:0'):
            a = dnet.addr(atxt)
            b = dnet.addr(atxt)
            assert a == b
            # Bumping the final digit must order strictly.
            b = dnet.addr(atxt[:-1] + '1')
            assert a < b
            assert b > a

    def test_addr_bcast(self):
        # prefix length -> expected broadcast of 10.0.0.0/<bits>
        d = { 32:'10.0.0.0', 31:'10.0.0.1', 30:'10.0.0.3', 29:'10.0.0.7',
              28:'10.0.0.15', 27:'10.0.0.31', 26:'10.0.0.63', 25:'10.0.0.127',
              24:'10.0.0.255', 23:'10.0.1.255', 22:'10.0.3.255',
              21:'10.0.7.255', 20:'10.0.15.255', 19:'10.0.31.255' }
        for bits in d:
            a = dnet.addr('%s/%d' % (d[32], bits))
            b = a.bcast()
            self.failUnless(b.__str__() == d[bits],
                            'wrong bcast for /%d' % bits)

    def test_addr_net(self):
        # prefix length -> expected network of 1.255.255.255/<bits>
        d = { 32:'1.255.255.255', 31:'1.255.255.254', 30:'1.255.255.252',
              29:'1.255.255.248', 28:'1.255.255.240', 27:'1.255.255.224',
              26:'1.255.255.192', 25:'1.255.255.128', 24:'1.255.255.0',
              23:'1.255.254.0', 22:'1.255.252.0', 21:'1.255.248.0',
              20:'1.255.240.0', 19:'1.255.224.0' }
        for bits in d:
            a = dnet.addr('%s/%d' % (d[32], bits))
            b = a.net()
            self.failUnless(b.__str__() == d[bits],
                            'wrong net for /%d' % bits)

    def test_addr_properties(self):
        atxt = '1.2.3.4/24'
        a = dnet.addr(atxt)
        assert a.type == dnet.ADDR_TYPE_IP and a.bits == 24
        assert a.ip == '\x01\x02\x03\x04' and a.__str__() == atxt
        # Accessing a property of the wrong family must raise ValueError.
        try: self.failUnless(a.eth == 'xxx', 'invalid eth property')
        except ValueError: pass
        atxt = '00:0d:0e:0a:0d:00'
        a = dnet.addr(atxt)
        assert a == dnet.addr('0:d:E:a:D:0')
        assert a.type == dnet.ADDR_TYPE_ETH and a.bits == 48
        assert a.eth == '\x00\x0d\x0e\x0a\x0d\x00' and a.__str__() == atxt
        try: self.failUnless(a.ip6 == 'xxx', 'invalid ip6 property')
        except ValueError: pass
        atxt = 'fe80::dead:beef:feed:face/48'
        a = dnet.addr(atxt)
        assert a == dnet.addr('fe80:0:0::dead:beef:feed:face/48')
        assert a.type == dnet.ADDR_TYPE_IP6 and a.bits == 48
        assert a.ip6 == '\xfe\x80\x00\x00\x00\x00\x00\x00\xde\xad\xbe\xef\xfe\xed\xfa\xce' and a.__str__() == atxt
        try: self.failUnless(a.ip == 'xxx', 'invalid ip property')
        except ValueError: pass
class ArpTestCase(unittest.TestCase):
    """dnet.arp: entry add/get/delete, table iteration, header packing.

    NOTE(review): test_arp uses site-specific addresses and mutates
    the live ARP table; it likely needs elevated privileges.
    """
    def setUp(self):
        self.arp = dnet.arp()
        self.failUnless(self.arp, "couldn't open ARP handle")

    def tearDown(self):
        del self.arp

    def test_arp(self):
        # XXX - site-specific values here!
        pa = dnet.addr('192.168.0.123')
        ha = dnet.addr('0:d:e:a:d:0')
        self.failUnless(self.arp.add(pa, ha) == None, "couldn't add ARP entry")
        self.failUnless(self.arp.get(pa) == ha, "couldn't find ARP entry")
        self.failUnless(self.arp.delete(pa) == None, "couldn't delete ARP entry")

    def __arp_cb(self, pa, ha, arg):
        # XXX - do nothing
        return arg

    def test_arp_loop(self):
        # loop() must return the callback's final return value.
        assert self.arp.loop(self.__arp_cb, 0) == 0
        assert self.arp.loop(self.__arp_cb, 123) == 123

    def test_arp_misc(self):
        sha = '\x00\x0d\x0e\x0a\x0d\x00'
        spa = '\x01\x02\x03\x04'
        dha = '\x00\x0b\x0e\x0e\x0f\x00'
        dpa = '\x05\x06\x07\x08'
        msg = dnet.arp_pack_hdr_ethip(dnet.ARP_OP_REQUEST, sha, spa, dha, dpa)
        assert msg == '\x00\x01\x08\x00\x06\x04\x00\x01\x00\r\x0e\n\r\x00\x01\x02\x03\x04\x00\x0b\x0e\x0e\x0f\x00\x05\x06\x07\x08'
class EthTestCase(unittest.TestCase):
    """dnet.eth: device MAC lookup and address/header helpers."""

    def setUp(self):
        # Use whichever interface routes toward 1.2.3.4.
        self.dev = dnet.intf().get_dst(dnet.addr('1.2.3.4'))['name']
        self.eth = dnet.eth(self.dev)
        self.failUnless(self.eth, "couldn't open Ethernet handle")

    def tearDown(self):
        del self.eth

    def test_eth_get(self):
        mac = self.eth.get()
        self.failUnless(mac, "couldn't get Ethernet address for %s" % self.dev)

    def test_eth_misc(self):
        # ntoa/aton must round-trip between packed and textual MACs.
        n = "\x00\x0d\x0e\x0a\x0d\x00"
        a = '00:0d:0e:0a:0d:00'
        self.failUnless(dnet.eth_ntoa(n) == a)
        self.failUnless(dnet.eth_aton(a) == n)
        dst = "\x00\x0d\x0e\x0a\x0d\x01"
        self.failUnless(dnet.eth_pack_hdr(n, dst, dnet.ETH_TYPE_IP) ==
                        '\x00\r\x0e\n\r\x00\x00\r\x0e\n\r\x01\x08\x00')
class FwTestCase(unittest.TestCase):
    """dnet.fw: firewall rule add/delete and iteration.

    NOTE(review): mutates the live firewall; needs privileges.
    """
    def setUp(self):
        self.dev = dnet.intf().get_dst(dnet.addr('1.2.3.4'))['name']
        self.fw = dnet.fw()
        self.failUnless(self.fw, "couldn't open firewall handle")

    def tearDown(self):
        del self.fw

    def test_fw(self):
        src = dnet.addr('1.2.3.4')
        dst = dnet.addr('5.6.7.8')
        # Block outgoing UDP to ports 660-666 on the default interface.
        d = { 'device':self.dev,
              'op':dnet.FW_OP_BLOCK,
              'dir':dnet.FW_DIR_OUT,
              'proto':dnet.IP_PROTO_UDP,
              'src':src,
              'dst':dst,
              'dport':(660, 666)
        }
        self.failUnless(self.fw.add(d) == None,
                        "couldn't add firewall rule: %s" % d)
        self.failUnless(self.fw.delete(d) == None,
                        "couldn't delete firewall rule: %s" % d)

    def __fw_cb(self, rule, arg):
        # XXX - do nothing
        return arg

    def test_fw_loop(self):
        assert self.fw.loop(self.__fw_cb, 0) == 0
        # XXX - no guarantee of existing fw rules.
        #assert self.fw.loop(self.__fw_cb, 123) == 123
class IntfTestCase(unittest.TestCase):
    """Exercise the dnet network-interface handle (get/set/loop)."""
    def setUp(self):
        self.intf = dnet.intf()
        # assertTrue replaces the deprecated failUnless alias.
        self.assertTrue(self.intf, "couldn't open interface handle")
    def tearDown(self):
        del self.intf
    def test_intf_get(self):
        lo0 = self.intf.get('lo0')
        self.assertTrue(lo0['name'] == 'lo0', "couldn't get loopback config")
        self.assertTrue(self.intf.get_src(dnet.addr('127.0.0.1')) == lo0,
                        "couldn't get_src 127.0.0.1")
        gw = self.intf.get_dst(dnet.addr('1.2.3.4'))
        self.assertTrue(gw, "couldn't get outgoing interface")
    def test_intf_set(self):
        # Temporarily change the loopback MTU, then restore the original.
        lo0 = self.intf.get('lo0')
        old_mtu = lo0['mtu']
        new_mtu = 1234
        lo0['mtu'] = new_mtu
        self.intf.set(lo0)
        lo0 = self.intf.get('lo0')
        assert lo0['mtu'] == new_mtu
        lo0['mtu'] = old_mtu
        self.intf.set(lo0)
    def __intf_cb(self, ifent, arg):
        # XXX - do nothing
        return arg
    def test_intf_loop(self):
        assert self.intf.loop(self.__intf_cb, 0) == 0
        assert self.intf.loop(self.__intf_cb, 123) == 123
class IpTestCase(unittest.TestCase):
    """Exercise the raw IP handle plus ip_ntoa/ip_aton/header helpers."""
    def setUp(self):
        self.ip = dnet.ip()
        # assertTrue replaces the deprecated failUnless alias.
        self.assertTrue(self.ip, "couldn't open raw IP handle")
    def tearDown(self):
        del self.ip
    def test_ip_misc(self):
        n = '\x01\x02\x03\x04'
        a = '1.2.3.4'
        self.assertTrue(dnet.ip_ntoa(n) == a)
        self.assertTrue(dnet.ip_aton(a) == n)
        dst = '\x05\x06\x07\x08'
        hdr = dnet.ip_pack_hdr(0, dnet.IP_HDR_LEN, 666, 0, 255,
                               dnet.IP_PROTO_UDP, n, dst)
        assert hdr == 'E\x00\x00\x14\x02\x9a\x00\x00\xff\x11\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08'
        # ip_checksum() fills in the header checksum field in place.
        hdr = dnet.ip_checksum(hdr)
        assert hdr == 'E\x00\x00\x14\x02\x9a\x00\x00\xff\x11\xa9+\x01\x02\x03\x04\x05\x06\x07\x08'
class RandTestCase(unittest.TestCase):
    """Open and close a dnet pseudo-random-number handle."""
    def setUp(self):
        self.rand = dnet.rand()
        # assertTrue replaces the deprecated failUnless alias.
        self.assertTrue(self.rand, "couldn't open random handle")
    def tearDown(self):
        del self.rand
class RouteTestCase(unittest.TestCase):
    """Exercise the dnet routing-table handle (add/get/delete/loop)."""
    def setUp(self):
        self.route = dnet.route()
        # assertTrue replaces the deprecated failUnless alias.
        self.assertTrue(self.route, "couldn't open route handle")
    def tearDown(self):
        del self.route
    def test_route(self):
        dst = dnet.addr('1.2.3.4/24')
        gw = dnet.addr('127.0.0.1')
        self.route.add(dst, gw)
        self.assertTrue(self.route.get(dst) == gw)
        self.route.delete(dst)
    def __route_cb(self, dst, gw, arg):
        # XXX - do nothing
        return arg
    def test_route_loop(self):
        assert self.route.loop(self.__route_cb, 0) == 0
        assert self.route.loop(self.__route_cb, 123) == 123
if __name__ == '__main__':
    # Run every test case in this module when executed directly.
    unittest.main()
| bsd-3-clause |
eepalms/gem5-newcache | src/arch/x86/isa/insts/system/undefined_operation.py | 41 | 2195 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop UD2
{
fault "new InvalidOpcode()"
};
'''
| bsd-3-clause |
schaubl/libcloud | libcloud/test/compute/test_cloudsigma_v2_0.py | 45 | 27901 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
try:
import simplejson as json
except:
import json
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.compute.drivers.cloudsigma import CloudSigmaNodeDriver
from libcloud.compute.drivers.cloudsigma import CloudSigma_2_0_NodeDriver
from libcloud.compute.drivers.cloudsigma import CloudSigmaError
from libcloud.compute.types import NodeState
from libcloud.test import unittest
from libcloud.test import MockHttpTestCase
from libcloud.test.file_fixtures import ComputeFileFixtures
class CloudSigmaAPI20BaseTestCase(object):
    """Shared tests for the CloudSigma API v2.0 driver.

    Concrete subclasses supply driver_klass / driver_args / driver_kwargs
    and also inherit unittest.TestCase; every HTTP request is answered by
    CloudSigmaMockHttp fixtures, so no live API is contacted.
    """
    def setUp(self):
        # Route all driver connections through the mock HTTP classes.
        self.driver_klass.connectionCls.conn_classes = \
            (CloudSigmaMockHttp, CloudSigmaMockHttp)
        CloudSigmaMockHttp.type = None
        CloudSigmaMockHttp.use_param = 'do'
        self.driver = self.driver_klass(*self.driver_args,
                                        **self.driver_kwargs)
        # Shrink drive state-transition polling so timeout tests run fast.
        self.driver.DRIVE_TRANSITION_SLEEP_INTERVAL = 0.1
        self.driver.DRIVE_TRANSITION_TIMEOUT = 1
        self.node = self.driver.list_nodes()[0]
    def test_invalid_api_versions(self):
        expected_msg = 'Unsupported API version: invalid'
        self.assertRaisesRegexp(NotImplementedError, expected_msg,
                                CloudSigmaNodeDriver, 'username', 'password',
                                api_version='invalid')
    def test_invalid_credentials(self):
        CloudSigmaMockHttp.type = 'INVALID_CREDS'
        self.assertRaises(InvalidCredsError, self.driver.list_nodes)
    def test_invalid_region(self):
        expected_msg = 'Invalid region:'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                CloudSigma_2_0_NodeDriver, 'foo', 'bar',
                                region='invalid')
    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        size = sizes[0]
        self.assertEqual(size.id, 'micro-regular')
    def test_list_images(self):
        images = self.driver.list_images()
        image = images[0]
        self.assertEqual(image.name, 'ubuntu-10.04-toMP')
        self.assertEqual(image.extra['image_type'], 'preinst')
        self.assertEqual(image.extra['media'], 'disk')
        self.assertEqual(image.extra['os'], 'linux')
    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        node = nodes[0]
        self.assertEqual(len(nodes), 2)
        self.assertEqual(node.id, '9de75ed6-fd33-45e2-963f-d405f31fd911')
        self.assertEqual(node.name, 'test no drives')
        self.assertEqual(node.state, NodeState.RUNNING)
        self.assertEqual(node.public_ips, ['185.12.5.181', '178.22.68.55'])
        self.assertEqual(node.private_ips, [])
    def test_create_node(self):
        image = self.driver.list_images()[0]
        size = self.driver.list_sizes()[0]
        metadata = {'foo': 'bar'}
        node = self.driver.create_node(name='test node', size=size, image=image,
                                       ex_metadata=metadata)
        self.assertEqual(node.name, 'test node')
        self.assertEqual(len(node.extra['nics']), 1)
        self.assertEqual(node.extra['nics'][0]['ip_v4_conf']['conf'], 'dhcp')
    def test_create_node_with_vlan(self):
        # ex_vlan should add a second NIC attached to the given VLAN.
        image = self.driver.list_images()[0]
        size = self.driver.list_sizes()[0]
        vlan_uuid = '39ae851d-433f-4ac2-a803-ffa24cb1fa3e'
        node = self.driver.create_node(name='test node vlan', size=size,
                                       image=image, ex_vlan=vlan_uuid)
        self.assertEqual(node.name, 'test node vlan')
        self.assertEqual(len(node.extra['nics']), 2)
        self.assertEqual(node.extra['nics'][0]['ip_v4_conf']['conf'], 'dhcp')
        self.assertEqual(node.extra['nics'][1]['vlan']['uuid'], vlan_uuid)
    def test_destroy_node(self):
        status = self.driver.destroy_node(node=self.node)
        self.assertTrue(status)
    def test_ex_start_node(self):
        status = self.driver.ex_start_node(node=self.node)
        self.assertTrue(status)
    def test_ex_start_node_avoid_mode(self):
        CloudSigmaMockHttp.type = 'AVOID_MODE'
        ex_avoid = ['1', '2']
        status = self.driver.ex_start_node(node=self.node,
                                           ex_avoid=ex_avoid)
        self.assertTrue(status)
    def test_ex_start_node_already_started(self):
        CloudSigmaMockHttp.type = 'ALREADY_STARTED'
        expected_msg = 'Cannot start guest in state "started". Guest should ' \
                       'be in state "stopped'
        self.assertRaisesRegexp(CloudSigmaError, expected_msg,
                                self.driver.ex_start_node, node=self.node)
    def test_ex_stop_node(self):
        status = self.driver.ex_stop_node(node=self.node)
        self.assertTrue(status)
    def test_ex_stop_node_already_stopped(self):
        CloudSigmaMockHttp.type = 'ALREADY_STOPPED'
        expected_msg = 'Cannot stop guest in state "stopped"'
        self.assertRaisesRegexp(CloudSigmaError, expected_msg,
                                self.driver.ex_stop_node, node=self.node)
    def test_ex_clone_node(self):
        node_to_clone = self.driver.list_nodes()[0]
        cloned_node = self.driver.ex_clone_node(node=node_to_clone,
                                                name='test cloned node')
        self.assertEqual(cloned_node.name, 'test cloned node')
    def test_ex_open_vnc_tunnel(self):
        node = self.driver.list_nodes()[0]
        vnc_url = self.driver.ex_open_vnc_tunnel(node=node)
        self.assertEqual(vnc_url, 'vnc://direct.lvs.cloudsigma.com:41111')
    def test_ex_close_vnc_tunnel(self):
        node = self.driver.list_nodes()[0]
        status = self.driver.ex_close_vnc_tunnel(node=node)
        self.assertTrue(status)
    def test_ex_list_library_drives(self):
        drives = self.driver.ex_list_library_drives()
        drive = drives[0]
        self.assertEqual(drive.name, 'IPCop 2.0.2')
        self.assertEqual(drive.size, 1000000000)
        self.assertEqual(drive.media, 'cdrom')
        self.assertEqual(drive.status, 'unmounted')
    def test_ex_list_user_drives(self):
        drives = self.driver.ex_list_user_drives()
        drive = drives[0]
        self.assertEqual(drive.name, 'test node 2-drive')
        self.assertEqual(drive.size, 13958643712)
        self.assertEqual(drive.media, 'disk')
        self.assertEqual(drive.status, 'unmounted')
    def test_ex_create_drive(self):
        CloudSigmaMockHttp.type = 'CREATE'
        name = 'test drive 5'
        size = 2000 * 1024 * 1024
        drive = self.driver.ex_create_drive(name=name, size=size, media='disk')
        self.assertEqual(drive.name, 'test drive 5')
        self.assertEqual(drive.media, 'disk')
    def test_ex_clone_drive(self):
        drive = self.driver.ex_list_user_drives()[0]
        cloned_drive = self.driver.ex_clone_drive(drive=drive,
                                                  name='cloned drive')
        self.assertEqual(cloned_drive.name, 'cloned drive')
    def test_ex_resize_drive(self):
        drive = self.driver.ex_list_user_drives()[0]
        size = 1111 * 1024 * 1024
        resized_drive = self.driver.ex_resize_drive(drive=drive, size=size)
        self.assertEqual(resized_drive.name, 'test drive 5')
        self.assertEqual(resized_drive.media, 'disk')
        self.assertEqual(resized_drive.size, size)
    def test_ex_list_firewall_policies(self):
        policies = self.driver.ex_list_firewall_policies()
        policy = policies[1]
        rule = policy.rules[0]
        self.assertEqual(policy.name, 'My awesome policy')
        self.assertEqual(rule.action, 'drop')
        self.assertEqual(rule.direction, 'out')
        self.assertEqual(rule.dst_ip, '23.0.0.0/32')
        self.assertEqual(rule.ip_proto, 'tcp')
        self.assertEqual(rule.dst_port, None)
        self.assertEqual(rule.src_ip, None)
        self.assertEqual(rule.src_port, None)
        self.assertEqual(rule.comment, 'Drop traffic from the VM to IP address 23.0.0.0/32')
    def test_ex_create_firewall_policy_no_rules(self):
        CloudSigmaMockHttp.type = 'CREATE_NO_RULES'
        policy = self.driver.ex_create_firewall_policy(name='test policy 1')
        self.assertEqual(policy.name, 'test policy 1')
        self.assertEqual(policy.rules, [])
    def test_ex_create_firewall_policy_with_rules(self):
        CloudSigmaMockHttp.type = 'CREATE_WITH_RULES'
        rules = [
            {
                'action': 'accept',
                'direction': 'out',
                'ip_proto': 'tcp',
                'src_ip': '127.0.0.1',
                'dst_ip': '127.0.0.1'
            }
        ]
        policy = self.driver.ex_create_firewall_policy(name='test policy 2',
                                                       rules=rules)
        rule = policy.rules[0]
        self.assertEqual(policy.name, 'test policy 2')
        self.assertEqual(len(policy.rules), 1)
        self.assertEqual(rule.action, 'accept')
        self.assertEqual(rule.direction, 'out')
        self.assertEqual(rule.ip_proto, 'tcp')
    def test_ex_attach_firewall_policy(self):
        policy = self.driver.ex_list_firewall_policies()[0]
        node = self.driver.list_nodes()[0]
        CloudSigmaMockHttp.type = 'ATTACH_POLICY'
        updated_node = self.driver.ex_attach_firewall_policy(policy=policy,
                                                             node=node)
        nic = updated_node.extra['nics'][0]
        self.assertEqual(nic['firewall_policy']['uuid'],
                         '461dfb8c-e641-43d7-a20e-32e2aa399086')
    def test_ex_attach_firewall_policy_inexistent_nic(self):
        policy = self.driver.ex_list_firewall_policies()[0]
        node = self.driver.list_nodes()[0]
        nic_mac = 'inexistent'
        expected_msg = 'Cannot find the NIC interface to attach a policy to'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                self.driver.ex_attach_firewall_policy,
                                policy=policy,
                                node=node,
                                nic_mac=nic_mac)
    def test_ex_delete_firewall_policy(self):
        policy = self.driver.ex_list_firewall_policies()[0]
        status = self.driver.ex_delete_firewall_policy(policy=policy)
        self.assertTrue(status)
    def test_ex_list_tags(self):
        tags = self.driver.ex_list_tags()
        tag = tags[0]
        self.assertEqual(tag.id, 'a010ec41-2ead-4630-a1d0-237fa77e4d4d')
        self.assertEqual(tag.name, 'test tag 2')
    def test_ex_get_tag(self):
        tag = self.driver.ex_get_tag(tag_id='a010ec41-2ead-4630-a1d0-237fa77e4d4d')
        self.assertEqual(tag.id, 'a010ec41-2ead-4630-a1d0-237fa77e4d4d')
        self.assertEqual(tag.name, 'test tag 2')
    def test_ex_create_tag(self):
        tag = self.driver.ex_create_tag(name='test tag 3')
        self.assertEqual(tag.name, 'test tag 3')
    def test_ex_create_tag_with_resources(self):
        CloudSigmaMockHttp.type = 'WITH_RESOURCES'
        resource_uuids = ['1']
        tag = self.driver.ex_create_tag(name='test tag 3',
                                        resource_uuids=resource_uuids)
        self.assertEqual(tag.name, 'test tag 3')
        self.assertEqual(tag.resources, resource_uuids)
    def test_ex_tag_resource(self):
        node = self.driver.list_nodes()[0]
        tag = self.driver.ex_list_tags()[0]
        updated_tag = self.driver.ex_tag_resource(resource=node, tag=tag)
        self.assertEqual(updated_tag.name, 'test tag 3')
    def test_ex_tag_resources(self):
        nodes = self.driver.list_nodes()
        tag = self.driver.ex_list_tags()[0]
        updated_tag = self.driver.ex_tag_resources(resources=nodes, tag=tag)
        self.assertEqual(updated_tag.name, 'test tag 3')
    def test_ex_tag_resource_invalid_resource_object(self):
        tag = self.driver.ex_list_tags()[0]
        expected_msg = 'Resource doesn\'t have id attribute'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                self.driver.ex_tag_resource, tag=tag,
                                resource={})
    def test_ex_delete_tag(self):
        tag = self.driver.ex_list_tags()[0]
        status = self.driver.ex_delete_tag(tag=tag)
        self.assertTrue(status)
    def test_ex_get_balance(self):
        balance = self.driver.ex_get_balance()
        self.assertEqual(balance['balance'], '10.00')
        self.assertEqual(balance['currency'], 'USD')
    def test_ex_get_pricing(self):
        pricing = self.driver.ex_get_pricing()
        self.assertTrue('current' in pricing)
        self.assertTrue('next' in pricing)
        self.assertTrue('objects' in pricing)
    def test_ex_get_usage(self):
        pricing = self.driver.ex_get_usage()
        self.assertTrue('balance' in pricing)
        self.assertTrue('usage' in pricing)
    def test_ex_list_subscriptions(self):
        subscriptions = self.driver.ex_list_subscriptions()
        subscription = subscriptions[0]
        self.assertEqual(len(subscriptions), 5)
        self.assertEqual(subscription.id, '7272')
        self.assertEqual(subscription.resource, 'vlan')
        self.assertEqual(subscription.amount, 1)
        self.assertEqual(subscription.period, '345 days, 0:00:00')
        self.assertEqual(subscription.status, 'active')
        self.assertEqual(subscription.price, '0E-20')
    def test_ex_create_subscription(self):
        CloudSigmaMockHttp.type = 'CREATE_SUBSCRIPTION'
        subscription = self.driver.ex_create_subscription(amount=1,
                                                          period='1 month',
                                                          resource='vlan')
        self.assertEqual(subscription.amount, 1)
        self.assertEqual(subscription.period, '1 month')
        self.assertEqual(subscription.resource, 'vlan')
        self.assertEqual(subscription.price, '10.26666666666666666666666667')
        self.assertEqual(subscription.auto_renew, False)
        self.assertEqual(subscription.subscribed_object, '2494079f-8376-40bf-9b37-34d633b8a7b7')
    def test_ex_list_subscriptions_status_filterting(self):
        # The mock handler asserts the "status" query parameter was sent.
        CloudSigmaMockHttp.type = 'STATUS_FILTER'
        self.driver.ex_list_subscriptions(status='active')
    def test_ex_list_subscriptions_resource_filterting(self):
        CloudSigmaMockHttp.type = 'RESOURCE_FILTER'
        resources = ['cpu', 'mem']
        self.driver.ex_list_subscriptions(resources=resources)
    def test_ex_toggle_subscription_auto_renew(self):
        subscription = self.driver.ex_list_subscriptions()[0]
        status = self.driver.ex_toggle_subscription_auto_renew(
            subscription=subscription)
        self.assertTrue(status)
    def test_ex_list_capabilities(self):
        capabilities = self.driver.ex_list_capabilities()
        self.assertEqual(capabilities['servers']['cpu']['min'], 250)
    def test_ex_list_servers_availability_groups(self):
        groups = self.driver.ex_list_servers_availability_groups()
        self.assertEqual(len(groups), 3)
        self.assertEqual(len(groups[0]), 2)
        self.assertEqual(len(groups[2]), 1)
    def test_ex_list_drives_availability_groups(self):
        groups = self.driver.ex_list_drives_availability_groups()
        self.assertEqual(len(groups), 1)
        self.assertEqual(len(groups[0]), 11)
    def test_wait_for_drive_state_transition_timeout(self):
        drive = self.driver.ex_list_user_drives()[0]
        state = 'timeout'
        expected_msg = 'Timed out while waiting for drive transition'
        self.assertRaisesRegexp(Exception, expected_msg,
                                self.driver._wait_for_drive_state_transition,
                                drive=drive, state=state,
                                timeout=0.5)
    def test_wait_for_drive_state_transition_success(self):
        drive = self.driver.ex_list_user_drives()[0]
        state = 'unmounted'
        drive = self.driver._wait_for_drive_state_transition(drive=drive,
                                                             state=state,
                                                             timeout=0.5)
        self.assertEqual(drive.status, state)
class CloudSigmaAPI20DirectTestCase(CloudSigmaAPI20BaseTestCase,
                                    unittest.TestCase):
    # Instantiates CloudSigma_2_0_NodeDriver directly (no version routing).
    driver_klass = CloudSigma_2_0_NodeDriver
    driver_args = ('foo', 'bar')
    driver_kwargs = {}
class CloudSigmaAPI20IndirectTestCase(CloudSigmaAPI20BaseTestCase,
                                      unittest.TestCase):
    # Goes through CloudSigmaNodeDriver's api_version dispatch instead.
    driver_klass = CloudSigmaNodeDriver
    driver_args = ('foo', 'bar')
    driver_kwargs = {'api_version': '2.0'}
class CloudSigmaMockHttp(MockHttpTestCase):
    """Mock HTTP backend mapping CloudSigma API v2.0 URLs to JSON fixtures.

    Method names encode the request path (slashes/dashes become
    underscores); an optional trailing suffix matches the class-level
    ``type`` attribute set by individual tests.
    """
    fixtures = ComputeFileFixtures('cloudsigma_2_0')
    def _api_2_0_servers_detail_INVALID_CREDS(self, method, url, body, headers):
        body = self.fixtures.load('libdrives.json')
        return (httplib.UNAUTHORIZED, body, {},
                httplib.responses[httplib.UNAUTHORIZED])
    def _api_2_0_libdrives(self, method, url, body, headers):
        body = self.fixtures.load('libdrives.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_servers_detail(self, method, url, body, headers):
        body = self.fixtures.load('servers_detail_mixed_state.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911(self, method, url, body, headers):
        body = ''
        return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT])
    def _api_2_0_servers(self, method, url, body, headers):
        if method == 'POST':
            # create_node
            parsed = json.loads(body)
            if 'vlan' in parsed['name']:
                self.assertEqual(len(parsed['nics']), 2)
                body = self.fixtures.load('servers_create_with_vlan.json')
            else:
                body = self.fixtures.load('servers_create.json')
            return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_start(self, method, url, body, headers):
        body = self.fixtures.load('start_success.json')
        return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_AVOID_MODE_start(self, method, url, body, headers):
        self.assertUrlContainsQueryParams(url, {'avoid': '1,2'})
        body = self.fixtures.load('start_success.json')
        return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_ALREADY_STARTED_start(self, method, url, body, headers):
        body = self.fixtures.load('start_already_started.json')
        return (httplib.FORBIDDEN, body, {}, httplib.responses[httplib.FORBIDDEN])
    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_stop(self, method, url, body, headers):
        body = self.fixtures.load('stop_success.json')
        return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_ALREADY_STOPPED_stop(self, method, url, body, headers):
        body = self.fixtures.load('stop_already_stopped.json')
        return (httplib.FORBIDDEN, body, {}, httplib.responses[httplib.FORBIDDEN])
    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_clone(self, method, url, body, headers):
        body = self.fixtures.load('servers_clone.json')
        return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_open_vnc(self, method, url, body, headers):
        body = self.fixtures.load('servers_open_vnc.json')
        return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_action_close_vnc(self, method, url, body, headers):
        body = self.fixtures.load('servers_close_vnc.json')
        return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
    def _api_2_0_drives_detail(self, method, url, body, headers):
        body = self.fixtures.load('drives_detail.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_drives_b02311e2_a83c_4c12_af10_b30d51c86913(self, method, url, body, headers):
        body = self.fixtures.load('drives_get.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809(self, method, url, body, headers):
        body = self.fixtures.load('drives_get.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_drives_CREATE(self, method, url, body, headers):
        body = self.fixtures.load('drives_create.json')
        return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
    def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809_action_clone(self, method, url, body, headers):
        body = self.fixtures.load('drives_clone.json')
        return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
    def _api_2_0_drives_5236b9ee_f735_42fd_a236_17558f9e12d3_action_clone(self, method, url, body, headers):
        body = self.fixtures.load('drives_clone.json')
        return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
    def _api_2_0_drives_b02311e2_a83c_4c12_af10_b30d51c86913_action_resize(self, method, url, body, headers):
        body = self.fixtures.load('drives_resize.json')
        return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
    def _api_2_0_drives_9d1d2cf3_08c1_462f_8485_f4b073560809_action_resize(self, method, url, body, headers):
        body = self.fixtures.load('drives_resize.json')
        return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED])
    def _api_2_0_fwpolicies_detail(self, method, url, body, headers):
        body = self.fixtures.load('fwpolicies_detail.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_fwpolicies_CREATE_NO_RULES(self, method, url, body, headers):
        body = self.fixtures.load('fwpolicies_create_no_rules.json')
        return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
    def _api_2_0_fwpolicies_CREATE_WITH_RULES(self, method, url, body, headers):
        body = self.fixtures.load('fwpolicies_create_with_rules.json')
        return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
    def _api_2_0_servers_9de75ed6_fd33_45e2_963f_d405f31fd911_ATTACH_POLICY(self, method, url, body, headers):
        body = self.fixtures.load('servers_attach_policy.json')
        return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
    def _api_2_0_fwpolicies_0e339282_0cb5_41ac_a9db_727fb62ff2dc(self, method, url, body, headers):
        if method == 'DELETE':
            body = ''
            return (httplib.NO_CONTENT, body, {},
                    httplib.responses[httplib.NO_CONTENT])
    def _api_2_0_tags_detail(self, method, url, body, headers):
        body = self.fixtures.load('tags_detail.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_tags(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('tags_create.json')
            return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
    def _api_2_0_tags_WITH_RESOURCES(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('tags_create_with_resources.json')
            return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED])
    def _api_2_0_tags_a010ec41_2ead_4630_a1d0_237fa77e4d4d(self, method, url, body, headers):
        if method == 'GET':
            # ex_get_tag
            body = self.fixtures.load('tags_get.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        elif method == 'PUT':
            # ex_tag_resource
            body = self.fixtures.load('tags_update.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        elif method == 'DELETE':
            # ex_delete_tag
            body = ''
            return (httplib.NO_CONTENT, body, {},
                    httplib.responses[httplib.NO_CONTENT])
    def _api_2_0_balance(self, method, url, body, headers):
        body = self.fixtures.load('balance.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_pricing(self, method, url, body, headers):
        body = self.fixtures.load('pricing.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_currentusage(self, method, url, body, headers):
        body = self.fixtures.load('currentusage.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_subscriptions(self, method, url, body, headers):
        body = self.fixtures.load('subscriptions.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_subscriptions_STATUS_FILTER(self, method, url, body, headers):
        self.assertUrlContainsQueryParams(url, {'status': 'active'})
        body = self.fixtures.load('subscriptions.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_subscriptions_RESOURCE_FILTER(self, method, url, body, headers):
        expected_params = {'resource': 'cpu,mem', 'status': 'all'}
        self.assertUrlContainsQueryParams(url, expected_params)
        body = self.fixtures.load('subscriptions.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_subscriptions_7272_action_auto_renew(self, method, url, body, headers):
        body = ''
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_subscriptions_CREATE_SUBSCRIPTION(self, method, url, body, headers):
        body = self.fixtures.load('create_subscription.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_capabilities(self, method, url, body, headers):
        body = self.fixtures.load('capabilities.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_servers_availability_groups(self, method, url, body, headers):
        body = self.fixtures.load('servers_avail_groups.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _api_2_0_drives_availability_groups(self, method, url, body, headers):
        body = self.fixtures.load('drives_avail_groups.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
    # Run all tests in this module and propagate the exit status.
    sys.exit(unittest.main())
| apache-2.0 |
johnkeepmoving/oss-ftp | python27/win32/Lib/idlelib/rpc.py | 65 | 20225 | """RPC Implemention, originally written for the Python Idle IDE
For security reasons, GvR requested that Idle's Python execution server process
connect to the Idle process, which listens for the connection. Since Idle has
only one client per server, this was not a limitation.
+---------------------------------+ +-------------+
| SocketServer.BaseRequestHandler | | SocketIO |
+---------------------------------+ +-------------+
^ | register() |
| | unregister()|
| +-------------+
| ^ ^
| | |
| + -------------------+ |
| | |
+-------------------------+ +-----------------+
| RPCHandler | | RPCClient |
| [attribute of RPCServer]| | |
+-------------------------+ +-----------------+
The RPCServer handler class is expected to provide register/unregister methods.
RPCHandler inherits the mix-in class SocketIO, which provides these methods.
See the Idle run.main() docstring for further information on how this was
accomplished in Idle.
"""
import sys
import os
import socket
import select
import SocketServer
import struct
import cPickle as pickle
import threading
import Queue
import traceback
import copy_reg
import types
import marshal
def unpickle_code(ms):
    """Reconstruct a code object from its marshal-serialized bytes.

    Inverse of pickle_code(); registered with copy_reg so code objects
    can be sent across the RPC link.
    """
    code_obj = marshal.loads(ms)
    assert isinstance(code_obj, types.CodeType)
    return code_obj
def pickle_code(co):
    """Reduce a code object to (unpickle_code, (marshal_bytes,)).

    Used as the pickling hook for types.CodeType: pickle itself cannot
    serialize code objects, but marshal can.
    """
    assert isinstance(co, types.CodeType)
    serialized = marshal.dumps(co)
    return unpickle_code, (serialized,)
# XXX KBK 24Aug02 function pickling capability not used in Idle
# def unpickle_function(ms):
# return ms
# def pickle_function(fn):
# assert isinstance(fn, type.FunctionType)
# return repr(fn)
# Teach pickle how to (de)serialize code objects via marshal; needed so
# the debugger can ship code objects between the IDE and the subprocess.
copy_reg.pickle(types.CodeType, pickle_code, unpickle_code)
# copy_reg.pickle(types.FunctionType, pickle_function, unpickle_function)
# Socket read chunk size and the loopback address used for the RPC link.
BUFSIZE = 8*1024
LOCALHOST = '127.0.0.1'
class RPCServer(SocketServer.TCPServer):
    """TCPServer variant that connects *out* to its single client.

    For security, the executing subprocess dials back to the IDE, which
    is the side that listens; hence bind/listen/accept are overridden.
    """
    def __init__(self, addr, handlerclass=None):
        # Default handler mixes in SocketIO's register()/unregister().
        if handlerclass is None:
            handlerclass = RPCHandler
        SocketServer.TCPServer.__init__(self, addr, handlerclass)
    def server_bind(self):
        "Override TCPServer method, no bind() phase for connecting entity"
        pass
    def server_activate(self):
        """Override TCPServer method, connect() instead of listen()
        Due to the reversed connection, self.server_address is actually the
        address of the Idle Client to which we are connecting.
        """
        self.socket.connect(self.server_address)
    def get_request(self):
        "Override TCPServer method, return already connected socket"
        return self.socket, self.server_address
    def handle_error(self, request, client_address):
        """Override TCPServer method
        Error message goes to __stderr__. No error message if exiting
        normally or socket raised EOF. Other exceptions not handled in
        server code will cause os._exit.
        """
        try:
            # Re-raise the active exception so we can discriminate on type.
            raise
        except SystemExit:
            raise
        except:
            erf = sys.__stderr__
            print>>erf, '\n' + '-'*40
            print>>erf, 'Unhandled server exception!'
            print>>erf, 'Thread: %s' % threading.currentThread().getName()
            print>>erf, 'Client Address: ', client_address
            print>>erf, 'Request: ', repr(request)
            traceback.print_exc(file=erf)
            print>>erf, '\n*** Unrecoverable, server exiting!'
            print>>erf, '-'*40
            # Hard exit: the server process cannot recover at this point.
            os._exit(0)
#----------------- end class RPCServer --------------------
# Registry of objects exposed over RPC, keyed by object id (oid).
objecttable = {}
# Queues linking the socket thread with the thread executing queued requests.
request_queue = Queue.Queue(0)
response_queue = Queue.Queue(0)
class SocketIO(object):
    """Shared machinery for both ends of the RPC link.

    Frames pickled messages over a socket (4-byte little-endian length prefix
    followed by a pickle) and multiplexes request/response traffic across
    threads: the thread that constructed the SocketIO (the "sockthread") does
    all socket reading, while other threads block on per-sequence-number
    condition variables in self.cvars until the sockthread posts their
    response into self.responses.
    """
    # Message sequence counter; subclasses override the starting value so the
    # two peers generate disjoint (odd/even) sequence numbers.
    nextseq = 0
    def __init__(self, sock, objtable=None, debugging=None):
        # Remember which thread owns socket reads (see _getresponse).
        self.sockthread = threading.currentThread()
        if debugging is not None:
            self.debugging = debugging
        self.sock = sock
        if objtable is None:
            # Default to the module-global registry of exposed objects.
            objtable = objecttable
        self.objtable = objtable
        self.responses = {}  # seq -> response, filled in by the sockthread
        self.cvars = {}      # seq -> Condition a waiting thread sleeps on
    def close(self):
        # Idempotent: clear self.sock first so concurrent users see None.
        sock = self.sock
        self.sock = None
        if sock is not None:
            sock.close()
    def exithook(self):
        "override for specific exit action"
        os._exit(0)
    def debug(self, *args):
        # Debug trace to the real stderr; 'location' ("#S"/"#C") is supplied
        # by the subclass (RPCHandler/RPCClient).
        if not self.debugging:
            return
        s = self.location + " " + str(threading.currentThread().getName())
        for a in args:
            s = s + " " + str(a)
        print>>sys.__stderr__, s
    def register(self, oid, object):
        """Expose 'object' to the peer under the identifier 'oid'."""
        self.objtable[oid] = object
    def unregister(self, oid):
        """Withdraw 'oid' from the table; silently ignores unknown ids."""
        try:
            del self.objtable[oid]
        except KeyError:
            pass
    def localcall(self, seq, request):
        """Execute (or enqueue) a request received from the peer.

        Returns a (status, payload) pair: "OK", "QUEUED", "ERROR" or
        "EXCEPTION".
        """
        self.debug("localcall:", request)
        try:
            how, (oid, methodname, args, kwargs) = request
        except TypeError:
            return ("ERROR", "Bad request format")
        if oid not in self.objtable:
            return ("ERROR", "Unknown object id: %r" % (oid,))
        obj = self.objtable[oid]
        # Two pseudo-methods let the peer introspect us (see RPCProxy).
        if methodname == "__methods__":
            methods = {}
            _getmethods(obj, methods)
            return ("OK", methods)
        if methodname == "__attributes__":
            attributes = {}
            _getattributes(obj, attributes)
            return ("OK", attributes)
        if not hasattr(obj, methodname):
            return ("ERROR", "Unsupported method name: %r" % (methodname,))
        method = getattr(obj, methodname)
        try:
            if how == 'CALL':
                ret = method(*args, **kwargs)
                if isinstance(ret, RemoteObject):
                    # Pass marker objects back by reference, not by value.
                    ret = remoteref(ret)
                return ("OK", ret)
            elif how == 'QUEUE':
                # Hand off to a worker thread; the answer travels back later
                # through response_queue (see pollresponse).
                request_queue.put((seq, (method, args, kwargs)))
                return("QUEUED", None)
            else:
                return ("ERROR", "Unsupported message type: %s" % how)
        except SystemExit:
            raise
        except socket.error:
            raise
        except:
            # User-code failure: report it locally, tell the peer only that
            # an exception occurred (the traceback is not forwarded).
            msg = "*** Internal Error: rpc.py:SocketIO.localcall()\n\n"\
                  " Object: %s \n Method: %s \n Args: %s\n"
            print>>sys.__stderr__, msg % (oid, method, args)
            traceback.print_exc(file=sys.__stderr__)
            return ("EXCEPTION", None)
    def remotecall(self, oid, methodname, args, kwargs):
        """Synchronous remote method call; blocks until the peer answers."""
        self.debug("remotecall:asynccall: ", oid, methodname)
        seq = self.asynccall(oid, methodname, args, kwargs)
        return self.asyncreturn(seq)
    def remotequeue(self, oid, methodname, args, kwargs):
        """Like remotecall(), but executed by a worker thread on the peer."""
        self.debug("remotequeue:asyncqueue: ", oid, methodname)
        seq = self.asyncqueue(oid, methodname, args, kwargs)
        return self.asyncreturn(seq)
    def asynccall(self, oid, methodname, args, kwargs):
        """Send a 'CALL' request; returns its sequence number immediately."""
        request = ("CALL", (oid, methodname, args, kwargs))
        seq = self.newseq()
        if threading.currentThread() != self.sockthread:
            # A non-socket thread will sleep on this condition variable in
            # _getresponse until the sockthread delivers the answer.
            cvar = threading.Condition()
            self.cvars[seq] = cvar
        self.debug(("asynccall:%d:" % seq), oid, methodname, args, kwargs)
        self.putmessage((seq, request))
        return seq
    def asyncqueue(self, oid, methodname, args, kwargs):
        """Send a 'QUEUE' request; returns its sequence number immediately."""
        request = ("QUEUE", (oid, methodname, args, kwargs))
        seq = self.newseq()
        if threading.currentThread() != self.sockthread:
            cvar = threading.Condition()
            self.cvars[seq] = cvar
        self.debug(("asyncqueue:%d:" % seq), oid, methodname, args, kwargs)
        self.putmessage((seq, request))
        return seq
    def asyncreturn(self, seq):
        """Block until the response for 'seq' arrives, then decode it."""
        self.debug("asyncreturn:%d:call getresponse(): " % seq)
        response = self.getresponse(seq, wait=0.05)
        self.debug(("asyncreturn:%d:response: " % seq), response)
        return self.decoderesponse(response)
    def decoderesponse(self, response):
        """Unwrap a (status, payload) pair into a plain return value.

        Errors reported by the peer surface here as RuntimeError; an unknown
        status is a protocol violation and raises SystemError.
        """
        how, what = response
        if how == "OK":
            return what
        if how == "QUEUED":
            return None
        if how == "EXCEPTION":
            self.debug("decoderesponse: EXCEPTION")
            return None
        if how == "EOF":
            self.debug("decoderesponse: EOF")
            self.decode_interrupthook()
            return None
        if how == "ERROR":
            self.debug("decoderesponse: Internal ERROR:", what)
            raise RuntimeError, what
        raise SystemError, (how, what)
    def decode_interrupthook(self):
        ""
        # Subclasses may override to handle peer shutdown more gracefully.
        raise EOFError
    def mainloop(self):
        """Listen on socket until I/O not ready or EOF
        pollresponse() will loop looking for seq number None, which
        never comes, and exit on EOFError.
        """
        try:
            self.getresponse(myseq=None, wait=0.05)
        except EOFError:
            self.debug("mainloop:return")
            return
    def getresponse(self, myseq, wait):
        # Fetch the raw response, then replace RemoteProxy tokens inside an
        # "OK" payload with live RPCProxy objects.
        response = self._getresponse(myseq, wait)
        if response is not None:
            how, what = response
            if how == "OK":
                response = how, self._proxify(what)
        return response
    def _proxify(self, obj):
        # Convert RemoteProxy tokens (recursively inside lists) into callable
        # RPCProxy wrappers bound to this link.
        if isinstance(obj, RemoteProxy):
            return RPCProxy(self, obj.oid)
        if isinstance(obj, types.ListType):
            return map(self._proxify, obj)
        # XXX Check for other types -- not currently needed
        return obj
    def _getresponse(self, myseq, wait):
        self.debug("_getresponse:myseq:", myseq)
        if threading.currentThread() is self.sockthread:
            # this thread does all reading of requests or responses
            while 1:
                response = self.pollresponse(myseq, wait)
                if response is not None:
                    return response
        else:
            # wait for notification from socket handling thread
            cvar = self.cvars[myseq]
            cvar.acquire()
            while myseq not in self.responses:
                cvar.wait()
            response = self.responses[myseq]
            self.debug("_getresponse:%s: thread woke up: response: %s" %
                       (myseq, response))
            # Clean up the bookkeeping for this transaction before releasing.
            del self.responses[myseq]
            del self.cvars[myseq]
            cvar.release()
            return response
    def newseq(self):
        # Increment by 2 to preserve the odd/even split between peers.
        self.nextseq = seq = self.nextseq + 2
        return seq
    def putmessage(self, message):
        """Pickle 'message' and write it out with a 4-byte length prefix."""
        self.debug("putmessage:%d:" % message[0])
        try:
            s = pickle.dumps(message)
        except pickle.PicklingError:
            print >>sys.__stderr__, "Cannot pickle:", repr(message)
            raise
        s = struct.pack("<i", len(s)) + s
        while len(s) > 0:
            try:
                # Block until the socket is writable, then send one chunk;
                # partial sends are handled by the loop.
                r, w, x = select.select([], [self.sock], [])
                n = self.sock.send(s[:BUFSIZE])
            except (AttributeError, TypeError):
                # self.sock was closed/cleared underneath us.
                raise IOError, "socket no longer exists"
            except socket.error:
                raise
            else:
                s = s[n:]
    # Incremental receive state machine (per-instance once first assigned):
    buffer = ""
    bufneed = 4
    bufstate = 0 # meaning: 0 => reading count; 1 => reading data
    def pollpacket(self, wait):
        """Return one complete length-prefixed packet, or None if not ready.

        Raises EOFError when the peer has closed the connection.
        """
        self._stage0()
        if len(self.buffer) < self.bufneed:
            r, w, x = select.select([self.sock.fileno()], [], [], wait)
            if len(r) == 0:
                return None
            try:
                s = self.sock.recv(BUFSIZE)
            except socket.error:
                raise EOFError
            if len(s) == 0:
                raise EOFError
            self.buffer += s
            self._stage0()
        return self._stage1()
    def _stage0(self):
        # State 0: consume the 4-byte length header when available.
        if self.bufstate == 0 and len(self.buffer) >= 4:
            s = self.buffer[:4]
            self.buffer = self.buffer[4:]
            self.bufneed = struct.unpack("<i", s)[0]
            self.bufstate = 1
    def _stage1(self):
        # State 1: slice off the payload once the full packet has arrived.
        if self.bufstate == 1 and len(self.buffer) >= self.bufneed:
            packet = self.buffer[:self.bufneed]
            self.buffer = self.buffer[self.bufneed:]
            self.bufneed = 4
            self.bufstate = 0
            return packet
    def pollmessage(self, wait):
        """Unpickle the next complete packet; None when nothing is ready."""
        packet = self.pollpacket(wait)
        if packet is None:
            return None
        try:
            message = pickle.loads(packet)
        except pickle.UnpicklingError:
            print >>sys.__stderr__, "-----------------------"
            print >>sys.__stderr__, "cannot unpickle packet:", repr(packet)
            traceback.print_stack(file=sys.__stderr__)
            print >>sys.__stderr__, "-----------------------"
            raise
        return message
    def pollresponse(self, myseq, wait):
        """Handle messages received on the socket.
        Some messages received may be asynchronous 'call' or 'queue' requests,
        and some may be responses for other threads.
        'call' requests are passed to self.localcall() with the expectation of
        immediate execution, during which time the socket is not serviced.
        'queue' requests are used for tasks (which may block or hang) to be
        processed in a different thread. These requests are fed into
        request_queue by self.localcall(). Responses to queued requests are
        taken from response_queue and sent across the link with the associated
        sequence numbers. Messages in the queues are (sequence_number,
        request/response) tuples and code using this module removing messages
        from the request_queue is responsible for returning the correct
        sequence number in the response_queue.
        pollresponse() will loop until a response message with the myseq
        sequence number is received, and will save other responses in
        self.responses and notify the owning thread.
        """
        while 1:
            # send queued response if there is one available
            try:
                qmsg = response_queue.get(0)
            except Queue.Empty:
                pass
            else:
                seq, response = qmsg
                message = (seq, ('OK', response))
                self.putmessage(message)
            # poll for message on link
            try:
                message = self.pollmessage(wait)
                if message is None: # socket not ready
                    return None
            except EOFError:
                self.handle_EOF()
                return None
            except AttributeError:
                # self.sock was cleared by close(); treat as "not ready".
                return None
            seq, resq = message
            how = resq[0]
            self.debug("pollresponse:%d:myseq:%s" % (seq, myseq))
            # process or queue a request
            if how in ("CALL", "QUEUE"):
                self.debug("pollresponse:%d:localcall:call:" % seq)
                response = self.localcall(seq, resq)
                self.debug("pollresponse:%d:localcall:response:%s"
                           % (seq, response))
                if how == "CALL":
                    self.putmessage((seq, response))
                elif how == "QUEUE":
                    # don't acknowledge the 'queue' request!
                    pass
                continue
            # return if completed message transaction
            elif seq == myseq:
                return resq
            # must be a response for a different thread:
            else:
                cv = self.cvars.get(seq, None)
                # response involving unknown sequence number is discarded,
                # probably intended for prior incarnation of server
                if cv is not None:
                    cv.acquire()
                    self.responses[seq] = resq
                    cv.notify()
                    cv.release()
                continue
    def handle_EOF(self):
        "action taken upon link being closed by peer"
        self.EOFhook()
        self.debug("handle_EOF")
        # Wake every thread still waiting on a response so none hang forever.
        for key in self.cvars:
            cv = self.cvars[key]
            cv.acquire()
            self.responses[key] = ('EOF', None)
            cv.notify()
            cv.release()
        # call our (possibly overridden) exit function
        self.exithook()
    def EOFhook(self):
        "Classes using rpc client/server can override to augment EOF action"
        pass
#----------------- end class SocketIO --------------------
class RemoteObject(object):
    """Marker mix-in: return values that derive from this class are sent back
    to the peer as a RemoteProxy reference (see SocketIO.localcall) instead of
    being pickled by value."""
def remoteref(obj):
    """Register obj in the global object table and return a picklable
    RemoteProxy token carrying its id."""
    key = id(obj)
    objecttable[key] = obj
    return RemoteProxy(key)
class RemoteProxy(object):
    """Picklable stand-in for an object living on the other side of the link;
    it carries only the remote object's id."""

    def __init__(self, oid):
        self.oid = oid
class RPCHandler(SocketServer.BaseRequestHandler, SocketIO):
    """Server-side endpoint: adapts SocketIO's message loop to the
    SocketServer handler protocol."""
    debugging = False
    location = "#S" # Server
    def __init__(self, sock, addr, svr):
        svr.current_handler = self ## cgt xxx
        SocketIO.__init__(self, sock)
        # NOTE: BaseRequestHandler.__init__ runs setup/handle/finish
        # synchronously, so this constructor does not return until the
        # connection is finished.
        SocketServer.BaseRequestHandler.__init__(self, sock, addr, svr)
    def handle(self):
        "handle() method required by SocketServer"
        self.mainloop()
    def get_remote_proxy(self, oid):
        # Convenience wrapper for building a proxy bound to this link.
        return RPCProxy(self, oid)
class RPCClient(SocketIO):
    """Client-side endpoint.

    Despite the name, this end *listens* for the reversed connection made by
    RPCServer.server_activate(); only connections from LOCALHOST are accepted.
    """
    debugging = False
    location = "#C" # Client
    nextseq = 1 # Requests coming from the client are odd numbered
    def __init__(self, address, family=socket.AF_INET, type=socket.SOCK_STREAM):
        # Note: SocketIO.__init__ is deferred until accept() succeeds.
        self.listening_sock = socket.socket(family, type)
        self.listening_sock.bind(address)
        self.listening_sock.listen(1)
    def accept(self):
        working_sock, address = self.listening_sock.accept()
        if self.debugging:
            print>>sys.__stderr__, "****** Connection request from ", address
        if address[0] == LOCALHOST:
            SocketIO.__init__(self, working_sock)
        else:
            # Reject any non-local peer outright.
            print>>sys.__stderr__, "** Invalid host: ", address
            raise socket.error
    def get_remote_proxy(self, oid):
        return RPCProxy(self, oid)
class RPCProxy(object):
    """Transparent proxy for a remote object.

    Attribute access lazily fetches the remote object's method and attribute
    tables (once each, via the '__methods__'/'__attributes__' pseudo-calls),
    then resolves names to MethodProxy callables or remote attribute reads.
    """
    # Name-mangled class-level caches (None until first __getattr__).
    __methods = None
    __attributes = None
    def __init__(self, sockio, oid):
        self.sockio = sockio
        self.oid = oid
    def __getattr__(self, name):
        # Only called for names not found locally, so sockio/oid are safe.
        if self.__methods is None:
            self.__getmethods()
        if self.__methods.get(name):
            return MethodProxy(self.sockio, self.oid, name)
        if self.__attributes is None:
            self.__getattributes()
        if name in self.__attributes:
            # Plain data attribute: fetch its current value synchronously.
            value = self.sockio.remotecall(self.oid, '__getattribute__',
                                           (name,), {})
            return value
        else:
            raise AttributeError, name
    def __getattributes(self):
        self.__attributes = self.sockio.remotecall(self.oid,
                                                   "__attributes__", (), {})
    def __getmethods(self):
        self.__methods = self.sockio.remotecall(self.oid,
                                                "__methods__", (), {})
def _getmethods(obj, methods):
    # Helper to get a list of methods from an object
    # Adds names to dictionary argument 'methods'
    for name in dir(obj):
        attr = getattr(obj, name)
        if hasattr(attr, '__call__'):
            methods[name] = 1
    # Python 2 old-style instances/classes: dir() does not see inherited
    # names, so walk the class and its bases explicitly.
    if type(obj) == types.InstanceType:
        _getmethods(obj.__class__, methods)
    if type(obj) == types.ClassType:
        for super in obj.__bases__:
            _getmethods(super, methods)
def _getattributes(obj, attributes):
for name in dir(obj):
attr = getattr(obj, name)
if not hasattr(attr, '__call__'):
attributes[name] = 1
class MethodProxy(object):
    """Callable bound to (link, remote object id, method name); invoking it
    performs a synchronous remotecall on the peer and returns the result."""

    def __init__(self, sockio, oid, name):
        self.sockio = sockio
        self.oid = oid
        self.name = name

    def __call__(self, *args, **kwargs):
        return self.sockio.remotecall(self.oid, self.name, args, kwargs)
# XXX KBK 09Sep03 We need a proper unit test for this module. Previously
# existing test code was removed at Rev 1.27 (r34098).
| mit |
sfrenza/test-for-bot | venv/Lib/site-packages/nltk/sem/cooper_storage.py | 7 | 4067 | # Natural Language Toolkit: Cooper storage for Quantifier Ambiguity
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
from nltk.sem.logic import LambdaExpression, ApplicationExpression, Variable
from nltk.parse import load_parser
from nltk.parse.featurechart import InstantiateVarsChart
class CooperStore(object):
    """
    A container for handling quantifier ambiguity via Cooper storage.
    """
    def __init__(self, featstruct):
        """
        :param featstruct: The value of the ``sem`` node in a tree from
            ``parse_with_bindops()``
        :type featstruct: FeatStruct (with features ``core`` and ``store``)
        """
        self.featstruct = featstruct
        self.readings = []
        try:
            self.core = featstruct['CORE']
            self.store = featstruct['STORE']
        except KeyError:
            print("%s is not a Cooper storage structure" % featstruct)
    def _permute(self, lst):
        """
        :return: An iterator over the permutations of the input list
        :type lst: list
        :rtype: iter
        """
        if not lst:
            yield ()
            return
        for position, head in enumerate(lst):
            # Permute everything except the chosen element, then prepend it.
            remainder = lst[:position] + lst[position + 1:]
            for tail in self._permute(remainder):
                yield (head,) + tail
    def s_retrieve(self, trace=False):
        """
        Carry out S-Retrieval of binding operators in store. If hack=True,
        serialize the bindop and core as strings and reparse. Ugh.
        Each permutation of the store (i.e. list of binding operators) is
        taken to be a possible scoping of quantifiers. We iterate through the
        binding operators in each permutation, and successively apply them to
        the current term, starting with the core semantic representation,
        working from the inside out.
        Binding operators are of the form::
            bo(\P.all x.(man(x) -> P(x)),z1)
        """
        for perm_index, ordering in enumerate(self._permute(self.store)):
            if trace:
                print("Permutation %s" % (perm_index + 1))
            reading = self.core
            for bindop in ordering:
                # we just want the arguments that are wrapped by the 'bo' predicate
                quant, varex = tuple(bindop.args)
                # Abstract over the current term with the operator's variable,
                # then apply the quantifier to that abstraction.
                reading = ApplicationExpression(
                    quant, LambdaExpression(varex.variable, reading))
                if trace:
                    print("  ", reading)
            self.readings.append(reading.simplify())
def parse_with_bindops(sentence, grammar=None, trace=0):
    """
    Use a grammar with Binding Operators to parse a sentence.
    """
    # Fall back to the book's storage grammar when none is supplied.
    grammar_file = grammar if grammar else 'grammars/book_grammars/storage.fcfg'
    parser = load_parser(grammar_file, trace=trace,
                         chart_class=InstantiateVarsChart)
    # Whitespace-tokenize and parse the sentence.
    return list(parser.parse(sentence.split()))
def demo():
    """Exercise Cooper storage end-to-end on a sample sentence.

    Parses the sentence with the binding-operator grammar, then for each
    parse prints the stored binding operators, the core representation, a
    traced S-retrieval, and the resulting scoped readings.
    """
    from nltk.sem import cooper_storage as cs
    sentence = "every girl chases a dog"
    #sentence = "a man gives a bone to every dog"
    print()
    # Bug fix: heading previously read "Analyis" (typo in user-facing output).
    print("Analysis of sentence '%s'" % sentence)
    print("=" * 50)
    trees = cs.parse_with_bindops(sentence, trace=0)
    for tree in trees:
        semrep = cs.CooperStore(tree.label()['SEM'])
        print()
        print("Binding operators:")
        print("-" * 15)
        for s in semrep.store:
            print(s)
        print()
        print("Core:")
        print("-" * 15)
        print(semrep.core)
        print()
        print("S-Retrieval:")
        print("-" * 15)
        semrep.s_retrieve(trace=True)
        print("Readings:")
        print("-" * 15)
        for i, reading in enumerate(semrep.readings):
            print("%s: %s" % (i+1, reading))
if __name__ == '__main__':
    demo()
| mit |
sanjeevtripurari/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/yaml3/cyaml.py | 274 | 3294 |
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
from _yaml import CParser, CEmitter
from .constructor import *
from .serializer import *
from .representer import *
from .resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """Loader backed by the libyaml C parser, paired with the *base*
    constructor and resolver classes."""
    def __init__(self, stream):
        # Order matters: the C parser is wired up first, then the pure-Python
        # constructor/resolver state.
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
class CSafeLoader(CParser, SafeConstructor, Resolver):
    """Loader backed by the libyaml C parser, with the Safe constructor
    variant (presumably restricted to standard YAML types -- see
    yaml.constructor) and full implicit-tag resolution."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
class CLoader(CParser, Constructor, Resolver):
    """Loader backed by the libyaml C parser, with the full Constructor and
    Resolver (the most permissive loader variant in this module)."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """Dumper backed by the libyaml C emitter, paired with the *base*
    representer and resolver classes."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # The C emitter owns all output/formatting options.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # Fix: initialize the bases this class actually inherits.  The
        # original called Representer.__init__/Resolver.__init__, which only
        # worked because those subclasses do not override __init__ and so
        # resolve to these same base initializers via the MRO.
        BaseRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        BaseResolver.__init__(self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """Dumper backed by the libyaml C emitter, with the Safe representer and
    full resolver."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # The C emitter owns all output/formatting options.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CDumper(CEmitter, Serializer, Representer, Resolver):
    """Dumper backed by the libyaml C emitter, with the full representer and
    resolver.

    NOTE(review): Serializer.__init__ is never invoked here -- the C emitter
    appears to subsume serialization; confirm against the pyyaml sources.
    """
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # The C emitter owns all output/formatting options.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
| apache-2.0 |
clar/gyp | test/errors/gyptest-errors.py | 13 | 2537 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test that two targets with the same name generates an error.
"""
import os
import sys
import TestGyp
import TestCmd
# TODO(sbc): Remove the use of match_re below, done because scons
# error messages were not consistent with other generators.
# Also remove input.py:generator_wants_absolute_build_file_paths.
# Each case below feeds gyp a deliberately malformed .gyp file and asserts
# that gyp exits non-zero with the expected diagnostic on stderr.
test = TestGyp.TestGyp()
# Two targets with the same name in one file.
stderr = ('gyp: Duplicate target definitions for '
          '.*duplicate_targets.gyp:foo#target\n')
test.run_gyp('duplicate_targets.gyp', status=1, stderr=stderr,
             match=TestCmd.match_re)
# A build file that defines no targets at all.
stderr = ('.*: Unable to find targets in build file .*missing_targets.gyp.*')
test.run_gyp('missing_targets.gyp', status=1, stderr=stderr,
             match=TestCmd.match_re_dotall)
# The same rule name declared twice on one target.
stderr = ('gyp: rule bar exists in duplicate, target '
          '.*duplicate_rule.gyp:foo#target\n')
test.run_gyp('duplicate_rule.gyp', status=1, stderr=stderr,
             match=TestCmd.match_re)
# A repeated dict key in the .gyp source (only caught with --check).
stderr = ("gyp: Key 'targets' repeated at level 1 with key path '' while "
          "reading .*duplicate_node.gyp.*")
test.run_gyp('duplicate_node.gyp', '--check', status=1, stderr=stderr,
             match=TestCmd.match_re_dotall)
# A dependency cycle among targets; the message names the cycle.
stderr = (".*target0.*target1.*target2.*target0.*")
test.run_gyp('dependency_cycle.gyp', status=1, stderr=stderr,
             match=TestCmd.match_re_dotall)
# A cycle between build files themselves.
stderr = (".*file_cycle0.*file_cycle1.*file_cycle0.*")
test.run_gyp('file_cycle0.gyp', status=1, stderr=stderr,
             match=TestCmd.match_re_dotall)
# Duplicate source basenames within one target.
stderr = 'gyp: Duplicate basenames in sources section, see list above\n'
test.run_gyp('duplicate_basenames.gyp', status=1, stderr=stderr)
# Check if '--no-duplicate-basename-check' works.
if ((test.format == 'make' and sys.platform == 'darwin') or
    (test.format == 'msvs' and
        int(os.environ.get('GYP_MSVS_VERSION', 2010)) < 2010)):
  # These generators cannot handle duplicate basenames even when the check
  # is disabled, so the failure is still expected.
  stderr = 'gyp: Duplicate basenames in sources section, see list above\n'
  test.run_gyp('duplicate_basenames.gyp', '--no-duplicate-basename-check',
               status=1, stderr=stderr)
else:
  test.run_gyp('duplicate_basenames.gyp', '--no-duplicate-basename-check')
# A dependency on a target that does not exist.
stderr = ("gyp: Dependency '.*missing_dep.gyp:missing.gyp#target' not found "
          "while trying to load target .*missing_dep.gyp:foo#target\n")
test.run_gyp('missing_dep.gyp', status=1, stderr=stderr,
             match=TestCmd.match_re)
test.pass_test()
| bsd-3-clause |
nikste/tensorflow | tensorflow/contrib/learn/python/learn/datasets/synthetic.py | 120 | 6827 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synthetic dataset generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets.base import Dataset
def circles(n_samples=100, noise=None, seed=None, factor=0.8, n_classes=2, *args, **kwargs):
  """Create circles separated by some value
  Args:
    n_samples: int, number of datapoints to generate
    noise: float or None, standard deviation of the Gaussian noise added
    seed: int or None, seed for the noise
    factor: float, size factor of the inner circles with respect to the outer ones
    n_classes: int, number of classes to generate
  Returns:
    Shuffled features and labels for 'circles' synthetic dataset of type `base.Dataset`
  Note:
    The multi-class support might not work as expected if `noise` is enabled
  TODO:
    - Generation of unbalanced data
  Credit goes to (under BSD 3 clause):
    B. Thirion,
    G. Varoquaux,
    A. Gramfort,
    V. Michel,
    O. Grisel,
    G. Louppe,
    J. Nothman
  """
  if seed is not None:
    np.random.seed(seed)
  # Algo: 1) Generate initial circle, 2) For ever class generate a smaller radius circle
  # NOTE: reproducibility with a fixed seed depends on the exact order of the
  # np.random calls below -- do not reorder them.
  linspace = np.linspace(0, 2*np.pi, n_samples // n_classes)
  # NOTE(review): int32 here is misleading -- appending cos/sin values
  # promotes the arrays to float64 anyway.
  circ_x = np.empty(0, dtype=np.int32)
  circ_y = np.empty(0, dtype=np.int32)
  base_cos = np.cos(linspace)
  base_sin = np.sin(linspace)
  y = np.empty(0, dtype=np.int32)
  for label in range(n_classes):
    # Append the current ring, then shrink it by `factor` for the next class.
    circ_x = np.append(circ_x, base_cos)
    circ_y = np.append(circ_y, base_sin)
    base_cos *= factor
    base_sin *= factor
    y = np.append(y, label*np.ones(n_samples // n_classes, dtype=np.int32))
  # Add more points if n_samples is not divisible by n_classes (unbalanced!)
  # Extra points always get label 0 and full (outer) radius.
  extras = n_samples % n_classes
  circ_x = np.append(circ_x, np.cos(np.random.rand(extras)*2*np.pi))
  circ_y = np.append(circ_y, np.sin(np.random.rand(extras)*2*np.pi))
  y = np.append(y, np.zeros(extras, dtype=np.int32))
  # Reshape the features/labels
  X = np.vstack((circ_x, circ_y)).T
  y = np.hstack(y)
  # Shuffle the data
  indices = np.random.permutation(range(n_samples))
  if noise is not None:
    X += np.random.normal(scale=noise, size=X.shape)
  return Dataset(data=X[indices], target=y[indices])
def spirals(n_samples=100, noise=None, seed=None,
            mode = 'archimedes',
            n_loops = 2,
            *args, **kwargs):
  """Create spirals
  Currently only binary classification is supported for spiral generation
  Args:
    n_samples: int, number of datapoints to generate
    noise: float or None, standard deviation of the Gaussian noise added
    seed: int or None, seed for the noise
    n_loops: int, number of spiral loops, doesn't play well with 'bernoulli'
    mode: str, how the spiral should be generated. Current implementations:
      'archimedes': a spiral with equal distances between branches
      'bernoulli': logarithmic spiral with branch distances increasing
      'fermat': a spiral with branch distances decreasing (sqrt)
  Returns:
    Shuffled features and labels for 'spirals' synthetic dataset of type `base.Dataset`
  Raises:
    ValueError: If the generation `mode` is not valid
  TODO:
    - Generation of unbalanced data
  """
  n_classes = 2  # I am not sure how to make it multiclass
  _modes = {
      'archimedes': _archimedes_spiral,
      'bernoulli': _bernoulli_spiral,
      'fermat': _fermat_spiral
  }
  if mode is None or mode not in _modes:
    raise ValueError("Cannot generate spiral with mode %s"%mode)
  if seed is not None:
    np.random.seed(seed)
  linspace = np.linspace(0, 2*n_loops*np.pi, n_samples // n_classes)
  spir_x = np.empty(0, dtype=np.int32)
  spir_y = np.empty(0, dtype=np.int32)
  y = np.empty(0, dtype=np.int32)
  # One spiral arm per class, each offset by pi from the previous one.
  for label in range(n_classes):
    base_cos, base_sin = _modes[mode](linspace, label*np.pi, *args, **kwargs)
    spir_x = np.append(spir_x, base_cos)
    spir_y = np.append(spir_y, base_sin)
    y = np.append(y, label*np.ones(n_samples // n_classes, dtype=np.int32))
  # Add more points if n_samples is not divisible by n_classes (unbalanced!)
  extras = n_samples % n_classes
  if extras > 0:
    # Bug fix: this variable was previously misspelled 'x_exrta', raising a
    # NameError on the next line whenever n_samples was odd.
    x_extra, y_extra = _modes[mode](np.random.rand(extras)*2*np.pi, *args, **kwargs)
    spir_x = np.append(spir_x, x_extra)
    spir_y = np.append(spir_y, y_extra)
    y = np.append(y, np.zeros(extras, dtype=np.int32))
  # Reshape the features/labels
  X = np.vstack((spir_x, spir_y)).T
  y = np.hstack(y)
  # Shuffle the data
  indices = np.random.permutation(range(n_samples))
  if noise is not None:
    X += np.random.normal(scale=noise, size=X.shape)
  return Dataset(data=X[indices], target=y[indices])
def _archimedes_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Archimedes spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
"""
x, y = theta*np.cos(theta + theta_offset), theta*np.sin(theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
def _bernoulli_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Equiangular (Bernoulli's) spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
Kwargs:
exp_scale: growth rate of the exponential
"""
exp_scale = kwargs.pop('exp_scale', 0.1)
x, y = np.exp(exp_scale*theta)*np.cos(theta + theta_offset), np.exp(exp_scale*theta)*np.sin(theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
def _fermat_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Parabolic (Fermat's) spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
"""
x, y = np.sqrt(theta)*np.cos(theta + theta_offset), np.sqrt(theta)*np.sin(theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
| apache-2.0 |
rauburtin/mitmproxy | libmproxy/console/window.py | 26 | 3189 | import urwid
from . import signals
class Window(urwid.Frame):
    """Top-level console frame for mitmproxy.

    Wires the urwid body/header/footer into a Frame, listens for focus
    signals, translates mouse wheel events into scrolling, and dispatches
    the global single-key commands in keypress().
    """
    def __init__(self, master, body, header, footer, helpctx):
        urwid.Frame.__init__(
            self,
            urwid.AttrWrap(body, "background"),
            header = urwid.AttrWrap(header, "background") if header else None,
            footer = urwid.AttrWrap(footer, "background") if footer else None
        )
        self.master = master
        self.helpctx = helpctx
        signals.focus.connect(self.sig_focus)
    def sig_focus(self, sender, section):
        # Move keyboard focus to the requested frame section ("header",
        # "body" or "footer").
        self.focus_position = section
    def mouse_event(self, *args, **kwargs):
        # args: (size, event, button, col, row)
        # NOTE(review): super(self.__class__, ...) breaks under further
        # subclassing (infinite recursion); should be super(Window, self).
        k = super(self.__class__, self).mouse_event(*args, **kwargs)
        if not k:
            if args[1] == "mouse drag":
                signals.status_message.send(
                    message = "Hold down shift, alt or ctrl to select text.",
                    expire = 1
                )
            elif args[1] == "mouse press" and args[2] == 4:
                # Button 4/5 are wheel-up/wheel-down; map them to scrolling.
                self.keypress(args[0], "up")
            elif args[1] == "mouse press" and args[2] == 5:
                self.keypress(args[0], "down")
            else:
                return False
            return True
    def keypress(self, size, k):
        # Let the focused widget consume the key first; handle global
        # commands only if it fell through.
        k = super(self.__class__, self).keypress(size, k)
        if k == "?":
            self.master.view_help(self.helpctx)
        elif k == "c":
            # Toggle client replay: prompt for a path, or offer to stop the
            # replay already in progress.
            if not self.master.client_playback:
                signals.status_prompt_path.send(
                    self,
                    prompt = "Client replay",
                    callback = self.master.client_playback_path
                )
            else:
                signals.status_prompt_onekey.send(
                    self,
                    prompt = "Stop current client replay?",
                    keys = (
                        ("yes", "y"),
                        ("no", "n"),
                    ),
                    callback = self.master.stop_client_playback_prompt,
                )
        elif k == "i":
            signals.status_prompt.send(
                self,
                prompt = "Intercept filter",
                text = self.master.state.intercept_txt,
                callback = self.master.set_intercept
            )
        elif k == "o":
            self.master.view_options()
        elif k == "Q":
            # Hard quit: unwind the urwid main loop immediately.
            raise urwid.ExitMainLoop
        elif k == "q":
            # Soft quit: pop back to the previous view.
            signals.pop_view_state.send(self)
        elif k == "S":
            # Toggle server replay, mirroring the "c" handling above.
            if not self.master.server_playback:
                signals.status_prompt_path.send(
                    self,
                    prompt = "Server replay path",
                    callback = self.master.server_playback_path
                )
            else:
                signals.status_prompt_onekey.send(
                    self,
                    prompt = "Stop current server replay?",
                    keys = (
                        ("yes", "y"),
                        ("no", "n"),
                    ),
                    callback = self.master.stop_server_playback_prompt,
                )
        else:
            # Unhandled: return the key so outer widgets may process it.
            return k
| mit |
landism/pants | src/python/pants/binaries/binary_util.py | 7 | 9303 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import posixpath
from contextlib import contextmanager
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.net.http.fetcher import Fetcher
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import temporary_file
from pants.util.dirutil import chmod_plus_x, safe_delete, safe_open
from pants.util.osutil import get_os_id
# Maps (sysname, release-major/arch) pairs derived from uname output to the
# (os, arch) directory components used in hosted binary paths; extended or
# overridden at runtime via the --binaries-path-by-id option.
_DEFAULT_PATH_BY_ID = {
  ('linux', 'x86_64'): ('linux', 'x86_64'),
  ('linux', 'amd64'): ('linux', 'x86_64'),
  ('linux', 'i386'): ('linux', 'i386'),
  ('linux', 'i686'): ('linux', 'i386'),
  ('darwin', '9'): ('mac', '10.5'),
  ('darwin', '10'): ('mac', '10.6'),
  ('darwin', '11'): ('mac', '10.7'),
  ('darwin', '12'): ('mac', '10.8'),
  ('darwin', '13'): ('mac', '10.9'),
  ('darwin', '14'): ('mac', '10.10'),
  ('darwin', '15'): ('mac', '10.11'),
  ('darwin', '16'): ('mac', '10.12'),
}
logger = logging.getLogger(__name__)
class BinaryUtil(object):
"""Wraps utility methods for finding binary executables.
:API: public
"""
  class Factory(Subsystem):
    """Subsystem exposing the binary-fetching options and building BinaryUtil.

    :API: public
    """
    options_scope = 'binaries'
    @classmethod
    def register_options(cls, register):
      register('--baseurls', type=list, advanced=True,
               default=['https://dl.bintray.com/pantsbuild/bin/build-support'],
               help='List of urls from which binary tools are downloaded. Urls are searched in '
                    'order until the requested path is found.')
      register('--fetch-timeout-secs', type=int, default=30, advanced=True,
               help='Timeout in seconds for url reads when fetching binary tools from the '
                    'repos specified by --baseurls')
      register('--path-by-id', type=dict, advanced=True,
               help='Maps output of uname for a machine to a binary search path. e.g. '
               '{ ("darwin", "15"): ["mac", "10.11"]), ("linux", "arm32"): ["linux", "arm32"] }')
    @classmethod
    def create(cls):
      """Build a BinaryUtil from the global option values.

      :API: public
      """
      # NB: create is a class method to ~force binary fetch location to be global.
      options = cls.global_instance().get_options()
      return BinaryUtil(options.baseurls, options.fetch_timeout_secs, options.pants_bootstrapdir,
                        options.path_by_id)
class MissingMachineInfo(TaskError):
"""Indicates that pants was unable to map this machine's OS to a binary path prefix."""
pass
class BinaryNotFound(TaskError):
def __init__(self, binary, accumulated_errors):
super(BinaryUtil.BinaryNotFound, self).__init__(
'Failed to fetch binary {binary} from any source: ({sources})'
.format(binary=binary, sources=', '.join(accumulated_errors)))
class NoBaseUrlsError(TaskError):
"""Indicates that no urls were specified in pants.ini."""
pass
def _select_binary_base_path(self, supportdir, version, name, uname_func=None):
"""Calculate the base path.
Exposed for associated unit tests.
:param supportdir: the path used to make a path under --pants_bootstrapdir.
:param version: the version number of the tool used to make a path under --pants-bootstrapdir.
:param name: name of the binary to search for. (e.g 'protoc')
:param uname_func: method to use to emulate os.uname() in testing
:returns: Base path used to select the binary file.
"""
uname_func = uname_func or os.uname
os_id = get_os_id(uname_func=uname_func)
if not os_id:
raise self.MissingMachineInfo('Pants has no binaries for {}'.format(' '.join(uname_func())))
try:
middle_path = self._path_by_id[os_id]
except KeyError:
raise self.MissingMachineInfo('Update --binaries-path-by-id to find binaries for {!r}'
.format(os_id))
return os.path.join(supportdir, *(middle_path + (version, name)))
def __init__(self, baseurls, timeout_secs, bootstrapdir, path_by_id=None):
"""Creates a BinaryUtil with the given settings to define binary lookup behavior.
This constructor is primarily used for testing. Production code will usually initialize
an instance using the BinaryUtil.Factory.create() method.
:param baseurls: URL prefixes which represent repositories of binaries.
:type baseurls: list of string
:param int timeout_secs: Timeout in seconds for url reads.
:param string bootstrapdir: Directory to use for caching binaries. Uses this directory to
search for binaries in, or download binaries to if needed.
:param dict path_by_id: Additional mapping from (sysname, id) -> (os, arch) for tool
directory naming
"""
self._baseurls = baseurls
self._timeout_secs = timeout_secs
self._pants_bootstrapdir = bootstrapdir
self._path_by_id = _DEFAULT_PATH_BY_ID.copy()
if path_by_id:
self._path_by_id.update((tuple(k), tuple(v)) for k, v in path_by_id.items())
@contextmanager
def _select_binary_stream(self, name, binary_path, fetcher=None):
"""Select a binary matching the current os and architecture.
:param string binary_path: The path to the binary to fetch.
:param fetcher: Optional argument used only for testing, to 'pretend' to open urls.
:returns: a 'stream' to download it from a support directory. The returned 'stream' is actually
a lambda function which returns the files binary contents.
:raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
and name could be found for the current platform.
"""
if not self._baseurls:
raise self.NoBaseUrlsError(
'No urls are defined for the --pants-support-baseurls option.')
downloaded_successfully = False
accumulated_errors = []
for baseurl in OrderedSet(self._baseurls): # De-dup URLS: we only want to try each URL once.
url = posixpath.join(baseurl, binary_path)
logger.info('Attempting to fetch {name} binary from: {url} ...'.format(name=name, url=url))
try:
with temporary_file() as dest:
fetcher = fetcher or Fetcher(get_buildroot())
fetcher.download(url,
listener=Fetcher.ProgressListener(),
path_or_fd=dest,
timeout_secs=self._timeout_secs)
logger.info('Fetched {name} binary from: {url} .'.format(name=name, url=url))
downloaded_successfully = True
dest.seek(0)
yield lambda: dest.read()
break
except (IOError, Fetcher.Error, ValueError) as e:
accumulated_errors.append('Failed to fetch binary from {url}: {error}'
.format(url=url, error=e))
if not downloaded_successfully:
raise self.BinaryNotFound(binary_path, accumulated_errors)
def select_binary(self, supportdir, version, name):
"""Selects a binary matching the current os and architecture.
:param string supportdir: The path the `name` binaries are stored under.
:param string version: The version number of the binary to select.
:param string name: The name of the binary to fetch.
:raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
and name could be found for the current platform.
"""
# TODO(John Sirois): finish doc of the path structure expected under base_path.
binary_path = self._select_binary_base_path(supportdir, version, name)
return self._fetch_binary(name=name, binary_path=binary_path)
def select_script(self, supportdir, version, name):
"""Selects a platform-independent script.
:param string supportdir: The path the `name` scripts are stored under.
:param string version: The version number of the script to select.
:param string name: The name of the script to fetch.
:raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no script of the given version
and name could be found.
"""
binary_path = os.path.join(supportdir, version, name)
return self._fetch_binary(name=name, binary_path=binary_path)
def _fetch_binary(self, name, binary_path):
bootstrap_dir = os.path.realpath(os.path.expanduser(self._pants_bootstrapdir))
bootstrapped_binary_path = os.path.join(bootstrap_dir, binary_path)
if not os.path.exists(bootstrapped_binary_path):
downloadpath = bootstrapped_binary_path + '~'
try:
with self._select_binary_stream(name, binary_path) as stream:
with safe_open(downloadpath, 'wb') as bootstrapped_binary:
bootstrapped_binary.write(stream())
os.rename(downloadpath, bootstrapped_binary_path)
chmod_plus_x(bootstrapped_binary_path)
finally:
safe_delete(downloadpath)
logger.debug('Selected {binary} binary bootstrapped to: {path}'
.format(binary=name, path=bootstrapped_binary_path))
return bootstrapped_binary_path
| apache-2.0 |
trudikampfschaf/flask-microblog | flask/lib/python2.7/site-packages/migrate/tests/versioning/test_repository.py | 30 | 8285 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
from migrate import exceptions
from migrate.versioning.repository import *
from migrate.versioning.script import *
from nose.tools import raises
from migrate.tests import fixture
from datetime import datetime
class TestRepository(fixture.Pathed):
    """Basic creation/loading behavior of migrate repositories."""

    def test_create(self):
        """Repositories are created successfully"""
        path = self.tmp_repos()
        name = 'repository_name'
        # Creating a repository that doesn't exist should succeed
        repo = Repository.create(path, name)
        config_path = repo.config.path
        manage_path = os.path.join(repo.path, 'manage.py')
        self.assert_(repo)
        # Files should actually be created
        self.assert_(os.path.exists(path))
        self.assert_(os.path.exists(config_path))
        self.assert_(os.path.exists(manage_path))
        # Can't create it again: it already exists
        self.assertRaises(exceptions.PathFoundError, Repository.create, path, name)
        # Returned so other tests (e.g. test_load) can reuse the repository.
        return path

    def test_load(self):
        """We should be able to load information about an existing repository"""
        # Create a repository to load
        path = self.test_create()
        repos = Repository(path)
        self.assert_(repos)
        self.assert_(repos.config)
        self.assert_(repos.config.get('db_settings', 'version_table'))
        # version_table's default isn't none
        self.assertNotEquals(repos.config.get('db_settings', 'version_table'), 'None')

    def test_load_notfound(self):
        """Nonexistant repositories shouldn't be loaded"""
        path = self.tmp_repos()
        self.assert_(not os.path.exists(path))
        self.assertRaises(exceptions.InvalidRepositoryError, Repository, path)

    def test_load_invalid(self):
        """Invalid repos shouldn't be loaded"""
        # Here, invalid=empty directory. There may be other conditions too,
        # but we shouldn't need to test all of them
        path = self.tmp_repos()
        os.mkdir(path)
        self.assertRaises(exceptions.InvalidRepositoryError, Repository, path)
class TestVersionedRepository(fixture.Pathed):
    """Tests on an existing repository with a single python script"""

    def setUp(self):
        super(TestVersionedRepository, self).setUp()
        # Drop any repository state cached by previous tests, then create a
        # fresh repository for this one.
        Repository.clear()
        self.path_repos = self.tmp_repos()
        Repository.create(self.path_repos, 'repository_name')

    def test_version(self):
        """We should correctly detect the version of a repository"""
        repos = Repository(self.path_repos)

        # Get latest version, or detect if a specified version exists
        self.assertEquals(repos.latest, 0)
        # repos.latest isn't an integer, but a VerNum
        # (so we can't just assume the following tests are correct)
        self.assert_(repos.latest >= 0)
        self.assert_(repos.latest < 1)

        # Create a script and test again
        repos.create_script('')
        self.assertEquals(repos.latest, 1)
        self.assert_(repos.latest >= 0)
        self.assert_(repos.latest >= 1)
        self.assert_(repos.latest < 2)

        # Create a new script and test again
        repos.create_script('')
        self.assertEquals(repos.latest, 2)
        self.assert_(repos.latest >= 0)
        self.assert_(repos.latest >= 1)
        self.assert_(repos.latest >= 2)
        self.assert_(repos.latest < 3)

    def test_timestmap_numbering_version(self):
        """With use_timestamp_numbering, new scripts get YYYYMMDDhhmmss versions."""
        repos = Repository(self.path_repos)
        repos.config.set('db_settings', 'use_timestamp_numbering', 'True')

        # Get latest version, or detect if a specified version exists
        self.assertEquals(repos.latest, 0)
        # repos.latest isn't an integer, but a VerNum
        # (so we can't just assume the following tests are correct)
        self.assert_(repos.latest >= 0)
        self.assert_(repos.latest < 1)

        # Create a script and test again.
        # NOTE(review): 'now' is sampled just before create_script(); if the
        # clock ticks over a second in between, this comparison can flake.
        now = int(datetime.utcnow().strftime('%Y%m%d%H%M%S'))
        repos.create_script('')
        # Fixed: use print() call syntax, valid on both Python 2 and 3 (the
        # original 'print repos.latest' statement is a SyntaxError under py3).
        print(repos.latest)
        self.assertEquals(repos.latest, now)

    def test_source(self):
        """Get a script object by version number and view its source"""
        # Load repository and commit script
        repo = Repository(self.path_repos)
        repo.create_script('')
        repo.create_script_sql('postgres', 'foo bar')

        # Source is valid: script must have an upgrade function
        # (not a very thorough test, but should be plenty)
        source = repo.version(1).script().source()
        self.assertTrue(source.find('def upgrade') >= 0)

        import pprint
        pprint.pprint(repo.version(2).sql)
        source = repo.version(2).script('postgres', 'upgrade').source()
        self.assertEqual(source.strip(), '')

    def test_latestversion(self):
        """Repository.version() (no params) returns the latest version"""
        repos = Repository(self.path_repos)
        repos.create_script('')
        self.assert_(repos.version(repos.latest) is repos.version())
        self.assert_(repos.version() is not None)

    def test_changeset(self):
        """Repositories can create changesets properly"""
        # Create a nonzero-version repository of empty scripts
        repos = Repository(self.path_repos)
        for i in range(10):
            repos.create_script('')

        def check_changeset(params, length):
            """Creates and verifies a changeset"""
            changeset = repos.changeset('postgres', *params)
            self.assertEquals(len(changeset), length)
            self.assertTrue(isinstance(changeset, Changeset))
            uniq = list()
            # Changesets are iterable
            for version, change in changeset:
                self.assert_(isinstance(change, BaseScript))
                # Changes aren't identical
                self.assert_(id(change) not in uniq)
                uniq.append(id(change))
            return changeset

        # Upgrade to a specified version...
        cs = check_changeset((0, 10), 10)
        self.assertEquals(cs.keys().pop(0), 0)  # 0 -> 1: index is starting version
        self.assertEquals(cs.keys().pop(), 9)   # 9 -> 10: index is starting version
        self.assertEquals(cs.start, 0)          # starting version
        self.assertEquals(cs.end, 10)           # ending version
        check_changeset((0, 1), 1)
        check_changeset((0, 5), 5)
        check_changeset((0, 0), 0)
        check_changeset((5, 5), 0)
        check_changeset((10, 10), 0)
        check_changeset((5, 10), 5)

        # Can't request a changeset of higher version than this repository
        self.assertRaises(Exception, repos.changeset, 'postgres', 5, 11)
        self.assertRaises(Exception, repos.changeset, 'postgres', -1, 5)

        # Upgrade to the latest version...
        cs = check_changeset((0,), 10)
        self.assertEquals(cs.keys().pop(0), 0)
        self.assertEquals(cs.keys().pop(), 9)
        self.assertEquals(cs.start, 0)
        self.assertEquals(cs.end, 10)
        check_changeset((1,), 9)
        check_changeset((5,), 5)
        check_changeset((9,), 1)
        check_changeset((10,), 0)

        # run changes
        cs.run('postgres', 'upgrade')

        # Can't request a changeset of higher/lower version than this repository
        self.assertRaises(Exception, repos.changeset, 'postgres', 11)
        self.assertRaises(Exception, repos.changeset, 'postgres', -1)

        # Downgrade
        cs = check_changeset((10, 0), 10)
        self.assertEquals(cs.keys().pop(0), 10)  # 10 -> 9
        self.assertEquals(cs.keys().pop(), 1)    # 1 -> 0
        self.assertEquals(cs.start, 10)
        self.assertEquals(cs.end, 0)
        check_changeset((10, 5), 5)
        check_changeset((5, 0), 5)

    def test_many_versions(self):
        """Test what happens when lots of versions are created"""
        repos = Repository(self.path_repos)
        for i in range(1001):
            repos.create_script('')

        # since we normally create 3 digit ones, let's see if we blow up
        self.assert_(os.path.exists('%s/versions/1000.py' % self.path_repos))
        self.assert_(os.path.exists('%s/versions/1001.py' % self.path_repos))

# TODO: test manage file
# TODO: test changeset
| bsd-3-clause |
keithhendry/treadmill | treadmill/rest/api/cell.py | 3 | 3455 | """
Treadmill Cell REST api.
"""
import flask
import flask_restplus as restplus
from flask_restplus import fields
# Disable E0611: No 'name' in module
from treadmill import webutils # pylint: disable=E0611
# Old style classes, no init method.
#
# pylint: disable=W0232
def init(api, cors, impl):
    """Configures REST handlers for cell resource.

    :param api: flask_restplus Api to register the namespace/models/routes on.
    :param cors: CORS handler passed through to the webutils API decorators.
    :param impl: backend providing list/get/create/update/delete for cells.
    """
    namespace = webutils.namespace(
        api, __name__, 'Cell REST operations'
    )

    # Marshalling model for a single Zookeeper master host entry.
    master = api.model('Master', {
        'hostname': fields.String(description='Hostname'),
        'idx': fields.Integer(description='Index of master'),
        'zk-followers-port': fields.Integer(description='ZK follower port'),
        'zk-election-port': fields.Integer(description='ZK election port'),
        'zk-jmx-port': fields.Integer(description='ZK JMX port'),
        'zk-client-port': fields.Integer(description='ZK client port'),
    })
    # Marshalling model for one cell partition's capacity settings.
    partition = api.model('Partition', {
        'partition': fields.String(description='Name'),
        'cpu': fields.String(description='Total cpu capacity'),
        'disk': fields.String(description='Total disk capacity'),
        'memory': fields.String(description='Total memory capacity'),
        'down-threshold': fields.String(description='Server down threshold'),
    })
    # Field definitions of the top-level 'Cell' model.
    model = {
        '_id': fields.String(description='Name'),
        'username': fields.String(description='Treadmill User ID'),
        'root': fields.String(description='Treadmill Root'),
        'archive-server': fields.String(description='Archive Server'),
        'archive-username': fields.String(description='Archive Username'),
        'ssq-namespace': fields.String(description='SSQ Namespace'),
        'location': fields.String(description='Location'),
        'version': fields.String(description='Version'),
        'masters': fields.List(fields.Nested(master)),
        'partitions': fields.List(fields.Nested(partition)),
    }

    cell_model = api.model(
        'Cell', model
    )

    @namespace.route('/')
    class _CellList(restplus.Resource):
        """Treadmill Cell resource"""

        @webutils.get_api(api, cors,
                          marshal=api.marshal_list_with,
                          resp_model=cell_model)
        def get(self):
            """Returns list of configured cells."""
            return impl.list()

    @namespace.route('/<cell>')
    @api.doc(params={'cell': 'Cell ID/name'})
    class _CellResource(restplus.Resource):
        """Treadmill Cell resource."""

        @webutils.get_api(api, cors,
                          marshal=api.marshal_with,
                          resp_model=cell_model)
        def get(self, cell):
            """Return Treadmill cell configuration."""
            return impl.get(cell)

        @webutils.post_api(api, cors,
                           req_model=cell_model,
                           resp_model=cell_model)
        def post(self, cell):
            """Creates Treadmill cell."""
            return impl.create(cell, flask.request.json)

        @webutils.put_api(api, cors,
                          req_model=cell_model,
                          resp_model=cell_model)
        def put(self, cell):
            """Updates Treadmill cell configuration."""
            return impl.update(cell, flask.request.json)

        @webutils.delete_api(api, cors)
        def delete(self, cell):
            """Deletes Treadmill cell."""
            return impl.delete(cell)
| apache-2.0 |
oppo-source/Find7-4.3-kernel-source | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
    import wx
except ImportError:
    # Use exception-call syntax so this line parses on both Python 2 and 3;
    # the original 'raise ImportError, "..."' comma form is a SyntaxError
    # under Python 3.
    raise ImportError("You need to install the wxpython lib for this script")
class RootFrame(wx.Frame):
    """Top-level window showing scheduler-trace data as a scrollable, zoomable
    timeline of per-runqueue rectangles (one row per rectangle index reported
    by the sched_tracer)."""

    # Layout constants, in pixels.
    Y_OFFSET = 100            # blank space above the first rectangle row
    RECT_HEIGHT = 100         # height of one rectangle row
    RECT_SPACE = 50           # vertical gap between rows
    EVENT_MARKING_WIDTH = 5   # height of the event-marker strip atop a rectangle

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        # Fit the window to the display, with a small margin.
        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        # Traced time interval, in microseconds.
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area; paint/key/mouse handlers are bound on both
        # the inner panel and the scrolled window so events reach us either way.
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        # Summary text widget, created lazily by update_summary().
        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        """Convert microseconds to pixels at the current zoom."""
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        """Convert pixels back to microseconds (inverse of us_to_px)."""
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Scroll origin in pixels (GetViewStart reports scroll units).
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Time offset (us, relative to ts_start) of the view's left edge.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one rectangle on row ``nr`` spanning [start, end] (absolute us).

        ``top_color``, if not None, is painted as a thin strip along the top
        edge (event marking); ``color`` fills the remainder.
        """
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the main rectangle so it sits below the marker strip.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # start/end arrive relative to ts_start; rebase to absolute timestamps
        # before asking the tracer to (re)fill the zone.
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only the currently visible time window.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        """Map a y pixel to a rectangle row index; -1 if in a gap or out of range."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        # Replace the static summary text displayed below the timeline.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Translate a click into (row, timestamp) and forward to the tracer.
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        # Total timeline width in pixels at the current zoom.
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Recompute scrollbar geometry, keeping timestamp x anchored at the
        # left edge of the view.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # '+'/'-' zoom; arrow keys scroll one scroll unit at a time.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
schambers/civmarket | civmarket/lib/python2.7/site-packages/django/contrib/gis/gdal/tests/test_srs.py | 109 | 11159 | from django.contrib.gis.gdal import HAS_GDAL
from django.utils import unittest
from django.utils.unittest import skipUnless
if HAS_GDAL:
from django.contrib.gis.gdal import SpatialReference, CoordTransform, OGRException, SRSException
class TestSRS:
    """Fixture record: a WKT string plus arbitrary expected-value attributes."""

    def __init__(self, wkt, **kwargs):
        self.wkt = wkt
        # Expose every keyword argument directly as an instance attribute.
        self.__dict__.update(kwargs)
# Some Spatial Reference examples
# Each TestSRS pairs a WKT definition with the values the tests below expect
# to read back from it (proj4 string, EPSG code, flags, units, authorities,
# and (attribute, expected) lookup pairs).
srlist = (TestSRS('GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
                  proj='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ',
                  epsg=4326, projected=False, geographic=True, local=False,
                  lin_name='unknown', ang_name='degree', lin_units=1.0, ang_units=0.0174532925199,
                  auth={'GEOGCS' : ('EPSG', '4326'), 'spheroid' : ('EPSG', '7030')},
                  attr=(('DATUM', 'WGS_1984'), (('SPHEROID', 1), '6378137'),('primem|authority', 'EPSG'),),
                  ),
          TestSRS('PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',
                  proj=None, epsg=32140, projected=True, geographic=False, local=False,
                  lin_name='metre', ang_name='degree', lin_units=1.0, ang_units=0.0174532925199,
                  auth={'PROJCS' : ('EPSG', '32140'), 'spheroid' : ('EPSG', '7019'), 'unit' : ('EPSG', '9001'),},
                  attr=(('DATUM', 'North_American_Datum_1983'),(('SPHEROID', 2), '298.257222101'),('PROJECTION','Lambert_Conformal_Conic_2SP'),),
                  ),
          TestSRS('PROJCS["NAD_1983_StatePlane_Texas_South_Central_FIPS_4204_Feet",GEOGCS["GCS_North_American_1983",DATUM["North_American_Datum_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["False_Easting",1968500.0],PARAMETER["False_Northing",13123333.33333333],PARAMETER["Central_Meridian",-99.0],PARAMETER["Standard_Parallel_1",28.38333333333333],PARAMETER["Standard_Parallel_2",30.28333333333334],PARAMETER["Latitude_Of_Origin",27.83333333333333],UNIT["Foot_US",0.3048006096012192]]',
                  proj=None, epsg=None, projected=True, geographic=False, local=False,
                  lin_name='Foot_US', ang_name='Degree', lin_units=0.3048006096012192, ang_units=0.0174532925199,
                  auth={'PROJCS' : (None, None),},
                  attr=(('PROJCS|GeOgCs|spheroid', 'GRS_1980'),(('projcs', 9), 'UNIT'), (('projcs', 11), None),),
                  ),
          # This is really ESRI format, not WKT -- but the import should work the same
          TestSRS('LOCAL_CS["Non-Earth (Meter)",LOCAL_DATUM["Local Datum",0],UNIT["Meter",1.0],AXIS["X",EAST],AXIS["Y",NORTH]]',
                  esri=True, proj=None, epsg=None, projected=False, geographic=False, local=True,
                  lin_name='Meter', ang_name='degree', lin_units=1.0, ang_units=0.0174532925199,
                  attr=(('LOCAL_DATUM', 'Local Datum'), ('unit', 'Meter')),
                  ),
          )

# Well-Known Names
# 'wk' is the shorthand accepted by SpatialReference; 'name'/'attrs' are the
# values expected after import.
well_known = (TestSRS('GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]', wk='WGS84', name='WGS 84', attrs=(('GEOGCS|AUTHORITY', 1, '4326'), ('SPHEROID', 'WGS 84'))),
              TestSRS('GEOGCS["WGS 72",DATUM["WGS_1972",SPHEROID["WGS 72",6378135,298.26,AUTHORITY["EPSG","7043"]],AUTHORITY["EPSG","6322"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4322"]]', wk='WGS72', name='WGS 72', attrs=(('GEOGCS|AUTHORITY', 1, '4322'), ('SPHEROID', 'WGS 72'))),
              TestSRS('GEOGCS["NAD27",DATUM["North_American_Datum_1927",SPHEROID["Clarke 1866",6378206.4,294.9786982138982,AUTHORITY["EPSG","7008"]],AUTHORITY["EPSG","6267"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4267"]]', wk='NAD27', name='NAD27', attrs=(('GEOGCS|AUTHORITY', 1, '4267'), ('SPHEROID', 'Clarke 1866'))),
              TestSRS('GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]]', wk='NAD83', name='NAD83', attrs=(('GEOGCS|AUTHORITY', 1, '4269'), ('SPHEROID', 'GRS 1980'))),
              TestSRS('PROJCS["NZGD49 / Karamea Circuit",GEOGCS["NZGD49",DATUM["New_Zealand_Geodetic_Datum_1949",SPHEROID["International 1924",6378388,297,AUTHORITY["EPSG","7022"]],TOWGS84[59.47,-5.04,187.44,0.47,-0.1,1.024,-4.5993],AUTHORITY["EPSG","6272"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4272"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",-41.28991152777778],PARAMETER["central_meridian",172.1090281944444],PARAMETER["scale_factor",1],PARAMETER["false_easting",300000],PARAMETER["false_northing",700000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","27216"]]', wk='EPSG:27216', name='NZGD49 / Karamea Circuit', attrs=(('PROJECTION','Transverse_Mercator'), ('SPHEROID', 'International 1924'))),
              )

# Inputs that must be rejected by SpatialReference/validate().
bad_srlist = ('Foobar', 'OOJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',)
@skipUnless(HAS_GDAL, "GDAL is required")
class SpatialRefTest(unittest.TestCase):
    """Exercises SpatialReference/CoordTransform against the fixtures above."""

    def test01_wkt(self):
        "Testing initialization on valid OGC WKT."
        for s in srlist:
            srs = SpatialReference(s.wkt)

    def test02_bad_wkt(self):
        "Testing initialization on invalid WKT."
        for bad in bad_srlist:
            try:
                srs = SpatialReference(bad)
                srs.validate()
            except (SRSException, OGRException):
                pass
            else:
                self.fail('Should not have initialized on bad WKT "%s"!')

    def test03_get_wkt(self):
        "Testing getting the WKT."
        for s in srlist:
            srs = SpatialReference(s.wkt)
            self.assertEqual(s.wkt, srs.wkt)

    def test04_proj(self):
        "Test PROJ.4 import and export."
        for s in srlist:
            if s.proj:
                srs1 = SpatialReference(s.wkt)
                srs2 = SpatialReference(s.proj)
                self.assertEqual(srs1.proj, srs2.proj)

    def test05_epsg(self):
        "Test EPSG import."
        # EPSG codes may be given as an int, a numeric string, or 'EPSG:n'.
        for s in srlist:
            if s.epsg:
                srs1 = SpatialReference(s.wkt)
                srs2 = SpatialReference(s.epsg)
                srs3 = SpatialReference(str(s.epsg))
                srs4 = SpatialReference('EPSG:%d' % s.epsg)
                for srs in (srs1, srs2, srs3, srs4):
                    for attr, expected in s.attr:
                        self.assertEqual(expected, srs[attr])

    def test07_boolean_props(self):
        "Testing the boolean properties."
        for s in srlist:
            srs = SpatialReference(s.wkt)
            self.assertEqual(s.projected, srs.projected)
            self.assertEqual(s.geographic, srs.geographic)

    def test08_angular_linear(self):
        "Testing the linear and angular units routines."
        for s in srlist:
            srs = SpatialReference(s.wkt)
            self.assertEqual(s.ang_name, srs.angular_name)
            self.assertEqual(s.lin_name, srs.linear_name)
            # Units are floats; compare to 9 decimal places.
            self.assertAlmostEqual(s.ang_units, srs.angular_units, 9)
            self.assertAlmostEqual(s.lin_units, srs.linear_units, 9)

    def test09_authority(self):
        "Testing the authority name & code routines."
        for s in srlist:
            if hasattr(s, 'auth'):
                srs = SpatialReference(s.wkt)
                for target, tup in s.auth.items():
                    self.assertEqual(tup[0], srs.auth_name(target))
                    self.assertEqual(tup[1], srs.auth_code(target))

    def test10_attributes(self):
        "Testing the attribute retrieval routines."
        for s in srlist:
            srs = SpatialReference(s.wkt)
            for tup in s.attr:
                att = tup[0]  # Attribute to test
                exp = tup[1]  # Expected result
                self.assertEqual(exp, srs[att])

    def test11_wellknown(self):
        "Testing Well Known Names of Spatial References."
        for s in well_known:
            srs = SpatialReference(s.wk)
            self.assertEqual(s.name, srs.name)
            # attrs entries are either (key, expected) or (key, index, expected).
            for tup in s.attrs:
                if len(tup) == 2:
                    key = tup[0]
                    exp = tup[1]
                elif len(tup) == 3:
                    key = tup[:2]
                    exp = tup[2]
                self.assertEqual(srs[key], exp)

    def test12_coordtransform(self):
        "Testing initialization of a CoordTransform."
        target = SpatialReference('WGS84')
        for s in srlist:
            if s.proj:
                ct = CoordTransform(SpatialReference(s.wkt), target)

    def test13_attr_value(self):
        "Testing the attr_value() method."
        s1 = SpatialReference('WGS84')
        # Keys must be a string or a (string, int) pair.
        self.assertRaises(TypeError, s1.__getitem__, 0)
        self.assertRaises(TypeError, s1.__getitem__, ('GEOGCS', 'foo'))
        self.assertEqual('WGS 84', s1['GEOGCS'])
        self.assertEqual('WGS_1984', s1['DATUM'])
        self.assertEqual('EPSG', s1['AUTHORITY'])
        self.assertEqual(4326, int(s1['AUTHORITY', 1]))
        # Unknown attributes yield None rather than raising.
        self.assertEqual(None, s1['FOOBAR'])
| apache-2.0 |
psanxiao/gtranslator | plugins/charmap/charmap/__init__.py | 3 | 4698 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Steve Frécinaux <steve@istique.net>
# 2010 Ignacio Casal Quinteiro <icq@gnome.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject, Gio, Pango, Gtk, Gtranslator, Gucharmap
from panel import CharmapPanel
import sys
import gettext
class CharmapPlugin(GObject.Object, Gtranslator.TabActivatable):
    """Gtranslator tab plugin that embeds a Gucharmap character-map panel.

    The panel is docked on the left of the tab; the active character's
    Unicode description is mirrored into the window's status bar, and
    activating a character inserts it into the translation view.
    """

    __gtype_name__ = "CharmapPlugin"

    # The tab this plugin instance is attached to (injected by the engine).
    tab = GObject.property(type=Gtranslator.Tab)

    def __init__(self):
        GObject.Object.__init__(self)

    def do_activate(self):
        """Create the panel, attach it to the tab and wire font tracking."""
        # Re-apply the chartable font whenever any relevant setting changes.
        self.editor_settings = Gio.Settings.new("org.gnome.gtranslator.preferences.editor")
        self.editor_settings.connect("changed::use-custom-font", self.font_changed)
        self.editor_settings.connect("changed::editor-font", self.font_changed)

        self.system_settings = Gio.Settings.new("org.gnome.desktop.interface")
        self.system_settings.connect("changed::monospace-font-name", self.font_changed)

        self.window = self.tab.get_toplevel()

        self.create_charmap_panel()
        # NOTE(review): `_` is assumed to be installed as a builtin by the
        # application's gettext setup; this module imports gettext but never
        # binds `_` itself — confirm.
        self.tab.add_widget(self.panel, "GtrCharmapPanel", _("Character Map"),
                            "accessories-character-map", Gtranslator.TabPlacement.LEFT)

        statusbar = self.window.get_statusbar()
        self.context_id = statusbar.get_context_id("Character Description")

    def do_deactivate(self):
        """Detach the panel from the tab."""
        self.tab.remove_widget(self.panel)

    def do_update_state(self):
        # Nothing to refresh when the tab state changes.
        pass

    def get_document_font(self):
        """Return the font name string the editor is configured to use."""
        if self.editor_settings.get_boolean("use-custom-font"):
            font = self.editor_settings.get_string("editor-font")
        else:
            font = self.system_settings.get_string("monospace-font-name")
        return font

    def font_changed(self, settings=None, key=None):
        """Apply the current document font to the chartable."""
        font = self.get_document_font()
        font_desc = Pango.font_description_from_string(font)
        chartable = self.panel.get_chartable()
        chartable.set_font_desc(font_desc)

    def create_charmap_panel(self):
        """Build the CharmapPanel and hook up its chartable signals."""
        self.panel = CharmapPanel()
        chartable = self.panel.get_chartable()

        # Use the same font as the document
        self.font_changed()

        chartable.connect("notify::active-character", self.on_table_sync_active_char)
        chartable.connect("focus-out-event", self.on_table_focus_out_event)
        chartable.connect("status-message", self.on_table_status_message)
        chartable.connect("activate", self.on_table_activate)

        self.panel.show()

    def on_table_sync_active_char(self, chartable, pspec):
        """Show the active character's name, aliases and notes in the statusbar."""
        uc = chartable.get_active_character()
        text = "%s %s" % (uc, Gucharmap.get_unicode_name(uc))
        # NamesList "=" entries are aliases for the character.
        a = Gucharmap.get_nameslist_equals(uc)
        if a:
            text += " = %s" % a[0]
            for i in range(len (a) - 1):
                text += "; %s" % a[i + 1]
        # "\342\200\242" below is the UTF-8 byte sequence of a bullet
        # (U+2022), used to separate the NamesList "star" (note) entries.
        a = Gucharmap.get_nameslist_stars(uc)
        if a:
            text += " \342\200\242 %s" % a[0]
            for i in range(len (a) - 1):
                text += "; %s" % a[i + 1]
        self.on_table_status_message(chartable, text)

    def on_table_focus_out_event(self, chartable, event):
        """Clear the statusbar message when the chartable loses focus."""
        self.on_table_status_message (chartable, None)
        return False

    def on_table_status_message(self, chartable, message):
        """Replace this plugin's statusbar message with `message` (None clears)."""
        statusbar = self.window.get_statusbar()

        statusbar.pop(self.context_id)
        if message:
            statusbar.push(self.context_id, message)

    def on_table_activate(self, chartable):
        """Insert the active character into the editable translation view."""
        uc = chartable.get_active_character()
        if not Gucharmap.unichar_validate(uc):
            raise ValueError

        view = self.window.get_active_view()
        if not view or not view.get_editable():
            return

        document = view.get_buffer()
        document.begin_user_action()
        # Replace the current selection, if any, before inserting.
        iters = document.get_selection_bounds()
        if iters:
            document.delete_interactive(iters[0], iters[1], view.get_editable())
        document.insert_interactive_at_cursor(uc, -1, view.get_editable())
        document.end_user_action()
# ex:et:ts=4:
| gpl-3.0 |
brinbois/Sick-Beard | sickbeard/clients/requests/packages/urllib3/exceptions.py | 245 | 2258 | # urllib3/exceptions.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
## Base Exceptions
class HTTPError(Exception):
    """Root of the exception hierarchy raised by this module."""
class PoolError(HTTPError):
    """Base exception for errors caused within a pool.

    Attributes:
        pool: The connection pool the error originated from.
    """

    def __init__(self, pool, message):
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))

    def __reduce__(self):
        # For pickling purposes. Only some subclasses (MaxRetryError,
        # HostChangedError) set ``self.url``; the previous unconditional
        # ``self.url`` access raised AttributeError when pickling any other
        # pool error, so fall back to None for subclasses without a url.
        return self.__class__, (None, getattr(self, 'url', None))
class SSLError(HTTPError):
    """Raised when SSL certificate fails in an HTTPS connection."""


class DecodeError(HTTPError):
    """Raised when automatic decoding based on Content-Type fails."""
## Leaf Exceptions
class MaxRetryError(PoolError):
    """Raised when the maximum number of retries is exceeded.

    Attributes:
        url: The request URL that exhausted its retries.
        reason: The underlying error, if any (None means a redirect loop).
    """

    def __init__(self, pool, url, reason=None):
        self.reason = reason
        self.url = url
        cause = (" (Caused by %s: %s)" % (type(reason), reason)
                 if reason else " (Caused by redirect)")
        PoolError.__init__(
            self, pool, "Max retries exceeded with url: %s" % url + cause)
class HostChangedError(PoolError):
    """Raised when an existing pool gets a request for a foreign host."""

    def __init__(self, pool, url, retries=3):
        PoolError.__init__(
            self, pool, "Tried to open a foreign host with url: %s" % url)
        self.url = url
        self.retries = retries
class TimeoutError(PoolError):
    """Raised when a socket timeout occurs."""


class EmptyPoolError(PoolError):
    """Raised when a pool runs out of connections and no more are allowed."""


class ClosedPoolError(PoolError):
    """Raised when a request enters a pool after the pool has been closed."""
class LocationParseError(ValueError, HTTPError):
    """Raised when get_host or similar fails to parse the URL input."""

    def __init__(self, location):
        self.location = location
        HTTPError.__init__(self, "Failed to parse: %s" % location)
| gpl-3.0 |
lvdongr/spark | examples/src/main/python/mllib/k_means_example.py | 123 | 2028 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from numpy import array
from math import sqrt
# $example off$
from pyspark import SparkContext
# $example on$
from pyspark.mllib.clustering import KMeans, KMeansModel
# $example off$
if __name__ == "__main__":
    sc = SparkContext(appName="KMeansExample")  # SparkContext

    # $example on$
    # Load and parse the data
    raw_lines = sc.textFile("data/mllib/kmeans_data.txt")
    points = raw_lines.map(
        lambda line: array([float(x) for x in line.split(' ')]))

    # Build the model (cluster the data)
    model = KMeans.train(points, 2, maxIterations=10,
                         initializationMode="random")

    # Evaluate clustering by computing Within Set Sum of Squared Errors
    def error(point):
        center = model.centers[model.predict(point)]
        return sqrt(sum([c ** 2 for c in (point - center)]))

    WSSSE = points.map(error).reduce(lambda x, y: x + y)
    print("Within Set Sum of Squared Error = " + str(WSSSE))

    # Save and load model
    model.save(sc, "target/org/apache/spark/PythonKMeansExample/KMeansModel")
    sameModel = KMeansModel.load(
        sc, "target/org/apache/spark/PythonKMeansExample/KMeansModel")
    # $example off$

    sc.stop()
| apache-2.0 |
anand-c-goog/tensorflow | tensorflow/contrib/layers/python/layers/regularizers.py | 21 | 6968 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regularizers for use with layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.platform import tf_logging as logging
__all__ = ['l1_regularizer',
'l2_regularizer',
'l1_l2_regularizer',
'sum_regularizer',
'apply_regularization']
def l1_regularizer(scale, scope=None):
  """Returns a function that can be used to apply L1 regularization to weights.

  L1 regularization encourages sparsity.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: An optional scope name.

  Returns:
    A function with signature `l1(weights)` that apply L1 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  # Validate up front with guard clauses: bools/ints are rejected outright,
  # negative reals are rejected, and 0.0 short-circuits to a no-op.
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % scale)
  if isinstance(scale, numbers.Real) and scale < 0.:
    raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
                     scale)
  if isinstance(scale, numbers.Real) and scale == 0.:
    logging.info('Scale of 0 disables regularizer.')
    return lambda _: None

  def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
      scale_tensor = ops.convert_to_tensor(scale,
                                           dtype=weights.dtype.base_dtype,
                                           name='scale')
      # Penalty is scale * sum(|w|).
      return standard_ops.mul(
          scale_tensor,
          standard_ops.reduce_sum(standard_ops.abs(weights)),
          name=name)

  return l1
def l2_regularizer(scale, scope=None):
  """Returns a function that can be used to apply L2 regularization to weights.

  Small values of L2 can help prevent overfitting the training data.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
    scope: An optional scope name.

  Returns:
    A function with signature `l2(weights)` that applies L2 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  # Same validation scheme as l1_regularizer: reject ints, reject negatives,
  # and turn a 0.0 scale into a regularizer that contributes nothing.
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % (scale,))
  if isinstance(scale, numbers.Real) and scale < 0.:
    raise ValueError('Setting a scale less than 0 on a regularizer: %g.' %
                     scale)
  if isinstance(scale, numbers.Real) and scale == 0.:
    logging.info('Scale of 0 disables regularizer.')
    return lambda _: None

  def l2(weights):
    """Applies l2 regularization to weights."""
    with ops.name_scope(scope, 'l2_regularizer', [weights]) as name:
      scale_tensor = ops.convert_to_tensor(scale,
                                           dtype=weights.dtype.base_dtype,
                                           name='scale')
      # Penalty is scale * l2_loss(w) (i.e. scale * sum(w**2) / 2).
      return standard_ops.mul(scale_tensor, nn.l2_loss(weights), name=name)

  return l2
def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
  """Returns a function that can be used to apply L1 L2 regularizations.

  Args:
    scale_l1: A scalar multiplier `Tensor` for L1 regularization.
    scale_l2: A scalar multiplier `Tensor` for L2 regularization.
    scope: An optional scope name.

  Returns:
    A function with signature `l1_l2(weights)` that applies a weighted sum of
    L1 L2 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  # Delegate to sum_regularizer over the two individual regularizers.
  return sum_regularizer(
      [l1_regularizer(scale_l1), l2_regularizer(scale_l2)],
      scope=scope or 'l1_l2_regularizer')
def sum_regularizer(regularizer_list, scope=None):
  """Returns a function that applies the sum of multiple regularizers.

  Args:
    regularizer_list: A list of regularizers to apply.
    scope: An optional scope name

  Returns:
    A function with signature `sum_reg(weights)` that applies the
    sum of all the input regularizers.
  """
  # Drop disabled (None) entries; with nothing left there is no regularizer.
  active_regularizers = [reg for reg in regularizer_list if reg is not None]
  if not active_regularizers:
    return None

  def sum_reg(weights):
    """Applies the sum of all the input regularizers."""
    with ops.name_scope(scope, 'sum_regularizer', [weights]) as name:
      penalty_tensors = [reg(weights) for reg in active_regularizers]
      return math_ops.add_n(penalty_tensors, name=name)

  return sum_reg
def apply_regularization(regularizer, weights_list=None):
  """Returns the summed penalty by applying `regularizer` to the `weights_list`.

  Adding a regularization penalty over the layer weights and embedding weights
  can help prevent overfitting the training data. Regularization over layer
  biases is less common/useful, but assuming proper data preprocessing/mean
  subtraction, it usually shouldn't hurt much either.

  Args:
    regularizer: A function that takes a single `Tensor` argument and returns
      a scalar `Tensor` output.
    weights_list: List of weights `Tensors` or `Variables` to apply
      `regularizer` over. Defaults to the `GraphKeys.WEIGHTS` collection if
      `None`.

  Returns:
    A scalar representing the overall regularization penalty.

  Raises:
    ValueError: If `regularizer` does not return a scalar output, or if we find
      no weights.
  """
  if not weights_list:
    # Fall back to the graph's WEIGHTS collection when no list is given.
    weights_list = ops.get_collection(ops.GraphKeys.WEIGHTS)
    if not weights_list:
      raise ValueError('No weights to regularize.')
  with ops.name_scope('get_regularization_penalty',
                      values=weights_list) as scope:
    penalties = []
    for weight in weights_list:
      penalty = regularizer(weight)
      # A regularizer may return None (disabled); substitute a zero constant
      # so add_n below still receives one tensor per weight.
      penalties.append(
          penalty if penalty is not None else constant_op.constant(0.0))
    for penalty in penalties:
      if penalty.get_shape().ndims != 0:
        raise ValueError('regularizer must return a scalar Tensor instead of a '
                         'Tensor with rank %d.' % penalty.get_shape().ndims)

    summed_penalty = math_ops.add_n(penalties, name=scope)
    ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, summed_penalty)
    return summed_penalty
| apache-2.0 |
higgsmass/notify | docs/source/conf.py | 2 | 9919 | # -*- coding: utf-8 -*-
#
# pycookie documentation build configuration file, created by
# sphinx-quickstart on Sat May 28 22:40:15 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules enabled for this build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.imgmath',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'pycookie'
copyright = u'2016, Venkat Kaushik'
author = u'Venkat Kaushik'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# No explicit content language configured (Sphinx's default applies).
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'  # overridden further below for local (non-RTD) builds
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'pycookie v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']  # custom static assets, copied over builtins
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pycookiedoc'  # base name for the HTML Help output files
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX builder options are left at their defaults; uncomment an entry
# below to override it.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# One LaTeX output document, built from the master toctree in 'manual' class.
latex_documents = [
    (master_doc, 'pycookie.tex', u'pycookie Documentation',
     u'Venkat Kaushik', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One manual page in section 1, built from the master toctree.
man_pages = [
    (master_doc, 'pycookie', u'pycookie Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# One Texinfo output document, built from the master toctree.
texinfo_documents = [
    (master_doc, 'pycookie', u'pycookie Documentation',
     author, 'pycookie', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# on_rtd is True when the build runs on readthedocs.org, which sets the
# READTHEDOCS environment variable to the string 'True' during its builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
| mit |
hofschroeer/gnuradio | gr-analog/python/analog/fm_demod.py | 4 | 4466 | #
# Copyright 2006,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, filter
from .fm_emph import fm_deemph
from math import pi
from . import analog_swig as analog
class fm_demod_cf(gr.hier_block2):
"""
Generalized FM demodulation block with deemphasis and audio
filtering.
This block demodulates a band-limited, complex down-converted FM
channel into the the original baseband signal, optionally applying
deemphasis. Low pass filtering is done on the resultant signal. It
produces an output float stream in the range of [-1.0, +1.0].
Args:
channel_rate: incoming sample rate of the FM baseband (integer)
deviation: maximum FM deviation (default = 5000) (float)
audio_decim: input to output decimation rate (integer)
audio_pass: audio low pass filter passband frequency (float)
audio_stop: audio low pass filter stop frequency (float)
gain: gain applied to audio output (default = 1.0) (float)
tau: deemphasis time constant (default = 75e-6), specify tau=0.0 to prevent deemphasis (float)
"""
def __init__(self, channel_rate, audio_decim, deviation,
audio_pass, audio_stop, gain=1.0, tau=75e-6):
gr.hier_block2.__init__(self, "fm_demod_cf",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
k = channel_rate / (2*pi*deviation)
QUAD = analog.quadrature_demod_cf(k)
audio_taps = filter.optfir.low_pass(
gain, # Filter gain
channel_rate, # Sample rate
audio_pass, # Audio passband
audio_stop, # Audio stopband
0.1, # Passband ripple
60 # Stopband attenuation
)
LPF = filter.fir_filter_fff(audio_decim, audio_taps)
if tau is not None and tau > 0.0: # None should be deprecated someday
DEEMPH = fm_deemph(channel_rate, tau)
self.connect(self, QUAD, DEEMPH, LPF, self)
else:
self.connect(self, QUAD, LPF, self)
class demod_20k0f3e_cf(fm_demod_cf):
"""
NBFM demodulation block, 20 KHz channels
This block demodulates a complex, downconverted, narrowband FM
channel conforming to 20K0F3E emission standards, outputting
floats in the range [-1.0, +1.0].
Args:
sample_rate: incoming sample rate of the FM baseband (integer)
audio_decim: input to output decimation rate (integer)
"""
def __init__(self, channel_rate, audio_decim):
fm_demod_cf.__init__(self, channel_rate, audio_decim,
5000, # Deviation
3000, # Audio passband frequency
4500) # Audio stopband frequency
class demod_200kf3e_cf(fm_demod_cf):
"""
WFM demodulation block, mono.
This block demodulates a complex, downconverted, wideband FM
channel conforming to 200KF3E emission standards, outputting
floats in the range [-1.0, +1.0].
Args:
sample_rate: incoming sample rate of the FM baseband (integer)
audio_decim: input to output decimation rate (integer)
"""
def __init__(self, channel_rate, audio_decim):
fm_demod_cf.__init__(self, channel_rate, audio_decim,
75000, # Deviation
15000, # Audio passband
16000, # Audio stopband
20.0) # Audio gain
| gpl-3.0 |
spron-in/py-bind-adm | dns/dns_health_check.py | 1 | 2742 | #
#
# dns health check
#
#
import os
import subprocess
import simplejson as json
import re
from dns.config import config
class CheckHealthActions:
    """Runs one DNS health-check record through its type-specific handler.

    ``check`` is a dict-like record with a JSON-encoded ``parameter`` field.
    Handlers report failure by setting ``check['message']`` and
    ``check['result'] = 1``; the (mutated) record is returned by run().
    """

    def __init__(self, check):
        self.check = check
        # ``parameter`` carries the handler selector under 'type' plus
        # handler-specific settings such as 'server', 'zone' or 'dir'.
        self.param_dict = json.loads(check['parameter'])
        return

    def run(self):
        """Dispatch to check_<type>() and return the updated record."""
        # NOTE(review): an unknown 'type' raises AttributeError here —
        # confirm callers treat that as a configuration error.
        func_name = 'check_' + self.param_dict['type']
        getattr(self, func_name)()
        return self.check

    def check_ssh(self):
        # Placeholder: the 'ssh' check currently performs no work and passes.
        return

    def check_rndc(self):
        """Query `rndc status` on the remote server; any stderr output fails."""
        p = subprocess.Popen(["{timeout} 2s {rndc} -k {remote_key} -p {port} -s {server} status".format(
            timeout=config['dns']['exec']['timeout'],
            rndc=config['dns']['exec']['rndc'],
            remote_key=config['dns']['files']['rndc_remote_key'],
            port=config['dns']['system']['rndc_remote_port'],
            server=self.param_dict['server'])],
            shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        # Any text on stderr marks the check as failed.
        # NOTE(review): stdout is piped but never read; large stdout could
        # block the child process — verify rndc output stays small.
        err = ''
        for line in p.stderr.readlines():
            err += str(line.strip().decode("utf-8"))
        if err:
            self.check['message'] = err
            self.check['result'] = 1
        return

    def check_directory(self):
        """Verify the configured directory is writable (create then remove a file)."""
        tmpfile = self.param_dict['dir'] + '/check_health_file.tmp'
        try:
            f = open(tmpfile, 'w')
        except:
            self.check['message'] = 'Failed to create file'
            self.check['result'] = 1
            return
        f.close()
        # Remove via the configured `rm` binary; a non-zero exit code fails.
        p = subprocess.call([config['dns']['exec']['rm'], tmpfile], stderr=subprocess.PIPE)
        if p:
            self.check['message'] = 'Failed to remove file'
            self.check['result'] = 1
        return

    def check_transfer(self):
        """Attempt an AXFR zone transfer with dig and scan for failure markers."""
        p = subprocess.Popen(['{dig} +noedns +time=2 AXFR {zone} @{server}'.format(
            dig=config['dns']['exec']['dig'],
            zone=self.param_dict['zone'],
            server=self.param_dict['server'])],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
        )
        # Scan dig's stdout for known failure phrases; first match wins.
        for line in p.stdout.readlines():
            line = str(line.strip().decode("utf-8"))
            if re.search(r'connection refused', line, re.IGNORECASE):
                self.check['message'] = 'connection refused'
                self.check['result'] = 1
                break
            elif re.search(r'connection timed out', line, re.IGNORECASE):
                self.check['message'] = 'connection timed out'
                self.check['result'] = 1
                break
            elif re.search(r'Transfer failed', line, re.IGNORECASE):
                self.check['message'] = 'Transfer failed'
                self.check['result'] = 1
                break
        return
| mit |
brianrodri/oppia | core/domain/opportunity_services.py | 2 | 35551 | # coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on opportunity models."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import logging
from constants import constants
from core.domain import exp_fetchers
from core.domain import opportunity_domain
from core.domain import question_fetchers
from core.domain import story_fetchers
from core.domain import topic_fetchers
from core.platform import models
import utils
(opportunity_models,) = models.Registry.import_models(
[models.NAMES.opportunity])
# NOTE TO DEVELOPERS: The functions:
# - delete_all_exploration_opportunity_summary_models()
# - delete_all_skill_opportunity_models()
# were removed in #13021 as part of the migration to Apache Beam. Please refer
# to that PR if you need to reinstate them.
def is_exploration_available_for_contribution(exp_id):
    """Checks whether a given exploration id belongs to a curated list of
    exploration i.e, whether it's used as the chapter of any story.

    Args:
        exp_id: str. The id of the exploration which is needed to be checked.

    Returns:
        bool. Whether the given exp_id belongs to the curated explorations.
    """
    model = opportunity_models.ExplorationOpportunitySummaryModel.get(
        exp_id, strict=False)
    # An opportunity model exists only for curated (story-linked)
    # explorations, so its presence answers the question directly.
    return model is not None
def get_exploration_opportunity_summary_from_model(model):
    """Returns the ExplorationOpportunitySummary object out of the model.

    Args:
        model: ExplorationOpportunitySummaryModel. The exploration opportunity
            summary model.

    Returns:
        ExplorationOpportunitySummary. The corresponding
        ExplorationOpportunitySummary object.
    """
    # Every supported audio language should appear in exactly one of the
    # model's three language-code lists; codes missing from all of them are
    # logged and treated as translations that have not been started yet.
    covered_language_codes = set(
        model.incomplete_translation_language_codes +
        model.language_codes_needing_voice_artists +
        model.language_codes_with_assigned_voice_artists)
    supported_language_codes = {
        language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES}
    missing_language_codes = list(
        supported_language_codes - covered_language_codes)
    if missing_language_codes:
        logging.info(
            'Missing language codes %s in exploration opportunity model with '
            'id %s' % (missing_language_codes, model.id))

    incomplete_language_codes = (
        model.incomplete_translation_language_codes + missing_language_codes)
    return opportunity_domain.ExplorationOpportunitySummary(
        model.id, model.topic_id, model.topic_name, model.story_id,
        model.story_title, model.chapter_title, model.content_count,
        incomplete_language_codes, model.translation_counts,
        model.language_codes_needing_voice_artists,
        model.language_codes_with_assigned_voice_artists)
def _save_multi_exploration_opportunity_summary(
        exploration_opportunity_summary_list):
    """Stores multiple ExplorationOpportunitySummary objects into the
    datastore as ExplorationOpportunitySummaryModel instances.

    Args:
        exploration_opportunity_summary_list: list(
            ExplorationOpportunitySummary). A list of exploration opportunity
            summary objects.
    """
    models_to_put = [
        opportunity_models.ExplorationOpportunitySummaryModel(
            id=summary.id,
            topic_id=summary.topic_id,
            topic_name=summary.topic_name,
            story_id=summary.story_id,
            story_title=summary.story_title,
            chapter_title=summary.chapter_title,
            content_count=summary.content_count,
            incomplete_translation_language_codes=(
                summary.incomplete_translation_language_codes),
            translation_counts=summary.translation_counts,
            language_codes_needing_voice_artists=(
                summary.language_codes_needing_voice_artists),
            language_codes_with_assigned_voice_artists=(
                summary.language_codes_with_assigned_voice_artists))
        for summary in exploration_opportunity_summary_list]
    # Refresh the created_on/last_updated fields before the bulk write.
    (
        opportunity_models.ExplorationOpportunitySummaryModel
        .update_timestamps_multi(models_to_put))
    opportunity_models.ExplorationOpportunitySummaryModel.put_multi(
        models_to_put)
def _create_exploration_opportunity_summary(topic, story, exploration):
    """Create an ExplorationOpportunitySummary object with the given topic,
    story and exploration object.

    Args:
        topic: Topic. The topic object to which the opportunity belongs.
        story: Story. The story object to which the opportunity belongs.
        exploration: Exploration. The exploration object to which the
            opportunity belongs.

    Returns:
        ExplorationOpportunitySummary. The exploration opportunity summary
        object.
    """
    audio_language_codes = {
        language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES}
    complete_translation_languages = set(
        exploration.get_languages_with_complete_translation())
    incomplete_translation_language_codes = (
        audio_language_codes - complete_translation_languages)
    # Copy the set: the previous code aliased complete_translation_languages,
    # so the .add() below would silently mutate both sets.
    language_codes_needing_voice_artists = set(complete_translation_languages)
    if exploration.language_code in incomplete_translation_language_codes:
        # Removing exploration language from incomplete translation
        # languages list as exploration does not need any translation in
        # its own language.
        incomplete_translation_language_codes.discard(
            exploration.language_code)
        # Adding exploration language to voiceover required languages
        # list as exploration can be voiceovered in it's own language.
        language_codes_needing_voice_artists.add(exploration.language_code)
    content_count = exploration.get_content_count()
    translation_counts = exploration.get_translation_counts()
    story_node = story.story_contents.get_node_with_corresponding_exp_id(
        exploration.id)

    # TODO(#7376): Once the voiceover application functionality is
    # implemented change this method such that it also populates the
    # language_codes_with_assigned_voice_artists with the required data.
    exploration_opportunity_summary = (
        opportunity_domain.ExplorationOpportunitySummary(
            exploration.id, topic.id, topic.name, story.id, story.title,
            story_node.title, content_count,
            list(incomplete_translation_language_codes), translation_counts,
            list(language_codes_needing_voice_artists), []))

    return exploration_opportunity_summary
def add_new_exploration_opportunities(story_id, exp_ids):
    """Adds new exploration opportunities into the datastore.

    Args:
        story_id: str. ID of the story.
        exp_ids: list(str). A list of exploration ids for which new
            opportunities are to be created. All exp_ids must be part of the
            given story.
    """
    story_object = story_fetchers.get_story_by_id(story_id)
    corresponding_topic = topic_fetchers.get_topic_by_id(
        story_object.corresponding_topic_id)
    # The shared helper performs the actual model creation.
    _create_exploration_opportunities(
        story_object, corresponding_topic, exp_ids)
def create_exploration_opportunities_for_story(story_id, topic_id):
    """Creates exploration opportunities corresponding to the supplied
    published story ID iff the topic linked to the story is published.

    Args:
        story_id: str. The ID of the story domain object.
        topic_id: str. The ID of the topic domain object corresponding to the
            supplied story.

    Raises:
        Exception. A topic with the given ID doesn't exist.
        Exception. The topic rights could not be found.
    """
    story = story_fetchers.get_story_by_id(story_id)
    topic = topic_fetchers.get_topic_by_id(topic_id)
    topic_rights = topic_fetchers.get_topic_rights(topic.id)
    if not topic_rights.topic_is_published:
        # Opportunities are only surfaced for published topics.
        return
    linked_exp_ids = story.story_contents.get_all_linked_exp_ids()
    _create_exploration_opportunities(story, topic, linked_exp_ids)
def create_exploration_opportunities_for_topic(topic_id):
    """Creates exploration opportunities corresponding to each of the supplied
    published topic's published stories.

    Args:
        topic_id: str. The ID of the topic domain object.
    """
    topic = topic_fetchers.get_topic_by_id(topic_id)
    for reference in topic.get_all_story_references():
        # Skip unpublished stories and references to deleted stories.
        if not reference.story_is_published:
            continue
        story = story_fetchers.get_story_by_id(
            reference.story_id, strict=False)
        if story is None:
            continue
        linked_exp_ids = story.story_contents.get_all_linked_exp_ids()
        _create_exploration_opportunities(story, topic, linked_exp_ids)
def _create_exploration_opportunities(story, topic, exp_ids):
    """Creates new exploration opportunities corresponding to the supplied
    story, topic, and exploration IDs.

    Args:
        story: Story. The story domain object corresponding to the exploration
            opportunities.
        topic: Topic. The topic domain object corresponding to the exploration
            opportunities.
        exp_ids: list(str). A list of exploration ids for which new
            opportunities are to be created. All exp_ids must be part of the
            given story.
    """
    explorations = exp_fetchers.get_multiple_explorations_by_id(exp_ids)
    summaries = [
        _create_exploration_opportunity_summary(topic, story, exploration)
        for exploration in explorations.values()]
    _save_multi_exploration_opportunity_summary(summaries)
def update_opportunity_with_updated_exploration(exp_id):
    """Updates the opportunities models with the changes made in the
    exploration.

    Args:
        exp_id: str. The exploration id which is also the id of the opportunity
            model.
    """
    exploration = exp_fetchers.get_exploration_by_id(exp_id)
    complete_translation_languages = (
        exploration.get_languages_with_complete_translation())

    model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id)
    summary = get_exploration_opportunity_summary_from_model(model)
    summary.content_count = exploration.get_content_count()
    summary.translation_counts = exploration.get_translation_counts()
    # Languages with a now-complete translation no longer count as
    # incomplete.
    summary.incomplete_translation_language_codes = (
        utils.compute_list_difference(
            summary.incomplete_translation_language_codes,
            complete_translation_languages))

    new_languages_for_voiceover = (
        set(complete_translation_languages) -
        set(summary.language_codes_with_assigned_voice_artists))
    # We only append new languages to language_codes_needing_voice_artists(
    # instead of adding all of the complete_translation_language_list), as the
    # complete translation languages list will be dynamic based on some
    # content text are changed, where as the voiceover is a long term work and
    # we can allow a voice_artist to work for an exploration which needs a
    # little bit update in text translation.
    summary.language_codes_needing_voice_artists = list(
        set(summary.language_codes_needing_voice_artists) |
        new_languages_for_voiceover)
    summary.validate()
    _save_multi_exploration_opportunity_summary([summary])
def update_exploration_opportunities_with_story_changes(story, exp_ids):
    """Updates the opportunities models with the story changes.

    Args:
        story: Story. The new story object.
        exp_ids: list(str). A list of exploration IDs whose exploration
            opportunity summary models need to be updated.
    """
    models = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_multi(
            exp_ids))
    updated_summaries = []
    for model in models:
        summary = get_exploration_opportunity_summary_from_model(model)
        # Refresh the story title and the chapter (node) title.
        summary.story_title = story.title
        corresponding_node = (
            story.story_contents.get_node_with_corresponding_exp_id(
                summary.id))
        summary.chapter_title = corresponding_node.title
        summary.validate()
        updated_summaries.append(summary)
    _save_multi_exploration_opportunity_summary(updated_summaries)
def update_exploration_voiceover_opportunities(
        exp_id, assigned_voice_artist_in_language_code):
    """Updates the language_codes_with_assigned_voice_artists of exploration
    opportunity model.

    Args:
        exp_id: str. The ID of the exploration.
        assigned_voice_artist_in_language_code: str. The language code in which
            a voice artist is assigned to the exploration.
    """
    model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id)
    summary = get_exploration_opportunity_summary_from_model(model)
    # Move the language from "needs a voice artist" to "voice artist
    # assigned".
    summary.language_codes_needing_voice_artists.remove(
        assigned_voice_artist_in_language_code)
    summary.language_codes_with_assigned_voice_artists.append(
        assigned_voice_artist_in_language_code)
    summary.validate()
    _save_multi_exploration_opportunity_summary([summary])
def delete_exploration_opportunities(exp_ids):
    """Deletes the ExplorationOpportunitySummaryModel models corresponding to
    the given exp_ids.

    Args:
        exp_ids: list(str). A list of exploration IDs whose opportunity summary
            models are to be deleted.
    """
    models = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_multi(
            exp_ids))
    # get_multi() yields None for ids with no model; drop those before the
    # bulk delete.
    existing_models = [model for model in models if model is not None]
    opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
        existing_models)
def delete_exploration_opportunities_corresponding_to_topic(topic_id):
    """Deletes the ExplorationOpportunitySummaryModel models which correspond
    to the given topic_id.

    Args:
        topic_id: str. The ID of the topic.
    """
    models_to_delete = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
            topic_id))
    opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
        models_to_delete)
def get_exploration_opportunity_ids_corresponding_to_topic(topic_id):
    """Returns the exploration IDs corresponding to the
    ExplorationOpportunitySummaryModels that are associated with the supplied
    topic ID.

    Args:
        topic_id: str. The ID of the topic.

    Returns:
        list(str). The exploration IDs.
    """
    topic_models = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
            topic_id))
    # Opportunity model ids double as exploration ids.
    return [model.id for model in topic_models if model is not None]
def update_exploration_opportunities(old_story, new_story):
    """Updates the opportunities models according to the changes made in the
    story.

    Args:
        old_story: Story. The old story object which is now updated.
        new_story: Story. The new story object.
    """
    model_ids_need_update = set()
    exp_ids_in_old_story = old_story.story_contents.get_all_linked_exp_ids()
    exp_ids_in_new_story = new_story.story_contents.get_all_linked_exp_ids()
    new_added_exp_ids = set(exp_ids_in_new_story) - set(exp_ids_in_old_story)
    deleted_exp_ids = set(exp_ids_in_old_story) - set(exp_ids_in_new_story)
    unchanged_exp_ids = set(exp_ids_in_new_story) - new_added_exp_ids
    if old_story.title != new_story.title:
        # A story-title change affects every retained opportunity.
        model_ids_need_update |= unchanged_exp_ids
    else:
        # Otherwise only chapters whose node title changed need updating.
        for exp_id in unchanged_exp_ids:
            new_node = (
                new_story.story_contents.get_node_with_corresponding_exp_id(
                    exp_id))
            old_node = (
                old_story.story_contents.get_node_with_corresponding_exp_id(
                    exp_id))
            if old_node.title != new_node.title:
                model_ids_need_update.add(exp_id)

    update_exploration_opportunities_with_story_changes(
        new_story, list(model_ids_need_update))
    # Pass a list, matching add_new_exploration_opportunities' documented
    # exp_ids contract (previously a set was passed).
    add_new_exploration_opportunities(new_story.id, list(new_added_exp_ids))
    delete_exploration_opportunities(list(deleted_exp_ids))
def delete_exp_opportunities_corresponding_to_story(story_id):
    """Deletes the ExplorationOpportunitySummaryModel models which correspond
    to the given story_id.

    Args:
        story_id: str. The ID of the story.
    """
    model_class = opportunity_models.ExplorationOpportunitySummaryModel
    matching_models = model_class.get_all().filter(
        model_class.story_id == story_id)
    model_class.delete_multi(matching_models)
def get_translation_opportunities(language_code, cursor):
    """Returns a list of opportunities available for translation in a specific
    language.

    Args:
        cursor: str or None. If provided, the list of returned entities
            starts from this datastore cursor. Otherwise, the returned
            entities start from the beginning of the full list of entities.
        language_code: str. The language for which translation opportunities
            should be fetched.

    Returns:
        3-tuple(opportunities, cursor, more). where:
            opportunities: list(ExplorationOpportunitySummary). A list of
                ExplorationOpportunitySummary domain objects.
            cursor: str or None. A query cursor pointing to the next batch of
                results. If there are no more results, this might be None.
            more: bool. If True, there are (probably) more results after this
                batch. If False, there are no further results after this batch.
    """
    summary_models, cursor, more = (
        opportunity_models
        .ExplorationOpportunitySummaryModel.get_all_translation_opportunities(
            constants.OPPORTUNITIES_PAGE_SIZE, cursor, language_code))
    opportunities = [
        get_exploration_opportunity_summary_from_model(summary_model)
        for summary_model in summary_models]
    return opportunities, cursor, more
def get_voiceover_opportunities(language_code, cursor):
    """Returns a list of opportunities available for voiceover in a specific
    language.

    Args:
        cursor: str or None. If provided, the list of returned entities
            starts from this datastore cursor. Otherwise, the returned
            entities start from the beginning of the full list of entities.
        language_code: str. The language for which voiceover opportunities
            are to be fetched.

    Returns:
        3-tuple(opportunities, cursor, more). where:
            opportunities: list(ExplorationOpportunitySummary). A list of
                ExplorationOpportunitySummary domain objects.
            cursor: str or None. A query cursor pointing to the next
                batch of results. If there are no more results, this might
                be None.
            more: bool. If True, there are (probably) more results after
                this batch. If False, there are no further results after
                this batch.
    """
    summary_models, cursor, more = (
        opportunity_models.ExplorationOpportunitySummaryModel
        .get_all_voiceover_opportunities(
            constants.OPPORTUNITIES_PAGE_SIZE, cursor, language_code))
    opportunities = [
        get_exploration_opportunity_summary_from_model(summary_model)
        for summary_model in summary_models]
    return opportunities, cursor, more
def get_exploration_opportunity_summaries_by_ids(ids):
    """Returns a dict with key as id and value representing
    ExplorationOpportunitySummary objects corresponding to the opportunity id.

    Args:
        ids: list(str). A list of opportunity ids.

    Returns:
        dict(str, ExplorationOpportunitySummary|None). A dict with key as the
        opportunity id and values representing the ExplorationOpportunitySummary
        domain objects corresponding to the opportunity id if exist else None.
    """
    # Start with every requested id mapped to None; found models overwrite
    # their entries below.
    opportunities = dict.fromkeys(ids)
    summary_models = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_multi(ids))
    for summary_model in summary_models:
        if summary_model is None:
            continue
        opportunities[summary_model.id] = (
            get_exploration_opportunity_summary_from_model(summary_model))
    return opportunities
def update_opportunities_with_new_topic_name(topic_id, topic_name):
    """Updates the exploration opportunity summary models with new topic name.

    Args:
        topic_id: str. The corresponding topic id of the opportunity.
        topic_name: str. The new topic name.
    """
    topic_models = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
            topic_id))
    updated_summaries = []
    for model in topic_models:
        summary = get_exploration_opportunity_summary_from_model(model)
        summary.topic_name = topic_name
        summary.validate()
        updated_summaries.append(summary)
    _save_multi_exploration_opportunity_summary(updated_summaries)
def get_skill_opportunity_from_model(model):
    """Builds a SkillOpportunity domain object from a SkillOpportunityModel.

    Args:
        model: SkillOpportunityModel. The skill opportunity model.

    Returns:
        SkillOpportunity. The corresponding SkillOpportunity object.
    """
    skill_opportunity = opportunity_domain.SkillOpportunity(
        model.id, model.skill_description, model.question_count)
    return skill_opportunity
def get_skill_opportunities(cursor):
    """Returns a list of skill opportunities available for questions.

    Args:
        cursor: str or None. If provided, the list of returned entities
            starts from this datastore cursor. Otherwise, the returned
            entities start from the beginning of the full list of entities.

    Returns:
        3-tuple(opportunities, cursor, more). where:
            opportunities: list(SkillOpportunity). A list of SkillOpportunity
                domain objects.
            cursor: str or None. A query cursor pointing to the next
                batch of results. If there are no more results, this might
                be None.
            more: bool. If True, there are (probably) more results after
                this batch. If False, there are no further results after
                this batch.
    """
    skill_opportunity_models, cursor, more = (
        opportunity_models.SkillOpportunityModel
        .get_skill_opportunities(constants.OPPORTUNITIES_PAGE_SIZE, cursor))
    opportunities = [
        get_skill_opportunity_from_model(model)
        for model in skill_opportunity_models]
    return opportunities, cursor, more
def get_skill_opportunities_by_ids(ids):
    """Returns a dict mapping each supplied opportunity id to its
    SkillOpportunity domain object.

    Args:
        ids: list(str). A list of the opportunity ids.

    Returns:
        dict(str, SkillOpportunity|None). A dict with key as the
        opportunity id and values representing the SkillOpportunity
        domain objects corresponding to the opportunity id if exist else None.
    """
    # Start with every requested id mapped to None; found models overwrite
    # their entries below.
    opportunities = dict.fromkeys(ids)
    skill_opportunity_models = (
        opportunity_models.SkillOpportunityModel.get_multi(ids))
    for model in skill_opportunity_models:
        if model is None:
            continue
        opportunities[model.id] = get_skill_opportunity_from_model(model)
    return opportunities
def create_skill_opportunity(skill_id, skill_description):
    """Creates a SkillOpportunityModel entity in the datastore.

    Args:
        skill_id: str. The skill_id of the opportunity.
        skill_description: str. The skill_description of the opportunity.

    Raises:
        Exception. If a SkillOpportunityModel corresponding to the supplied
            skill_id already exists.
    """
    existing_model = (
        opportunity_models.SkillOpportunityModel.get_by_id(skill_id))
    if existing_model is not None:
        raise Exception(
            'SkillOpportunity corresponding to skill ID %s already exists.' % (
                skill_id))

    # The initial question count is the number of questions already linked
    # to the skill.
    questions, _ = (
        question_fetchers.get_questions_and_skill_descriptions_by_skill_ids(
            constants.MAX_QUESTIONS_PER_SKILL, [skill_id], 0))
    _save_skill_opportunities([opportunity_domain.SkillOpportunity(
        skill_id=skill_id,
        skill_description=skill_description,
        question_count=len(questions))])
def _save_skill_opportunities(skill_opportunities):
    """Saves SkillOpportunity domain objects into datastore as
    SkillOpportunityModel objects.

    Args:
        skill_opportunities: list(SkillOpportunity). A list of SkillOpportunity
            domain objects.
    """
    models_to_put = []
    for skill_opportunity in skill_opportunities:
        # Validate each domain object before anything is written.
        skill_opportunity.validate()
        models_to_put.append(opportunity_models.SkillOpportunityModel(
            id=skill_opportunity.id,
            skill_description=skill_opportunity.skill_description,
            question_count=skill_opportunity.question_count,
        ))
    opportunity_models.SkillOpportunityModel.update_timestamps_multi(
        models_to_put)
    opportunity_models.SkillOpportunityModel.put_multi(models_to_put)
def update_skill_opportunity_skill_description(skill_id, new_description):
    """Updates the skill_description of the SkillOpportunityModel with
    new_description.

    Args:
        skill_id: str. The corresponding skill_id of the opportunity.
        new_description: str. The new skill_description.
    """
    skill_opportunity = _get_skill_opportunity(skill_id)
    if skill_opportunity is None:
        # No opportunity exists for this skill; nothing to update.
        return
    skill_opportunity.skill_description = new_description
    _save_skill_opportunities([skill_opportunity])
def _get_skill_opportunity(skill_id):
    """Returns the SkillOpportunity domain object representing a
    SkillOpportunityModel with the supplied skill_id in the datastore.

    Args:
        skill_id: str. The corresponding skill_id of the opportunity.

    Returns:
        SkillOpportunity or None. The domain object representing a
        SkillOpportunity with the supplied skill_id, or None if it does not
        exist.
    """
    model = opportunity_models.SkillOpportunityModel.get_by_id(skill_id)
    if model is None:
        return None
    return get_skill_opportunity_from_model(model)
def delete_skill_opportunity(skill_id):
    """Deletes the SkillOpportunityModel corresponding to the supplied
    skill_id, if it exists.

    Args:
        skill_id: str. The skill_id corresponding to the to-be-deleted
            SkillOpportunityModel.
    """
    model = opportunity_models.SkillOpportunityModel.get_by_id(skill_id)
    if model is not None:
        opportunity_models.SkillOpportunityModel.delete(model)
def increment_question_counts(skill_ids, delta):
    """Increments question_count(s) of SkillOpportunityModel(s) with
    corresponding skill_ids.

    Args:
        skill_ids: list(str). A list of skill_ids corresponding to
            SkillOpportunityModel(s).
        delta: int. The delta for which to increment each question_count.
    """
    _save_skill_opportunities(
        _get_skill_opportunities_with_updated_question_counts(
            skill_ids, delta))
def update_skill_opportunities_on_question_linked_skills_change(
        old_skill_ids, new_skill_ids):
    """Updates question_count(s) of SkillOpportunityModel(s) corresponding to
    the change in linked skill IDs for a question from old_skill_ids to
    new_skill_ids, e.g. if skill_id1 is in old_skill_ids, but not in
    new_skill_ids, the question_count of the SkillOpportunityModel for
    skill_id1 would be decremented.

    NOTE: Since this method is updating the question_counts based on the
    change of skill_ids from old_skill_ids to new_skill_ids, the input
    skill_id lists must be related.

    Args:
        old_skill_ids: list(str). A list of old skill_id(s).
        new_skill_ids: list(str). A list of new skill_id(s).
    """
    added_skill_ids = set(new_skill_ids) - set(old_skill_ids)
    removed_skill_ids = set(old_skill_ids) - set(new_skill_ids)
    # Newly linked skills gain a question; unlinked skills lose one.
    updated_opportunities = (
        _get_skill_opportunities_with_updated_question_counts(
            added_skill_ids, 1) +
        _get_skill_opportunities_with_updated_question_counts(
            removed_skill_ids, -1))
    _save_skill_opportunities(updated_opportunities)
def _get_skill_opportunities_with_updated_question_counts(skill_ids, delta):
    """Returns a list of SkillOpportunities with corresponding skill_ids
    with question_count(s) updated by delta.

    Args:
        skill_ids: iterable(str). The IDs of the matching SkillOpportunityModels
            in the datastore.
        delta: int. The delta by which to update each question_count (can be
            negative).

    Returns:
        list(SkillOpportunity). The updated SkillOpportunities.
    """
    skill_opportunity_models = (
        opportunity_models.SkillOpportunityModel.get_multi(skill_ids))
    updated_opportunities = []
    for model in skill_opportunity_models:
        # get_multi() yields None for missing ids; skip those silently.
        if model is None:
            continue
        opportunity = get_skill_opportunity_from_model(model)
        opportunity.question_count += delta
        updated_opportunities.append(opportunity)
    return updated_opportunities
def regenerate_opportunities_related_to_topic(
        topic_id, delete_existing_opportunities=False):
    """Regenerates opportunity models which belong to a given topic.

    Args:
        topic_id: str. The ID of the topic.
        delete_existing_opportunities: bool. Whether to delete all the existing
            opportunities related to the given topic.

    Returns:
        int. The number of opportunity models created.

    Raises:
        Exception. Any story or exploration referenced by the topic is
            missing from the datastore.
    """
    if delete_existing_opportunities:
        existing_models = (
            opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
                topic_id))
        opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
            existing_models)

    topic = topic_fetchers.get_topic_by_id(topic_id)
    story_ids = topic.get_canonical_story_ids()
    stories = story_fetchers.get_stories_by_ids(story_ids)
    exp_ids = []
    missing_story_ids = []
    for story_id, story in zip(story_ids, stories):
        if story is None:
            missing_story_ids.append(story_id)
        else:
            exp_ids.extend(story.story_contents.get_all_linked_exp_ids())

    exp_ids_to_exp = exp_fetchers.get_multiple_explorations_by_id(
        exp_ids, strict=False)
    missing_exp_ids = set(exp_ids) - set(exp_ids_to_exp.keys())
    # Fail loudly if any referenced story or exploration no longer exists,
    # rather than regenerating a partial set of opportunities.
    if missing_exp_ids or missing_story_ids:
        raise Exception(
            'Failed to regenerate opportunities for topic id: %s, '
            'missing_exp_with_ids: %s, missing_story_with_ids: %s' % (
                topic_id, list(missing_exp_ids), missing_story_ids))

    summaries = [
        _create_exploration_opportunity_summary(
            topic, story, exp_ids_to_exp[exp_id])
        for story in stories
        for exp_id in story.story_contents.get_all_linked_exp_ids()]
    _save_multi_exploration_opportunity_summary(summaries)
    return len(summaries)
| apache-2.0 |
jeanmask/opps | opps/contrib/feeds/views.py | 4 | 5705 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.contrib.sites.models import get_current_site
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils import feedgenerator
from opps.containers.models import Container
from opps.channels.models import Channel
OPPS_FEED_FILTER_DEFAULT = getattr(settings, 'OPPS_FEED_FILTER_DEFAULT', {})
OPPS_FEED_EXCLUDE_DEFAULT = getattr(settings, 'OPPS_FEED_EXCLUDE_DEFAULT', {})
class ItemFeed(Feed):
    """Base syndication feed for Container items.

    Subclasses provide the item queryset; this class maps a Container
    instance onto the RSS/Atom item fields (title, dates, link, enclosure,
    categories) and implements querystring-driven filtering.
    """

    feed_type = feedgenerator.Rss201rev2Feed
    description_template = 'articles/feed_item_description.html'

    # Placeholder enclosure metadata: the actual image size is unknown
    # here, but RSS requires a length attribute to be present.
    item_enclosure_length = 1
    item_enclosure_mime_type = "image/jpeg"

    def item_categories(self, obj):
        """Return the channel name plus the item's tags as categories."""
        cats = []
        if obj.channel:
            cats.append(obj.channel.name)
        if getattr(obj, 'tags', None) is not None:
            cats.extend(obj.get_tags() or [])
        return cats

    def item_title(self, item):
        return item.title

    def item_pubdate(self, item):
        return item.date_available

    def item_updateddate(self, item):
        return item.date_update

    def item_link(self, item):
        return item.get_absolute_url()

    def item_enclosure_url(self, item):
        """Return an absolute URL for the item's main image, if any."""
        if item.main_image:
            if item.main_image.archive:
                i_url = item.main_image.archive.url
            elif item.main_image.archive_link:
                i_url = item.main_image.archive_link
            else:
                i_url = item.main_image.image_url()
            m_url = getattr(settings, 'MEDIA_URL', '')
            # Prefix relative media paths with the current site's domain so
            # feed readers get an absolute URL.
            if not m_url.startswith('http') and not i_url.startswith('http'):
                i_url = "http://" + self.site.domain + i_url
            return i_url

    def build_filters(self):
        """Parse "filter"/"exclude" JSON querystring args into ORM kwargs.

        Returns:
            dict. Contains "filter" and "exclude" keys, each mapping ORM
            lookups to values, with request-supplied values merged over the
            configured defaults.
        """
        if not hasattr(self, 'request'):
            return {}
        default = {
            "filter": OPPS_FEED_FILTER_DEFAULT,
            "exclude": OPPS_FEED_EXCLUDE_DEFAULT, }
        data = {"filter": {}, "exclude": {}, }
        r_data = self.request.GET.dict()
        for k, v in r_data.items():
            if k.startswith(('filter', 'exclude')):
                v = json.loads(v)
                for lookup, value in v.items():
                    # "__in" lookups arrive as comma-separated strings.
                    if lookup.endswith('__in'):
                        v[lookup] = value.split(',')
                data[k].update(v)
        # Merge defaults with request data; request values win on key
        # clashes. (The previous dict(a.items() + b.items()) form only
        # works on Python 2, where items() returns lists.)
        for k, v in data.items():
            merged = dict(default[k])
            merged.update(v)
            data[k] = merged
        return data
class ContainerFeed(ItemFeed):
    """Site-wide RSS feed of published containers."""

    link = "/rss"

    def __init__(self, child_class=False):
        # Optional container subtype filter (e.g. only articles).
        self.child_class = child_class

    def __call__(self, request, *args, **kwargs):
        # Cache the site and request for use by title/description/items.
        self.site = get_current_site(request)
        self.request = request
        return super(ContainerFeed, self).__call__(request, *args, **kwargs)

    def title(self):
        return _("{0}'s news".format(self.site.name))

    def description(self):
        return _("Latest news on {0}'s".format(self.site.name))

    def items(self):
        """Return up to 40 newest published containers for the feed."""
        queryset = Container.objects.filter(
            site=self.site,
            date_available__lte=timezone.now(),
            published=True,
            channel__include_in_main_rss=True,
            channel__published=True
        )
        if self.child_class:
            queryset = queryset.filter(child_class=self.child_class)
        queryset = queryset.exclude(child_class__in=['Mirror', 'Entry'])
        return queryset.order_by('-date_available')[:40]
class ChannelFeed(ItemFeed):
    """
    Items can be filtered using "filter" and "exclude" querystring args.
    examples:
    - get only entries with images
        rss?filter={"main_image__isnull": false}
    - exclude specific child_class
        rss?exclude={"child_class__in": "Album,Poll"}
    The format is json
    """

    def get_object(self, request, long_slug):
        """Resolve the channel for this feed and cache its descendants."""
        self.site = get_current_site(request)
        self.request = request
        channel = get_object_or_404(Channel,
                                    site=self.site,
                                    long_slug=long_slug)
        self.channel_descendants = channel.get_descendants(include_self=True)
        return channel

    def link(self, obj):
        return _("{0}RSS".format(obj.get_absolute_url()))

    def title(self, obj):
        return _(u"{0}'s news on channel {1}".format(self.site.name,
                                                     obj.name))

    def description(self, obj):
        return _(u"Latest news on {0}'s channel {1}".format(self.site.name,
                                                            obj.name))

    def items(self, obj):
        """Return up to 40 published containers in the channel subtree."""
        # Parse the querystring filters once; the previous code called
        # build_filters() twice, re-reading and re-parsing request JSON.
        query_filters = self.build_filters()
        filters = query_filters.get('filter', {})
        excludes = query_filters.get('exclude', {})
        channel_long_slugs = [
            children.long_slug for children in
            self.channel_descendants
        ]
        qs = Container.objects.filter(
            site=self.site,
            channel_long_slug__in=channel_long_slugs,
            date_available__lte=timezone.now(),
            published=True,
            **filters
        ).exclude(
            child_class__in=['Mirror', 'Entry'],
        ).exclude(
            **excludes
        )
        qs = qs.order_by(
            '-date_available'
        ).select_related('publisher')[:40]
        return qs
class ContainerAtomFeed(ContainerFeed):
    """Atom variant of the site-wide container feed."""
    link = "/atom"
    feed_type = feedgenerator.Atom1Feed
class ChannelAtomFeed(ChannelFeed):
    """Atom variant of the per-channel feed."""
    feed_type = feedgenerator.Atom1Feed
| mit |
pbrady/sympy | sympy/utilities/decorator.py | 3 | 5889 | """Useful utility decorators. """
from __future__ import print_function, division
import sys
import types
import inspect
from sympy.core.decorators import wraps
from sympy.core.compatibility import class_types, get_function_globals, get_function_name, iterable
def threaded_factory(func, use_add):
    """A factory for ``threaded`` decorators.

    Wraps ``func`` so that it is mapped over the elements of matrices and
    iterable containers, over the args of ``Add`` (when ``use_add`` is
    true) and over both sides of relational expressions.
    """
    from sympy.core import sympify
    from sympy.matrices import Matrix

    @wraps(func)
    def threaded_func(expr, *args, **kwargs):
        # Matrices: apply entry-wise.
        if isinstance(expr, Matrix):
            return expr.applyfunc(lambda entry: func(entry, *args, **kwargs))

        # Generic iterables: rebuild the container from the mapped items,
        # falling back to the untouched object if it cannot be rebuilt.
        if iterable(expr):
            try:
                return expr.__class__(
                    [func(item, *args, **kwargs) for item in expr])
            except TypeError:
                return expr

        # Anything else is treated as an expression.
        expr = sympify(expr)
        if use_add and expr.is_Add:
            return expr.__class__(
                *[func(term, *args, **kwargs) for term in expr.args])
        if expr.is_Relational:
            return expr.__class__(func(expr.lhs, *args, **kwargs),
                                  func(expr.rhs, *args, **kwargs))
        return func(expr, *args, **kwargs)

    return threaded_func
def threaded(func):
    """Apply ``func`` to sub--elements of an object, including :class:`Add`.

    This decorator is intended to make it uniformly possible to apply a
    function to all elements of composite objects, e.g. matrices, lists,
    tuples and other iterable containers, or just expressions.

    Unlike :func:`xthreaded`, this variant also threads over the arguments
    of :class:`Add` instances.

    Functions using this decorator must have the following signature::

      @threaded
      def function(expr, *args, **kwargs):

    """
    return threaded_factory(func, True)
def xthreaded(func):
    """Apply ``func`` to sub--elements of an object, excluding :class:`Add`.

    This decorator is intended to make it uniformly possible to apply a
    function to all elements of composite objects, e.g. matrices, lists,
    tuples and other iterable containers, or just expressions.

    Unlike :func:`threaded`, this variant does NOT thread over the
    arguments of :class:`Add` instances.

    Functions using this decorator must have the following signature::

      @xthreaded
      def function(expr, *args, **kwargs):

    """
    return threaded_factory(func, False)
def conserve_mpmath_dps(func):
    """After the function finishes, resets the value of ``mpmath.mp.dps``
    to the value it had before the function was run.

    The wrapper forwards arbitrary positional and keyword arguments and
    passes the decorated function's return value through.  (The original
    wrapper accepted no arguments and discarded the result, silently
    breaking any decorated function that had parameters or a useful
    return value.)  The precision is restored even if ``func`` raises.
    """
    import functools
    import mpmath

    def func_wrapper(*args, **kwargs):
        # Snapshot the working precision and restore it unconditionally.
        dps = mpmath.mp.dps
        try:
            return func(*args, **kwargs)
        finally:
            mpmath.mp.dps = dps

    func_wrapper = functools.update_wrapper(func_wrapper, func)
    return func_wrapper
class no_attrs_in_subclass(object):
    """Don't 'inherit' certain attributes from a base class

    >>> from sympy.utilities.decorator import no_attrs_in_subclass

    >>> class A(object):
    ...     x = 'test'

    >>> A.x = no_attrs_in_subclass(A, A.x)

    >>> class B(A):
    ...     pass

    >>> hasattr(A, 'x')
    True
    >>> hasattr(B, 'x')
    False

    """
    def __init__(self, cls, f):
        # ``cls`` is the only class allowed to see the attribute; ``f`` is
        # the wrapped attribute (possibly itself a descriptor).
        self.cls = cls
        self.f = f

    def __get__(self, instance, owner=None):
        # Lookups through any class other than the owner raise, which makes
        # the attribute invisible (hasattr == False) on subclasses.
        if owner != self.cls:
            raise AttributeError
        if hasattr(self.f, '__get__'):
            # Delegate to the wrapped descriptor (e.g. plain functions).
            return self.f.__get__(instance, owner)
        return self.f
def doctest_depends_on(exe=None, modules=None, disable_viewers=None):
    """Adds metadata about the dependencies which need to be met for doctesting
    the docstrings of the decorated objects."""
    pyglet = False
    if modules is not None and 'pyglet' in modules:
        pyglet = True

    def depends_on_deco(fn):
        fn._doctest_depends_on = dict(exe=exe, modules=modules,
                                      disable_viewers=disable_viewers,
                                      pyglet=pyglet)

        # once we drop py2.5 support and use class decorators this evaluates
        # to True
        if inspect.isclass(fn):
            # Bug fix: the attribute name was misspelled
            # ``_doctest_depdends_on``, so the ``no_attrs_in_subclass``
            # wrapper never replaced the real attribute and subclasses
            # wrongly inherited the doctest metadata.
            fn._doctest_depends_on = no_attrs_in_subclass(
                fn, fn._doctest_depends_on)
        return fn
    return depends_on_deco
def public(obj):
    """
    Append ``obj``'s name to global ``__all__`` variable (call site).

    By using this decorator on functions or classes you achieve the same goal
    as by filling ``__all__`` variables manually, you just don't have to repeat
    yourself (object's name). You also know if object is public at definition
    site, not at some random location (where ``__all__`` was set).

    Note that in multiple decorator setup (in almost all cases) ``@public``
    decorator must be applied before any other decorators, because it relies
    on the pointer to object's global namespace. If you apply other decorators
    first, ``@public`` may end up modifying the wrong namespace.

    Examples
    ========

    >>> from sympy.utilities.decorator import public

    >>> __all__
    Traceback (most recent call last):
    ...
    NameError: name '__all__' is not defined

    >>> @public
    ... def some_function():
    ...     pass

    >>> __all__
    ['some_function']

    """
    if isinstance(obj, types.FunctionType):
        # Plain functions: read name and module namespace via the
        # compatibility helpers.
        ns = get_function_globals(obj)
        name = get_function_name(obj)
    elif isinstance(obj, (type(type), class_types)):
        # Classes (old- and new-style): look the module up in sys.modules.
        ns = sys.modules[obj.__module__].__dict__
        name = obj.__name__
    else:
        raise TypeError("expected a function or a class, got %s" % obj)

    # Create ``__all__`` on first use, then record the name.
    ns.setdefault("__all__", []).append(name)

    return obj
| bsd-3-clause |
jzuhone/spectral-cube | spectral_cube/ytcube.py | 5 | 11170 | from __future__ import print_function, absolute_import, division
import six
import os
import subprocess
import numpy as np
import time
from astropy.utils.console import ProgressBar
from astropy import log
import warnings
__all__ = ['ytCube']
class ytCube(object):
    """ Light wrapper of a yt object with ability to translate yt<->wcs
    coordinates """

    def __init__(self, cube, dataset, spectral_factor=1.0):
        # The originating spectral cube (kept for shape/statistics queries).
        self.cube = cube
        # WCS taken from the cube; used for world<->pixel transforms.
        self.wcs = cube.wcs
        # The yt dataset built from the cube (by ``to_yt``).
        self.dataset = dataset
        # Stretch factor applied to the spectral (3rd) axis in yt space.
        self.spectral_factor = spectral_factor

    def world2yt(self, world_coord, first_index=0):
        """
        Convert a position in world coordinates to the coordinates used by a
        yt dataset that has been generated using the ``to_yt`` method.

        Parameters
        ----------
        world_coord: `astropy.wcs.WCS.wcs_world2pix`-valid input
            The world coordinates
        first_index: 0 or 1
            The first index of the data. In python and yt, this should be
            zero, but for the FITS coordinates, use 1
        """
        yt_coord = self.wcs.wcs_world2pix([world_coord], first_index)[0]
        # Apply the spectral stretch about the pixel center (the 0.5 offset
        # keeps pixel centers fixed under the scaling).
        yt_coord[2] = (yt_coord[2] - 0.5)*self.spectral_factor+0.5
        return yt_coord

    def yt2world(self, yt_coord, first_index=0):
        """
        Convert a position in yt's coordinates to world coordinates from a
        yt dataset that has been generated using the ``to_yt`` method.

        Parameters
        ----------
        world_coord: `astropy.wcs.WCS.wcs_pix2world`-valid input
            The yt pixel coordinates to convert back to world coordinates
        first_index: 0 or 1
            The first index of the data. In python and yt, this should be
            zero, but for the FITS coordinates, use 1
        """
        yt_coord = np.array(yt_coord) # stripping off units
        # Invert the spectral stretch applied in ``world2yt``.
        yt_coord[2] = (yt_coord[2] - 0.5)/self.spectral_factor+0.5
        world_coord = self.wcs.wcs_pix2world([yt_coord], first_index)[0]
        return world_coord

    def quick_render_movie(self, outdir, size=256, nframes=30,
                           camera_angle=(0,0,1), north_vector=(0,0,1),
                           rot_vector=(1,0,0),
                           colormap='doom',
                           cmap_range='auto',
                           transfer_function='auto',
                           start_index=0,
                           image_prefix="",
                           output_filename='out.mp4',
                           log_scale=False,
                           rescale=True):
        """
        Create a movie rotating the cube 360 degrees from
        PP -> PV -> PP -> PV -> PP

        Parameters
        ----------
        outdir: str
            The output directory in which the individual image frames and the
            resulting output mp4 file should be stored
        size: int
            The size of the individual output frame in pixels (i.e., size=256
            will result in a 256x256 image)
        nframes: int
            The number of frames in the resulting movie
        camera_angle: 3-tuple
            The initial angle of the camera
        north_vector: 3-tuple
            The vector of 'north' in the data cube.  Default is coincident with
            the spectral axis
        rot_vector: 3-tuple
            The vector around which the camera will be rotated
        colormap: str
            A valid colormap.  See `yt.show_colormaps`
        transfer_function: 'auto' or `yt.visualization.volume_rendering.TransferFunction`
            Either 'auto' to use the colormap specified, or a valid
            TransferFunction instance
        log_scale: bool
            Should the colormap be log scaled?
        rescale: bool
            If True, the images will be rescaled to have a common 95th
            percentile brightness, which can help reduce flickering from having
            a single bright pixel in some projections
        start_index : int
            The number of the first image to save
        image_prefix : str
            A string to prepend to the image name for each image that is output
        output_filename : str
            The movie file name to output.  The suffix may affect the file type
            created.  Defaults to 'out.mp4'.  Will be placed in ``outdir``

        Returns
        -------
        """
        try:
            import yt
        except ImportError:
            raise ImportError("yt could not be imported. Cube renderings are not possible.")
        # Camera width: span the largest cube dimension.
        scale = np.max(self.cube.shape)

        if not os.path.exists(outdir):
            os.makedirs(outdir)
        elif not os.path.isdir(outdir):
            raise OSError("Output directory {0} exists and is not a directory.".format(outdir))

        if cmap_range == 'auto':
            # Default color range: 3-sigma floor up to the cube maximum.
            upper = self.cube.max().value
            lower = self.cube.std().value * 3
            cmap_range = [lower,upper]

        if transfer_function == 'auto':
            tfh = self.auto_transfer_function(cmap_range, log=log_scale)
            tfh.tf.map_to_colormap(cmap_range[0], cmap_range[1], colormap=colormap)
            tf = tfh.tf
        else:
            tf = transfer_function

        center = self.dataset.domain_center
        cam = self.dataset.h.camera(center, camera_angle, scale, size, tf,
                                    north_vector=north_vector, fields='flux')

        # Keep every frame in memory so they can be rescaled jointly below.
        im = cam.snapshot()
        images = [im]

        pb = ProgressBar(nframes)
        for ii,im in enumerate(cam.rotation(2 * np.pi, nframes,
                                            rot_vector=rot_vector)):
            images.append(im)
            im.write_png(os.path.join(outdir,"%s%04i.png" % (image_prefix,
                                                             ii+start_index)),
                         rescale=False)
            pb.update(ii+1)
        log.info("Rendering complete in {0}s".format(time.time() - pb._start_time))

        if rescale:
            # Re-writes the frames on disk with a common brightness scaling.
            _rescale_images(images, os.path.join(outdir, image_prefix))

        pipe = _make_movie(outdir, prefix=image_prefix,
                           filename=output_filename)

        return images

    def auto_transfer_function(self, cmap_range, log=False, colormap='doom',
                               **kwargs):
        # Build a TransferFunctionHelper over the 'flux' field bounded by
        # cmap_range.  NOTE(review): ``colormap`` and ``**kwargs`` are
        # currently unused here — the colormap is applied by the caller.
        from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper
        tfh = TransferFunctionHelper(self.dataset)
        tfh.set_field('flux')
        tfh.set_bounds(bounds=cmap_range)
        tfh.set_log(log)
        tfh.build_transfer_function()

        return tfh

    def quick_isocontour(self, level='3 sigma', title='', description='',
                         color_map='hot', color_log=False,
                         export_to='sketchfab', filename=None,
                         **kwargs):
        """
        Export isocontours to sketchfab

        Requires that you have an account on https://sketchfab.com and are
        logged in

        Parameters
        ----------
        level: str or float
            The level of the isocontours to create.  Can be specified as
            n-sigma with strings like '3.3 sigma' or '2 sigma' (there must be a
            space between the number and the word)
        title: str
            A title for the uploaded figure
        description: str
            A short description for the uploaded figure
        color_map: str
            Any valid colormap.  See `yt.show_colormaps`
        color_log: bool
            Whether the colormap should be log scaled.  With the default
            parameters, this has no effect.
        export_to: 'sketchfab', 'obj', 'ply'
            You can export to sketchfab, to a .obj file (and accompanying .mtl
            file), or a .ply file.  The latter two require ``filename``
            specification
        filename: None or str
            Optional - prefix for output filenames if ``export_to`` is 'obj',
            or the full filename when ``export_to`` is 'ply'.  Ignored for
            'sketchfab'
        kwargs: dict
            Keyword arguments are passed to the appropriate yt function

        Returns
        -------
        The result of the `yt.surface.export_sketchfab` function
        """
        if isinstance(level, six.string_types):
            # Parse "N sigma" strings into an absolute flux level.
            sigma = self.cube.std().value
            level = float(level.split()[0]) * sigma

        # NOTE(review): periodicity is forced on — presumably required by
        # yt's surface extraction; confirm this has no side effects for
        # other users of ``self.dataset``.
        self.dataset.periodicity = (True,True,True)
        surface = self.dataset.surface(self.dataset.all_data(),
                                       "flux",
                                       level)
        if export_to == 'sketchfab':
            if filename is not None:
                warnings.warn("sketchfab export does not expect a filename entry")
            return surface.export_sketchfab(title=title,
                                            description=description,
                                            color_map=color_map,
                                            color_log=color_log, **kwargs)
        elif export_to == 'obj':
            if filename is None:
                raise ValueError("If export_to is not 'sketchfab',"
                                 " a filename must be specified")
            surface.export_obj(filename, color_field='ones',
                               color_map=color_map, color_log=color_log,
                               **kwargs)
        elif export_to == 'ply':
            if filename is None:
                raise ValueError("If export_to is not 'sketchfab',"
                                 " a filename must be specified")
            surface.export_ply(filename, color_field='ones',
                               color_map=color_map, color_log=color_log,
                               **kwargs)
        else:
            raise ValueError("export_to must be one of sketchfab,obj,ply")
def _rescale_images(images, prefix):
    """
    Save a sequence of images, at a common scaling

    Reduces flickering
    """
    # Joint scaling: 99.5th percentile of summed RGB and 95th percentile of
    # alpha, taken across every frame.
    color_max = max(np.percentile(frame[:, :, :3].sum(axis=2), 99.5)
                    for frame in images)
    alpha_max = max(np.percentile(frame[:, :, 3], 95)
                    for frame in images)

    for index, frame in enumerate(images):
        scaled = frame.rescale(cmax=color_max, amax=alpha_max).swapaxes(0,1)
        scaled.write_png("%s%04i.png" % (prefix, index), rescale=False)
def _make_movie(moviepath, prefix="", filename='out.mp4', overwrite=True):
    """
    Use ffmpeg to generate a movie from the image series

    Returns
    -------
    The `subprocess.Popen` object for the completed ffmpeg run, or `None`
    when the output file already exists and ``overwrite`` is False.
    """
    outpath = os.path.join(moviepath, filename)

    exists = os.path.exists(outpath)
    if exists and not overwrite:
        # Bug fix: the original logged "skipping" but then fell through to
        # subprocess.Popen with ``command`` unbound, raising NameError.
        log.info("File {0} exists - skipping".format(outpath))
        return None

    command = ['ffmpeg']
    if exists:
        # overwrite is True here; '-y' tells ffmpeg to clobber the file.
        command.append('-y')
    command += ['-r', '5', '-i',
                os.path.join(moviepath, prefix + '%04d.png'),
                '-r', '30', '-pix_fmt', 'yuv420p',
                outpath]

    pipe = subprocess.Popen(command, stdout=subprocess.PIPE, close_fds=True)
    pipe.wait()

    return pipe
| bsd-3-clause |
ghchinoy/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator.py | 16 | 38554 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for combined DNN + GBDT training model.
The combined model trains a DNN first, then trains boosted trees to boost the
logits of the DNN. The input layer of the DNN (including the embeddings learned
over sparse features) can optionally be provided to the boosted trees as
an additional input feature.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.boosted_trees.estimator_batch import model
from tensorflow.contrib.boosted_trees.estimator_batch import distillation_loss
from tensorflow.contrib.boosted_trees.estimator_batch import trainer_hooks
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.feature_column import feature_column_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_DNN_LEARNING_RATE = 0.001
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
  """Emit zero-fraction and activation-histogram summaries for a layer."""
  zero_fraction = nn.zero_fraction(value)
  summary.scalar("%s_fraction_of_zero_values" % tag, zero_fraction)
  summary.histogram("%s_activation" % tag, value)
def _dnn_tree_combined_model_fn(
    features,
    labels,
    mode,
    head,
    dnn_hidden_units,
    dnn_feature_columns,
    tree_learner_config,
    num_trees,
    tree_examples_per_layer,
    config=None,
    dnn_optimizer="Adagrad",
    dnn_activation_fn=nn.relu,
    dnn_dropout=None,
    dnn_input_layer_partitioner=None,
    dnn_input_layer_to_tree=True,
    dnn_steps_to_train=10000,
    predict_with_tree_only=False,
    tree_feature_columns=None,
    tree_center_bias=False,
    dnn_to_tree_distillation_param=None,
    use_core_versions=False,
    output_type=model.ModelBuilderOutputType.MODEL_FN_OPS,
    override_global_step_value=None):
  """DNN and GBDT combined model_fn.

  Args:
    features: `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    head: A `Head` instance.
    dnn_hidden_units: List of hidden units per layer.
    dnn_feature_columns: An iterable containing all the feature columns
      used by the model's DNN.
    tree_learner_config: A config for the tree learner.
    num_trees: Number of trees to grow model to after training DNN.
    tree_examples_per_layer: Number of examples to accumulate before
      growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in
      training dataset if possible. It can also be a function that computes
      the number of examples based on the depth of the layer that's
      being built.
    config: `RunConfig` of the estimator.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN. If `None`, will use the Adagrad
      optimizer with default learning rate of 0.001.
    dnn_activation_fn: Activation function applied to each layer of the DNN.
      If `None`, will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability to drop out a given
      unit in the DNN.
    dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
      Defaults to `min_max_variable_partitioner` with `min_slice_size`
      64 << 20.
    dnn_input_layer_to_tree: Whether to provide the DNN's input layer
      as a feature to the tree.
    dnn_steps_to_train: Number of steps to train dnn for before switching
      to gbdt.
    predict_with_tree_only: Whether to use only the tree model output as the
      final prediction.
    tree_feature_columns: An iterable containing all the feature columns
      used by the model's boosted trees. If dnn_input_layer_to_tree is
      set to True, these features are in addition to dnn_feature_columns.
    tree_center_bias: Whether a separate tree should be created for
      first fitting the bias.
    dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
      float defines the weight of the distillation loss, and the loss_fn, for
      computing distillation loss, takes dnn_logits, tree_logits and weight
      tensor. If the entire tuple is None, no distillation will be applied. If
      only the loss_fn is None, we will take the sigmoid/softmax cross entropy
      loss be default. When distillation is applied, `predict_with_tree_only`
      will be set to True.
    use_core_versions: Whether feature columns and loss are from the core (as
      opposed to contrib) version of tensorflow.
    output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
      (new interface).
    override_global_step_value: If after the training is done, global step
      value must be reset to this value. This is particularly useful for hyper
      parameter tuning, which can't recognize early stopping due to the number
      of trees. If None, no override of global step will happen.

  Returns:
    A `ModelFnOps` object.

  Raises:
    ValueError: if inputs are not valid.
  """
  if not isinstance(features, dict):
    raise ValueError("features should be a dictionary of `Tensor`s. "
                     "Given type: {}".format(type(features)))

  if not dnn_feature_columns:
    raise ValueError("dnn_feature_columns must be specified")

  if dnn_to_tree_distillation_param:
    if not predict_with_tree_only:
      # Bug fix: the two implicitly-concatenated string literals were
      # missing a separating space ("...distillationis specified.").
      logging.warning("update predict_with_tree_only to True since "
                      "distillation is specified.")
      predict_with_tree_only = True

  # Build DNN Logits.
  dnn_parent_scope = "dnn"
  dnn_partitioner = dnn_input_layer_partitioner or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=config.num_ps_replicas, min_slice_size=64 << 20))

  if (output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC and
      not use_core_versions):
    raise ValueError("You must use core versions with Estimator Spec")
  global_step = training_util.get_global_step()

  with variable_scope.variable_scope(
      dnn_parent_scope,
      values=tuple(six.itervalues(features)),
      partitioner=dnn_partitioner):

    with variable_scope.variable_scope(
        "input_from_feature_columns",
        values=tuple(six.itervalues(features)),
        partitioner=dnn_partitioner) as input_layer_scope:
      if use_core_versions:
        input_layer = feature_column_lib.input_layer(
            features=features,
            feature_columns=dnn_feature_columns,
            weight_collections=[dnn_parent_scope])
      else:
        input_layer = layers.input_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=dnn_feature_columns,
            weight_collections=[dnn_parent_scope],
            scope=input_layer_scope)

    def dnn_logits_fn():
      """Builds the logits from the input layer."""
      previous_layer = input_layer
      for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
        with variable_scope.variable_scope(
            "hiddenlayer_%d" % layer_id,
            values=(previous_layer,)) as hidden_layer_scope:
          net = layers.fully_connected(
              previous_layer,
              num_hidden_units,
              activation_fn=dnn_activation_fn,
              variables_collections=[dnn_parent_scope],
              scope=hidden_layer_scope)
          if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
            net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
        _add_hidden_layer_summary(net, hidden_layer_scope.name)
        previous_layer = net
      with variable_scope.variable_scope(
          "logits", values=(previous_layer,)) as logits_scope:
        dnn_logits = layers.fully_connected(
            previous_layer,
            head.logits_dimension,
            activation_fn=None,
            variables_collections=[dnn_parent_scope],
            scope=logits_scope)
      _add_hidden_layer_summary(dnn_logits, logits_scope.name)
      return dnn_logits

    if predict_with_tree_only and mode == model_fn.ModeKeys.INFER:
      # Inference never needs the DNN tower when only the tree predicts.
      dnn_logits = array_ops.constant(0.0)
      dnn_train_op_fn = control_flow_ops.no_op
    elif predict_with_tree_only and mode == model_fn.ModeKeys.EVAL:
      # During eval, the DNN logits are still used until the switchover
      # step (dnn_steps_to_train) has been passed.
      dnn_logits = control_flow_ops.cond(
          global_step > dnn_steps_to_train,
          lambda: array_ops.constant(0.0),
          dnn_logits_fn)
      dnn_train_op_fn = control_flow_ops.no_op
    else:
      dnn_logits = dnn_logits_fn()

      def dnn_train_op_fn(loss):
        """Returns the op to optimize the loss."""
        return optimizers.optimize_loss(
            loss=loss,
            global_step=training_util.get_global_step(),
            learning_rate=_DNN_LEARNING_RATE,
            optimizer=_get_optimizer(dnn_optimizer),
            name=dnn_parent_scope,
            variables=ops.get_collection(
                ops.GraphKeys.TRAINABLE_VARIABLES, scope=dnn_parent_scope),
            # Empty summaries to prevent optimizers from logging training_loss.
            summaries=[])

  # Build Tree Logits.
  with ops.device(global_step.device):
    ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config="",  # Initialize an empty ensemble.
        name="ensemble_model")

  tree_features = features.copy()
  if dnn_input_layer_to_tree:
    # Feed the DNN's (learned) input layer to the tree as one extra feature.
    tree_features["dnn_input_layer"] = input_layer
    tree_feature_columns.append(layers.real_valued_column("dnn_input_layer"))
  gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
      is_chief=config.is_chief,
      num_ps_replicas=config.num_ps_replicas,
      ensemble_handle=ensemble_handle,
      center_bias=tree_center_bias,
      examples_per_layer=tree_examples_per_layer,
      learner_config=tree_learner_config,
      feature_columns=tree_feature_columns,
      logits_dimension=head.logits_dimension,
      features=tree_features,
      use_core_columns=use_core_versions)

  with ops.name_scope("gbdt"):
    predictions_dict = gbdt_model.predict(mode)
    tree_logits = predictions_dict["predictions"]

    def _tree_train_op_fn(loss):
      """Returns the op to optimize the loss."""
      if dnn_to_tree_distillation_param:
        loss_weight, loss_fn = dnn_to_tree_distillation_param
        # pylint: disable=protected-access
        if use_core_versions:
          weight_tensor = head_lib._weight_tensor(features, head._weight_column)
        else:
          weight_tensor = head_lib._weight_tensor(
              features, head.weight_column_name)
        # pylint: enable=protected-access
        # Stop gradients so distillation only trains the tree, not the DNN.
        dnn_logits_fixed = array_ops.stop_gradient(dnn_logits)

        if loss_fn is None:
          # we create the loss_fn similar to the head loss_fn for
          # multi_class_head used previously as the default one.
          n_classes = 2 if head.logits_dimension == 1 else head.logits_dimension
          loss_fn = distillation_loss.create_dnn_to_tree_cross_entropy_loss_fn(
              n_classes)

        dnn_to_tree_distillation_loss = loss_weight * loss_fn(
            dnn_logits_fixed, tree_logits, weight_tensor)
        summary.scalar("dnn_to_tree_distillation_loss",
                       dnn_to_tree_distillation_loss)
        loss += dnn_to_tree_distillation_loss

      update_op = gbdt_model.train(loss, predictions_dict, labels)
      with ops.control_dependencies(
          [update_op]), (ops.colocate_with(global_step)):
        update_op = state_ops.assign_add(global_step, 1).op
        return update_op

  if predict_with_tree_only:
    if mode == model_fn.ModeKeys.TRAIN or mode == model_fn.ModeKeys.INFER:
      tree_train_logits = tree_logits
    else:
      tree_train_logits = control_flow_ops.cond(
          global_step > dnn_steps_to_train,
          lambda: tree_logits,
          lambda: dnn_logits)
  else:
    # Boosting mode: the tree corrects the DNN's logits.
    tree_train_logits = dnn_logits + tree_logits

  def _no_train_op_fn(loss):
    """Returns a no-op."""
    del loss
    return control_flow_ops.no_op()

  if tree_center_bias:
    # The bias-fitting tree counts towards the tree budget.
    num_trees += 1
  finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()

  if output_type == model.ModelBuilderOutputType.MODEL_FN_OPS:
    model_fn_ops = head.create_model_fn_ops(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_no_train_op_fn,
        logits=tree_train_logits)

    if mode != model_fn.ModeKeys.TRAIN:
      return model_fn_ops
    dnn_train_op = head.create_model_fn_ops(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=dnn_train_op_fn,
        logits=dnn_logits).train_op
    tree_train_op = head.create_model_fn_ops(
        features=tree_features,
        mode=mode,
        labels=labels,
        train_op_fn=_tree_train_op_fn,
        logits=tree_train_logits).train_op

    # Add the hooks
    model_fn_ops.training_hooks.extend([
        trainer_hooks.SwitchTrainOp(dnn_train_op, dnn_steps_to_train,
                                    tree_train_op),
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees,
                                      override_global_step_value)
    ])
    return model_fn_ops

  elif output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC:
    fusion_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_no_train_op_fn,
        logits=tree_train_logits)
    if mode != model_fn.ModeKeys.TRAIN:
      return fusion_spec
    dnn_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=dnn_train_op_fn,
        logits=dnn_logits)
    tree_spec = head.create_estimator_spec(
        features=tree_features,
        mode=mode,
        labels=labels,
        train_op_fn=_tree_train_op_fn,
        logits=tree_train_logits)

    training_hooks = [
        trainer_hooks.SwitchTrainOp(dnn_spec.train_op, dnn_steps_to_train,
                                    tree_spec.train_op),
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees,
                                      override_global_step_value)
    ]
    fusion_spec = fusion_spec._replace(training_hooks=training_hooks +
                                       list(fusion_spec.training_hooks))
    return fusion_spec
class DNNBoostedTreeCombinedClassifier(estimator.Estimator):
  """A classifier that uses a combined DNN/GBDT model."""

  def __init__(self,
               dnn_hidden_units,
               dnn_feature_columns,
               tree_learner_config,
               num_trees,
               tree_examples_per_layer,
               n_classes=2,
               weight_column_name=None,
               model_dir=None,
               config=None,
               label_name=None,
               label_keys=None,
               feature_engineering_fn=None,
               dnn_optimizer="Adagrad",
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               dnn_input_layer_partitioner=None,
               dnn_input_layer_to_tree=True,
               dnn_steps_to_train=10000,
               predict_with_tree_only=False,
               tree_feature_columns=None,
               tree_center_bias=False,
               dnn_to_tree_distillation_param=None,
               use_core_versions=False,
               override_global_step_value=None):
    """Initializes a DNNBoostedTreeCombinedClassifier instance.

    Args:
      dnn_hidden_units: List of hidden units per layer for DNN.
      dnn_feature_columns: An iterable containing all the feature columns
        used by the model's DNN.
      tree_learner_config: A config for the tree learner.
      num_trees: Number of trees to grow model to after training DNN.
      tree_examples_per_layer: Number of examples to accumulate before
        growing the tree a layer. This value has a big impact on model
        quality and should be set equal to the number of examples in
        training dataset if possible. It can also be a function that computes
        the number of examples based on the depth of the layer that's
        being built.
      n_classes: The number of label classes.
      weight_column_name: The name of weight column.
      model_dir: Directory for model exports.
      config: `RunConfig` of the estimator.
      label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
      label_keys: Optional list of strings with size `[n_classes]` defining the
        label vocabulary. Only supported for `n_classes` > 2.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      dnn_optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training the DNN. If `None`, will use the Adagrad
        optimizer with default learning rate.
      dnn_activation_fn: Activation function applied to each layer of the DNN.
        If `None`, will use `tf.nn.relu`.
      dnn_dropout: When not `None`, the probability to drop out a given
        unit in the DNN.
      dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
        Defaults to `min_max_variable_partitioner` with `min_slice_size`
        64 << 20.
      dnn_input_layer_to_tree: Whether to provide the DNN's input layer
        as a feature to the tree.
      dnn_steps_to_train: Number of steps to train dnn for before switching
        to gbdt.
      predict_with_tree_only: Whether to use only the tree model output as the
        final prediction.
      tree_feature_columns: An iterable containing all the feature columns
        used by the model's boosted trees. If dnn_input_layer_to_tree is
        set to True, these features are in addition to dnn_feature_columns.
      tree_center_bias: Whether a separate tree should be created for
        first fitting the bias.
      dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
        float defines the weight of the distillation loss, and the loss_fn, for
        computing distillation loss, takes dnn_logits, tree_logits and weight
        tensor. If the entire tuple is None, no distillation will be applied. If
        only the loss_fn is None, we will take the sigmoid/softmax cross entropy
        loss be default. When distillation is applied, `predict_with_tree_only`
        will be set to True.
      use_core_versions: Whether feature columns and loss are from the core (as
        opposed to contrib) version of tensorflow.
      override_global_step_value: If after the training is done, global step
        value must be reset to this value. This is particularly useful for hyper
        parameter tuning, which can't recognize early stopping due to the number
        of trees. If None, no override of global step will happen.
    """
    # Shared head for both towers. NOTE(review): centered bias is disabled
    # here — presumably because the tree model can fit the bias itself (see
    # ``tree_center_bias``); confirm before relying on this.
    head = head_lib.multi_class_head(
        n_classes=n_classes,
        label_name=label_name,
        label_keys=label_keys,
        weight_column_name=weight_column_name,
        enable_centered_bias=False)

    # Close over all constructor arguments to build the combined model_fn.
    def _model_fn(features, labels, mode, config):
      return _dnn_tree_combined_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          dnn_hidden_units=dnn_hidden_units,
          dnn_feature_columns=dnn_feature_columns,
          tree_learner_config=tree_learner_config,
          num_trees=num_trees,
          tree_examples_per_layer=tree_examples_per_layer,
          config=config,
          dnn_optimizer=dnn_optimizer,
          dnn_activation_fn=dnn_activation_fn,
          dnn_dropout=dnn_dropout,
          dnn_input_layer_partitioner=dnn_input_layer_partitioner,
          dnn_input_layer_to_tree=dnn_input_layer_to_tree,
          dnn_steps_to_train=dnn_steps_to_train,
          predict_with_tree_only=predict_with_tree_only,
          tree_feature_columns=tree_feature_columns,
          tree_center_bias=tree_center_bias,
          dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
          use_core_versions=use_core_versions,
          override_global_step_value=override_global_step_value)

    super(DNNBoostedTreeCombinedClassifier, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
class DNNBoostedTreeCombinedRegressor(estimator.Estimator):
  """A regressor that uses a combined DNN/GBDT model."""

  def __init__(self,
               dnn_hidden_units,
               dnn_feature_columns,
               tree_learner_config,
               num_trees,
               tree_examples_per_layer,
               weight_column_name=None,
               model_dir=None,
               config=None,
               label_name=None,
               label_dimension=1,
               feature_engineering_fn=None,
               dnn_optimizer="Adagrad",
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               dnn_input_layer_partitioner=None,
               dnn_input_layer_to_tree=True,
               dnn_steps_to_train=10000,
               predict_with_tree_only=False,
               tree_feature_columns=None,
               tree_center_bias=False,
               dnn_to_tree_distillation_param=None,
               use_core_versions=False,
               override_global_step_value=None):
    """Initializes a DNNBoostedTreeCombinedRegressor instance.

    Args:
      dnn_hidden_units: List of hidden units per layer for the DNN.
      dnn_feature_columns: An iterable of all feature columns used by the
        model's DNN.
      tree_learner_config: A config for the tree learner.
      num_trees: Number of trees to grow the model to after training the DNN.
      tree_examples_per_layer: Number of examples to accumulate before growing
        the tree a layer. This value has a big impact on model quality and
        should be set equal to the number of examples in the training dataset
        if possible. It can also be a function that computes the number of
        examples based on the depth of the layer being built.
      weight_column_name: The name of the weight column.
      model_dir: Directory for model exports.
      config: `RunConfig` of the estimator.
      label_name: String, name of the key in the label dict. Can be null if
        the label is a tensor (single headed models).
      label_dimension: Number of regression labels per example. This is the
        size of the last dimension of the labels `Tensor` (typically of shape
        `[batch_size, label_dimension]`).
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      dnn_optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training the DNN. If `None`, will use the Adagrad
        optimizer with a default learning rate.
      dnn_activation_fn: Activation function applied to each layer of the DNN.
        If `None`, will use `tf.nn.relu`.
      dnn_dropout: When not `None`, the probability to drop out a given unit
        in the DNN.
      dnn_input_layer_partitioner: Partitioner for the input layer of the DNN.
        Defaults to `min_max_variable_partitioner` with `min_slice_size`
        64 << 20.
      dnn_input_layer_to_tree: Whether to provide the DNN's input layer as a
        feature to the tree.
      dnn_steps_to_train: Number of steps to train the DNN for before
        switching to GBDT.
      predict_with_tree_only: Whether to use only the tree model output as the
        final prediction.
      tree_feature_columns: An iterable of all feature columns used by the
        model's boosted trees. If dnn_input_layer_to_tree is set to True,
        these features are in addition to dnn_feature_columns.
      tree_center_bias: Whether a separate tree should be created for first
        fitting the bias.
      dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
        float defines the weight of the distillation loss and loss_fn computes
        the distillation loss from dnn_logits, tree_logits and a weight
        tensor. If the entire tuple is None, no distillation is applied; if
        only loss_fn is None, sigmoid/softmax cross entropy loss is used by
        default. When distillation is applied, `predict_with_tree_only` will
        be set to True.
      use_core_versions: Whether feature columns and loss are from the core
        (as opposed to contrib) version of tensorflow.
      override_global_step_value: If set, the global step value is reset to
        this value after training is done. This is particularly useful for
        hyper parameter tuning, which can't recognize early stopping due to
        the number of trees. If None, no override of the global step happens.
    """
    head = head_lib.regression_head(
        label_name=label_name,
        label_dimension=label_dimension,
        weight_column_name=weight_column_name,
        enable_centered_bias=False)
    # num_classes is needed by GradientBoostedDecisionTreeModel; scalar
    # regression maps to the two-class representation internally.
    tree_learner_config.num_classes = (
        2 if label_dimension == 1 else label_dimension)

    # All arguments below are fixed at construction time; only
    # (features, labels, mode, config) vary per model_fn invocation.
    combined_params = dict(
        head=head,
        dnn_hidden_units=dnn_hidden_units,
        dnn_feature_columns=dnn_feature_columns,
        tree_learner_config=tree_learner_config,
        num_trees=num_trees,
        tree_examples_per_layer=tree_examples_per_layer,
        dnn_optimizer=dnn_optimizer,
        dnn_activation_fn=dnn_activation_fn,
        dnn_dropout=dnn_dropout,
        dnn_input_layer_partitioner=dnn_input_layer_partitioner,
        dnn_input_layer_to_tree=dnn_input_layer_to_tree,
        dnn_steps_to_train=dnn_steps_to_train,
        predict_with_tree_only=predict_with_tree_only,
        tree_feature_columns=tree_feature_columns,
        tree_center_bias=tree_center_bias,
        dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
        use_core_versions=use_core_versions,
        override_global_step_value=override_global_step_value)

    def _model_fn(features, labels, mode, config):
      """Builds the combined DNN/GBDT model for a single estimator call."""
      return _dnn_tree_combined_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          config=config,
          **combined_params)

    super(DNNBoostedTreeCombinedRegressor, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
class DNNBoostedTreeCombinedEstimator(estimator.Estimator):
  """An estimator that uses a combined DNN/GBDT model.

  Useful for training with a user specified `Head`.
  """

  def __init__(self,
               dnn_hidden_units,
               dnn_feature_columns,
               tree_learner_config,
               num_trees,
               tree_examples_per_layer,
               head,
               model_dir=None,
               config=None,
               feature_engineering_fn=None,
               dnn_optimizer="Adagrad",
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               dnn_input_layer_partitioner=None,
               dnn_input_layer_to_tree=True,
               dnn_steps_to_train=10000,
               predict_with_tree_only=False,
               tree_feature_columns=None,
               tree_center_bias=False,
               dnn_to_tree_distillation_param=None,
               use_core_versions=False,
               override_global_step_value=None):
    """Initializes a DNNBoostedTreeCombinedEstimator instance.

    Args:
      dnn_hidden_units: List of hidden units per layer for the DNN.
      dnn_feature_columns: An iterable of all feature columns used by the
        model's DNN.
      tree_learner_config: A config for the tree learner.
      num_trees: Number of trees to grow the model to after training the DNN.
      tree_examples_per_layer: Number of examples to accumulate before growing
        the tree a layer. This value has a big impact on model quality and
        should be set equal to the number of examples in the training dataset
        if possible. It can also be a function that computes the number of
        examples based on the depth of the layer being built.
      head: `Head` instance.
      model_dir: Directory for model exports.
      config: `RunConfig` of the estimator.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      dnn_optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training the DNN. If `None`, will use the Adagrad
        optimizer with a default learning rate.
      dnn_activation_fn: Activation function applied to each layer of the DNN.
        If `None`, will use `tf.nn.relu`.
      dnn_dropout: When not `None`, the probability to drop out a given unit
        in the DNN.
      dnn_input_layer_partitioner: Partitioner for the input layer of the DNN.
        Defaults to `min_max_variable_partitioner` with `min_slice_size`
        64 << 20.
      dnn_input_layer_to_tree: Whether to provide the DNN's input layer as a
        feature to the tree.
      dnn_steps_to_train: Number of steps to train the DNN for before
        switching to GBDT.
      predict_with_tree_only: Whether to use only the tree model output as the
        final prediction.
      tree_feature_columns: An iterable of all feature columns used by the
        model's boosted trees. If dnn_input_layer_to_tree is set to True,
        these features are in addition to dnn_feature_columns.
      tree_center_bias: Whether a separate tree should be created for first
        fitting the bias.
      dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
        float defines the weight of the distillation loss and loss_fn computes
        the distillation loss from dnn_logits, tree_logits and a weight
        tensor. If the entire tuple is None, no distillation is applied; if
        only loss_fn is None, sigmoid/softmax cross entropy loss is used by
        default. When distillation is applied, `predict_with_tree_only` will
        be set to True.
      use_core_versions: Whether feature columns and loss are from the core
        (as opposed to contrib) version of tensorflow.
      override_global_step_value: If set, the global step value is reset to
        this value after training is done. This is particularly useful for
        hyper parameter tuning, which can't recognize early stopping due to
        the number of trees. If None, no override of the global step happens.
    """
    # Fixed, construction-time arguments; the model_fn below only adds the
    # per-invocation (features, labels, mode, config) quadruple.
    combined_params = dict(
        head=head,
        dnn_hidden_units=dnn_hidden_units,
        dnn_feature_columns=dnn_feature_columns,
        tree_learner_config=tree_learner_config,
        num_trees=num_trees,
        tree_examples_per_layer=tree_examples_per_layer,
        dnn_optimizer=dnn_optimizer,
        dnn_activation_fn=dnn_activation_fn,
        dnn_dropout=dnn_dropout,
        dnn_input_layer_partitioner=dnn_input_layer_partitioner,
        dnn_input_layer_to_tree=dnn_input_layer_to_tree,
        dnn_steps_to_train=dnn_steps_to_train,
        predict_with_tree_only=predict_with_tree_only,
        tree_feature_columns=tree_feature_columns,
        tree_center_bias=tree_center_bias,
        dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
        use_core_versions=use_core_versions,
        override_global_step_value=override_global_step_value)

    def _model_fn(features, labels, mode, config):
      """Builds the combined DNN/GBDT model for a single estimator call."""
      return _dnn_tree_combined_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          config=config,
          **combined_params)

    super(DNNBoostedTreeCombinedEstimator, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
class CoreDNNBoostedTreeCombinedEstimator(core_estimator.Estimator):
  """Initializes a core version of DNNBoostedTreeCombinedEstimator.

  Args:
    dnn_hidden_units: List of hidden units per layer for DNN.
    dnn_feature_columns: An iterable containing all the feature columns
      used by the model's DNN.
    tree_learner_config: A config for the tree learner.
    num_trees: Number of trees to grow model to after training DNN.
    tree_examples_per_layer: Number of examples to accumulate before
      growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in
      training dataset if possible. It can also be a function that computes
      the number of examples based on the depth of the layer that's
      being built.
    head: `Head` instance.
    model_dir: Directory for model exports.
    config: `RunConfig` of the estimator.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN. If `None`, will use the Adagrad
      optimizer with default learning rate.
    dnn_activation_fn: Activation function applied to each layer of the DNN.
      If `None`, will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability to drop out a given
      unit in the DNN.
    dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
      Defaults to `min_max_variable_partitioner` with `min_slice_size`
      64 << 20.
    dnn_input_layer_to_tree: Whether to provide the DNN's input layer
      as a feature to the tree.
    dnn_steps_to_train: Number of steps to train dnn for before switching
      to gbdt.
    predict_with_tree_only: Whether to use only the tree model output as the
      final prediction.
    tree_feature_columns: An iterable containing all the feature columns
      used by the model's boosted trees. If dnn_input_layer_to_tree is
      set to True, these features are in addition to dnn_feature_columns.
    tree_center_bias: Whether a separate tree should be created for
      first fitting the bias.
    dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
      float defines the weight of the distillation loss, and the loss_fn, for
      computing distillation loss, takes dnn_logits, tree_logits and weight
      tensor. If the entire tuple is None, no distillation will be applied. If
      only the loss_fn is None, we will take the sigmoid/softmax cross entropy
      loss by default. When distillation is applied, `predict_with_tree_only`
      will be set to True.
  """

  def __init__(self,
               dnn_hidden_units,
               dnn_feature_columns,
               tree_learner_config,
               num_trees,
               tree_examples_per_layer,
               head,
               model_dir=None,
               config=None,
               dnn_optimizer="Adagrad",
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               dnn_input_layer_partitioner=None,
               dnn_input_layer_to_tree=True,
               dnn_steps_to_train=10000,
               predict_with_tree_only=False,
               tree_feature_columns=None,
               tree_center_bias=False,
               dnn_to_tree_distillation_param=None):
    """Constructs the estimator; see the class docstring for argument docs."""

    def _model_fn(features, labels, mode, config):
      # Same combined model builder as the contrib estimators above, but
      # asked to emit a core `EstimatorSpec` and to use core feature
      # columns / losses (use_core_versions=True).
      return _dnn_tree_combined_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          dnn_hidden_units=dnn_hidden_units,
          dnn_feature_columns=dnn_feature_columns,
          tree_learner_config=tree_learner_config,
          num_trees=num_trees,
          tree_examples_per_layer=tree_examples_per_layer,
          config=config,
          dnn_optimizer=dnn_optimizer,
          dnn_activation_fn=dnn_activation_fn,
          dnn_dropout=dnn_dropout,
          dnn_input_layer_partitioner=dnn_input_layer_partitioner,
          dnn_input_layer_to_tree=dnn_input_layer_to_tree,
          dnn_steps_to_train=dnn_steps_to_train,
          predict_with_tree_only=predict_with_tree_only,
          tree_feature_columns=tree_feature_columns,
          tree_center_bias=tree_center_bias,
          dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
          output_type=model.ModelBuilderOutputType.ESTIMATOR_SPEC,
          use_core_versions=True,
          override_global_step_value=None)

    super(CoreDNNBoostedTreeCombinedEstimator, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)
| apache-2.0 |
brightchen/Impala | tests/beeswax/impala_beeswax.py | 14 | 17540 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Talk to an impalad through beeswax.
# Usage:
# * impalad is a string with the host and port of the impalad
# with which the connection should be established.
# The format is "<hostname>:<port>"
# * query_string is the query to be executed, as a string.
# client = ImpalaBeeswaxClient(impalad)
# client.connect()
# result = client.execute(query_string)
# where result is an object of the class ImpalaBeeswaxResult.
import time
import sys
import shlex
import traceback
import getpass
import re
from beeswaxd import BeeswaxService
from beeswaxd.BeeswaxService import QueryState
from datetime import datetime
try:
# If Exec Summary is not implemented in Impala, this cannot be imported
from ExecStats.ttypes import TExecStats
except ImportError:
pass
from ImpalaService import ImpalaService
from ImpalaService.ImpalaService import TImpalaQueryOptions, TResetTableReq
from tests.util.thrift_util import create_transport
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport, TTransportException
from thrift.protocol import TBinaryProtocol
from thrift.Thrift import TApplicationException
# Wrapper raised for every failure coming out of thrift/beeswax calls so
# callers only ever have to catch one exception type. __str__ keeps the
# wrapper's name visible in front of the underlying message.
# TODO: Add the ability to print some of the stack.
class ImpalaBeeswaxException(Exception):
  __name__ = "ImpalaBeeswaxException"

  def __init__(self, message, inner_exception):
    self.__message = message
    # The original exception is kept for callers that want to inspect it.
    self.inner_exception = inner_exception

  def __str__(self):
    return "{0}:\n {1}".format(self.__name__, self.__message)
class ImpalaBeeswaxResult(object):
  """Value object holding the outcome of one executed query."""

  def __init__(self, **kwargs):
    self.query = kwargs.get('query', None)
    self.success = kwargs.get('success', False)
    data = kwargs.get('data', None)
    # Insert returns an int; normalise every non-list payload to a
    # single-element list of its string form so self.data is uniform.
    # TODO: We should revisit this if we have more datatypes to deal with.
    if isinstance(data, list):
      self.data = data
    else:
      self.data = [str(data)]
    self.log = None
    self.time_taken = kwargs.get('time_taken', 0)
    self.summary = kwargs.get('summary', str())
    self.schema = kwargs.get('schema', None)
    self.runtime_profile = kwargs.get('runtime_profile', str())
    self.exec_summary = kwargs.get('exec_summary', None)

  def get_data(self):
    """Return the result rows joined into one newline-separated string."""
    return self.__format_data()

  def __format_data(self):
    if not self.data:
      return ''
    return '\n'.join(self.data)

  def __str__(self):
    return ('Summary: {0}\n'
            'Success: {1}\n'
            'Took: {2}(s)\n'
            'Data:\n{3}\n').format(self.summary, self.success,
                                   self.time_taken, self.__format_data())
# Interface to beeswax. Responsible for executing queries, fetching results.
class ImpalaBeeswaxClient(object):
  """Python 2 thrift client for talking to an impalad via the beeswax API.

  Typical usage: construct with "<host>:<port>", call connect(), then
  execute() per query. All thrift-level errors surface as
  ImpalaBeeswaxException (see __do_rpc).
  """

  # Regex applied to all tokens of a query to detect the query type.
  INSERT_REGEX = re.compile("^insert$", re.I)

  def __init__(self, impalad, use_kerberos=False, user=None, password=None,
               use_ssl=False):
    self.connected = False
    # "<host>:<port>" string; split into [host, port] on connect().
    self.impalad = impalad
    self.imp_service = None
    self.transport = None
    self.use_kerberos = use_kerberos
    self.use_ssl = use_ssl
    self.user, self.password = user, password
    # Presence of a user name implies LDAP (plain SASL) authentication.
    self.use_ldap = (self.user is not None)
    # Query options sent with every query; keys are upper-cased option names.
    self.__query_options = {}
    self.query_states = QueryState._NAMES_TO_VALUES

  def __options_to_string_list(self):
    """Render the query options as beeswax "KEY=value" configuration strings."""
    return ["%s=%s" % (k,v) for (k,v) in self.__query_options.iteritems()]

  def get_query_options(self):
    return self.__query_options

  def set_query_option(self, name, value):
    self.__query_options[name.upper()] = value

  def set_query_options(self, query_option_dict):
    """Replace all current query options with the given dict."""
    if query_option_dict is None:
      raise ValueError, 'Cannot pass None value for query options'
    self.clear_query_options()
    for name, value in query_option_dict.iteritems():
      self.set_query_option(name, value)

  def get_query_option(self, name):
    return self.__query_options.get(name.upper())

  def clear_query_options(self):
    self.__query_options.clear()

  def connect(self):
    """Connect to impalad specified in intializing this object

    Raises an exception if the connection is unsuccesful.
    """
    try:
      self.impalad = self.impalad.split(':')
      self.transport = self.__get_transport()
      self.transport.open()
      protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
      self.imp_service = ImpalaService.Client(protocol)
      self.connected = True
    except Exception, e:
      raise ImpalaBeeswaxException(self.__build_error_message(e), e)

  def close_connection(self):
    """Close the transport if it's still open"""
    if self.transport:
      self.transport.close()

  def __get_transport(self):
    """Creates the proper transport type based environment (secure vs unsecure)"""
    trans_type = 'buffered'
    if self.use_kerberos:
      trans_type = 'kerberos'
    elif self.use_ldap:
      trans_type = 'plain_sasl'
    return create_transport(host=self.impalad[0], port=int(self.impalad[1]),
                            service='impala', transport_type=trans_type, user=self.user,
                            password=self.password, use_ssl=self.use_ssl)

  def execute(self, query_string, user=None):
    """Re-directs the query to its appropriate handler, returns ImpalaBeeswaxResult"""
    # Take care of leading/trailing whitespaces.
    query_string = query_string.strip()
    start = time.time()
    start_time = datetime.now()
    handle = self.__execute_query(query_string.strip(), user=user)
    if self.__get_query_type(query_string) == 'insert':
      # DML queries are finished by this point.
      time_taken = time.time() - start
      # fetch_results() will close the query after which there is no guarantee that
      # profile and log will be available so fetch them first.
      runtime_profile = self.get_runtime_profile(handle)
      exec_summary = self.get_exec_summary(handle)
      log = self.get_log(handle.log_context)
      result = self.fetch_results(query_string, handle)
      result.time_taken, result.start_time, result.runtime_profile, result.log = \
          time_taken, start_time, runtime_profile, log
      result.exec_summary = exec_summary
    else:
      # For SELECT queries, execution might still be ongoing. fetch_results() will block
      # until the query is completed.
      result = self.fetch_results(query_string, handle)
      result.time_taken = time.time() - start
      result.start_time = start_time
      result.exec_summary = self.get_exec_summary(handle)
      result.log = self.get_log(handle.log_context)
      result.runtime_profile = self.get_runtime_profile(handle)
      self.close_query(handle)
    return result

  def get_exec_summary(self, handle):
    """Calls GetExecSummary() for the last query handle"""
    try:
      summary = self.__do_rpc(lambda: self.imp_service.GetExecSummary(handle))
    except ImpalaBeeswaxException:
      # Best effort: a missing summary is reported as None, not an error.
      summary = None
    if summary is None or summary.nodes is None:
      return None
    # If exec summary is not implemented in Impala, this function returns, so we do not
    # get the function __build_summary_table which requires TExecStats to be imported.
    output = []
    self.__build_summary_table(summary, 0, False, 0, False, output)
    return output

  def __build_summary_table(self, summary, idx, is_fragment_root, indent_level,
                            new_indent_level, output):
    """NOTE: This was taken impala_shell.py. This method will be placed in a library
    that is shared between impala_shell and this file.

    Direct translation of Coordinator::PrintExecSummary() to recursively build a list
    of rows of summary statistics, one per exec node

    summary: the TExecSummary object that contains all the summary data

    idx: the index of the node to print

    is_fragment_root: true if the node to print is the root of a fragment (and therefore
    feeds into an exchange)

    indent_level: the number of spaces to print before writing the node's label, to give
    the appearance of a tree. The 0th child of a node has the same indent_level as its
    parent. All other children have an indent_level of one greater than their parent.

    new_indent_level: If true, this indent level is different from the previous row's.

    output: the list of rows into which to append the rows produced for this node and its
    children.

    Returns the index of the next exec node in summary.exec_nodes that should be
    processed, used internally to this method only.
    """
    attrs = ["latency_ns", "cpu_time_ns", "cardinality", "memory_used"]
    # Initialise aggregate and maximum stats
    agg_stats, max_stats = TExecStats(), TExecStats()
    for attr in attrs:
      setattr(agg_stats, attr, 0)
      setattr(max_stats, attr, 0)
    node = summary.nodes[idx]
    # Sum and max each stat across all instances of this node; skip
    # instances that did not report a value (None).
    for stats in node.exec_stats:
      for attr in attrs:
        val = getattr(stats, attr)
        if val is not None:
          setattr(agg_stats, attr, getattr(agg_stats, attr) + val)
          setattr(max_stats, attr, max(getattr(max_stats, attr), val))
    if len(node.exec_stats) > 0:
      avg_time = agg_stats.latency_ns / len(node.exec_stats)
    else:
      avg_time = 0
    # If the node is a broadcast-receiving exchange node, the cardinality of rows produced
    # is the max over all instances (which should all have received the same number of
    # rows). Otherwise, the cardinality is the sum over all instances which process
    # disjoint partitions.
    if node.is_broadcast and is_fragment_root:
      cardinality = max_stats.cardinality
    else:
      cardinality = agg_stats.cardinality
    est_stats = node.estimated_stats
    # Build the tree-drawing prefix ("|", "--", ...) for the label column.
    label_prefix = ""
    if indent_level > 0:
      label_prefix = "|"
      label_prefix += " |" * (indent_level - 1)
      if new_indent_level:
        label_prefix += "--"
      else:
        label_prefix += "  "
    row = {}
    row["prefix"] = label_prefix
    row["operator"] = node.label
    row["num_hosts"] = len(node.exec_stats)
    row["avg_time"] = avg_time
    row["max_time"] = max_stats.latency_ns
    row["num_rows"] = cardinality
    row["est_num_rows"] = est_stats.cardinality
    row["peak_mem"] = max_stats.memory_used
    row["est_peak_mem"] = est_stats.memory_used
    row["detail"] = node.label_detail
    output.append(row)
    try:
      sender_idx = summary.exch_to_sender_map[idx]
      # This is an exchange node, so the sender is a fragment root, and should be printed
      # next.
      self.__build_summary_table(summary, sender_idx, True, indent_level, False, output)
    except (KeyError, TypeError):
      # Fall through if idx not in map, or if exch_to_sender_map itself is not set
      pass
    idx += 1
    if node.num_children > 0:
      first_child_output = []
      idx = \
        self.__build_summary_table(
            summary, idx, False, indent_level, False, first_child_output)
      for child_idx in xrange(1, node.num_children):
        # All other children are indented (we only have 0, 1 or 2 children for every exec
        # node at the moment)
        idx = self.__build_summary_table(
            summary, idx, False, indent_level + 1, True, output)
      # The 0th child is emitted after its siblings so the tree renders in
      # the same order as Coordinator::PrintExecSummary().
      output += first_child_output
    return idx

  def get_runtime_profile(self, handle):
    return self.__do_rpc(lambda: self.imp_service.GetRuntimeProfile(handle))

  def execute_query_async(self, query_string, user=None):
    """
    Executes a query asynchronously

    Issues a query and returns the query handle to the caller for processing.
    """
    query = BeeswaxService.Query()
    query.query = query_string
    query.hadoop_user = user if user is not None else getpass.getuser()
    query.configuration = self.__options_to_string_list()
    return self.__do_rpc(lambda: self.imp_service.query(query,))

  def __execute_query(self, query_string, user=None):
    """Executes a query and waits for completion"""
    handle = self.execute_query_async(query_string, user=user)
    # Wait for the query to finish execution.
    self.wait_for_completion(handle)
    return handle

  def cancel_query(self, query_id):
    return self.__do_rpc(lambda: self.imp_service.Cancel(query_id))

  def close_query(self, handle):
    self.__do_rpc(lambda: self.imp_service.close(handle))

  def wait_for_completion(self, query_handle):
    """Given a query handle, polls the coordinator waiting for the query to complete"""
    while True:
      query_state = self.get_state(query_handle)
      # if the rpc succeeded, the output is the query state
      if query_state == self.query_states["FINISHED"]:
        break
      elif query_state == self.query_states["EXCEPTION"]:
        # Fetch the error log before closing, then always close the query.
        try:
          error_log = self.__do_rpc(
              lambda: self.imp_service.get_log(query_handle.log_context))
          raise ImpalaBeeswaxException("Query aborted:" + error_log, None)
        finally:
          self.close_query(query_handle)
      time.sleep(0.05)

  def get_default_configuration(self):
    return self.__do_rpc(lambda: self.imp_service.get_default_configuration(False))

  def get_state(self, query_handle):
    return self.__do_rpc(lambda: self.imp_service.get_state(query_handle))

  def get_log(self, query_handle):
    return self.__do_rpc(lambda: self.imp_service.get_log(query_handle))

  def refresh(self):
    """Invalidate the Impalad catalog"""
    return self.execute("invalidate metadata")

  def refresh_table(self, db_name, table_name):
    """Refresh a specific table from the catalog"""
    return self.execute("refresh %s.%s" % (db_name, table_name))

  def fetch_results(self, query_string, query_handle, max_rows = -1):
    """Fetches query results given a handle and query type (insert, use, other)"""
    query_type = self.__get_query_type(query_string)
    if query_type == 'use':
      # TODO: "use <database>" does not currently throw an error. Need to update this
      # to handle the error case once that behavior has been changed.
      return ImpalaBeeswaxResult(query=query_string, success=True, data=[''])
    # Result fetching for insert is different from other queries.
    exec_result = None
    if query_type == 'insert':
      exec_result = self.__fetch_insert_results(query_handle)
    else:
      exec_result = self.__fetch_results(query_handle, max_rows)
    exec_result.query = query_string
    return exec_result

  def __fetch_results(self, handle, max_rows = -1):
    """Handles query results, returns a ImpalaBeeswaxResult object"""
    schema = self.__do_rpc(lambda: self.imp_service.get_results_metadata(handle)).schema
    # The query has finished, we can fetch the results
    result_rows = []
    # max_rows < 0 means "fetch everything"; otherwise stop once we have
    # accumulated max_rows rows.
    while len(result_rows) < max_rows or max_rows < 0:
      fetch_rows = -1 if max_rows < 0 else max_rows - len(result_rows)
      results = self.__do_rpc(lambda: self.imp_service.fetch(handle, False, fetch_rows))
      result_rows.extend(results.data)
      if not results.has_more:
        break
    # The query executed successfully and all the data was fetched.
    exec_result = ImpalaBeeswaxResult(success=True, data=result_rows, schema=schema)
    exec_result.summary = 'Returned %d rows' % (len(result_rows))
    return exec_result

  def __fetch_insert_results(self, handle):
    """Executes an insert query"""
    result = self.__do_rpc(lambda: self.imp_service.CloseInsert(handle))
    # The insert was successful
    num_rows = sum(map(int, result.rows_appended.values()))
    data = ["%s: %s" % row for row in result.rows_appended.iteritems()]
    exec_result = ImpalaBeeswaxResult(success=True, data=data)
    exec_result.summary = "Inserted %d rows" % (num_rows,)
    return exec_result

  def __get_query_type(self, query_string):
    """Classify the query as 'insert', 'use', 'explain', or its first token."""
    # Set posix=True and add "'" to escaped quotes
    # to deal with escaped quotes in string literals
    lexer = shlex.shlex(query_string.lstrip(), posix=True)
    lexer.escapedquotes += "'"
    tokens = list(lexer)
    # Do not classify explain queries as 'insert'
    if (tokens[0].lower() == "explain"):
      return tokens[0].lower()
    # Because the WITH clause may precede INSERT or SELECT queries,
    # just checking the first token is insufficient.
    if filter(self.INSERT_REGEX.match, tokens):
      return "insert"
    return tokens[0].lower()

  def __build_error_message(self, exception):
    """Construct a meaningful exception string"""
    message = str(exception)
    if isinstance(exception, BeeswaxService.BeeswaxException):
      message = exception.message
    return 'INNER EXCEPTION: %s\n MESSAGE: %s' % (exception.__class__, message)

  def __do_rpc(self, rpc):
    """Executes the RPC lambda provided with some error checking.

    Catches all the relevant exceptions and re throws them wrapped
    in a custom exception [ImpalaBeeswaxException].
    """
    if not self.connected:
      raise ImpalaBeeswaxException("Not connected", None)
    try:
      return rpc()
    except BeeswaxService.BeeswaxException, b:
      raise ImpalaBeeswaxException(self.__build_error_message(b), b)
    except TTransportException, e:
      # A transport error means the connection is unusable; mark it dead.
      self.connected = False
      raise ImpalaBeeswaxException(self.__build_error_message(e), e)
    except TApplicationException, t:
      raise ImpalaBeeswaxException(self.__build_error_message(t), t)
    except Exception, u:
      raise ImpalaBeeswaxException(self.__build_error_message(u), u)
| apache-2.0 |
HybridF5/jacket | jacket/compute/scheduler/weights/io_ops.py | 1 | 1442 | # Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Io Ops Weigher. Weigh hosts by their io ops number.
The default is to preferably choose light workload compute hosts. If you prefer
choosing heavy workload compute hosts, you can set 'io_ops_weight_multiplier'
option to a positive number and the weighing has the opposite effect of the
default.
"""
import jacket.compute.conf
from jacket.compute.scheduler import weights
CONF = jacket.compute.conf.CONF
class IoOpsWeigher(weights.BaseHostWeigher):
    # Lower bound for the weight; semantics come from BaseHostWeigher
    # (defined elsewhere) — presumably clamps negative weights. TODO confirm.
    minval = 0

    def weight_multiplier(self):
        """Override the weight multiplier.

        Returns the operator-configured ``io_ops_weight_multiplier``. Per the
        module docstring, the default prefers lightly loaded hosts; a positive
        value inverts that preference.
        """
        return CONF.io_ops_weight_multiplier

    def _weigh_object(self, host_state, weight_properties):
        """Higher weights win. We want to choose light workload host
        to be the default.
        """
        # Raw weight is simply the host's current number of I/O operations;
        # the multiplier above decides whether more ops helps or hurts.
        return host_state.num_io_ops
| apache-2.0 |
yland/coala-bears | bears/natural_language/LanguageToolBear.py | 4 | 2440 | import shutil
from guess_language import guess_language
from language_check import LanguageTool, correct
from coalib.bears.LocalBear import LocalBear
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from coalib.settings.Setting import typed_list
class LanguageToolBear(LocalBear):

    @classmethod
    def check_prerequisites(cls):
        # LanguageTool runs on the JVM, so a java binary must be on PATH.
        if shutil.which("java") is None:
            return "java is not installed."
        return True

    def run(self,
            filename,
            file,
            locale: str='auto',
            languagetool_disable_rules: typed_list(str)=()):
        '''
        Checks the code with LanguageTool.

        :param locale:                      A locale representing the language
                                            you want to have checked. If set to
                                            'auto' the language is guessed.
                                            If the language cannot be guessed,
                                            'en-US' is used.
        :param languagetool_disable_rules:  List of rules to disable checks
                                            for.
        '''
        text = "".join(file)
        if locale == 'auto':
            locale = guess_language(text)
        if not locale:
            # Guessing failed (or an empty locale was given): fall back.
            locale = 'en-US'

        tool = LanguageTool(locale, motherTongue="en_US")
        tool.disabled.update(languagetool_disable_rules)

        for match in tool.check(text):
            diffs = None
            if match.replacements:
                corrected = correct(text, [match]).splitlines(True)
                diffs = {filename: Diff.from_string_arrays(file, corrected)}

            rule_id = match.ruleId
            if match.subId is not None:
                rule_id += '[{}]'.format(match.subId)

            affected = SourceRange.from_values(filename,
                                               match.fromy + 1,
                                               match.fromx + 1,
                                               match.toy + 1,
                                               match.tox + 1)
            yield Result(self,
                         '{} ({})'.format(match.msg, rule_id),
                         diffs=diffs,
                         affected_code=(affected,))
| agpl-3.0 |
proxysh/Safejumper-for-Desktop | buildlinux/env32/lib/python2.7/site-packages/twisted/conch/ssh/transport.py | 11 | 72343 | # -*- test-case-name: twisted.conch.test.test_transport -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The lowest level SSH protocol. This handles the key negotiation, the
encryption and the compression. The transport layer is described in
RFC 4253.
Maintainer: Paul Swartz
"""
from __future__ import absolute_import, division
import binascii
import hmac
import struct
import zlib
from hashlib import md5, sha1, sha256, sha384, sha512
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms, modes, Cipher
from cryptography.hazmat.primitives.asymmetric import ec
from twisted.internet import protocol, defer
from twisted.python import log, randbytes
from twisted.python.compat import networkString, iterbytes, _bytesChr as chr
# This import is needed if SHA256 hashing is used.
#from twisted.python.compat import nativeString
from twisted.conch.ssh import address, keys, _kex
from twisted.conch.ssh.common import (
NS, getNS, MP, getMP, _MPpow, ffs, int_from_bytes
)
def _getRandomNumber(random, bits):
    """
    Generate a random number in the range [0, 2 ** bits).

    @type random: L{callable}
    @param random: A callable taking a count of bytes and returning that many
    random bytes.

    @type bits: L{int}
    @param bits: The number of bits in the result.

    @rtype: L{int} or L{long}
    @return: The newly generated random number.

    @raise ValueError: if C{bits} is not a multiple of 8.
    """
    numBytes, leftover = divmod(bits, 8)
    if leftover:
        raise ValueError("bits (%d) must be a multiple of 8" % (bits,))
    # Interpret the random bytes as a big-endian unsigned integer.
    return int_from_bytes(random(numBytes), 'big')
def _generateX(random, bits):
    """
    Generate a new value for the private key x.

    From RFC 2631, section 2.2::

        X9.42 requires that the private key x be in the interval
        [2, (q - 2)].  x should be randomly generated in this interval.

    @type random: L{callable}
    @param random: A callable taking a count of bytes and returning that many
    random bytes.

    @type bits: L{int}
    @param bits: The size of the key to generate, in bits.

    @rtype: L{int}
    @return: A suitable 'x' value.
    """
    upperBound = (2 ** bits) - 2
    # Rejection-sample until a candidate lands in [2, 2 ** bits - 2].
    while True:
        candidate = _getRandomNumber(random, bits)
        if 2 <= candidate <= upperBound:
            return candidate
class _MACParams(tuple):
    """
    L{_MACParams} represents the parameters necessary to compute SSH MAC
    (Message Authenticate Codes).

    L{_MACParams} is a L{tuple} subclass to maintain compatibility with older
    versions of the code.  The elements of a L{_MACParams} are::

        0. The digest object used for the MAC
        1. The inner pad ("ipad") string
        2. The outer pad ("opad") string
        3. The size of the digest produced by the digest object

    L{_MACParams} is also an object lesson in why tuples are a bad type for
    public APIs.

    @ivar key: The HMAC key which will be used.  This attribute is assigned
        after instantiation by L{SSHCiphers._getMAC}.
    """
class SSHCiphers:
    """
    SSHCiphers represents all the encryption operations that need to occur
    to encrypt and authenticate the SSH connection.

    @cvar cipherMap: A dictionary mapping SSH encryption names to 3-tuples of
        (<cryptography.hazmat.primitives.interfaces.CipherAlgorithm>,
        <block size>, <cryptography.hazmat.primitives.interfaces.Mode>)
    @cvar macMap: A dictionary mapping SSH MAC names to hash modules.

    @ivar outCipType: the string type of the outgoing cipher.
    @ivar inCipType: the string type of the incoming cipher.
    @ivar outMACType: the string type of the incoming MAC.
    @ivar inMACType: the string type of the incoming MAC.
    @ivar encBlockSize: the block size of the outgoing cipher.
    @ivar decBlockSize: the block size of the incoming cipher.
    @ivar verifyDigestSize: the size of the incoming MAC.
    @ivar outMAC: a tuple of (<hash module>, <inner key>, <outer key>,
        <digest size>) representing the outgoing MAC.
    @ivar inMAc: see outMAC, but for the incoming MAC.
    """
    # Maps SSH wire names to (algorithm class, key size in *bytes*, mode).
    # The IV size is derived from the algorithm's block_size at use time.
    cipherMap = {
        b'3des-cbc': (algorithms.TripleDES, 24, modes.CBC),
        b'blowfish-cbc': (algorithms.Blowfish, 16, modes.CBC),
        b'aes256-cbc': (algorithms.AES, 32, modes.CBC),
        b'aes192-cbc': (algorithms.AES, 24, modes.CBC),
        b'aes128-cbc': (algorithms.AES, 16, modes.CBC),
        b'cast128-cbc': (algorithms.CAST5, 16, modes.CBC),
        b'aes128-ctr': (algorithms.AES, 16, modes.CTR),
        b'aes192-ctr': (algorithms.AES, 24, modes.CTR),
        b'aes256-ctr': (algorithms.AES, 32, modes.CTR),
        b'3des-ctr': (algorithms.TripleDES, 24, modes.CTR),
        b'blowfish-ctr': (algorithms.Blowfish, 16, modes.CTR),
        b'cast128-ctr': (algorithms.CAST5, 16, modes.CTR),
        b'none': (None, 0, modes.CBC),
    }
    # Maps SSH wire names to hashlib digest constructors.
    macMap = {
        b'hmac-sha2-512': sha512,
        b'hmac-sha2-384': sha384,
        b'hmac-sha2-256': sha256,
        b'hmac-sha1': sha1,
        b'hmac-md5': md5,
        b'none': None
    }

    def __init__(self, outCip, inCip, outMac, inMac):
        self.outCipType = outCip
        self.inCipType = inCip
        self.outMACType = outMac
        self.inMACType = inMac
        # Sizes stay 0 and MACs stay empty until setKeys() is called.
        self.encBlockSize = 0
        self.decBlockSize = 0
        self.verifyDigestSize = 0
        self.outMAC = (None, b'', b'', 0)
        self.inMAC = (None, b'', b'', 0)

    def setKeys(self, outIV, outKey, inIV, inKey, outInteg, inInteg):
        """
        Set up the ciphers and hashes using the given keys,

        @param outIV: the outgoing initialization vector
        @param outKey: the outgoing encryption key
        @param inIV: the incoming initialization vector
        @param inKey: the incoming encryption key
        @param outInteg: the outgoing integrity key
        @param inInteg: the incoming integrity key.
        """
        o = self._getCipher(self.outCipType, outIV, outKey)
        self.encryptor = o.encryptor()
        # block_size is reported in bits; SSH framing code wants bytes.
        self.encBlockSize = o.algorithm.block_size // 8
        o = self._getCipher(self.inCipType, inIV, inKey)
        self.decryptor = o.decryptor()
        self.decBlockSize = o.algorithm.block_size // 8
        self.outMAC = self._getMAC(self.outMACType, outInteg)
        self.inMAC = self._getMAC(self.inMACType, inInteg)
        if self.inMAC:
            self.verifyDigestSize = self.inMAC[3]

    def _getCipher(self, cip, iv, key):
        """
        Creates an initialized cipher object.

        @param cip: the name of the cipher, maps into cipherMap
        @param iv: the initialzation vector
        @param key: the encryption key
        @return: the cipher object.
        """
        algorithmClass, keySize, modeClass = self.cipherMap[cip]
        if algorithmClass is None:
            # The b'none' cipher: _DummyCipher (defined elsewhere in this
            # module) passes data through unchanged.
            return _DummyCipher()
        # The derived key material may be longer than needed; truncate the
        # key and IV to the sizes this algorithm expects.
        return Cipher(
            algorithmClass(key[:keySize]),
            modeClass(iv[:algorithmClass.block_size // 8]),
            backend=default_backend(),
        )

    def _getMAC(self, mac, key):
        """
        Gets a 4-tuple representing the message authentication code.
        (<hash module>, <inner hash value>, <outer hash value>,
        <digest size>)

        @type mac: L{bytes}
        @param mac: a key mapping into macMap

        @type key: L{bytes}
        @param key: the MAC key.

        @rtype: L{bytes}
        @return: The MAC components.
        """
        mod = self.macMap[mac]
        if not mod:
            return (None, b'', b'', 0)
        # With stdlib we can only get attributes fron an instantiated object.
        hashObject = mod()
        digestSize = hashObject.digest_size
        blockSize = hashObject.block_size
        # Truncation here appears to contravene RFC 2104, section 2.  However,
        # implementing the hashing behavior prescribed by the RFC breaks
        # interoperability with OpenSSH (at least version 5.5p1).
        key = key[:digestSize] + (b'\x00' * (blockSize - digestSize))
        # Precompute the HMAC inner/outer pads by XORing the key with 0x36
        # and 0x5C.  NOTE(review): hmac.trans_36/trans_5C are CPython
        # implementation details (removed in Python 3.8) -- confirm against
        # the Python versions this vendored copy must support.
        i = key.translate(hmac.trans_36)
        o = key.translate(hmac.trans_5C)
        result = _MACParams((mod, i, o, digestSize))
        result.key = key
        return result

    def encrypt(self, blocks):
        """
        Encrypt some data.

        @type blocks: L{bytes}
        @param blocks: The data to encrypt.

        @rtype: L{bytes}
        @return: The encrypted data.
        """
        return self.encryptor.update(blocks)

    def decrypt(self, blocks):
        """
        Decrypt some data.

        @type blocks: L{bytes}
        @param blocks: The data to decrypt.

        @rtype: L{bytes}
        @return: The decrypted data.
        """
        return self.decryptor.update(blocks)

    def makeMAC(self, seqid, data):
        """
        Create a message authentication code (MAC) for the given packet using
        the outgoing MAC values.

        @type seqid: L{int}
        @param seqid: The sequence ID of the outgoing packet.

        @type data: L{bytes}
        @param data: The data to create a MAC for.

        @rtype: L{str}
        @return: The serialized MAC.
        """
        if not self.outMAC[0]:
            return b''
        # The MAC covers the packet sequence number followed by the
        # unencrypted packet.
        data = struct.pack('>L', seqid) + data
        return hmac.HMAC(self.outMAC.key, data, self.outMAC[0]).digest()

    def verify(self, seqid, data, mac):
        """
        Verify an incoming MAC using the incoming MAC values.

        @type seqid: L{int}
        @param seqid: The sequence ID of the incoming packet.

        @type data: L{bytes}
        @param data: The packet data to verify.

        @type mac: L{bytes}
        @param mac: The MAC sent with the packet.

        @rtype: L{bool}
        @return: C{True} if the MAC is valid.
        """
        if not self.inMAC[0]:
            # No MAC negotiated: only an empty MAC is acceptable.
            return mac == b''
        data = struct.pack('>L', seqid) + data
        outer = hmac.HMAC(self.inMAC.key, data, self.inMAC[0]).digest()
        return mac == outer
def _getSupportedCiphers():
    """
    Build a list of ciphers that are supported by the backend in use.

    @return: a list of supported ciphers.
    @rtype: L{list} of L{str}
    """
    candidates = [b'aes256-ctr', b'aes256-cbc', b'aes192-ctr', b'aes192-cbc',
                  b'aes128-ctr', b'aes128-cbc', b'cast128-ctr', b'cast128-cbc',
                  b'blowfish-ctr', b'blowfish-cbc', b'3des-ctr', b'3des-cbc']

    def _backendSupports(cipherName):
        # Instantiating an encryptor with a dummy key/IV is the only
        # reliable way to discover whether the backend supports it.
        algorithmClass, keySize, modeClass = SSHCiphers.cipherMap[cipherName]
        try:
            Cipher(
                algorithmClass(b' ' * keySize),
                modeClass(b' ' * (algorithmClass.block_size // 8)),
                backend=default_backend(),
            ).encryptor()
        except UnsupportedAlgorithm:
            return False
        return True

    return [cipher for cipher in candidates if _backendSupports(cipher)]
class SSHTransportBase(protocol.Protocol):
"""
Protocol supporting basic SSH functionality: sending/receiving packets
and message dispatch. To connect to or run a server, you must use
SSHClientTransport or SSHServerTransport.
@ivar protocolVersion: A string representing the version of the SSH
protocol we support. Currently defaults to '2.0'.
@ivar version: A string representing the version of the server or client.
Currently defaults to 'Twisted'.
@ivar comment: An optional string giving more information about the
server or client.
@ivar supportedCiphers: A list of strings representing the encryption
algorithms supported, in order from most-preferred to least.
@ivar supportedMACs: A list of strings representing the message
authentication codes (hashes) supported, in order from most-preferred
to least. Both this and supportedCiphers can include 'none' to use
no encryption or authentication, but that must be done manually,
@ivar supportedKeyExchanges: A list of strings representing the
key exchanges supported, in order from most-preferred to least.
@ivar supportedPublicKeys: A list of strings representing the
public key types supported, in order from most-preferred to least.
@ivar supportedCompressions: A list of strings representing compression
types supported, from most-preferred to least.
@ivar supportedLanguages: A list of strings representing languages
supported, from most-preferred to least.
@ivar supportedVersions: A container of strings representing supported ssh
protocol version numbers.
@ivar isClient: A boolean indicating whether this is a client or server.
@ivar gotVersion: A boolean indicating whether we have received the
version string from the other side.
@ivar buf: Data we've received but hasn't been parsed into a packet.
@ivar outgoingPacketSequence: the sequence number of the next packet we
will send.
@ivar incomingPacketSequence: the sequence number of the next packet we
are expecting from the other side.
@ivar outgoingCompression: an object supporting the .compress(str) and
.flush() methods, or None if there is no outgoing compression. Used to
compress outgoing data.
@ivar outgoingCompressionType: A string representing the outgoing
compression type.
@ivar incomingCompression: an object supporting the .decompress(str)
method, or None if there is no incoming compression. Used to
decompress incoming data.
@ivar incomingCompressionType: A string representing the incoming
compression type.
@ivar ourVersionString: the version string that we sent to the other side.
Used in the key exchange.
@ivar otherVersionString: the version string sent by the other side. Used
in the key exchange.
@ivar ourKexInitPayload: the MSG_KEXINIT payload we sent. Used in the key
exchange.
@ivar otherKexInitPayload: the MSG_KEXINIT payload we received. Used in
the key exchange
@ivar sessionID: a string that is unique to this SSH session. Created as
part of the key exchange, sessionID is used to generate the various
encryption and authentication keys.
@ivar service: an SSHService instance, or None. If it's set to an object,
it's the currently running service.
@ivar kexAlg: the agreed-upon key exchange algorithm.
@ivar keyAlg: the agreed-upon public key type for the key exchange.
@ivar currentEncryptions: an SSHCiphers instance. It represents the
current encryption and authentication options for the transport.
@ivar nextEncryptions: an SSHCiphers instance. Held here until the
MSG_NEWKEYS messages are exchanged, when nextEncryptions is
transitioned to currentEncryptions.
@ivar first: the first bytes of the next packet. In order to avoid
decrypting data twice, the first bytes are decrypted and stored until
the whole packet is available.
@ivar _keyExchangeState: The current protocol state with respect to key
exchange. This is either C{_KEY_EXCHANGE_NONE} if no key exchange is
in progress (and returns to this value after any key exchange
completqes), C{_KEY_EXCHANGE_REQUESTED} if this side of the connection
initiated a key exchange, and C{_KEY_EXCHANGE_PROGRESSING} if the other
side of the connection initiated a key exchange. C{_KEY_EXCHANGE_NONE}
is the initial value (however SSH connections begin with key exchange,
so it will quickly change to another state).
@ivar _blockedByKeyExchange: Whenever C{_keyExchangeState} is not
C{_KEY_EXCHANGE_NONE}, this is a C{list} of pending messages which were
passed to L{sendPacket} but could not be sent because it is not legal
to send them while a key exchange is in progress. When the key
exchange completes, another attempt is made to send these messages.
"""
protocolVersion = b'2.0'
version = b'Twisted'
comment = b''
ourVersionString = (b'SSH-' + protocolVersion + b'-' + version + b' '
+ comment).strip()
# L{None} is supported as cipher and hmac. For security they are disabled
# by default. To enable them, subclass this class and add it, or do:
# SSHTransportBase.supportedCiphers.append('none')
# List ordered by preference.
supportedCiphers = _getSupportedCiphers()
supportedMACs = [
b'hmac-sha2-512',
b'hmac-sha2-384',
b'hmac-sha2-256',
b'hmac-sha1',
b'hmac-md5',
# `none`,
]
supportedKeyExchanges = _kex.getSupportedKeyExchanges()
supportedPublicKeys = []
# Add the supported EC keys, and change the name from ecdh* to ecdsa*
for eckey in supportedKeyExchanges:
if eckey.find(b'ecdh') != -1:
supportedPublicKeys += [eckey.replace(b'ecdh', b'ecdsa')]
supportedPublicKeys += [b'ssh-rsa', b'ssh-dss']
supportedCompressions = [b'none', b'zlib']
supportedLanguages = ()
supportedVersions = (b'1.99', b'2.0')
isClient = False
gotVersion = False
buf = b''
outgoingPacketSequence = 0
incomingPacketSequence = 0
outgoingCompression = None
incomingCompression = None
sessionID = None
service = None
# There is no key exchange activity in progress.
_KEY_EXCHANGE_NONE = '_KEY_EXCHANGE_NONE'
# Key exchange is in progress and we started it.
_KEY_EXCHANGE_REQUESTED = '_KEY_EXCHANGE_REQUESTED'
# Key exchange is in progress and both sides have sent KEXINIT messages.
_KEY_EXCHANGE_PROGRESSING = '_KEY_EXCHANGE_PROGRESSING'
# There is a fourth conceptual state not represented here: KEXINIT received
# but not sent. Since we always send a KEXINIT as soon as we get it, we
# can't ever be in that state.
# The current key exchange state.
_keyExchangeState = _KEY_EXCHANGE_NONE
_blockedByKeyExchange = None
def connectionLost(self, reason):
"""
When the underlying connection is closed, stop the running service (if
any), and log out the avatar (if any).
@type reason: L{twisted.python.failure.Failure}
@param reason: The cause of the connection being closed.
"""
if self.service:
self.service.serviceStopped()
if hasattr(self, 'avatar'):
self.logoutFunction()
log.msg('connection lost')
def connectionMade(self):
"""
Called when the connection is made to the other side. We sent our
version and the MSG_KEXINIT packet.
"""
self.transport.write(self.ourVersionString + b'\r\n')
self.currentEncryptions = SSHCiphers(b'none', b'none', b'none',
b'none')
self.currentEncryptions.setKeys(b'', b'', b'', b'', b'', b'')
self.sendKexInit()
def sendKexInit(self):
"""
Send a I{KEXINIT} message to initiate key exchange or to respond to a
key exchange initiated by the peer.
@raise RuntimeError: If a key exchange has already been started and it
is not appropriate to send a I{KEXINIT} message at this time.
@return: L{None}
"""
if self._keyExchangeState != self._KEY_EXCHANGE_NONE:
raise RuntimeError(
"Cannot send KEXINIT while key exchange state is %r" % (
self._keyExchangeState,))
self.ourKexInitPayload = b''.join([
chr(MSG_KEXINIT),
randbytes.secureRandom(16),
NS(b','.join(self.supportedKeyExchanges)),
NS(b','.join(self.supportedPublicKeys)),
NS(b','.join(self.supportedCiphers)),
NS(b','.join(self.supportedCiphers)),
NS(b','.join(self.supportedMACs)),
NS(b','.join(self.supportedMACs)),
NS(b','.join(self.supportedCompressions)),
NS(b','.join(self.supportedCompressions)),
NS(b','.join(self.supportedLanguages)),
NS(b','.join(self.supportedLanguages)),
b'\000\000\000\000\000'])
self.sendPacket(MSG_KEXINIT, self.ourKexInitPayload[1:])
self._keyExchangeState = self._KEY_EXCHANGE_REQUESTED
self._blockedByKeyExchange = []
def _allowedKeyExchangeMessageType(self, messageType):
"""
Determine if the given message type may be sent while key exchange is
in progress.
@param messageType: The type of message
@type messageType: L{int}
@return: C{True} if the given type of message may be sent while key
exchange is in progress, C{False} if it may not.
@rtype: L{bool}
@see: U{http://tools.ietf.org/html/rfc4253#section-7.1}
"""
# Written somewhat peculularly to reflect the way the specification
# defines the allowed message types.
if 1 <= messageType <= 19:
return messageType not in (MSG_SERVICE_REQUEST, MSG_SERVICE_ACCEPT)
if 20 <= messageType <= 29:
return messageType not in (MSG_KEXINIT,)
return 30 <= messageType <= 49
    def sendPacket(self, messageType, payload):
        """
        Sends a packet.  If it's been set up, compress the data, encrypt it,
        and authenticate it before sending.  If key exchange is in progress
        and the message is not part of key exchange, queue it to be sent
        later.

        @param messageType: The type of the packet; generally one of the
            MSG_* values.
        @type messageType: L{int}
        @param payload: The payload for the message.
        @type payload: L{str}
        """
        if self._keyExchangeState != self._KEY_EXCHANGE_NONE:
            if not self._allowedKeyExchangeMessageType(messageType):
                # Not legal during key exchange; _newKeys() flushes these
                # once the exchange completes.
                self._blockedByKeyExchange.append((messageType, payload))
                return

        payload = chr(messageType) + payload
        if self.outgoingCompression:
            # flush(2) is Z_SYNC_FLUSH, so the peer can decompress this
            # packet without waiting for further data.
            payload = (self.outgoingCompression.compress(payload)
                       + self.outgoingCompression.flush(2))
        bs = self.currentEncryptions.encBlockSize
        # 4 for the packet length and 1 for the padding length
        totalSize = 5 + len(payload)
        lenPad = bs - (totalSize % bs)
        if lenPad < 4:
            # At least 4 bytes of random padding are required, so pad out
            # to the next cipher block when the remainder is too small.
            lenPad = lenPad + bs
        packet = (struct.pack('!LB',
                              totalSize + lenPad - 4, lenPad) +
                  payload + randbytes.secureRandom(lenPad))
        # The MAC is computed over the plaintext packet (with its sequence
        # number) and appended outside the encryption.
        encPacket = (
            self.currentEncryptions.encrypt(packet) +
            self.currentEncryptions.makeMAC(
                self.outgoingPacketSequence, packet))
        self.transport.write(encPacket)
        self.outgoingPacketSequence += 1
    def getPacket(self):
        """
        Try to return a decrypted, authenticated, and decompressed packet
        out of the buffer.  If there is not enough data, return None.

        @rtype: L{str} or L{None}
        @return: The decoded packet, if any.
        """
        bs = self.currentEncryptions.decBlockSize
        ms = self.currentEncryptions.verifyDigestSize
        if len(self.buf) < bs:
            # Not enough data for a block
            return
        if not hasattr(self, 'first'):
            first = self.currentEncryptions.decrypt(self.buf[:bs])
        else:
            # The first block was already decrypted on an earlier call while
            # we waited for the rest of the packet to arrive; CBC/CTR state
            # means we must not decrypt it twice.
            first = self.first
            del self.first
        packetLen, paddingLen = struct.unpack('!LB', first[:5])
        if packetLen > 1048576: # 1024 ** 2
            # Sanity cap on packet length to avoid absurd allocations.
            self.sendDisconnect(
                DISCONNECT_PROTOCOL_ERROR,
                networkString('bad packet length %s' % (packetLen,)))
            return
        if len(self.buf) < packetLen + 4 + ms:
            # Not enough data for a packet
            self.first = first
            return
        if (packetLen + 4) % bs != 0:
            # The encrypted portion must be a whole number of cipher blocks.
            self.sendDisconnect(
                DISCONNECT_PROTOCOL_ERROR,
                networkString(
                    'bad packet mod (%i%%%i == %i)' % (
                        packetLen + 4, bs,(packetLen + 4) % bs)))
            return
        encData, self.buf = self.buf[:4 + packetLen], self.buf[4 + packetLen:]
        packet = first + self.currentEncryptions.decrypt(encData[bs:])
        if len(packet) != 4 + packetLen:
            self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR,
                                b'bad decryption')
            return
        if ms:
            # The MAC trails the encrypted data and covers the plaintext
            # packet plus its sequence number.
            macData, self.buf = self.buf[:ms], self.buf[ms:]
            if not self.currentEncryptions.verify(self.incomingPacketSequence,
                                                  packet, macData):
                self.sendDisconnect(DISCONNECT_MAC_ERROR, b'bad MAC')
                return
        # Strip the 5 header bytes and the trailing random padding.
        payload = packet[5:-paddingLen]
        if self.incomingCompression:
            try:
                payload = self.incomingCompression.decompress(payload)
            except:
                # Tolerate any errors in decompression
                log.err()
                self.sendDisconnect(DISCONNECT_COMPRESSION_ERROR,
                                    b'compression error')
                return
        self.incomingPacketSequence += 1
        return payload
def _unsupportedVersionReceived(self, remoteVersion):
"""
Called when an unsupported version of the ssh protocol is received from
the remote endpoint.
@param remoteVersion: remote ssh protocol version which is unsupported
by us.
@type remoteVersion: L{str}
"""
self.sendDisconnect(DISCONNECT_PROTOCOL_VERSION_NOT_SUPPORTED,
b'bad version ' + remoteVersion)
    def dataReceived(self, data):
        """
        First, check for the version string (SSH-2.0-*).  After that has been
        received, this method adds data to the buffer, and pulls out any
        packets.

        @type data: L{bytes}
        @param data: The data that was received.
        """
        self.buf = self.buf + data
        if not self.gotVersion:
            # Wait until a complete line follows the SSH- marker; the peer
            # may send arbitrary banner lines before its version string.
            if self.buf.find(b'\n', self.buf.find(b'SSH-')) == -1:
                return
            lines = self.buf.split(b'\n')
            for p in lines:
                if p.startswith(b'SSH-'):
                    self.gotVersion = True
                    self.otherVersionString = p.strip()
                    # Version strings look like SSH-<protocol>-<software>.
                    remoteVersion = p.split(b'-')[1]
                    if remoteVersion not in self.supportedVersions:
                        self._unsupportedVersionReceived(remoteVersion)
                        return
                    # Everything after the version line is packet data.
                    i = lines.index(p)
                    self.buf = b'\n'.join(lines[i + 1:])
        packet = self.getPacket()
        while packet:
            # The first byte of every packet is its message number.
            messageNum = ord(packet[0:1])
            self.dispatchMessage(messageNum, packet[1:])
            packet = self.getPacket()
def dispatchMessage(self, messageNum, payload):
"""
Send a received message to the appropriate method.
@type messageNum: L{int}
@param messageNum: The message number.
@type payload: L{bytes}
@param payload: The message payload.
"""
if messageNum < 50 and messageNum in messages:
messageType = messages[messageNum][4:]
f = getattr(self, 'ssh_%s' % (messageType,), None)
if f is not None:
f(payload)
else:
log.msg("couldn't handle %s" % messageType)
log.msg(repr(payload))
self.sendUnimplemented()
elif self.service:
log.callWithLogger(self.service, self.service.packetReceived,
messageNum, payload)
else:
log.msg("couldn't handle %s" % messageNum)
log.msg(repr(payload))
self.sendUnimplemented()
def getPeer(self):
"""
Returns an L{SSHTransportAddress} corresponding to the other (peer)
side of this transport.
@return: L{SSHTransportAddress} for the peer
@rtype: L{SSHTransportAddress}
@since: 12.1
"""
return address.SSHTransportAddress(self.transport.getPeer())
def getHost(self):
"""
Returns an L{SSHTransportAddress} corresponding to the this side of
transport.
@return: L{SSHTransportAddress} for the peer
@rtype: L{SSHTransportAddress}
@since: 12.1
"""
return address.SSHTransportAddress(self.transport.getHost())
    @property
    def kexAlg(self):
        """
        The key exchange algorithm name agreed between client and server.
        """
        return self._kexAlg

    @kexAlg.setter
    def kexAlg(self, value):
        """
        Set the key exchange algorithm name.

        @param value: The key exchange algorithm name to store; read back
            via the C{kexAlg} property.
        """
        self._kexAlg = value
# Client-initiated rekeying looks like this:
#
# C> MSG_KEXINIT
# S> MSG_KEXINIT
# C> MSG_KEX_DH_GEX_REQUEST or MSG_KEXDH_INIT
# S> MSG_KEX_DH_GEX_GROUP or MSG_KEXDH_REPLY
# C> MSG_KEX_DH_GEX_INIT or --
# S> MSG_KEX_DH_GEX_REPLY or --
# C> MSG_NEWKEYS
# S> MSG_NEWKEYS
#
# Server-initiated rekeying is the same, only the first two messages are
# switched.
def ssh_KEXINIT(self, packet):
"""
Called when we receive a MSG_KEXINIT message. Payload::
bytes[16] cookie
string keyExchangeAlgorithms
string keyAlgorithms
string incomingEncryptions
string outgoingEncryptions
string incomingAuthentications
string outgoingAuthentications
string incomingCompressions
string outgoingCompressions
string incomingLanguages
string outgoingLanguages
bool firstPacketFollows
unit32 0 (reserved)
Starts setting up the key exchange, keys, encryptions, and
authentications. Extended by ssh_KEXINIT in SSHServerTransport and
SSHClientTransport.
@type packet: L{bytes}
@param packet: The message data.
@return: A L{tuple} of negotiated key exchange algorithms, key
algorithms, and unhandled data, or L{None} if something went wrong.
"""
self.otherKexInitPayload = chr(MSG_KEXINIT) + packet
# This is useless to us:
# cookie = packet[: 16]
k = getNS(packet[16:], 10)
strings, rest = k[:-1], k[-1]
(kexAlgs, keyAlgs, encCS, encSC, macCS, macSC, compCS, compSC, langCS,
langSC) = [s.split(b',') for s in strings]
# These are the server directions
outs = [encSC, macSC, compSC]
ins = [encCS, macSC, compCS]
if self.isClient:
outs, ins = ins, outs # Switch directions
server = (self.supportedKeyExchanges, self.supportedPublicKeys,
self.supportedCiphers, self.supportedCiphers,
self.supportedMACs, self.supportedMACs,
self.supportedCompressions, self.supportedCompressions)
client = (kexAlgs, keyAlgs, outs[0], ins[0], outs[1], ins[1],
outs[2], ins[2])
if self.isClient:
server, client = client, server
self.kexAlg = ffs(client[0], server[0])
self.keyAlg = ffs(client[1], server[1])
self.nextEncryptions = SSHCiphers(
ffs(client[2], server[2]),
ffs(client[3], server[3]),
ffs(client[4], server[4]),
ffs(client[5], server[5]))
self.outgoingCompressionType = ffs(client[6], server[6])
self.incomingCompressionType = ffs(client[7], server[7])
if None in (self.kexAlg, self.keyAlg, self.outgoingCompressionType,
self.incomingCompressionType):
self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED,
b"couldn't match all kex parts")
return
if None in self.nextEncryptions.__dict__.values():
self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED,
b"couldn't match all kex parts")
return
log.msg('kex alg, key alg: %r %r' % (self.kexAlg, self.keyAlg))
log.msg('outgoing: %r %r %r' % (self.nextEncryptions.outCipType,
self.nextEncryptions.outMACType,
self.outgoingCompressionType))
log.msg('incoming: %r %r %r' % (self.nextEncryptions.inCipType,
self.nextEncryptions.inMACType,
self.incomingCompressionType))
if self._keyExchangeState == self._KEY_EXCHANGE_REQUESTED:
self._keyExchangeState = self._KEY_EXCHANGE_PROGRESSING
else:
self.sendKexInit()
return kexAlgs, keyAlgs, rest # For SSHServerTransport to use
def ssh_DISCONNECT(self, packet):
"""
Called when we receive a MSG_DISCONNECT message. Payload::
long code
string description
This means that the other side has disconnected. Pass the message up
and disconnect ourselves.
@type packet: L{bytes}
@param packet: The message data.
"""
reasonCode = struct.unpack('>L', packet[: 4])[0]
description, foo = getNS(packet[4:])
self.receiveError(reasonCode, description)
self.transport.loseConnection()
    def ssh_IGNORE(self, packet):
        """
        Called when we receive a MSG_IGNORE message.  No payload.
        This means nothing; we simply return.

        @type packet: L{bytes}
        @param packet: The message data.
        """
        # Deliberately a no-op: MSG_IGNORE lets peers pad the stream (e.g.
        # to frustrate traffic analysis), and any payload is discarded.
def ssh_UNIMPLEMENTED(self, packet):
"""
Called when we receive a MSG_UNIMPLEMENTED message. Payload::
long packet
This means that the other side did not implement one of our packets.
@type packet: L{bytes}
@param packet: The message data.
"""
seqnum, = struct.unpack('>L', packet)
self.receiveUnimplemented(seqnum)
def ssh_DEBUG(self, packet):
"""
Called when we receive a MSG_DEBUG message. Payload::
bool alwaysDisplay
string message
string language
This means the other side has passed along some debugging info.
@type packet: L{bytes}
@param packet: The message data.
"""
alwaysDisplay = bool(packet[0])
message, lang, foo = getNS(packet[1:], 2)
self.receiveDebug(alwaysDisplay, message, lang)
def setService(self, service):
"""
Set our service to service and start it running. If we were
running a service previously, stop it first.
@type service: C{SSHService}
@param service: The service to attach.
"""
log.msg('starting service %r' % (service.name,))
if self.service:
self.service.serviceStopped()
self.service = service
service.transport = self
self.service.serviceStarted()
def sendDebug(self, message, alwaysDisplay=False, language=b''):
"""
Send a debug message to the other side.
@param message: the message to send.
@type message: L{str}
@param alwaysDisplay: if True, tell the other side to always
display this message.
@type alwaysDisplay: L{bool}
@param language: optionally, the language the message is in.
@type language: L{str}
"""
self.sendPacket(MSG_DEBUG, chr(alwaysDisplay) + NS(message) +
NS(language))
def sendIgnore(self, message):
"""
Send a message that will be ignored by the other side. This is
useful to fool attacks based on guessing packet sizes in the
encrypted stream.
@param message: data to send with the message
@type message: L{str}
"""
self.sendPacket(MSG_IGNORE, NS(message))
def sendUnimplemented(self):
"""
Send a message to the other side that the last packet was not
understood.
"""
seqnum = self.incomingPacketSequence
self.sendPacket(MSG_UNIMPLEMENTED, struct.pack('!L', seqnum))
def sendDisconnect(self, reason, desc):
"""
Send a disconnect message to the other side and then disconnect.
@param reason: the reason for the disconnect. Should be one of the
DISCONNECT_* values.
@type reason: L{int}
@param desc: a descrption of the reason for the disconnection.
@type desc: L{str}
"""
self.sendPacket(
MSG_DISCONNECT, struct.pack('>L', reason) + NS(desc) + NS(b''))
log.msg('Disconnecting with error, code %s\nreason: %s' % (reason,
desc))
self.transport.loseConnection()
def _getKey(self, c, sharedSecret, exchangeHash):
"""
Get one of the keys for authentication/encryption.
@type c: L{bytes}
@param c: The letter identifying which key this is.
@type sharedSecret: L{bytes}
@param sharedSecret: The shared secret K.
@type exchangeHash: L{bytes}
@param exchangeHash: The hash H from key exchange.
@rtype: L{bytes}
@return: The derived key.
"""
hashProcessor = _kex.getHashProcessor(self.kexAlg)
k1 = hashProcessor(sharedSecret + exchangeHash + c + self.sessionID)
k1 = k1.digest()
k2 = hashProcessor(sharedSecret + exchangeHash + k1).digest()
return k1 + k2
    def _keySetup(self, sharedSecret, exchangeHash):
        """
        Set up the keys for the connection and sends MSG_NEWKEYS when
        finished,

        @param sharedSecret: a secret string agreed upon using a Diffie-
            Hellman exchange, so it is only shared between the server and
            the client.
        @type sharedSecret: L{str}
        @param exchangeHash: A hash of various data known by both sides.
        @type exchangeHash: L{str}
        """
        if not self.sessionID:
            # The session identifier is the exchange hash of the first key
            # exchange and is kept for the lifetime of the connection.
            self.sessionID = exchangeHash
        # Letters A..F select the six derived keys: IVs, encryption keys
        # and integrity keys for each direction (CS = client-to-server,
        # SC = server-to-client).
        initIVCS = self._getKey(b'A', sharedSecret, exchangeHash)
        initIVSC = self._getKey(b'B', sharedSecret, exchangeHash)
        encKeyCS = self._getKey(b'C', sharedSecret, exchangeHash)
        encKeySC = self._getKey(b'D', sharedSecret, exchangeHash)
        integKeyCS = self._getKey(b'E', sharedSecret, exchangeHash)
        integKeySC = self._getKey(b'F', sharedSecret, exchangeHash)
        outs = [initIVSC, encKeySC, integKeySC]
        ins = [initIVCS, encKeyCS, integKeyCS]
        if self.isClient: # Reverse for the client
            log.msg('REVERSE')
            outs, ins = ins, outs
        self.nextEncryptions.setKeys(outs[0], outs[1], ins[0], ins[1],
                                     outs[2], ins[2])
        self.sendPacket(MSG_NEWKEYS, b'')
def _newKeys(self):
    """
    Called back by a subclass once a I{MSG_NEWKEYS} message has been
    received. This indicates key exchange has completed and new encryption
    and compression parameters should be adopted. Any messages which were
    queued during key exchange will also be flushed.
    """
    log.msg('NEW KEYS')
    self.currentEncryptions = self.nextEncryptions
    if self.outgoingCompressionType == b'zlib':
        self.outgoingCompression = zlib.compressobj(6)
    if self.incomingCompressionType == b'zlib':
        self.incomingCompression = zlib.decompressobj()
    self._keyExchangeState = self._KEY_EXCHANGE_NONE
    # Flush anything queued while the exchange was in progress.
    queued, self._blockedByKeyExchange = self._blockedByKeyExchange, None
    for messageType, payload in queued:
        self.sendPacket(messageType, payload)
def isEncrypted(self, direction="out"):
    """
    Check if the connection is encrypted in the given direction.

    @type direction: L{str}
    @param direction: The direction: one of 'out', 'in', or 'both'.
    @rtype: L{bool}
    @return: C{True} if it is encrypted.
    """
    if direction == "both":
        return self.isEncrypted("in") and self.isEncrypted("out")
    if direction == "out":
        return self.currentEncryptions.outCipType != b'none'
    if direction == "in":
        return self.currentEncryptions.inCipType != b'none'
    raise TypeError('direction must be "out", "in", or "both"')
def isVerified(self, direction="out"):
    """
    Check whether the connection is MAC-authenticated in the given
    direction.

    @type direction: L{str}
    @param direction: The direction: one of 'out', 'in', or 'both'.
    @rtype: L{bool}
    @return: C{True} if it is verified.
    """
    if direction == "both":
        return self.isVerified("in") and self.isVerified("out")
    if direction == "out":
        return self.currentEncryptions.outMACType != b'none'
    if direction == "in":
        return self.currentEncryptions.inMACType != b'none'
    raise TypeError('direction must be "out", "in", or "both"')
def loseConnection(self):
    """
    Terminate the session by sending a DISCONNECT_CONNECTION_LOST
    message to the other side; sendDisconnect also closes the
    transport.
    """
    self.sendDisconnect(
        DISCONNECT_CONNECTION_LOST, b"user closed connection")
# Client methods
def receiveError(self, reasonCode, description):
    """
    Called when we receive a disconnect error message from the other
    side.

    @param reasonCode: the reason for the disconnect, one of the
        DISCONNECT_ values.
    @type reasonCode: L{int}
    @param description: a human-readable description of the
        disconnection.
    @type description: L{str}
    """
    details = 'Got remote error, code %s\nreason: %s' % (reasonCode,
                                                         description)
    log.msg(details)
def receiveUnimplemented(self, seqnum):
    """
    Called when we receive an unimplemented packet message from the other
    side.

    @param seqnum: the sequence number that was not understood.
    @type seqnum: L{int}
    """
    notice = 'other side unimplemented packet #%s' % (seqnum,)
    log.msg(notice)
def receiveDebug(self, alwaysDisplay, message, lang):
    """
    Called when we receive a debug message from the other side.

    @param alwaysDisplay: if True, this message should always be
        displayed.
    @type alwaysDisplay: L{bool}
    @param message: the debug message
    @type message: L{str}
    @param lang: optionally the language the message is in.
    @type lang: L{str}
    """
    if not alwaysDisplay:
        return
    log.msg('Remote Debug Message: %s' % (message,))
class SSHServerTransport(SSHTransportBase):
    """
    SSHServerTransport implements the server side of the SSH protocol.

    @ivar isClient: since we are never the client, this is always False.
    @ivar ignoreNextPacket: if True, ignore the next key exchange packet. This
        is set when the client sends a guessed key exchange packet but with
        an incorrect guess.
    @ivar dhGexRequest: the KEX_DH_GEX_REQUEST(_OLD) that the client sent.
        The key generation needs this to be stored.
    @ivar g: the Diffie-Hellman group generator.
    @ivar p: the Diffie-Hellman group prime.
    """

    isClient = False
    ignoreNextPacket = 0

    def ssh_KEXINIT(self, packet):
        """
        Called when we receive a MSG_KEXINIT message. For a description
        of the packet, see SSHTransportBase.ssh_KEXINIT(). Additionally,
        this method checks if a guessed key exchange packet was sent. If
        it was sent, and it guessed incorrectly, the next key exchange
        packet MUST be ignored.
        """
        retval = SSHTransportBase.ssh_KEXINIT(self, packet)
        if not retval:  # Disconnected
            return
        else:
            kexAlgs, keyAlgs, rest = retval
        if ord(rest[0:1]):  # Flag first_kex_packet_follows?
            if (kexAlgs[0] != self.supportedKeyExchanges[0] or
                    keyAlgs[0] != self.supportedPublicKeys[0]):
                self.ignoreNextPacket = True  # Guess was wrong

    def _ssh_KEX_ECDH_INIT(self, packet):
        """
        Called from L{ssh_KEX_DH_GEX_REQUEST_OLD} to handle
        elliptic curve key exchanges.

        Payload::
            string client Elliptic Curve Diffie-Hellman public key

        Just like L{_ssh_KEXDH_INIT} this message type is also not dispatched
        directly. Extra check to determine if this is really KEX_ECDH_INIT
        is required.

        First we load the host's public/private keys.
        Then we generate the ECDH public/private keypair for the given curve.
        With that we generate the shared secret key.
        Then we compute the hash to sign and send back to the client
        Along with the server's public key and the ECDH public key.

        @type packet: L{bytes}
        @param packet: The message data.

        @return: None.
        """
        # Get the raw client public key.
        pktPub, packet = getNS(packet)
        # Get the host's public and private keys
        pubHostKey = self.factory.publicKeys[self.keyAlg]
        privHostKey = self.factory.privateKeys[self.keyAlg]
        # Get the curve instance
        try:
            curve = keys._curveTable[b'ecdsa' + self.kexAlg[4:]]
        except KeyError:
            raise UnsupportedAlgorithm('unused-key')
        # Generate the private key
        ecPriv = ec.generate_private_key(curve, default_backend())
        # Get the public key
        ecPub = ecPriv.public_key()
        encPub = ecPub.public_numbers().encode_point()
        # Take the provided public key and transform it into
        # a format for the cryptography module
        theirECPub = ec.EllipticCurvePublicNumbers.from_encoded_point(
            curve, pktPub).public_key(default_backend())
        # We need to convert to hex,
        # so we can convert to an int
        # so we can make it a multiple precision int.
        sharedSecret = MP(
            int(
                binascii.hexlify(
                    ecPriv.exchange(ec.ECDH(), theirECPub)), 16))
        # Finish update and digest
        h = _kex.getHashProcessor(self.kexAlg)()
        h.update(NS(self.otherVersionString))
        h.update(NS(self.ourVersionString))
        h.update(NS(self.otherKexInitPayload))
        h.update(NS(self.ourKexInitPayload))
        h.update(NS(pubHostKey.blob()))
        h.update(NS(pktPub))
        h.update(NS(encPub))
        h.update(sharedSecret)
        exchangeHash = h.digest()
        self.sendPacket(
            MSG_KEXDH_REPLY,
            NS(pubHostKey.blob()) + NS(encPub) +
            NS(privHostKey.sign(exchangeHash)))
        self._keySetup(sharedSecret, exchangeHash)

    def _ssh_KEXDH_INIT(self, packet):
        """
        Called to handle the beginning of a non-group key exchange.

        Unlike other message types, this is not dispatched automatically. It
        is called from C{ssh_KEX_DH_GEX_REQUEST_OLD} because an extra check is
        required to determine if this is really a KEXDH_INIT message or if it
        is a KEX_DH_GEX_REQUEST_OLD message.

        The KEXDH_INIT payload::

            integer e (the client's Diffie-Hellman public key)

        We send the KEXDH_REPLY with our host key and signature.

        @type packet: L{bytes}
        @param packet: The message data.
        """
        clientDHpublicKey, foo = getMP(packet)
        y = _getRandomNumber(randbytes.secureRandom, 512)
        self.g, self.p = _kex.getDHGeneratorAndPrime(self.kexAlg)
        serverDHpublicKey = _MPpow(self.g, y, self.p)
        sharedSecret = _MPpow(clientDHpublicKey, y, self.p)
        h = sha1()
        h.update(NS(self.otherVersionString))
        h.update(NS(self.ourVersionString))
        h.update(NS(self.otherKexInitPayload))
        h.update(NS(self.ourKexInitPayload))
        h.update(NS(self.factory.publicKeys[self.keyAlg].blob()))
        h.update(MP(clientDHpublicKey))
        h.update(serverDHpublicKey)
        h.update(sharedSecret)
        exchangeHash = h.digest()
        self.sendPacket(
            MSG_KEXDH_REPLY,
            NS(self.factory.publicKeys[self.keyAlg].blob()) +
            serverDHpublicKey +
            NS(self.factory.privateKeys[self.keyAlg].sign(exchangeHash)))
        self._keySetup(sharedSecret, exchangeHash)

    def ssh_KEX_DH_GEX_REQUEST_OLD(self, packet):
        """
        This represents different key exchange methods that share the same
        integer value. If the message is determined to be a KEXDH_INIT,
        L{_ssh_KEXDH_INIT} is called to handle it. If it is a KEX_ECDH_INIT,
        L{_ssh_KEX_ECDH_INIT} is called.
        Otherwise, for KEX_DH_GEX_REQUEST_OLD payload::

            integer ideal (ideal size for the Diffie-Hellman prime)

        We send the KEX_DH_GEX_GROUP message with the group that is
        closest in size to ideal.

        If we were told to ignore the next key exchange packet by ssh_KEXINIT,
        drop it on the floor and return.

        @type packet: L{bytes}
        @param packet: The message data.
        """
        if self.ignoreNextPacket:
            self.ignoreNextPacket = 0
            return

        # KEXDH_INIT, KEX_ECDH_INIT, and KEX_DH_GEX_REQUEST_OLD
        # have the same value, so use another cue
        # to decide what kind of message the peer sent us.
        if _kex.isFixedGroup(self.kexAlg):
            return self._ssh_KEXDH_INIT(packet)
        elif _kex.isEllipticCurve(self.kexAlg):
            return self._ssh_KEX_ECDH_INIT(packet)
        else:
            self.dhGexRequest = packet
            ideal = struct.unpack('>L', packet)[0]
            self.g, self.p = self.factory.getDHPrime(ideal)
            self.sendPacket(MSG_KEX_DH_GEX_GROUP, MP(self.p) + MP(self.g))

    def ssh_KEX_DH_GEX_REQUEST(self, packet):
        """
        Called when we receive a MSG_KEX_DH_GEX_REQUEST message. Payload::

            integer minimum
            integer ideal
            integer maximum

        The client is asking for a Diffie-Hellman group between minimum and
        maximum size, and close to ideal if possible. We reply with a
        MSG_KEX_DH_GEX_GROUP message.

        If we were told to ignore the next key exchange packet by ssh_KEXINIT,
        drop it on the floor and return.

        @type packet: L{bytes}
        @param packet: The message data.
        """
        if self.ignoreNextPacket:
            self.ignoreNextPacket = 0
            return
        self.dhGexRequest = packet
        # Don't shadow the min()/max() builtins when unpacking the group
        # size bounds; only the ideal size is consulted here.
        minSize, ideal, maxSize = struct.unpack('>3L', packet)
        self.g, self.p = self.factory.getDHPrime(ideal)
        self.sendPacket(MSG_KEX_DH_GEX_GROUP, MP(self.p) + MP(self.g))

    def ssh_KEX_DH_GEX_INIT(self, packet):
        """
        Called when we get a MSG_KEX_DH_GEX_INIT message. Payload::

            integer e (client DH public key)

        We send the MSG_KEX_DH_GEX_REPLY message with our host key and
        signature.

        @type packet: L{bytes}
        @param packet: The message data.
        """
        clientDHpublicKey, foo = getMP(packet)
        # TODO: we should also look at the value they send to us and reject
        # insecure values of f (if g==2 and f has a single '1' bit while the
        # rest are '0's, then they must have used a small y also).

        # TODO: This could be computed when self.p is set up
        # or do as openssh does and scan f for a single '1' bit instead
        pSize = self.p.bit_length()
        y = _getRandomNumber(randbytes.secureRandom, pSize)

        serverDHpublicKey = _MPpow(self.g, y, self.p)
        sharedSecret = _MPpow(clientDHpublicKey, y, self.p)
        h = _kex.getHashProcessor(self.kexAlg)()
        h.update(NS(self.otherVersionString))
        h.update(NS(self.ourVersionString))
        h.update(NS(self.otherKexInitPayload))
        h.update(NS(self.ourKexInitPayload))
        h.update(NS(self.factory.publicKeys[self.keyAlg].blob()))
        h.update(self.dhGexRequest)
        h.update(MP(self.p))
        h.update(MP(self.g))
        h.update(MP(clientDHpublicKey))
        h.update(serverDHpublicKey)
        h.update(sharedSecret)
        exchangeHash = h.digest()
        self.sendPacket(
            MSG_KEX_DH_GEX_REPLY,
            NS(self.factory.publicKeys[self.keyAlg].blob()) +
            serverDHpublicKey +
            NS(self.factory.privateKeys[self.keyAlg].sign(exchangeHash)))
        self._keySetup(sharedSecret, exchangeHash)

    def ssh_NEWKEYS(self, packet):
        """
        Called when we get a MSG_NEWKEYS message. No payload.
        When we get this, the keys have been set on both sides, and we
        start using them to encrypt and authenticate the connection.

        @type packet: L{bytes}
        @param packet: The message data.
        """
        if packet != b'':
            self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR,
                                b"NEWKEYS takes no data")
            return
        self._newKeys()

    def ssh_SERVICE_REQUEST(self, packet):
        """
        Called when we get a MSG_SERVICE_REQUEST message. Payload::

            string serviceName

        The client has requested a service. If we can start the service,
        start it; otherwise, disconnect with
        DISCONNECT_SERVICE_NOT_AVAILABLE.

        @type packet: L{bytes}
        @param packet: The message data.
        """
        service, rest = getNS(packet)
        cls = self.factory.getService(self, service)
        if not cls:
            self.sendDisconnect(DISCONNECT_SERVICE_NOT_AVAILABLE,
                                b"don't have service " + service)
            return
        else:
            self.sendPacket(MSG_SERVICE_ACCEPT, NS(service))
            self.setService(cls())
class SSHClientTransport(SSHTransportBase):
    """
    SSHClientTransport implements the client side of the SSH protocol.

    @ivar isClient: since we are always the client, this is always True.
    @ivar _gotNewKeys: if we receive a MSG_NEWKEYS message before we are
        ready to transition to the new keys, this is set to True so we
        can transition when the keys are ready locally.
    @ivar x: our Diffie-Hellman private key.
    @ivar e: our Diffie-Hellman public key.
    @ivar g: the Diffie-Hellman group generator.
    @ivar p: the Diffie-Hellman group prime
    @ivar instance: the SSHService object we are requesting.
    @ivar _dhMinimalGroupSize: Minimal acceptable group size advertised by the
        client in MSG_KEX_DH_GEX_REQUEST.
    @type _dhMinimalGroupSize: int
    @ivar _dhMaximalGroupSize: Maximal acceptable group size advertised by the
        client in MSG_KEX_DH_GEX_REQUEST.
    @type _dhMaximalGroupSize: int
    @ivar _dhPreferredGroupSize: Preferred group size advertised by the client
        in MSG_KEX_DH_GEX_REQUEST.
    @type _dhPreferredGroupSize: int
    """
    isClient = True

    # Recommended minimal and maximal values from RFC 4419, 3.
    _dhMinimalGroupSize = 1024
    _dhMaximalGroupSize = 8192
    # FIXME: https://twistedmatrix.com/trac/ticket/8103
    # This may need to be more dynamic; compare kexgex_client in
    # OpenSSH.
    _dhPreferredGroupSize = 2048

    def connectionMade(self):
        """
        Called when the connection is started with the server. Just sets
        up a private instance variable.
        """
        SSHTransportBase.connectionMade(self)
        self._gotNewKeys = 0

    def ssh_KEXINIT(self, packet):
        """
        Called when we receive a MSG_KEXINIT message. For a description
        of the packet, see SSHTransportBase.ssh_KEXINIT(). Additionally,
        this method sends the first key exchange packet.

        If the agreed-upon exchange is ECDH, generate a key pair for the
        corresponding curve and send the public key.

        If the agreed-upon exchange has a fixed prime/generator group,
        generate a public key and send it in a MSG_KEXDH_INIT message.
        Otherwise, ask for a 2048 bit group with a MSG_KEX_DH_GEX_REQUEST
        message.
        """
        if SSHTransportBase.ssh_KEXINIT(self, packet) is None:
            # Connection was disconnected while doing base processing.
            # Maybe no common protocols were agreed.
            return
        # Are we using ECDH?
        if _kex.isEllipticCurve(self.kexAlg):
            # Find the base curve info
            self.curve = keys._curveTable[b'ecdsa' + self.kexAlg[4:]]
            # Generate the keys
            self.ecPriv = ec.generate_private_key(self.curve,
                                                  default_backend())
            self.ecPub = self.ecPriv.public_key()
            # DH_GEX_REQUEST_OLD is the same number we need.
            self.sendPacket(
                MSG_KEX_DH_GEX_REQUEST_OLD,
                NS(self.ecPub.public_numbers().encode_point()))
        elif _kex.isFixedGroup(self.kexAlg):
            # We agreed on a fixed group key exchange algorithm.
            self.x = _generateX(randbytes.secureRandom, 512)
            self.g, self.p = _kex.getDHGeneratorAndPrime(self.kexAlg)
            self.e = _MPpow(self.g, self.x, self.p)
            self.sendPacket(MSG_KEXDH_INIT, self.e)
        else:
            # We agreed on a dynamic group. Tell the server what range of
            # group sizes we accept, and what size we prefer; the server
            # will then select a group.
            self.sendPacket(
                MSG_KEX_DH_GEX_REQUEST,
                struct.pack(
                    '!LLL',
                    self._dhMinimalGroupSize,
                    self._dhPreferredGroupSize,
                    self._dhMaximalGroupSize,
                ))

    def _ssh_KEX_ECDH_REPLY(self, packet):
        """
        Called to handle a reply to a ECDH exchange message(KEX_ECDH_INIT).

        Like the handler for I{KEXDH_INIT}, this message type has an
        overlapping value. This method is called from C{ssh_KEX_DH_GEX_GROUP}
        if that method detects a non-group key exchange is in progress.

        Payload::

            string serverHostKey
            string server Elliptic Curve Diffie-Hellman public key
            string signature

        We verify the host key and continue if it passes verificiation.
        Otherwise raise an exception and return.

        @type packet: L{bytes}
        @param packet: The message data.

        @return: A deferred firing when key exchange is complete.
        """
        def _continue_KEX_ECDH_REPLY(ignored, hostKey, pubKey, signature):
            # Save off the host public key.
            theirECHost = hostKey
            # Take the provided public key and transform it into a format
            # for the cryptography module
            theirECPub = ec.EllipticCurvePublicNumbers.from_encoded_point(
                self.curve, pubKey).public_key(
                default_backend())
            # We need to convert to hex,
            # so we can convert to an int
            # so we can make a multiple precision int.
            sharedSecret = MP(
                int(
                    binascii.hexlify(
                        self.ecPriv.exchange(ec.ECDH(), theirECPub)), 16))
            h = _kex.getHashProcessor(self.kexAlg)()
            h.update(NS(self.ourVersionString))
            h.update(NS(self.otherVersionString))
            h.update(NS(self.ourKexInitPayload))
            h.update(NS(self.otherKexInitPayload))
            h.update(NS(theirECHost))
            h.update(NS(self.ecPub.public_numbers().encode_point()))
            h.update(NS(pubKey))
            h.update(sharedSecret)
            exchangeHash = h.digest()
            if not keys.Key.fromString(theirECHost).verify(
                    signature, exchangeHash):
                self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED,
                                    b'bad signature')
            else:
                self._keySetup(sharedSecret, exchangeHash)

        # Get the host public key,
        # the raw ECDH public key bytes and the signature
        hostKey, pubKey, signature, packet = getNS(packet, 3)
        # Easier to comment this out for now than to update all of the tests.
        #fingerprint = nativeString(base64.b64encode(
        #        sha256(hostKey).digest()))
        fingerprint = b':'.join(
            [binascii.hexlify(ch) for ch in iterbytes(md5(hostKey).digest())])
        d = self.verifyHostKey(hostKey, fingerprint)
        d.addCallback(_continue_KEX_ECDH_REPLY, hostKey, pubKey, signature)
        d.addErrback(
            lambda unused: self.sendDisconnect(
                DISCONNECT_HOST_KEY_NOT_VERIFIABLE, b'bad host key'))
        return d

    def _ssh_KEXDH_REPLY(self, packet):
        """
        Called to handle a reply to a non-group key exchange message
        (KEXDH_INIT).

        Like the handler for I{KEXDH_INIT}, this message type has an
        overlapping value. This method is called from C{ssh_KEX_DH_GEX_GROUP}
        if that method detects a non-group key exchange is in progress.

        Payload::

            string serverHostKey
            integer f (server Diffie-Hellman public key)
            string signature

        We verify the host key by calling verifyHostKey, then continue in
        _continueKEXDH_REPLY.

        @type packet: L{bytes}
        @param packet: The message data.

        @return: A deferred firing when key exchange is complete.
        """
        pubKey, packet = getNS(packet)
        f, packet = getMP(packet)
        signature, packet = getNS(packet)
        fingerprint = b':'.join([binascii.hexlify(ch) for ch in
                                 iterbytes(md5(pubKey).digest())])
        d = self.verifyHostKey(pubKey, fingerprint)
        d.addCallback(self._continueKEXDH_REPLY, pubKey, f, signature)
        d.addErrback(
            lambda unused: self.sendDisconnect(
                DISCONNECT_HOST_KEY_NOT_VERIFIABLE, b'bad host key'))
        return d

    def ssh_KEX_DH_GEX_GROUP(self, packet):
        """
        This handles different messages which share an integer value.

        If the key exchange does not have a fixed prime/generator group,
        we generate a Diffie-Hellman public key and send it in a
        MSG_KEX_DH_GEX_INIT message.

        Payload::
            string g (group generator)
            string p (group prime)

        @type packet: L{bytes}
        @param packet: The message data.
        """
        if _kex.isFixedGroup(self.kexAlg):
            return self._ssh_KEXDH_REPLY(packet)
        elif _kex.isEllipticCurve(self.kexAlg):
            return self._ssh_KEX_ECDH_REPLY(packet)
        else:
            self.p, rest = getMP(packet)
            self.g, rest = getMP(rest)
            # NOTE(review): 320-bit x here vs. 512 bits in the fixed-group
            # path of ssh_KEXINIT -- presumably intentional; confirm
            # against the kexgex requirements before changing either.
            self.x = _generateX(randbytes.secureRandom, 320)
            self.e = _MPpow(self.g, self.x, self.p)
            self.sendPacket(MSG_KEX_DH_GEX_INIT, self.e)

    def _continueKEXDH_REPLY(self, ignored, pubKey, f, signature):
        """
        The host key has been verified, so we generate the keys.

        @param ignored: Ignored.

        @param pubKey: the public key blob for the server's public key.
        @type pubKey: L{str}
        @param f: the server's Diffie-Hellman public key.
        @type f: L{long}
        @param signature: the server's signature, verifying that it has the
            correct private key.
        @type signature: L{str}
        """
        serverKey = keys.Key.fromString(pubKey)
        sharedSecret = _MPpow(f, self.x, self.p)
        # Fixed-group (group1/group14) exchanges hash with SHA-1.
        h = sha1()
        h.update(NS(self.ourVersionString))
        h.update(NS(self.otherVersionString))
        h.update(NS(self.ourKexInitPayload))
        h.update(NS(self.otherKexInitPayload))
        h.update(NS(pubKey))
        h.update(self.e)
        h.update(MP(f))
        h.update(sharedSecret)
        exchangeHash = h.digest()
        if not serverKey.verify(signature, exchangeHash):
            self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED,
                                b'bad signature')
            return
        self._keySetup(sharedSecret, exchangeHash)

    def ssh_KEX_DH_GEX_REPLY(self, packet):
        """
        Called when we receive a MSG_KEX_DH_GEX_REPLY message. Payload::

            string server host key
            integer f (server DH public key)

        We verify the host key by calling verifyHostKey, then continue in
        _continueGEX_REPLY.

        @type packet: L{bytes}
        @param packet: The message data.

        @return: A deferred firing once key exchange is complete.
        """
        pubKey, packet = getNS(packet)
        f, packet = getMP(packet)
        signature, packet = getNS(packet)
        fingerprint = b':'.join(
            [binascii.hexlify(c) for c in iterbytes(md5(pubKey).digest())])
        d = self.verifyHostKey(pubKey, fingerprint)
        d.addCallback(self._continueGEX_REPLY, pubKey, f, signature)
        d.addErrback(
            lambda unused: self.sendDisconnect(
                DISCONNECT_HOST_KEY_NOT_VERIFIABLE, b'bad host key'))
        return d

    def _continueGEX_REPLY(self, ignored, pubKey, f, signature):
        """
        The host key has been verified, so we generate the keys.

        @param ignored: Ignored.

        @param pubKey: the public key blob for the server's public key.
        @type pubKey: L{str}
        @param f: the server's Diffie-Hellman public key.
        @type f: L{long}
        @param signature: the server's signature, verifying that it has the
            correct private key.
        @type signature: L{str}
        """
        serverKey = keys.Key.fromString(pubKey)
        sharedSecret = _MPpow(f, self.x, self.p)
        h = _kex.getHashProcessor(self.kexAlg)()
        h.update(NS(self.ourVersionString))
        h.update(NS(self.otherVersionString))
        h.update(NS(self.ourKexInitPayload))
        h.update(NS(self.otherKexInitPayload))
        h.update(NS(pubKey))
        # The group-size bounds must be hashed exactly as they were sent
        # in ssh_KEXINIT's MSG_KEX_DH_GEX_REQUEST.
        h.update(struct.pack(
            '!LLL',
            self._dhMinimalGroupSize,
            self._dhPreferredGroupSize,
            self._dhMaximalGroupSize,
        ))
        h.update(MP(self.p))
        h.update(MP(self.g))
        h.update(self.e)
        h.update(MP(f))
        h.update(sharedSecret)
        exchangeHash = h.digest()
        if not serverKey.verify(signature, exchangeHash):
            self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED,
                                b'bad signature')
            return
        self._keySetup(sharedSecret, exchangeHash)

    def _keySetup(self, sharedSecret, exchangeHash):
        """
        See SSHTransportBase._keySetup().
        """
        SSHTransportBase._keySetup(self, sharedSecret, exchangeHash)
        # If the server's NEWKEYS arrived before our keys were ready,
        # process it now.
        if self._gotNewKeys:
            self.ssh_NEWKEYS(b'')

    def ssh_NEWKEYS(self, packet):
        """
        Called when we receive a MSG_NEWKEYS message. No payload.
        If we've finished setting up our own keys, start using them.
        Otherwise, remember that we've received this message.

        @type packet: L{bytes}
        @param packet: The message data.
        """
        if packet != b'':
            self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR,
                                b"NEWKEYS takes no data")
            return
        # Local key setup hasn't finished yet; defer until _keySetup runs.
        if not self.nextEncryptions.encBlockSize:
            self._gotNewKeys = 1
            return
        self._newKeys()
        self.connectionSecure()

    def ssh_SERVICE_ACCEPT(self, packet):
        """
        Called when we receive a MSG_SERVICE_ACCEPT message. Payload::

            string service name

        Start the service we requested.

        @type packet: L{bytes}
        @param packet: The message data.
        """
        if packet == b'':
            log.msg('got SERVICE_ACCEPT without payload')
        else:
            name = getNS(packet)[0]
            if name != self.instance.name:
                self.sendDisconnect(
                    DISCONNECT_PROTOCOL_ERROR,
                    b"received accept for service we did not request")
        # NOTE(review): setService is reached even after the disconnect
        # above; sendDisconnect drops the transport, so this appears
        # harmless -- confirm.
        self.setService(self.instance)

    def requestService(self, instance):
        """
        Request that a service be run over this transport.

        @type instance: subclass of L{twisted.conch.ssh.service.SSHService}
        @param instance: The service to run.
        """
        self.sendPacket(MSG_SERVICE_REQUEST, NS(instance.name))
        self.instance = instance

    # Client methods

    def verifyHostKey(self, hostKey, fingerprint):
        """
        Returns a Deferred that gets a callback if it is a valid key, or
        an errback if not.

        @type hostKey: L{bytes}
        @param hostKey: The host key to verify.
        @type fingerprint: L{bytes}
        @param fingerprint: The fingerprint of the key.

        @return: A deferred firing with C{True} if the key is valid.
        """
        return defer.fail(NotImplementedError())

    def connectionSecure(self):
        """
        Called when the encryption has been set up. Generally,
        requestService() is called to run another service over the transport.
        """
        raise NotImplementedError()
class _NullEncryptionContext(object):
"""
An encryption context that does not actually encrypt anything.
"""
def update(self, data):
"""
'Encrypt' new data by doing nothing.
@type data: L{bytes}
@param data: The data to 'encrypt'.
@rtype: L{bytes}
@return: The 'encrypted' data.
"""
return data
class _DummyAlgorithm(object):
"""
An encryption algorithm that does not actually encrypt anything.
"""
block_size = 64
class _DummyCipher(object):
    """
    A cipher object for the 'none' encryption method. Its encryptor and
    decryptor both hand back pass-through contexts, and its C{algorithm}
    attribute supplies the block size the transport consults.
    """

    algorithm = _DummyAlgorithm()

    def encryptor(self):
        """
        Build a pass-through encryptor.

        @return: The encryptor.
        """
        return _NullEncryptionContext()

    def decryptor(self):
        """
        Build a pass-through decryptor.

        @return: The decryptor.
        """
        return _NullEncryptionContext()
# Generator and prime for the legacy diffie-hellman-group1-sha1 exchange,
# exposed as module-level constants.
DH_GENERATOR, DH_PRIME = _kex.getDHGeneratorAndPrime(
    b'diffie-hellman-group1-sha1')

# SSH transport-layer message numbers.
MSG_DISCONNECT = 1
MSG_IGNORE = 2
MSG_UNIMPLEMENTED = 3
MSG_DEBUG = 4
MSG_SERVICE_REQUEST = 5
MSG_SERVICE_ACCEPT = 6
MSG_KEXINIT = 20
MSG_NEWKEYS = 21
# Key-exchange message numbers. The fixed-group (MSG_KEXDH_*) and
# group-exchange (MSG_KEX_DH_GEX_*) methods reuse the numeric values 30
# and 31; which meaning applies depends on the negotiated kex algorithm
# (see ssh_KEX_DH_GEX_REQUEST_OLD / ssh_KEX_DH_GEX_GROUP above).
MSG_KEXDH_INIT = 30
MSG_KEXDH_REPLY = 31
MSG_KEX_DH_GEX_REQUEST_OLD = 30
MSG_KEX_DH_GEX_REQUEST = 34
MSG_KEX_DH_GEX_GROUP = 31
MSG_KEX_DH_GEX_INIT = 32
MSG_KEX_DH_GEX_REPLY = 33

# Reason codes carried in MSG_DISCONNECT packets.
DISCONNECT_HOST_NOT_ALLOWED_TO_CONNECT = 1
DISCONNECT_PROTOCOL_ERROR = 2
DISCONNECT_KEY_EXCHANGE_FAILED = 3
DISCONNECT_RESERVED = 4
DISCONNECT_MAC_ERROR = 5
DISCONNECT_COMPRESSION_ERROR = 6
DISCONNECT_SERVICE_NOT_AVAILABLE = 7
DISCONNECT_PROTOCOL_VERSION_NOT_SUPPORTED = 8
DISCONNECT_HOST_KEY_NOT_VERIFIABLE = 9
DISCONNECT_CONNECTION_LOST = 10
DISCONNECT_BY_APPLICATION = 11
DISCONNECT_TOO_MANY_CONNECTIONS = 12
DISCONNECT_AUTH_CANCELLED_BY_USER = 13
DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE = 14
DISCONNECT_ILLEGAL_USER_NAME = 15
# Reverse map from message number to mnemonic, used for logging.
messages = {}
for name, value in list(globals().items()):
    # Avoid legacy messages which overlap with newer ones.
    if name.startswith('MSG_') and not name.startswith('MSG_KEXDH_'):
        messages[value] = name
# Check for regressions (#5352). ``messages`` maps numbers to names, so
# the mnemonics live in the values -- testing ``in messages`` would only
# inspect the integer keys and the check would never fire.
if ('MSG_KEXDH_INIT' in messages.values() or
        'MSG_KEXDH_REPLY' in messages.values()):
    raise RuntimeError(
        "legacy SSH mnemonics should not end up in messages dict")
| gpl-2.0 |
cchurch/ansible | lib/ansible/template/__init__.py | 2 | 35014 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import datetime
import os
import pkgutil
import pwd
import re
import time
from numbers import Number
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.loaders import FileSystemLoader
from jinja2.runtime import Context, StrictUndefined
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils.six import iteritems, string_types, text_type
from ansible.module_utils._text import to_native, to_text, to_bytes
from ansible.module_utils.common._collections_compat import Sequence, Mapping, MutableMapping
from ansible.plugins.loader import filter_loader, lookup_loader, test_loader
from ansible.template.safe_eval import safe_eval
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.display import Display
from ansible.utils.unsafe_proxy import wrap_var
# HACK: keep Python 2.6 controller tests happy in CI until they're properly split
try:
from importlib import import_module
except ImportError:
import_module = __import__
display = Display()

__all__ = ['Templar', 'generate_ansible_template_vars']

# A regex for checking to see if a variable we're trying to
# expand is just a single variable name.
# NOTE(review): the regex this comment refers to is not defined in this
# section of the file -- confirm whether it was moved or removed.

# Primitive Types which we don't want Jinja to convert to strings.
NON_TEMPLATED_TYPES = (bool, Number)

# Prefix marking a per-template jinja2 override header line.
JINJA2_OVERRIDE = '#jinja2:'

USE_JINJA2_NATIVE = False
if C.DEFAULT_JINJA2_NATIVE:
    try:
        from jinja2.nativetypes import NativeEnvironment as Environment
        from ansible.template.native_helpers import ansible_native_concat as j2_concat
        USE_JINJA2_NATIVE = True
    except ImportError:
        # jinja2_native needs Jinja2 >= 2.10; warn and fall back to the
        # regular (string-producing) environment instead of failing.
        from jinja2 import Environment
        from jinja2.utils import concat as j2_concat
        from jinja2 import __version__ as j2_version
        display.warning(
            'jinja2_native requires Jinja 2.10 and above. '
            'Version detected: %s. Falling back to default.' % j2_version
        )
else:
    from jinja2 import Environment
    from jinja2.utils import concat as j2_concat

# Lexer token names that open/close Jinja2 constructs, used when scanning
# strings for template markers.
JINJA2_BEGIN_TOKENS = frozenset(('variable_begin', 'block_begin', 'comment_begin', 'raw_begin'))
JINJA2_END_TOKENS = frozenset(('variable_end', 'block_end', 'comment_end', 'raw_end'))
def generate_ansible_template_vars(path, dest_path=None):
    """
    Build the dict of ``template_*`` magic variables describing a template
    file, including the rendered ``ansible_managed`` string.

    :arg path: path of the template file on the controller.
    :kwarg dest_path: optional destination path the rendered template will
        be written to; recorded as ``template_destpath``.
    :returns: dict of template metadata variables.
    """
    b_path = to_bytes(path)
    mtime = os.path.getmtime(b_path)
    # Prefer the symbolic owner name; fall back to the numeric uid when
    # there is no passwd entry for it.
    try:
        owner = pwd.getpwuid(os.stat(b_path).st_uid).pw_name
    except (KeyError, TypeError):
        owner = os.stat(b_path).st_uid

    temp_vars = {
        'template_host': to_text(os.uname()[1]),
        'template_path': path,
        'template_mtime': datetime.datetime.fromtimestamp(mtime),
        'template_uid': to_text(owner),
        'template_fullpath': os.path.abspath(path),
        'template_run_date': datetime.datetime.now(),
        'template_destpath': to_native(dest_path) if dest_path else None,
    }

    # Render the configured "ansible_managed" template string against the
    # metadata gathered above, then expand strftime directives using the
    # template file's modification time.
    managed_str = C.DEFAULT_MANAGED_STR.format(
        host=temp_vars['template_host'],
        uid=temp_vars['template_uid'],
        file=temp_vars['template_path'],
    )
    temp_vars['ansible_managed'] = to_text(
        time.strftime(to_native(managed_str), time.localtime(mtime)))

    return temp_vars
def _escape_backslashes(data, jinja_env):
    """Double any backslash that occurs inside a jinja2 expression.

    Text inside ``{{ ... }}`` is unescaped twice (once by yaml/python and
    once by jinja2), so authors would normally need four backslashes to
    produce a single one.  To spare them that, every backslash found in a
    string token inside a jinja2 variable expression is doubled here
    before rendering; text outside expressions is left untouched.
    """
    # Fast path: nothing to do unless the data contains both a backslash
    # and the start of a jinja2 expression.
    if '\\' not in data or '{{' not in data:
        return data
    preprocessed = jinja_env.preprocess(data)
    pieces = []
    inside_expression = False
    # The jinja2 lexer yields (lineno, token_type, value) triples.
    for _lineno, token_type, token_value in jinja_env.lex(preprocessed):
        if token_type == 'variable_begin':
            inside_expression = True
            pieces.append(token_value)
        elif token_type == 'variable_end':
            inside_expression = False
            pieces.append(token_value)
        elif inside_expression and token_type == 'string':
            # Only string constants inside {{ ... }} get their
            # backslashes doubled.
            pieces.append(token_value.replace('\\', '\\\\'))
        else:
            pieces.append(token_value)
    return ''.join(pieces)
def is_template(data, jinja_env):
    """This function attempts to quickly detect whether a value is a jinja2
    template. To do so, we look for the first 2 matching jinja2 tokens for
    start and end delimiters.

    :arg data: string to inspect.
    :arg jinja_env: jinja2 Environment whose lexer/delimiters are used.
    :returns: True only when a begin token is closed by a matching end
        token; False for plain strings and syntactically broken templates.
    """
    # Token family ('variable', 'block', 'comment', 'raw') of the first
    # begin token seen; the matching end token must close the same family.
    found = None
    # True only until the first begin token has been processed.
    start = True
    # Set when the data opens with a jinja2 comment, which may legally
    # wrap tokens of other families.
    comment = False
    d2 = jinja_env.preprocess(data)
    # This wraps a lot of code, but this is due to lex returning a generator
    # so we may get an exception at any part of the loop
    try:
        for token in jinja_env.lex(d2):
            if token[1] in JINJA2_BEGIN_TOKENS:
                if start and token[1] == 'comment_begin':
                    # Comments can wrap other token types
                    comment = True
                start = False
                # Example: variable_end -> variable
                found = token[1].split('_')[0]
            elif token[1] in JINJA2_END_TOKENS:
                if token[1].split('_')[0] == found:
                    return True
                elif comment:
                    continue
                return False
    except TemplateSyntaxError:
        return False
    # No matching begin/end delimiter pair was found.
    return False
def _count_newlines_from_end(in_str):
'''
Counts the number of newlines at the end of a string. This is used during
the jinja2 templating to ensure the count matches the input, since some newlines
may be thrown away during the templating.
'''
try:
i = len(in_str)
j = i - 1
while in_str[j] == '\n':
j -= 1
return i - 1 - j
except IndexError:
# Uncommon cases: zero length string and string containing only newlines
return i
def recursive_check_defined(item):
    """Recursively walk *item* and raise AnsibleFilterError as soon as any
    nested value is a jinja2 ``Undefined``."""
    from jinja2.runtime import Undefined
    if isinstance(item, MutableMapping):
        # Mappings: every value is checked (keys are assumed concrete).
        for value in item.values():
            recursive_check_defined(value)
    elif isinstance(item, list):
        for element in item:
            recursive_check_defined(element)
    elif isinstance(item, Undefined):
        raise AnsibleFilterError("{0} is undefined".format(item))
class AnsibleUndefined(StrictUndefined):
    '''
    A custom Undefined class, which returns further Undefined objects on access,
    rather than throwing an exception.
    '''
    # NOTE(review): inherits StrictUndefined, so directly rendering or
    # iterating the value itself presumably still raises — confirm against
    # the jinja2 documentation.
    def __getattr__(self, name):
        # Return original Undefined object to preserve the first failure context
        return self
    def __getitem__(self, key):
        # Return original Undefined object to preserve the first failure context
        return self
    def __repr__(self):
        # Fixed marker so undefined values are easy to spot in debug output.
        return 'AnsibleUndefined'
class AnsibleContext(Context):
    """Jinja2 Context that tracks whether any resolved variable was unsafe.

    Every resolve()/resolve_or_missing() result is inspected; as soon as
    one of them is (or contains) an AnsibleUnsafe value, the ``unsafe``
    flag is latched on.  Callers check the flag after rendering and wrap
    the final templated result in AnsibleUnsafe when it is set.
    """
    def __init__(self, *args, **kwargs):
        super(AnsibleContext, self).__init__(*args, **kwargs)
        # Latched on the first unsafe value; never reset for the lifetime
        # of this context.
        self.unsafe = False
    def _is_unsafe(self, val):
        """Return True when *val*, or anything nested inside it, carries
        the ``__UNSAFE__`` marker attribute.  Containers are walked
        recursively because a repr'd key or value could otherwise lose
        its unsafe status."""
        if isinstance(val, dict):
            return any(self._is_unsafe(val[key]) for key in val.keys())
        if isinstance(val, list):
            return any(self._is_unsafe(item) for item in val)
        return isinstance(val, string_types) and hasattr(val, '__UNSAFE__')
    def _update_unsafe(self, val):
        # Skip the recursive scan entirely once the flag is already set.
        if val is not None and not self.unsafe and self._is_unsafe(val):
            self.unsafe = True
    def resolve(self, key):
        """Intercepted resolve(): flag the context whenever the looked-up
        value is unsafe, then hand the value back unchanged."""
        val = super(AnsibleContext, self).resolve(key)
        self._update_unsafe(val)
        return val
    def resolve_or_missing(self, key):
        val = super(AnsibleContext, self).resolve_or_missing(key)
        self._update_unsafe(val)
        return val
class JinjaPluginIntercept(MutableMapping):
    """Mapping wrapper around a jinja2 filters/tests table.

    Plain names are delegated to the wrapped table; fully-qualified
    collection names (``ns.coll.plugin``) are resolved lazily on first
    lookup by importing the collection's plugin package, and the resolved
    callables are cached for subsequent lookups.
    """
    def __init__(self, delegatee, pluginloader, *args, **kwargs):
        super(JinjaPluginIntercept, self).__init__(*args, **kwargs)
        # delegatee: the environment's original filters/tests dict.
        self._delegatee = delegatee
        # pluginloader: loader whose class_name decides filters vs tests.
        self._pluginloader = pluginloader
        if self._pluginloader.class_name == 'FilterModule':
            self._method_map_name = 'filters'
            self._dirname = 'filter'
        elif self._pluginloader.class_name == 'TestModule':
            self._method_map_name = 'tests'
            self._dirname = 'test'
        # NOTE(review): any other class_name leaves _method_map_name and
        # _dirname unset — presumably only these two loaders are ever
        # passed; confirm with callers.
        self._collection_jinja_func_cache = {}
        # FUTURE: we can cache FQ filter/test calls for the entire duration of a run, since a given collection's impl's
        # aren't supposed to change during a run
    def __getitem__(self, key):
        # Resolve a filter/test by name; may raise KeyError (bad name or
        # still missing after scanning the collection) or ValueError.
        if not isinstance(key, string_types):
            raise ValueError('key must be a string')
        key = to_native(key)
        if '.' not in key:  # might be a built-in value, delegate to base dict
            return self._delegatee.__getitem__(key)
        func = self._collection_jinja_func_cache.get(key)
        if func:
            return func
        # Fully-qualified names have exactly three components:
        # namespace.collection.plugin_name.
        components = key.split('.')
        if len(components) != 3:
            raise KeyError('invalid plugin name: {0}'.format(key))
        collection_name = '.'.join(components[0:2])
        collection_pkg = 'ansible_collections.{0}.plugins.{1}'.format(collection_name, self._dirname)
        # FIXME: error handling for bogus plugin name, bogus impl, bogus filter/test
        # FIXME: move this capability into the Jinja plugin loader
        pkg = import_module(collection_pkg)
        # Load every plugin module in the collection and cache all of the
        # functions each exposes, so one lookup warms the whole collection.
        for dummy, module_name, ispkg in pkgutil.iter_modules(pkg.__path__, prefix=collection_name + '.'):
            if ispkg:
                continue
            plugin_impl = self._pluginloader.get(module_name)
            method_map = getattr(plugin_impl, self._method_map_name)
            for f in iteritems(method_map()):
                fq_name = '.'.join((collection_name, f[0]))
                self._collection_jinja_func_cache[fq_name] = f[1]
        function_impl = self._collection_jinja_func_cache[key]
        # FIXME: detect/warn on intra-collection function name collisions
        return function_impl
    def __setitem__(self, key, value):
        # Writes always go straight to the wrapped table.
        return self._delegatee.__setitem__(key, value)
    def __delitem__(self, key):
        raise NotImplementedError()
    def __iter__(self):
        # not strictly accurate since we're not counting dynamically-loaded values
        return iter(self._delegatee)
    def __len__(self):
        # not strictly accurate since we're not counting dynamically-loaded values
        return len(self._delegatee)
class AnsibleEnvironment(Environment):
    '''
    Our custom environment, which simply allows us to override the class-level
    values for the Template and Context classes used by jinja2 internally.
    '''
    # Use Ansible's context/template classes so unsafe-value tracking and
    # Ansible-specific template behavior apply during rendering.
    context_class = AnsibleContext
    template_class = AnsibleJ2Template
    def __init__(self, *args, **kwargs):
        super(AnsibleEnvironment, self).__init__(*args, **kwargs)
        # Route filter/test lookups through the intercept so that
        # collection-qualified names ('ns.coll.name') resolve lazily.
        self.filters = JinjaPluginIntercept(self.filters, filter_loader)
        self.tests = JinjaPluginIntercept(self.tests, test_loader)
class Templar:
    '''
    The main class for templating, with the main entry-point of template().
    '''
    def __init__(self, loader, shared_loader_obj=None, variables=None):
        """Set up the templating environment.

        :arg loader: DataLoader used for the base directory and lookups
            (may be falsy, in which case './' is used).
        :kwarg shared_loader_obj: optional object providing filter/test/
            lookup plugin loaders; module-level loaders are used otherwise.
        :kwarg variables: initial template variable mapping.
        """
        variables = {} if variables is None else variables
        self._loader = loader
        # Lazily-populated plugin caches (see _get_filters/_get_tests).
        self._filters = None
        self._tests = None
        self._available_variables = variables
        # Cache of already-templated strings, keyed by sha1 (see template()).
        self._cached_result = {}
        if loader:
            self._basedir = loader.get_basedir()
        else:
            self._basedir = './'
        if shared_loader_obj:
            self._filter_loader = getattr(shared_loader_obj, 'filter_loader')
            self._test_loader = getattr(shared_loader_obj, 'test_loader')
            self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader')
        else:
            self._filter_loader = filter_loader
            self._test_loader = test_loader
            self._lookup_loader = lookup_loader
        # flags to determine whether certain failures during templating
        # should result in fatal errors being raised
        self._fail_on_lookup_errors = True
        self._fail_on_filter_errors = True
        self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
        self.environment = AnsibleEnvironment(
            trim_blocks=True,
            undefined=AnsibleUndefined,
            extensions=self._get_extensions(),
            finalize=self._finalize,
            loader=FileSystemLoader(self._basedir),
        )
        # the current rendering context under which the templar class is working
        self.cur_context = None
        # Matches strings consisting of nothing but one '{{ name }}'
        # reference, used to preserve the referenced variable's type.
        self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))
        self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (
            self.environment.variable_start_string,
            self.environment.block_start_string,
            self.environment.block_end_string,
            self.environment.variable_end_string
        ))
        # Detects '| string_type_filter' pipelines whose result must not be
        # converted back into a python container (see template()).
        self._no_type_regex = re.compile(r'.*?\|\s*(?:%s)(?:\([^\|]*\))?\s*\)?\s*(?:%s)' %
                                         ('|'.join(C.STRING_TYPE_FILTERS), self.environment.variable_end_string))
def _get_filters(self):
'''
Returns filter plugins, after loading and caching them if need be
'''
if self._filters is not None:
return self._filters.copy()
self._filters = dict()
for fp in self._filter_loader.all():
self._filters.update(fp.filters())
return self._filters.copy()
def _get_tests(self):
'''
Returns tests plugins, after loading and caching them if need be
'''
if self._tests is not None:
return self._tests.copy()
self._tests = dict()
for fp in self._test_loader.all():
self._tests.update(fp.tests())
return self._tests.copy()
def _get_extensions(self):
'''
Return jinja2 extensions to load.
If some extensions are set via jinja_extensions in ansible.cfg, we try
to load them with the jinja environment.
'''
jinja_exts = []
if C.DEFAULT_JINJA2_EXTENSIONS:
# make sure the configuration directive doesn't contain spaces
# and split extensions in an array
jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
return jinja_exts
    @property
    def available_variables(self):
        # Current variable mapping used for all templating on this instance.
        return self._available_variables
    @available_variables.setter
    def available_variables(self, variables):
        '''
        Sets the list of template variables this Templar instance will use
        to template things, so we don't have to pass them around between
        internal methods. We also clear the template cache here, as the variables
        are being changed.

        :raises AnsibleAssertionError: when *variables* is not a dict.
        '''
        if not isinstance(variables, dict):
            raise AnsibleAssertionError("the type of 'variables' should be a dict but was a %s" % (type(variables)))
        self._available_variables = variables
        # Cached results were computed against the old variables; drop them.
        self._cached_result = {}
    def set_available_variables(self, variables):
        # Deprecated shim kept for backward compatibility; forwards to the
        # available_variables property setter after emitting a warning.
        display.deprecated(
            'set_available_variables is being deprecated. Use "@available_variables.setter" instead.',
            version='2.13'
        )
        self.available_variables = variables
    def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None,
                 convert_data=True, static_vars=None, cache=True, disable_lookups=False):
        '''
        Templates (possibly recursively) any given data as input. If convert_bare is
        set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
        before being sent through the template engine.

        Strings are rendered via do_template(); lists, tuples and mappings
        are templated element-by-element (keys listed in static_vars are
        copied through untouched); every other type is returned as-is.
        Unsafe values are never templated.
        '''
        static_vars = [''] if static_vars is None else static_vars
        # Don't template unsafe variables, just return them.
        if hasattr(variable, '__UNSAFE__'):
            return variable
        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors
        try:
            if convert_bare:
                variable = self._convert_bare_variable(variable)
            if isinstance(variable, string_types):
                result = variable
                if self.is_possibly_template(variable):
                    # Check to see if the string we are trying to render is just referencing a single
                    # var. In this case we don't want to accidentally change the type of the variable
                    # to a string by using the jinja template renderer. We just want to pass it.
                    only_one = self.SINGLE_VAR.match(variable)
                    if only_one:
                        var_name = only_one.group(1)
                        if var_name in self._available_variables:
                            resolved_val = self._available_variables[var_name]
                            if isinstance(resolved_val, NON_TEMPLATED_TYPES):
                                return resolved_val
                            elif resolved_val is None:
                                return C.DEFAULT_NULL_REPRESENTATION
                    # Using a cache in order to prevent template calls with already templated variables
                    # (the key covers the input AND the options that affect the output).
                    sha1_hash = None
                    if cache:
                        variable_hash = sha1(text_type(variable).encode('utf-8'))
                        options_hash = sha1(
                            (
                                text_type(preserve_trailing_newlines) +
                                text_type(escape_backslashes) +
                                text_type(fail_on_undefined) +
                                text_type(overrides)
                            ).encode('utf-8')
                        )
                        sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
                    if cache and sha1_hash in self._cached_result:
                        result = self._cached_result[sha1_hash]
                    else:
                        result = self.do_template(
                            variable,
                            preserve_trailing_newlines=preserve_trailing_newlines,
                            escape_backslashes=escape_backslashes,
                            fail_on_undefined=fail_on_undefined,
                            overrides=overrides,
                            disable_lookups=disable_lookups,
                        )
                        if not USE_JINJA2_NATIVE:
                            # Remember unsafe-ness before conversion so it can
                            # be re-applied to the converted value.
                            unsafe = hasattr(result, '__UNSAFE__')
                            if convert_data and not self._no_type_regex.match(variable):
                                # if this looks like a dictionary or list, convert it to such using the safe_eval method
                                if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
                                        result.startswith("[") or result in ("True", "False"):
                                    eval_results = safe_eval(result, include_exceptions=True)
                                    if eval_results[1] is None:
                                        result = eval_results[0]
                                        if unsafe:
                                            result = wrap_var(result)
                                    else:
                                        # FIXME: if the safe_eval raised an error, should we do something with it?
                                        pass
                        # we only cache in the case where we have a single variable
                        # name, to make sure we're not putting things which may otherwise
                        # be dynamic in the cache (filters, lookups, etc.)
                        if cache:
                            self._cached_result[sha1_hash] = result
                return result
            elif isinstance(variable, (list, tuple)):
                # NOTE: always returns a list, even for tuple input.
                return [self.template(
                    v,
                    preserve_trailing_newlines=preserve_trailing_newlines,
                    fail_on_undefined=fail_on_undefined,
                    overrides=overrides,
                    disable_lookups=disable_lookups,
                ) for v in variable]
            elif isinstance(variable, (dict, Mapping)):
                d = {}
                # we don't use iteritems() here to avoid problems if the underlying dict
                # changes sizes due to the templating, which can happen with hostvars
                for k in variable.keys():
                    if k not in static_vars:
                        d[k] = self.template(
                            variable[k],
                            preserve_trailing_newlines=preserve_trailing_newlines,
                            fail_on_undefined=fail_on_undefined,
                            overrides=overrides,
                            disable_lookups=disable_lookups,
                        )
                    else:
                        d[k] = variable[k]
                return d
            else:
                return variable
        except AnsibleFilterError:
            # Filter failures are fatal only when configured to be; otherwise
            # fall back to the untemplated input.
            if self._fail_on_filter_errors:
                raise
            else:
                return variable
def is_template(self, data):
'''lets us know if data has a template'''
if isinstance(data, string_types):
return is_template(data, self.environment)
elif isinstance(data, (list, tuple)):
for v in data:
if self.is_template(v):
return True
elif isinstance(data, dict):
for k in data:
if self.is_template(k) or self.is_template(data[k]):
return True
return False
templatable = is_template
def is_possibly_template(self, data):
'''Determines if a string looks like a template, by seeing if it
contains a jinja2 start delimiter. Does not guarantee that the string
is actually a template.
This is different than ``is_template`` which is more strict.
This method may return ``True`` on a string that is not templatable.
Useful when guarding passing a string for templating, but when
you want to allow the templating engine to make the final
assessment which may result in ``TemplateSyntaxError``.
'''
env = self.environment
if isinstance(data, string_types):
for marker in (env.block_start_string, env.variable_start_string, env.comment_start_string):
if marker in data:
return True
return False
def _convert_bare_variable(self, variable):
'''
Wraps a bare string, which may have an attribute portion (ie. foo.bar)
in jinja2 variable braces so that it is evaluated properly.
'''
if isinstance(variable, string_types):
contains_filters = "|" in variable
first_part = variable.split("|")[0].split(".")[0].split("[")[0]
if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)
# the variable didn't meet the conditions to be converted,
# so just return it as-is
return variable
def _finalize(self, thing):
'''
A custom finalize method for jinja2, which prevents None from being returned. This
avoids a string of ``"None"`` as ``None`` has no importance in YAML.
If using ANSIBLE_JINJA2_NATIVE we bypass this and return the actual value always
'''
if USE_JINJA2_NATIVE:
return thing
return thing if thing is not None else ''
    def _fail_lookup(self, name, *args, **kwargs):
        # Stand-in bound as the 'lookup'/'query'/'q' template globals when
        # lookups are disabled: any attempted lookup raises immediately.
        raise AnsibleError("The lookup `%s` was found, however lookups were disabled from templating" % name)
def _now_datetime(self, utc=False, fmt=None):
'''jinja2 global function to return current datetime, potentially formatted via strftime'''
if utc:
now = datetime.datetime.utcnow()
else:
now = datetime.datetime.now()
if fmt:
return now.strftime(fmt)
return now
def _query_lookup(self, name, *args, **kwargs):
''' wrapper for lookup, force wantlist true'''
kwargs['wantlist'] = True
return self._lookup(name, *args, **kwargs)
    def _lookup(self, name, *args, **kwargs):
        """Run the lookup plugin *name* with *args* as its terms.

        Recognized (and consumed) kwargs: ``wantlist`` (return the raw
        list), ``allow_unsafe`` (skip unsafe-wrapping of the result) and
        ``errors`` ('strict'/'warn'/'ignore' behavior for plugin errors).
        All remaining kwargs are passed to the plugin's run().

        :raises AnsibleError: when the plugin is missing, or fails and the
            error policy is strict.
        :raises AnsibleUndefinedVariable: when the plugin hits an
            undefined variable.
        """
        instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)
        if instance is not None:
            wantlist = kwargs.pop('wantlist', False)
            allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
            errors = kwargs.pop('errors', 'strict')
            from ansible.utils.listify import listify_lookup_plugin_terms
            loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
            # safely catch run failures per #5059
            try:
                ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                raise AnsibleUndefinedVariable(e)
            except Exception as e:
                # Honor the caller's error policy; non-strict policies fall
                # through to an empty result.
                if self._fail_on_lookup_errors:
                    msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
                          (name, type(e), to_text(e))
                    if errors == 'warn':
                        display.warning(msg)
                    elif errors == 'ignore':
                        display.display(msg, log_only=True)
                    else:
                        raise AnsibleError(to_native(msg))
                ran = [] if wantlist else None
            if ran and not allow_unsafe:
                # Results are treated as untrusted: wrap them so they are
                # never re-templated.
                if wantlist:
                    ran = wrap_var(ran)
                else:
                    try:
                        ran = wrap_var(",".join(ran))
                    except TypeError:
                        # Lookup Plugins should always return lists. Throw an error if that's not
                        # the case:
                        if not isinstance(ran, Sequence):
                            raise AnsibleError("The lookup plugin '%s' did not return a list."
                                               % name)
                        # The TypeError we can recover from is when the value *inside* of the list
                        # is not a string
                        if len(ran) == 1:
                            ran = wrap_var(ran[0])
                        else:
                            ran = wrap_var(ran)
                    if self.cur_context:
                        # Propagate unsafety to the active rendering context.
                        self.cur_context.unsafe = True
            return ran
        else:
            raise AnsibleError("lookup plugin (%s) not found" % name)
    def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False):
        """Render the template string *data* with jinja2 and return the
        result (the original *data* is returned on non-fatal failures).

        Applies per-template '#jinja2:' header overrides, installs the
        Ansible filters/tests and lookup globals, renders through an
        AnsibleContext (so unsafe results get wrapped), and restores any
        trailing newlines the render dropped.
        """
        if USE_JINJA2_NATIVE and not isinstance(data, string_types):
            return data
        # For preserving the number of input newlines in the output (used
        # later in this method)
        data_newlines = _count_newlines_from_end(data)
        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors
        try:
            # allows template header overrides to change jinja2 options.
            if overrides is None:
                myenv = self.environment.overlay()
            else:
                myenv = self.environment.overlay(overrides)
            # Get jinja env overrides from template
            if hasattr(data, 'startswith') and data.startswith(JINJA2_OVERRIDE):
                # Header format: '#jinja2:key:value, key:value' on line one;
                # the header line is stripped from the template body.
                eol = data.find('\n')
                line = data[len(JINJA2_OVERRIDE):eol]
                data = data[eol + 1:]
                for pair in line.split(','):
                    (key, val) = pair.split(':')
                    key = key.strip()
                    setattr(myenv, key, ast.literal_eval(val.strip()))
            # Adds Ansible custom filters and tests
            myenv.filters.update(self._get_filters())
            myenv.tests.update(self._get_tests())
            if escape_backslashes:
                # Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
                data = _escape_backslashes(data, myenv)
            try:
                t = myenv.from_string(data)
            except TemplateSyntaxError as e:
                raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)))
            except Exception as e:
                if 'recursion' in to_native(e):
                    raise AnsibleError("recursive loop detected in template string: %s" % to_native(data))
                else:
                    # Any other compilation failure: return the input as-is.
                    return data
            # jinja2 global is inconsistent across versions, this normalizes them
            t.globals['dict'] = dict
            if disable_lookups:
                # All lookup entry points raise when lookups are disabled.
                t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup
            else:
                t.globals['lookup'] = self._lookup
                t.globals['query'] = t.globals['q'] = self._query_lookup
            t.globals['now'] = self._now_datetime
            t.globals['finalize'] = self._finalize
            jvars = AnsibleJ2Vars(self, t.globals)
            self.cur_context = new_context = t.new_context(jvars, shared=True)
            rf = t.root_render_func(new_context)
            try:
                res = j2_concat(rf)
                if getattr(new_context, 'unsafe', False):
                    # The context saw an unsafe value; taint the whole result.
                    res = wrap_var(res)
            except TypeError as te:
                if 'AnsibleUndefined' in to_native(te):
                    errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
                    errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
                    raise AnsibleUndefinedVariable(errmsg)
                else:
                    display.debug("failing because of a type error, template data is: %s" % to_text(data))
                    raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)))
            if USE_JINJA2_NATIVE and not isinstance(res, string_types):
                return res
            if preserve_trailing_newlines:
                # The low level calls above do not preserve the newline
                # characters at the end of the input data, so we use the
                # calculate the difference in newlines and append them
                # to the resulting output for parity
                #
                # jinja2 added a keep_trailing_newline option in 2.7 when
                # creating an Environment.  That would let us make this code
                # better (remove a single newline if
                # preserve_trailing_newlines is False).  Once we can depend on
                # that version being present, modify our code to set that when
                # initializing self.environment and remove a single trailing
                # newline here if preserve_newlines is False.
                res_newlines = _count_newlines_from_end(res)
                if data_newlines > res_newlines:
                    res += self.environment.newline_sequence * (data_newlines - res_newlines)
            return res
        except (UndefinedError, AnsibleUndefinedVariable) as e:
            if fail_on_undefined:
                raise AnsibleUndefinedVariable(e)
            else:
                display.debug("Ignoring undefined failure: %s" % to_text(e))
                return data
    # for backwards compatibility in case anyone is using old private method directly
    _do_template = do_template
| gpl-3.0 |
SamR1/FitTrackee | fittrackee/tests/workouts/test_stats_api.py | 1 | 28228 | import json
from flask import Flask
from fittrackee.users.models import User
from fittrackee.workouts.models import Sport, Workout
from ..api_test_case import ApiTestCaseMixin
class TestGetStatsByTime(ApiTestCaseMixin):
    def test_it_gets_no_stats_when_user_has_no_workouts(
        self, app: Flask, user_1: User
    ) -> None:
        client, token = self.get_test_client_and_auth_token(app)

        response = client.get(
            f'/api/stats/{user_1.username}/by_time',
            headers={'Authorization': f'Bearer {token}'},
        )

        payload = json.loads(response.data.decode())
        assert response.status_code == 200
        assert 'success' in payload['status']
        # A user without workouts gets an empty statistics object.
        assert payload['data']['statistics'] == {}
def test_it_returns_error_when_user_does_not_exists(
self, app: Flask, user_1: User
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
'/api/stats/1000/by_time',
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 404
assert 'not found' in data['status']
assert 'User does not exist.' in data['message']
def test_it_returns_error_if_date_format_is_invalid(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
sport_2_running: Sport,
seven_workouts_user_1: Workout,
workout_running_user_1: Workout,
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
(
f'/api/stats/{user_1.username}/by_time'
f'?from="2018-04-01&to=2018-04-30'
),
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 500
assert 'error' in data['status']
assert (
'Error. Please try again or contact the administrator.'
in data['message']
)
def test_it_returns_error_if_period_is_invalid(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
sport_2_running: Sport,
seven_workouts_user_1: Workout,
workout_running_user_1: Workout,
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
f'/api/stats/{user_1.username}/by_time?from=2018-04-01&to=2018-04-30&time=day', # noqa
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 400
assert 'fail' in data['status']
assert 'Invalid time period.' in data['message']
def test_it_gets_stats_by_time_all_workouts(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
sport_2_running: Sport,
seven_workouts_user_1: Workout,
workout_running_user_1: Workout,
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
f'/api/stats/{user_1.username}/by_time',
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 200
assert 'success' in data['status']
assert data['data']['statistics'] == {
'2017': {
'1': {
'nb_workouts': 2,
'total_distance': 15.0,
'total_duration': 4480,
}
},
'2018': {
'1': {
'nb_workouts': 5,
'total_distance': 39.0,
'total_duration': 11624,
},
'2': {
'nb_workouts': 1,
'total_distance': 12.0,
'total_duration': 6000,
},
},
}
def test_it_gets_stats_for_april_2018(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
sport_2_running: Sport,
seven_workouts_user_1: Workout,
workout_running_user_1: Workout,
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
f'/api/stats/{user_1.username}/by_time?from=2018-04-01&to=2018-04-30', # noqa
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 200
assert 'success' in data['status']
assert data['data']['statistics'] == {
'2018': {
'1': {
'nb_workouts': 1,
'total_distance': 8.0,
'total_duration': 6000,
},
'2': {
'nb_workouts': 1,
'total_distance': 12.0,
'total_duration': 6000,
},
}
}
def test_it_gets_stats_for_april_2018_with_paris_timezone(
self,
app: Flask,
user_1_paris: User,
sport_1_cycling: Sport,
sport_2_running: Sport,
seven_workouts_user_1: Workout,
workout_running_user_1: Workout,
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
f'/api/stats/{user_1_paris.username}/by_time?'
f'from=2018-04-01&to=2018-04-30',
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 200
assert 'success' in data['status']
assert data['data']['statistics'] == {
'2018': {
'1': {
'nb_workouts': 1,
'total_distance': 8.0,
'total_duration': 6000,
},
'2': {
'nb_workouts': 1,
'total_distance': 12.0,
'total_duration': 6000,
},
}
}
def test_it_gets_stats_by_year(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
sport_2_running: Sport,
seven_workouts_user_1: Workout,
workout_running_user_1: Workout,
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
f'/api/stats/{user_1.username}/by_time?time=year',
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 200
assert 'success' in data['status']
assert data['data']['statistics'] == {
'2017': {
'1': {
'nb_workouts': 2,
'total_distance': 15.0,
'total_duration': 4480,
}
},
'2018': {
'1': {
'nb_workouts': 5,
'total_distance': 39.0,
'total_duration': 11624,
},
'2': {
'nb_workouts': 1,
'total_distance': 12.0,
'total_duration': 6000,
},
},
}
def test_it_gets_stats_by_year_for_april_2018(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
sport_2_running: Sport,
seven_workouts_user_1: Workout,
workout_running_user_1: Workout,
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
f'/api/stats/{user_1.username}/by_time?from=2018-04-01&to=2018-04-30&time=year', # noqa
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 200
assert 'success' in data['status']
assert data['data']['statistics'] == {
'2018': {
'1': {
'nb_workouts': 1,
'total_distance': 8.0,
'total_duration': 6000,
},
'2': {
'nb_workouts': 1,
'total_distance': 12.0,
'total_duration': 6000,
},
}
}
def test_it_gets_stats_by_year_for_april_2018_with_paris_timezone(
self,
app: Flask,
user_1_paris: User,
sport_1_cycling: Sport,
sport_2_running: Sport,
seven_workouts_user_1: Workout,
workout_running_user_1: Workout,
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
f'/api/stats/{user_1_paris.username}/by_time?from=2018-04-01&to=2018-04-30&time=year', # noqa
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 200
assert 'success' in data['status']
assert data['data']['statistics'] == {
'2018': {
'1': {
'nb_workouts': 1,
'total_distance': 8.0,
'total_duration': 6000,
},
'2': {
'nb_workouts': 1,
'total_distance': 12.0,
'total_duration': 6000,
},
}
}
def test_it_gets_stats_by_month(
self,
app: Flask,
user_1: User,
sport_1_cycling: Sport,
sport_2_running: Sport,
seven_workouts_user_1: Workout,
workout_running_user_1: Workout,
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
f'/api/stats/{user_1.username}/by_time?time=month',
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 200
assert 'success' in data['status']
assert data['data']['statistics'] == {
'2017-03': {
'1': {
'nb_workouts': 1,
'total_distance': 5.0,
'total_duration': 1024,
}
},
'2017-06': {
'1': {
'nb_workouts': 1,
'total_distance': 10.0,
'total_duration': 3456,
}
},
'2018-01': {
'1': {
'nb_workouts': 1,
'total_distance': 10.0,
'total_duration': 1024,
}
},
'2018-02': {
'1': {
'nb_workouts': 2,
'total_distance': 11.0,
'total_duration': 1600,
}
},
'2018-04': {
'1': {
'nb_workouts': 1,
'total_distance': 8.0,
'total_duration': 6000,
},
'2': {
'nb_workouts': 1,
'total_distance': 12.0,
'total_duration': 6000,
},
},
'2018-05': {
'1': {
'nb_workouts': 1,
'total_distance': 10.0,
'total_duration': 3000,
}
},
}
def test_it_gets_stats_by_month_with_new_york_timezone(
self,
app: Flask,
user_1_full: User,
sport_1_cycling: Sport,
sport_2_running: Sport,
seven_workouts_user_1: Workout,
workout_running_user_1: Workout,
) -> None:
client, auth_token = self.get_test_client_and_auth_token(app)
response = client.get(
f'/api/stats/{user_1_full.username}/by_time?time=month',
headers=dict(Authorization=f'Bearer {auth_token}'),
)
data = json.loads(response.data.decode())
assert response.status_code == 200
assert 'success' in data['status']
assert data['data']['statistics'] == {
'2017-03': {
'1': {
'nb_workouts': 1,
'total_distance': 5.0,
'total_duration': 1024,
}
},
'2017-06': {
'1': {
'nb_workouts': 1,
'total_distance': 10.0,
'total_duration': 3456,
}
},
'2018-01': {
'1': {
'nb_workouts': 1,
'total_distance': 10.0,
'total_duration': 1024,
}
},
'2018-02': {
'1': {
'nb_workouts': 2,
'total_distance': 11.0,
'total_duration': 1600,
}
},
'2018-04': {
'1': {
'nb_workouts': 1,
'total_distance': 8.0,
'total_duration': 6000,
},
'2': {
'nb_workouts': 1,
'total_distance': 12.0,
'total_duration': 6000,
},
},
'2018-05': {
'1': {
'nb_workouts': 1,
'total_distance': 10.0,
'total_duration': 3000,
}
},
}
def test_it_gets_stats_by_month_for_april_2018(
    self,
    app: Flask,
    user_1: User,
    sport_1_cycling: Sport,
    sport_2_running: Sport,
    seven_workouts_user_1: Workout,
    workout_running_user_1: Workout,
) -> None:
    """A from/to window restricted to April 2018 returns only that month."""
    client, auth_token = self.get_test_client_and_auth_token(app)

    response = client.get(
        f'/api/stats/{user_1.username}/by_time?from=2018-04-01&to=2018-04-30&time=month',  # noqa
        headers={'Authorization': f'Bearer {auth_token}'},
    )

    payload = json.loads(response.data.decode())
    expected_statistics = {
        '2018-04': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 8.0,
                'total_duration': 6000,
            },
            '2': {
                'nb_workouts': 1,
                'total_distance': 12.0,
                'total_duration': 6000,
            },
        }
    }
    assert response.status_code == 200
    assert 'success' in payload['status']
    assert payload['data']['statistics'] == expected_statistics
def test_it_gets_stats_by_week(
    self,
    app: Flask,
    user_1_full: User,
    sport_1_cycling: Sport,
    sport_2_running: Sport,
    seven_workouts_user_1: Workout,
    workout_running_user_1: Workout,
) -> None:
    """Weekly stats (weeks starting on Sunday) group workouts as expected."""
    client, auth_token = self.get_test_client_and_auth_token(app)

    response = client.get(
        f'/api/stats/{user_1_full.username}/by_time?time=week',
        headers={'Authorization': f'Bearer {auth_token}'},
    )

    payload = json.loads(response.data.decode())
    expected_statistics = {
        '2017-03-19': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 5.0,
                'total_duration': 1024,
            }
        },
        '2017-05-28': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 10.0,
                'total_duration': 3456,
            }
        },
        '2017-12-31': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 10.0,
                'total_duration': 1024,
            }
        },
        '2018-02-18': {
            '1': {
                'nb_workouts': 2,
                'total_distance': 11.0,
                'total_duration': 1600,
            }
        },
        '2018-04-01': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 8.0,
                'total_duration': 6000,
            },
            '2': {
                'nb_workouts': 1,
                'total_distance': 12.0,
                'total_duration': 6000,
            },
        },
        '2018-05-06': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 10.0,
                'total_duration': 3000,
            }
        },
    }
    assert response.status_code == 200
    assert 'success' in payload['status']
    assert payload['data']['statistics'] == expected_statistics
def test_it_gets_stats_by_week_for_week_13(
    self,
    app: Flask,
    user_1: User,
    sport_1_cycling: Sport,
    sport_2_running: Sport,
    seven_workouts_user_1: Workout,
    workout_running_user_1: Workout,
) -> None:
    """Restricting the window to April 2018 returns only the week of the 1st."""
    client, auth_token = self.get_test_client_and_auth_token(app)

    response = client.get(
        f'/api/stats/{user_1.username}/by_time?from=2018-04-01&to=2018-04-30&time=week',  # noqa
        headers={'Authorization': f'Bearer {auth_token}'},
    )

    payload = json.loads(response.data.decode())
    expected_statistics = {
        '2018-04-01': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 8.0,
                'total_duration': 6000,
            },
            '2': {
                'nb_workouts': 1,
                'total_distance': 12.0,
                'total_duration': 6000,
            },
        }
    }
    assert response.status_code == 200
    assert 'success' in payload['status']
    assert payload['data']['statistics'] == expected_statistics
def test_if_get_stats_by_week_starting_with_monday(
    self,
    app: Flask,
    user_1: User,
    sport_1_cycling: Sport,
    sport_2_running: Sport,
    seven_workouts_user_1: Workout,
    workout_running_user_1: Workout,
) -> None:
    """Weekly stats with ``time=weekm`` bucket on Monday-start weeks.

    NOTE(review): the name 'test_if_get...' is inconsistent with the sibling
    'test_it_gets...' tests — probably a typo, but renaming would change the
    reported test id, so it is left as-is.
    """
    client, auth_token = self.get_test_client_and_auth_token(app)

    response = client.get(
        f'/api/stats/{user_1.username}/by_time?time=weekm',
        headers={'Authorization': f'Bearer {auth_token}'},
    )

    payload = json.loads(response.data.decode())
    expected_statistics = {
        '2017-03-20': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 5.0,
                'total_duration': 1024,
            }
        },
        '2017-05-29': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 10.0,
                'total_duration': 3456,
            }
        },
        '2018-01-01': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 10.0,
                'total_duration': 1024,
            }
        },
        '2018-02-19': {
            '1': {
                'nb_workouts': 2,
                'total_distance': 11.0,
                'total_duration': 1600,
            }
        },
        '2018-03-26': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 8.0,
                'total_duration': 6000,
            },
            '2': {
                'nb_workouts': 1,
                'total_distance': 12.0,
                'total_duration': 6000,
            },
        },
        '2018-05-07': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 10.0,
                'total_duration': 3000,
            }
        },
    }
    assert response.status_code == 200
    assert 'success' in payload['status']
    assert payload['data']['statistics'] == expected_statistics
def test_it_gets_stats_by_week_starting_with_monday_for_week_13(
    self,
    app: Flask,
    user_1: User,
    sport_1_cycling: Sport,
    sport_2_running: Sport,
    seven_workouts_user_1: Workout,
    workout_running_user_1: Workout,
) -> None:
    """With Monday-start weeks, April 1st 2018 falls in the 2018-03-26 week."""
    client, auth_token = self.get_test_client_and_auth_token(app)

    response = client.get(
        f'/api/stats/{user_1.username}/by_time?from=2018-04-01&to=2018-04-30&time=weekm',  # noqa
        headers={'Authorization': f'Bearer {auth_token}'},
    )

    payload = json.loads(response.data.decode())
    expected_statistics = {
        '2018-03-26': {
            '1': {
                'nb_workouts': 1,
                'total_distance': 8.0,
                'total_duration': 6000,
            },
            '2': {
                'nb_workouts': 1,
                'total_distance': 12.0,
                'total_duration': 6000,
            },
        }
    }
    assert response.status_code == 200
    assert 'success' in payload['status']
    assert payload['data']['statistics'] == expected_statistics
class TestGetStatsBySport(ApiTestCaseMixin):
    """Tests for the ``/api/stats/<username>/by_sport`` endpoint."""

    def test_it_gets_stats_by_sport(
        self,
        app: Flask,
        user_1: User,
        sport_1_cycling: Sport,
        sport_2_running: Sport,
        seven_workouts_user_1: Workout,
        workout_running_user_1: Workout,
    ) -> None:
        """Without a filter, totals are returned per sport id."""
        client, auth_token = self.get_test_client_and_auth_token(app)

        response = client.get(
            f'/api/stats/{user_1.username}/by_sport',
            headers={'Authorization': f'Bearer {auth_token}'},
        )

        payload = json.loads(response.data.decode())
        assert response.status_code == 200
        assert 'success' in payload['status']
        assert payload['data']['statistics'] == {
            '1': {
                'nb_workouts': 7,
                'total_distance': 54.0,
                'total_duration': 16104,
            },
            '2': {
                'nb_workouts': 1,
                'total_distance': 12.0,
                'total_duration': 6000,
            },
        }

    def test_it_get_stats_for_sport_1(
        self,
        app: Flask,
        user_1: User,
        sport_1_cycling: Sport,
        sport_2_running: Sport,
        seven_workouts_user_1: Workout,
        workout_running_user_1: Workout,
    ) -> None:
        """``sport_id=1`` restricts the statistics to cycling only."""
        client, auth_token = self.get_test_client_and_auth_token(app)

        response = client.get(
            f'/api/stats/{user_1.username}/by_sport?sport_id=1',
            headers={'Authorization': f'Bearer {auth_token}'},
        )

        payload = json.loads(response.data.decode())
        assert response.status_code == 200
        assert 'success' in payload['status']
        assert payload['data']['statistics'] == {
            '1': {
                'nb_workouts': 7,
                'total_distance': 54.0,
                'total_duration': 16104,
            }
        }

    def test_it_returns_errors_if_user_does_not_exist(
        self,
        app: Flask,
        user_1: User,
        sport_1_cycling: Sport,
        sport_2_running: Sport,
        seven_workouts_user_1: Workout,
        workout_running_user_1: Workout,
    ) -> None:
        """An unknown username yields a 404 'not found' response."""
        client, auth_token = self.get_test_client_and_auth_token(app)

        response = client.get(
            '/api/stats/1000/by_sport?sport_id=1',
            headers={'Authorization': f'Bearer {auth_token}'},
        )

        payload = json.loads(response.data.decode())
        assert response.status_code == 404
        assert 'not found' in payload['status']
        assert 'User does not exist.' in payload['message']

    def test_it_returns_error_if_sport_does_not_exist(
        self,
        app: Flask,
        user_1: User,
        sport_1_cycling: Sport,
        sport_2_running: Sport,
        seven_workouts_user_1: Workout,
        workout_running_user_1: Workout,
    ) -> None:
        """An unknown sport id yields a 404 'not found' response."""
        client, auth_token = self.get_test_client_and_auth_token(app)

        response = client.get(
            f'/api/stats/{user_1.username}/by_sport?sport_id=999',
            headers={'Authorization': f'Bearer {auth_token}'},
        )

        payload = json.loads(response.data.decode())
        assert response.status_code == 404
        assert 'not found' in payload['status']
        assert 'Sport does not exist.' in payload['message']

    def test_it_returns_error_if_sport_id_is_invalid(
        self,
        app: Flask,
        user_1: User,
        sport_1_cycling: Sport,
        sport_2_running: Sport,
        seven_workouts_user_1: Workout,
        workout_running_user_1: Workout,
    ) -> None:
        """A non-integer sport id is reported as a generic server error."""
        client, auth_token = self.get_test_client_and_auth_token(app)

        response = client.get(
            f'/api/stats/{user_1.username}/by_sport?sport_id="999',
            headers={'Authorization': f'Bearer {auth_token}'},
        )

        payload = json.loads(response.data.decode())
        assert response.status_code == 500
        assert 'error' in payload['status']
        assert (
            'Error. Please try again or contact the administrator.'
            in payload['message']
        )
class TestGetAllStats(ApiTestCaseMixin):
    """Tests for the admin-only ``/api/stats/all`` endpoint."""

    def test_it_returns_all_stats_when_users_have_no_workouts(
        self, app: Flask, user_1_admin: User, user_2: User
    ) -> None:
        """Counts are zero (except users) when no workouts exist."""
        client, auth_token = self.get_test_client_and_auth_token(
            app, as_admin=True
        )

        response = client.get(
            '/api/stats/all',
            headers={'Authorization': f'Bearer {auth_token}'},
        )

        payload = json.loads(response.data.decode())
        assert response.status_code == 200
        assert 'success' in payload['status']
        assert payload['data']['workouts'] == 0
        assert payload['data']['sports'] == 0
        assert payload['data']['users'] == 2
        assert 'uploads_dir_size' in payload['data']

    def test_it_gets_app_all_stats_with_workouts(
        self,
        app: Flask,
        user_1_admin: User,
        user_2: User,
        user_3: User,
        sport_1_cycling: Sport,
        sport_2_running: Sport,
        workout_cycling_user_1: Workout,
        workout_cycling_user_2: Workout,
        workout_running_user_1: Workout,
    ) -> None:
        """Application-wide totals cover all users' workouts and sports."""
        client, auth_token = self.get_test_client_and_auth_token(
            app, as_admin=True
        )

        response = client.get(
            '/api/stats/all',
            headers={'Authorization': f'Bearer {auth_token}'},
        )

        payload = json.loads(response.data.decode())
        assert response.status_code == 200
        assert 'success' in payload['status']
        assert payload['data']['workouts'] == 3
        assert payload['data']['sports'] == 2
        assert payload['data']['users'] == 3
        assert 'uploads_dir_size' in payload['data']

    def test_it_returns_error_if_user_has_no_admin_rights(
        self,
        app: Flask,
        user_1: User,
        user_2: User,
        user_3: User,
        sport_1_cycling: Sport,
        sport_2_running: Sport,
        workout_cycling_user_1: Workout,
        workout_cycling_user_2: Workout,
        workout_running_user_1: Workout,
    ) -> None:
        """A non-admin user gets a 403 with a permission error message."""
        client, auth_token = self.get_test_client_and_auth_token(app)

        response = client.get(
            '/api/stats/all',
            headers={'Authorization': f'Bearer {auth_token}'},
        )

        payload = json.loads(response.data.decode())
        assert response.status_code == 403
        assert 'success' not in payload['status']
        assert 'error' in payload['status']
        assert 'You do not have permissions.' in payload['message']
| gpl-3.0 |
ds-hwang/chromium-crosswalk | tools/perf/page_sets/intl_ko_th_vi.py | 10 | 1671 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class IntlKoThViPage(page_module.Page):
    """A single page of the Korean/Thai/Vietnamese page set, replayed from
    the shared WPR archive on a desktop shared-state page."""

    def __init__(self, url, page_set):
        super(IntlKoThViPage, self).__init__(
            url=url,
            page_set=page_set,
            shared_page_state_class=shared_page_state.SharedDesktopPageState)
        # All pages in this set replay from the same archive.
        self.archive_data_file = 'data/intl_ko_th_vi.json'
class IntlKoThViPageSet(story.StorySet):
    """ Popular pages in Korean, Thai and Vietnamese. """

    def __init__(self):
        super(IntlKoThViPageSet, self).__init__(
            archive_data_file='data/intl_ko_th_vi.json',
            cloud_storage_bucket=story.PARTNER_BUCKET)

        for page_url in [
            # Why: #7 site in Vietnam
            'http://us.24h.com.vn/',
            # Why: #6 site in Vietnam
            'http://vnexpress.net/',
            # Why: #18 site in Vietnam
            'http://vietnamnet.vn/',
            # Why: #5 site in Vietnam
            # pylint: disable=line-too-long
            'http://news.zing.vn/the-gioi/ba-dam-thep-margaret-thatcher-qua-doi/a312895.html#home_noibat1',
            'http://kenh14.vn/home.chn',
            # Why: #5 site in Korea
            'http://www.naver.com/',
            # Why: #9 site in Korea
            'http://www.daum.net/',
            # Why: #25 site in Korea
            'http://www.donga.com/',
            'http://www.chosun.com/',
            'http://www.danawa.com/',
            # Why: #10 site in Thailand
            'http://pantip.com/',
            'http://thaimisc.com/'
        ]:
            self.AddStory(IntlKoThViPage(page_url, self))
| bsd-3-clause |
Pulgama/supriya | supriya/realtime/BufferProxy.py | 1 | 8402 | from supriya.system.SupriyaValueObject import SupriyaValueObject
class BufferProxy(SupriyaValueObject):
    """
    A buffer proxy.

    Acts as a singleton reference to a buffer on the server, tracking the state
    of a single buffer id and responding to `/b_info` messages. Multiple Buffer
    instances reference a single BufferProxy.

    BufferProxy instances are created internally by the server, and should be
    treated as an implementation detail.

    ::

        >>> server = supriya.Server.default()
        >>> buffer_proxy = supriya.realtime.BufferProxy(
        ...     buffer_id=0,
        ...     server=server,
        ...     channel_count=2,
        ...     frame_count=441,
        ...     sample_rate=44100,
        ...     )
        >>> buffer_proxy
        BufferProxy(
            buffer_id=0,
            channel_count=2,
            frame_count=441,
            sample_rate=44100,
            server=<Server: offline>,
            )

    """

    ### CLASS VARIABLES ###

    __documentation_section__ = "Server Internals"

    # __slots__ keeps per-instance memory low; the server may track many
    # buffers at once.
    __slots__ = (
        "_buffer_id",
        "_channel_count",
        "_frame_count",
        "_sample_rate",
        "_server",
    )

    ### INITIALIZER ###

    def __init__(
        self, buffer_id=None, channel_count=0, frame_count=0, sample_rate=0, server=None
    ):
        # Local import avoids a circular import between this module and
        # supriya.realtime.
        import supriya.realtime

        buffer_id = int(buffer_id)
        assert 0 <= buffer_id
        assert isinstance(server, supriya.realtime.Server)
        self._buffer_id = int(buffer_id)
        self._channel_count = int(channel_count)
        self._frame_count = int(frame_count)
        self._sample_rate = int(sample_rate)
        self._server = server

    ### SPECIAL METHODS ###

    def __float__(self):
        """
        Gets float representation of buffer proxy.

        ::

            >>> server = supriya.Server.default()
            >>> buffer_proxy = supriya.realtime.BufferProxy(
            ...     buffer_id=0,
            ...     server=server,
            ...     channel_count=2,
            ...     frame_count=441,
            ...     sample_rate=44100,
            ...     )
            >>> float(buffer_proxy)
            0.0

        Returns float.
        """
        return float(self.buffer_id)

    def __int__(self):
        """
        Gets integer representation of buffer proxy.

        ::

            >>> server = supriya.Server.default()
            >>> buffer_proxy = supriya.realtime.BufferProxy(
            ...     buffer_id=0,
            ...     server=server,
            ...     channel_count=2,
            ...     frame_count=441,
            ...     sample_rate=44100,
            ...     )
            >>> int(buffer_proxy)
            0

        Returns integer.
        """
        return int(self.buffer_id)

    ### PRIVATE METHODS ###

    def _handle_response(self, response):
        """
        Updates buffer proxy with buffer-info response.

        ::

            >>> server = supriya.Server.default()
            >>> a_buffer = supriya.realtime.BufferProxy(
            ...     buffer_id=23,
            ...     channel_count=1,
            ...     frame_count=256,
            ...     sample_rate=44100,
            ...     server=server,
            ...     )
            >>> a_buffer
            BufferProxy(
                buffer_id=23,
                channel_count=1,
                frame_count=256,
                sample_rate=44100,
                server=<Server: offline>,
                )

        ::

            >>> response_item = supriya.commands.BufferInfoResponse.Item(
            ...     buffer_id=23,
            ...     channel_count=2,
            ...     frame_count=512,
            ...     sample_rate=44100,
            ...     )

        ::

            >>> a_buffer._handle_response(response_item)
            >>> a_buffer
            BufferProxy(
                buffer_id=23,
                channel_count=2,
                frame_count=512,
                sample_rate=44100,
                server=<Server: offline>,
                )

        Returns none.
        """
        import supriya.commands

        # Only `/b_info` response items are handled; anything else is ignored.
        if isinstance(response, supriya.commands.BufferInfoResponse.Item):
            assert response.buffer_id == self.buffer_id
            self._channel_count = response.channel_count
            self._frame_count = response.frame_count
            self._sample_rate = response.sample_rate

    ### PUBLIC PROPERTIES ###

    @property
    def buffer_id(self):
        """
        Gets buffer id of buffer proxy.

        ::

            >>> server = supriya.Server.default()
            >>> buffer_proxy = supriya.realtime.BufferProxy(
            ...     buffer_id=0,
            ...     server=server,
            ...     channel_count=2,
            ...     frame_count=441,
            ...     sample_rate=44100,
            ...     )
            >>> buffer_proxy.buffer_id
            0

        Returns integer.
        """
        return self._buffer_id

    @property
    def channel_count(self):
        """
        Gets channel count of buffer proxy.

        ::

            >>> server = supriya.Server.default()
            >>> buffer_proxy = supriya.realtime.BufferProxy(
            ...     buffer_id=0,
            ...     server=server,
            ...     channel_count=2,
            ...     frame_count=441,
            ...     sample_rate=44100,
            ...     )
            >>> buffer_proxy.channel_count
            2

        Returns integer.
        """
        return self._channel_count

    @property
    def duration_in_seconds(self):
        """
        Gets duration in seconds of buffer proxy.

        ::

            >>> server = supriya.Server.default()
            >>> buffer_proxy = supriya.realtime.BufferProxy(
            ...     buffer_id=0,
            ...     server=server,
            ...     channel_count=2,
            ...     frame_count=441,
            ...     sample_rate=44100,
            ...     )
            >>> buffer_proxy.duration_in_seconds
            0.01

        Returns float.
        """
        # NOTE(review): raises ZeroDivisionError when sample_rate is 0 (the
        # constructor default) — presumably proxies are always created with a
        # real sample rate; confirm against the server code.
        return float(self._frame_count) / float(self.sample_rate)

    @property
    def frame_count(self):
        """
        Gets frame count of buffer proxy.

        ::

            >>> server = supriya.Server.default()
            >>> buffer_proxy = supriya.realtime.BufferProxy(
            ...     buffer_id=0,
            ...     server=server,
            ...     channel_count=2,
            ...     frame_count=441,
            ...     sample_rate=44100,
            ...     )
            >>> buffer_proxy.frame_count
            441

        Returns integer.
        """
        return self._frame_count

    @property
    def sample_count(self):
        """
        Gets sample count of buffer proxy.

        ::

            >>> server = supriya.Server.default()
            >>> buffer_proxy = supriya.realtime.BufferProxy(
            ...     buffer_id=0,
            ...     server=server,
            ...     channel_count=2,
            ...     frame_count=441,
            ...     sample_rate=44100,
            ...     )
            >>> buffer_proxy.sample_count
            882

        Returns integer.
        """
        # Samples across all channels: channels * frames.
        return self._channel_count * self._frame_count

    @property
    def sample_rate(self):
        """
        Gets sample-rate of buffer proxy.

        ::

            >>> server = supriya.Server.default()
            >>> buffer_proxy = supriya.realtime.BufferProxy(
            ...     buffer_id=0,
            ...     server=server,
            ...     channel_count=2,
            ...     frame_count=441,
            ...     sample_rate=44100,
            ...     )
            >>> buffer_proxy.sample_rate
            44100

        Returns integer.
        """
        return self._sample_rate

    @property
    def server(self):
        """
        Gets server of buffer proxy.

        ::

            >>> server = supriya.Server.default()
            >>> buffer_proxy = supriya.realtime.BufferProxy(
            ...     buffer_id=0,
            ...     server=server,
            ...     channel_count=2,
            ...     frame_count=441,
            ...     sample_rate=44100,
            ...     )
            >>> buffer_proxy.server
            <Server: offline>

        Returns server.
        """
        return self._server
| mit |
kidmaple/CoolWall | user/python/Tools/scripts/which.py | 3 | 1130 | #! /usr/bin/env python
# Variant of "which".
# On stderr, near and total misses are reported.
# '-l<flags>' argument adds ls -l<flags> of each file found.
import sys
if sys.path[0] in (".", ""): del sys.path[0]
import sys, os, string
from stat import *
def msg(text):
    """Write a diagnostic line to stderr, followed by a newline."""
    # Parameter renamed from ``str``: it shadowed the builtin.  Callers in
    # this script all pass positionally, so the rename is compatible.
    sys.stderr.write(text + '\n')
# Main script body (Python 2): for each program name on the command line,
# search every $PATH directory and report where it is found, plus near and
# total misses on stderr.
pathlist = string.splitfields(os.environ['PATH'], ':')

sts = 0
longlist = ''

# Optional leading '-l<flags>' argument: run `ls -l<flags>` on every match.
if sys.argv[1:] and sys.argv[1][:2] == '-l':
    longlist = sys.argv[1]
    del sys.argv[1]

for prog in sys.argv[1:]:
    # ``ident`` holds (mode, inode, device) of the first executable found,
    # used to distinguish hard links ("same as:") from distinct files ("also:").
    ident = ()
    for dir in pathlist:
        file = os.path.join(dir, prog)
        try:
            st = os.stat(file)
        except os.error:
            # Path entry does not contain the program; keep searching.
            continue
        if not S_ISREG(st[ST_MODE]):
            msg(file + ': not a disk file')
        else:
            mode = S_IMODE(st[ST_MODE])
            # Any executable bit (user/group/other) counts.
            if mode & 0111:
                if not ident:
                    # First hit: print it and remember its identity.
                    print file
                    ident = st[:3]
                else:
                    if st[:3] == ident:
                        s = 'same as: '
                    else:
                        s = 'also: '
                    msg(s + file)
            else:
                msg(file + ': not executable')
        if longlist:
            sts = os.system('ls ' + longlist + ' ' + file)
            if sts: msg('"ls -l" exit status: ' + `sts`)
    if not ident:
        msg(prog + ': not found')
        sts = 1

# Exit non-zero if any program was not found or `ls` failed.
sys.exit(sts)
| gpl-2.0 |
dreispt/geospatial | geoengine_project/tests/test_geoengine_project.py | 5 | 1905 | # -*- coding: utf-8 -*-
#
#
# Authors: Jonathan Nemry
# Copyright (c) 2015 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import openerp.tests.common as common
class TestGeoengineProjects(common.TransactionCase):
    """CRUD round-trip for a project linked to a geolocated partner."""

    def setUp(self):
        common.TransactionCase.setUp(self)

    def test_crud_project(self):
        partner_vals = {
            'name': 'Partner Project',
            'street': 'Rue au bois la dame',
            'country_id': self.env.ref('base.be').id,
            'zip': '6800',
        }
        partner = self.env['res.partner'].create(partner_vals)

        project_vals = {
            'name': 'Located Partner',
            'partner_id': partner.id,
            'parent_id': self.env.ref('project.all_projects_account').id,
            'description': 'located partner',
        }
        project = self.env['project.project'].create(project_vals)

        # Update, then check the record is searchable, then delete it.
        project.name = 'Other Name'
        domain = [('id', '=', project.id)]
        self.assertTrue(
            self.env['project.project'].search(domain),
            'Should find this project')
        project.unlink()
        self.assertFalse(
            self.env['project.project'].search(domain),
            'Should not find anymore this project')
| agpl-3.0 |
dirtbag3/osmapi | tests/node_tests.py | 2 | 10554 | from __future__ import (unicode_literals, absolute_import)
from nose.tools import * # noqa
from . import osmapi_tests
import osmapi
import mock
import datetime
class TestOsmApiNode(osmapi_tests.TestOsmApi):
    """Unit tests for the node-related osmapi calls.

    Every test replaces the HTTP connection with a mock (``self._conn_mock``,
    fed by canned XML fixtures) and then asserts two things: the request that
    was issued (HTTP verb and path, read back from
    ``self.api._conn.putrequest.call_args``) and the parsed Python result.
    """

    def test_NodeGet(self):
        self._conn_mock()

        result = self.api.NodeGet(123)

        args, kwargs = self.api._conn.putrequest.call_args
        self.assertEquals(args[0], 'GET')
        self.assertEquals(args[1], '/api/0.6/node/123')

        self.assertEquals(result, {
            'id': 123,
            'changeset': 15293,
            'uid': 605,
            'timestamp': datetime.datetime(2012, 4, 18, 11, 14, 26),
            'lat': 51.8753146,
            'lon': -1.4857118,
            'visible': True,
            'version': 8,
            'user': 'freundchen',
            'tag': {
                'amenity': 'school',
                'foo': 'bar',
                'name': 'Berolina & Schule'
            },
        })

    def test_NodeGet_with_version(self):
        # Requesting an explicit version appends it to the URL path.
        self._conn_mock()

        result = self.api.NodeGet(123, NodeVersion=2)

        args, kwargs = self.api._conn.putrequest.call_args
        self.assertEquals(args[0], 'GET')
        self.assertEquals(args[1], '/api/0.6/node/123/2')

        self.assertEquals(result, {
            'id': 123,
            'changeset': 4152,
            'uid': 605,
            'timestamp': datetime.datetime(2011, 4, 18, 11, 14, 26),
            'lat': 51.8753146,
            'lon': -1.4857118,
            'visible': True,
            'version': 2,
            'user': 'freundchen',
            'tag': {
                'amenity': 'school',
            },
        })

    def test_NodeCreate_changesetauto(self):
        # With changesetauto=True, NodeCreate queues the change and returns
        # None; the changeset is created/uploaded/closed automatically, hence
        # the three fixture responses.
        # setup mock
        self.api = osmapi.OsmApi(
            api="api06.dev.openstreetmap.org",
            changesetauto=True
        )
        self._conn_mock(auth=True, filenames=[
            'test_NodeCreate_changesetauto.xml',
            'test_ChangesetUpload_create_node.xml',
            'test_ChangesetClose.xml',
        ])

        test_node = {
            'lat': 47.123,
            'lon': 8.555,
            'tag': {
                'amenity': 'place_of_worship',
                'religion': 'pastafarian'
            }
        }

        self.assertIsNone(self.api.NodeCreate(test_node))

    def test_NodeCreate(self):
        self._conn_mock(auth=True)

        # setup mock
        self.api.ChangesetCreate = mock.Mock(
            return_value=1111
        )
        self.api._CurrentChangesetId = 1111

        test_node = {
            'lat': 47.287,
            'lon': 8.765,
            'tag': {
                'amenity': 'place_of_worship',
                'religion': 'pastafarian'
            }
        }

        cs = self.api.ChangesetCreate({
            'comment': 'This is a test dataset'
        })
        self.assertEquals(cs, 1111)
        result = self.api.NodeCreate(test_node)

        args, kwargs = self.api._conn.putrequest.call_args
        self.assertEquals(args[0], 'PUT')
        self.assertEquals(args[1], '/api/0.6/node/create')

        self.assertEquals(result['id'], 9876)
        self.assertEquals(result['lat'], test_node['lat'])
        self.assertEquals(result['lon'], test_node['lon'])
        self.assertEquals(result['tag'], test_node['tag'])

    def test_NodeCreate_wo_changeset(self):
        # Creating a node without an open changeset must raise.
        test_node = {
            'lat': 47.287,
            'lon': 8.765,
            'tag': {
                'amenity': 'place_of_worship',
                'religion': 'pastafarian'
            }
        }

        with self.assertRaisesRegexp(
                osmapi.NoChangesetOpenError,
                'need to open a changeset'):
            self.api.NodeCreate(test_node)

    def test_NodeCreate_existing_node(self):
        # A node dict that already carries an 'id' cannot be created again.
        # setup mock
        self.api.ChangesetCreate = mock.Mock(
            return_value=1111
        )
        self.api._CurrentChangesetId = 1111

        test_node = {
            'id': 123,
            'lat': 47.287,
            'lon': 8.765,
            'tag': {
                'amenity': 'place_of_worship',
                'religion': 'pastafarian'
            }
        }

        with self.assertRaisesRegexp(
                osmapi.OsmTypeAlreadyExistsError,
                'This node already exists'):
            self.api.NodeCreate(test_node)

    def test_NodeCreate_wo_auth(self):
        # Write operations require credentials (_conn_mock(auth=False) here).
        self._conn_mock()

        # setup mock
        self.api.ChangesetCreate = mock.Mock(
            return_value=1111
        )
        self.api._CurrentChangesetId = 1111

        test_node = {
            'lat': 47.287,
            'lon': 8.765,
            'tag': {
                'amenity': 'place_of_worship',
                'religion': 'pastafarian'
            }
        }

        with self.assertRaisesRegexp(
                osmapi.UsernamePasswordMissingError,
                'Username/Password missing'):
            self.api.NodeCreate(test_node)

    def test_NodeCreate_with_exception(self):
        # When every HTTP attempt fails, the retry loop gives up after 5 tries.
        self._conn_mock(auth=True)
        self.api._http_request = mock.Mock(side_effect=Exception)

        # setup mock
        self.api.ChangesetCreate = mock.Mock(
            return_value=1111
        )
        self.api._CurrentChangesetId = 1111

        test_node = {
            'lat': 47.287,
            'lon': 8.765,
            'tag': {
                'amenity': 'place_of_worship',
                'religion': 'pastafarian'
            }
        }

        with self.assertRaisesRegexp(
                osmapi.MaximumRetryLimitReachedError,
                'Give up after 5 retries'):
            self.api.NodeCreate(test_node)

    def test_NodeUpdate(self):
        self._conn_mock(auth=True)

        # setup mock
        self.api.ChangesetCreate = mock.Mock(
            return_value=1111
        )
        self.api._CurrentChangesetId = 1111

        test_node = {
            'id': 7676,
            'lat': 47.287,
            'lon': 8.765,
            'tag': {
                'amenity': 'place_of_worship',
                'name': 'christian'
            }
        }

        cs = self.api.ChangesetCreate({
            'comment': 'This is a test dataset'
        })
        self.assertEquals(cs, 1111)
        result = self.api.NodeUpdate(test_node)

        args, kwargs = self.api._conn.putrequest.call_args
        self.assertEquals(args[0], 'PUT')
        self.assertEquals(args[1], '/api/0.6/node/7676')

        # Version is bumped by the server (fixture reports version 3).
        self.assertEquals(result['id'], 7676)
        self.assertEquals(result['version'], 3)
        self.assertEquals(result['lat'], test_node['lat'])
        self.assertEquals(result['lon'], test_node['lon'])
        self.assertEquals(result['tag'], test_node['tag'])

    def test_NodeDelete(self):
        self._conn_mock(auth=True)

        # setup mock
        self.api.ChangesetCreate = mock.Mock(
            return_value=1111
        )
        self.api._CurrentChangesetId = 1111

        test_node = {
            'id': 7676
        }

        cs = self.api.ChangesetCreate({
            'comment': 'This is a test dataset'
        })
        self.assertEquals(cs, 1111)

        result = self.api.NodeDelete(test_node)

        args, kwargs = self.api._conn.putrequest.call_args
        self.assertEquals(args[0], 'DELETE')
        self.assertEquals(args[1], '/api/0.6/node/7676')
        self.assertEquals(result['id'], 7676)
        self.assertEquals(result['version'], 4)

    def test_NodeHistory(self):
        self._conn_mock()

        result = self.api.NodeHistory(123)

        args, kwargs = self.api._conn.putrequest.call_args
        self.assertEquals(args[0], 'GET')
        self.assertEquals(args[1], '/api/0.6/node/123/history')

        self.assertEquals(len(result), 8)
        self.assertEquals(result[4]['id'], 123)
        self.assertEquals(result[4]['version'], 4)
        self.assertEquals(result[4]['lat'], 51.8753146)
        self.assertEquals(result[4]['lon'], -1.4857118)
        self.assertEquals(
            result[4]['tag'], {
                'empty': '',
                'foo': 'bar',
            }
        )

    def test_NodeWays(self):
        self._conn_mock()

        result = self.api.NodeWays(234)

        args, kwargs = self.api._conn.putrequest.call_args
        self.assertEquals(args[0], 'GET')
        self.assertEquals(args[1], '/api/0.6/node/234/ways')

        self.assertEquals(len(result), 1)
        self.assertEquals(result[0]['id'], 60)
        self.assertEquals(result[0]['changeset'], 61)
        self.assertEquals(
            result[0]['tag'],
            {
                'highway': 'path',
                'name': 'Dog walking path',
            }
        )

    def test_NodeRelations(self):
        self._conn_mock()

        result = self.api.NodeRelations(4295668179)

        args, kwargs = self.api._conn.putrequest.call_args
        self.assertEquals(args[0], 'GET')
        self.assertEquals(args[1], '/api/0.6/node/4295668179/relations')

        self.assertEquals(len(result), 1)
        self.assertEquals(result[0]['id'], 4294968148)
        self.assertEquals(result[0]['changeset'], 23123)
        self.assertEquals(
            result[0]['member'][1],
            {
                'role': 'point',
                'ref': 4295668179,
                'type': 'node',
            }
        )
        self.assertEquals(
            result[0]['tag'],
            {
                'type': 'fancy',
            }
        )

    def test_NodesGet(self):
        # Bulk fetch: ids are joined into a single ?nodes= query parameter
        # and the result is a dict keyed by node id.
        self._conn_mock()

        result = self.api.NodesGet([123, 345])

        args, kwargs = self.api._conn.putrequest.call_args
        self.assertEquals(args[0], 'GET')
        self.assertEquals(args[1], '/api/0.6/nodes?nodes=123,345')

        self.assertEquals(len(result), 2)
        self.assertEquals(result[123], {
            'id': 123,
            'changeset': 15293,
            'uid': 605,
            'timestamp': datetime.datetime(2012, 4, 18, 11, 14, 26),
            'lat': 51.8753146,
            'lon': -1.4857118,
            'visible': True,
            'version': 8,
            'user': 'freundchen',
            'tag': {
                'amenity': 'school',
                'foo': 'bar',
                'name': 'Berolina & Schule'
            },
        })
        self.assertEquals(result[345], {
            'id': 345,
            'changeset': 244,
            'timestamp': datetime.datetime(2009, 9, 12, 3, 22, 59),
            'uid': 1,
            'visible': False,
            'version': 2,
            'user': 'guggis',
            'tag': {},
        })
| gpl-3.0 |
dbertha/odoo | openerp/cli/__init__.py | 135 | 2016 | import logging
import sys
import os
import openerp
from openerp import tools
from openerp.modules import module
_logger = logging.getLogger(__name__)

# Registry of available CLI subcommands, keyed by command name.  Populated
# automatically by the CommandType metaclass when Command subclasses are
# defined (including ones defined by addon modules).
commands = {}
class CommandType(type):
    """Metaclass that registers every ``Command`` subclass in ``commands``.

    The registry key is the ``name`` attribute set in the class body when one
    is given, otherwise the lowercased class name.  The ``Command`` base class
    itself (key 'command') is excluded from the registry.
    """

    def __init__(cls, name, bases, attrs):
        super(CommandType, cls).__init__(name, bases, attrs)
        # Bug fix: the previous code read ``getattr(cls, name, ...)`` -- i.e.
        # it looked up an attribute named after the *class* (e.g. "Help"),
        # which almost never exists, so an explicit ``name`` in a subclass
        # body was silently ignored.  Reading ``attrs`` honours an explicit
        # ``name`` without picking up one inherited from a parent command,
        # and falls back to the lowercased class name exactly as before.
        name = attrs.get('name', name.lower())
        cls.name = name
        if name != 'command':
            commands[name] = cls
class Command(object):
    """Subclass this class to define new openerp subcommands """
    # Python 2 metaclass hook: every subclass is automatically registered in
    # the module-level ``commands`` dict by CommandType.
    __metaclass__ = CommandType

    def run(self, args):
        # Entry point, invoked with the remaining command-line arguments.
        # Subclasses override this; the base implementation does nothing.
        pass
class Help(Command):
    """Display the list of available commands"""
    def run(self, args):
        # Python 2 print statements: this module targets OpenERP's py2 runtime.
        print "Available commands:\n"
        # Pad each command name to the longest one so docstrings line up.
        padding = max([len(k) for k in commands.keys()]) + 2
        for k, v in commands.items():
            # Each command's first docstring line doubles as its description.
            print " %s%s" % (k.ljust(padding, ' '), v.__doc__ or '')
        print "\nUse '%s <command> --help' for individual command help." % sys.argv[0].split(os.path.sep)[-1]
import server
import deploy
import scaffold
import start
def main():
    """Dispatch ``[--addons-path=...] [subcommand] [args...]``.

    Falls back to the legacy ``server`` command when the first argument does
    not look like a subcommand.  All addon modules are imported first so that
    addons can register additional subcommands via the CommandType metaclass.
    """
    args = sys.argv[1:]

    # The only shared option is '--addons-path=' needed to discover additional
    # commands from modules
    if len(args) > 1 and args[0].startswith('--addons-path=') and not args[1].startswith("-"):
        # parse only the addons-path, do not setup the logger...
        tools.config._parse_config([args[0]])
        args = args[1:]

    # Default legacy command
    command = "server"

    # Subcommand discovery
    if len(args) and not args[0].startswith("-"):
        # Silence logging while importing every addon module -- imports can be
        # noisy and their log setup is not wanted here.
        logging.disable(logging.CRITICAL)
        for m in module.get_modules():
            m = 'openerp.addons.' + m
            __import__(m)
            #try:
            #except Exception, e:
            #    raise
            #    print e
        logging.disable(logging.NOTSET)
        command = args[0]
        args = args[1:]

    if command in commands:
        o = commands[command]()
        o.run(args)
| agpl-3.0 |
bmcfee/librosa | librosa/util/utils.py | 1 | 64787 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions"""
import warnings
import scipy.ndimage
import scipy.sparse
import numpy as np
import numba
from numpy.lib.stride_tricks import as_strided
from .._cache import cache
from .exceptions import ParameterError
# Constrain STFT block sizes to 256 KB
MAX_MEM_BLOCK = 2 ** 8 * 2 ** 10

# Public API of this module.
__all__ = [
    "MAX_MEM_BLOCK",
    "frame",
    "pad_center",
    "fix_length",
    "valid_audio",
    "valid_int",
    "valid_intervals",
    "fix_frames",
    "axis_sort",
    "localmax",
    "localmin",
    "normalize",
    "peak_pick",
    "sparsify_rows",
    "shear",
    "stack",
    "fill_off_diagonal",
    "index_to_slice",
    "sync",
    "softmask",
    "buf_to_float",
    "tiny",
    "cyclic_gradient",
    "dtype_r2c",
    "dtype_c2r",
]
def frame(x, frame_length, hop_length, axis=-1):
    """Slice a data array into (overlapping) frames.

    This implementation uses low-level stride manipulation to avoid
    making a copy of the data.  The resulting frame representation
    is a new view of the same input data.

    However, if the input data is not contiguous in memory, a warning
    will be issued and the output will be a full copy, rather than
    a view of the input data.

    For example, a one-dimensional input ``x = [0, 1, 2, 3, 4, 5, 6]``
    can be framed with frame length 3 and hop length 2 in two ways.
    The first (``axis=-1``), results in the array ``x_frames``::

        [[0, 2, 4],
         [1, 3, 5],
         [2, 4, 6]]

    where each column ``x_frames[:, i]`` contains a contiguous slice of
    the input ``x[i * hop_length : i * hop_length + frame_length]``.

    The second way (``axis=0``) results in the array ``x_frames``::

        [[0, 1, 2],
         [2, 3, 4],
         [4, 5, 6]]

    where each row ``x_frames[i]`` contains a contiguous slice of the input.

    This generalizes to higher dimensional inputs: framing adds one new
    "frame axis", either at the end of the array (``axis=-1``) or the
    beginning (``axis=0``).

    Parameters
    ----------
    x : np.ndarray
        Array to frame
    frame_length : int > 0 [scalar]
        Length of the frame
    hop_length : int > 0 [scalar]
        Number of steps to advance between frames
    axis : 0 or -1
        The axis along which to frame.
        If ``axis=-1`` (the default), then ``x`` is framed along its last
        dimension and must be "F-contiguous".
        If ``axis=0``, then ``x`` is framed along its first dimension and
        must be "C-contiguous".

    Returns
    -------
    x_frames : np.ndarray [shape=(..., frame_length, N_FRAMES) or (N_FRAMES, frame_length, ...)]
        A framed view of ``x``.  With ``axis=-1``::

            x_frames[..., j] == x[..., j * hop_length : j * hop_length + frame_length]

        With ``axis=0``::

            x_frames[j] = x[j * hop_length : j * hop_length + frame_length]

    Raises
    ------
    ParameterError
        If ``x`` is not an `np.ndarray`.

        If ``x.shape[axis] < frame_length``, there is not enough data to fill one frame.

        If ``hop_length < 1``, frames cannot advance.

        If ``axis`` is not 0 or -1.  Framing is only supported along the first or last axis.

    See Also
    --------
    numpy.asfortranarray : Convert data to F-contiguous representation
    numpy.ascontiguousarray : Convert data to C-contiguous representation
    numpy.ndarray.flags : information about the memory layout of a numpy `ndarray`.

    Examples
    --------
    Extract 2048-sample frames from monophonic signal with a hop of 64 samples per frame

    >>> y, sr = librosa.load(librosa.ex('trumpet'))
    >>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64)
    >>> frames.shape
    (2048, 1806)

    Or frame along the first axis instead of the last:

    >>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64, axis=0)
    >>> frames.shape
    (1806, 2048)

    Frame a stereo signal:

    >>> y, sr = librosa.load(librosa.ex('trumpet', hq=True), mono=False)
    >>> y.shape
    (2, 117601)
    >>> frames = librosa.util.frame(y, frame_length=2048, hop_length=64)
    >>> frames.shape
    (2, 2048, 1806)

    Carve an STFT into fixed-length patches of 32 frames with 50% overlap

    >>> y, sr = librosa.load(librosa.ex('trumpet'))
    >>> S = np.abs(librosa.stft(y))
    >>> S.shape
    (1025, 230)
    >>> S_patch = librosa.util.frame(S, frame_length=32, hop_length=16)
    >>> S_patch.shape
    (1025, 32, 13)
    >>> # The first patch contains the first 32 frames of S
    >>> np.allclose(S_patch[:, :, 0], S[:, :32])
    True
    >>> # The second patch contains frames 16 to 16+32=48, and so on
    >>> np.allclose(S_patch[:, :, 1], S[:, 16:48])
    True
    """
    if not isinstance(x, np.ndarray):
        raise ParameterError(
            "Input must be of type numpy.ndarray, " "given type(x)={}".format(type(x))
        )

    if x.shape[axis] < frame_length:
        raise ParameterError(
            "Input is too short (n={:d})"
            " for frame_length={:d}".format(x.shape[axis], frame_length)
        )

    if hop_length < 1:
        raise ParameterError("Invalid hop_length: {:d}".format(hop_length))

    # Framing requires the framed axis to be contiguous; fall back to a
    # copy (with a warning) when it is not.
    if axis == -1 and not x.flags["F_CONTIGUOUS"]:
        warnings.warn(
            "librosa.util.frame called with axis={} "
            "on a non-contiguous input. This will result in a copy.".format(axis)
        )
        x = np.asfortranarray(x)
    elif axis == 0 and not x.flags["C_CONTIGUOUS"]:
        warnings.warn(
            "librosa.util.frame called with axis={} "
            "on a non-contiguous input. This will result in a copy.".format(axis)
        )
        x = np.ascontiguousarray(x)

    n_frames = 1 + (x.shape[axis] - frame_length) // hop_length
    strides = np.asarray(x.strides)

    # Element stride along the framed axis (in bytes), derived from the
    # product of the positive strides normalized by the item size.
    new_stride = np.prod(strides[strides > 0] // x.itemsize) * x.itemsize

    if axis == -1:
        shape = list(x.shape)[:-1] + [frame_length, n_frames]
        strides = list(strides) + [hop_length * new_stride]

    elif axis == 0:
        shape = [n_frames, frame_length] + list(x.shape)[1:]
        strides = [hop_length * new_stride] + list(strides)

    else:
        raise ParameterError("Frame axis={} must be either 0 or -1".format(axis))

    return as_strided(x, shape=shape, strides=strides)
@cache(level=20)
def valid_audio(y, mono=True):
    """Validate an audio buffer.

    With ``mono=True``, only 1-dimensional buffers of shape ``(N,)`` are
    accepted.  With ``mono=False``, 1- or 2-dimensional buffers are
    accepted, where a 2-dimensional buffer ``(K, N)`` must have at least
    two channels (``K >= 2``).

    Parameters
    ----------
    y : np.ndarray
        The candidate audio buffer
    mono : bool
        If True, require a monophonic (1-d) buffer

    Returns
    -------
    valid : bool
        True when every check passes

    Raises
    ------
    ParameterError
        If ``y`` is not a floating-point ndarray, has an invalid shape
        for the requested channel layout, or contains non-finite values.

    Notes
    -----
    This function caches at level 20.

    See also
    --------
    numpy.float32
    """
    if not isinstance(y, np.ndarray):
        raise ParameterError("Audio data must be of type numpy.ndarray")

    if not np.issubdtype(y.dtype, np.floating):
        raise ParameterError("Audio data must be floating-point")

    ndim = y.ndim
    if mono and ndim != 1:
        # Monophonic buffers must be exactly one-dimensional
        raise ParameterError(
            "Invalid shape for monophonic audio: "
            "ndim={:d}, shape={}".format(ndim, y.shape)
        )
    elif not (1 <= ndim <= 2):
        # Only (samples,) or (channels, samples) layouts are supported
        raise ParameterError(
            "Audio data must have shape (samples,) or (channels, samples). "
            "Received shape={}".format(y.shape)
        )
    elif ndim == 2 and y.shape[0] < 2:
        # A 2-d buffer with a single channel should have been 1-d
        raise ParameterError(
            "Mono data must have shape (samples,). " "Received shape={}".format(y.shape)
        )

    if not np.isfinite(y).all():
        raise ParameterError("Audio buffer is not finite everywhere")

    return True
def valid_int(x, cast=None):
    """Coerce a scalar to an ``int``.

    Useful for sanitizing values that will be used as array indices.

    Parameters
    ----------
    x : number
        Scalar value to convert
    cast : function [optional]
        Applied to ``x`` before the integer conversion.
        Defaults to `np.floor`.

    Returns
    -------
    x_int : int
        ``int(cast(x))``

    Raises
    ------
    ParameterError
        If a non-callable ``cast`` is supplied.
    """
    caster = np.floor if cast is None else cast

    if not callable(caster):
        raise ParameterError("cast parameter must be callable")

    return int(caster(x))
def valid_intervals(intervals):
    """Check that an array is a well-formed set of time intervals.

    A valid interval array satisfies:

    - ``intervals.ndim == 2``
    - ``intervals.shape[1] == 2``
    - ``intervals[i, 0] <= intervals[i, 1]`` for every row ``i``

    Parameters
    ----------
    intervals : np.ndarray [shape=(n, 2)]
        Candidate interval array

    Returns
    -------
    valid : bool
        True when validation succeeds.

    Raises
    ------
    ParameterError
        If the shape is wrong or any interval has negative duration.
    """
    if intervals.ndim != 2 or intervals.shape[-1] != 2:
        raise ParameterError("intervals must have shape (n, 2)")

    starts = intervals[:, 0]
    ends = intervals[:, 1]
    if np.any(starts > ends):
        raise ParameterError(
            "intervals={} must have non-negative durations".format(intervals)
        )

    return True
def pad_center(data, size, axis=-1, **kwargs):
    """Pad an array to ``size`` along ``axis``, keeping the data centered.

    Unlike a plain `np.pad`, the original data ends up in the middle of
    the padded axis, analogous to `str.center`.

    Parameters
    ----------
    data : np.ndarray
        Array to be padded and centered
    size : int >= len(data) [scalar]
        Target length along ``axis``
    axis : int
        Axis along which to pad and center
    kwargs : additional keyword arguments
        Forwarded to `np.pad` (``mode='constant'`` by default)

    Returns
    -------
    data_padded : np.ndarray
        ``data`` centered and padded to length ``size`` along ``axis``

    Raises
    ------
    ParameterError
        If ``size < data.shape[axis]``

    See Also
    --------
    numpy.pad

    Examples
    --------
    >>> librosa.util.pad_center(np.ones(5), 10, mode='constant')
    array([ 0.,  0.,  1.,  1.,  1.,  1.,  1.,  0.,  0.,  0.])
    """
    kwargs.setdefault("mode", "constant")

    n = data.shape[axis]
    # Split the padding between the two sides; the remainder goes right.
    lpad = int((size - n) // 2)
    rpad = int(size - n - lpad)

    if lpad < 0:
        raise ParameterError(
            ("Target size ({:d}) must be " "at least input size ({:d})").format(size, n)
        )

    widths = [(0, 0)] * data.ndim
    widths[axis] = (lpad, rpad)

    return np.pad(data, widths, **kwargs)
def fix_length(data, size, axis=-1, **kwargs):
    """Force ``data`` to have exactly ``size`` elements along ``axis``.

    Longer inputs are trimmed; shorter inputs are padded at the end
    (with zeros, unless other `np.pad` keywords are given).

    Parameters
    ----------
    data : np.ndarray
        array to be length-adjusted
    size : int >= 0 [scalar]
        desired length of the array
    axis : int, <= data.ndim
        axis along which to fix length
    kwargs : additional keyword arguments
        Forwarded to `np.pad` (``mode='constant'`` by default)

    Returns
    -------
    data_fixed : np.ndarray [shape=data.shape]
        ``data`` trimmed or padded to length ``size`` along ``axis``

    See Also
    --------
    numpy.pad

    Examples
    --------
    >>> librosa.util.fix_length(np.arange(7), 10)
    array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
    >>> librosa.util.fix_length(np.arange(7), 5)
    array([0, 1, 2, 3, 4])
    """
    kwargs.setdefault("mode", "constant")

    n = data.shape[axis]

    if n > size:
        # Too long: slice down to the target length
        index = [slice(None)] * data.ndim
        index[axis] = slice(0, size)
        return data[tuple(index)]

    if n < size:
        # Too short: pad the trailing side only
        widths = [(0, 0)] * data.ndim
        widths[axis] = (0, size - n)
        return np.pad(data, widths, **kwargs)

    # Already the right length
    return data
def fix_frames(frames, x_min=0, x_max=None, pad=True):
    """Constrain a list of frame indices to lie within ``[x_min, x_max]``.

    Parameters
    ----------
    frames : np.ndarray [shape=(n_frames,)]
        List of non-negative frame indices
    x_min : int >= 0 or None
        Minimum allowed frame index
    x_max : int >= 0 or None
        Maximum allowed frame index
    pad : boolean
        If ``True``, expand ``frames`` to span the full range
        ``[x_min, x_max]`` by appending the endpoints.

    Returns
    -------
    fixed_frames : np.ndarray [shape=(n_fixed_frames,), dtype=int]
        Fixed frame indices, flattened and sorted

    Raises
    ------
    ParameterError
        If ``frames`` contains negative values

    Examples
    --------
    >>> librosa.util.fix_frames(np.arange(0, 1000.0, 50), x_max=250)
    array([  0,  50, 100, 150, 200, 250])
    """
    frames = np.asarray(frames)

    if np.any(frames < 0):
        raise ParameterError("Negative frame index detected")

    if pad:
        # Clip into the allowed range, then make sure the endpoints
        # themselves appear in the output.
        if x_min is not None or x_max is not None:
            frames = np.clip(frames, x_min, x_max)

        endpoints = [bound for bound in (x_min, x_max) if bound is not None]
        frames = np.concatenate((endpoints, frames))

    if x_min is not None:
        frames = frames[frames >= x_min]

    if x_max is not None:
        frames = frames[frames <= x_max]

    # De-duplicate, sort, and force integer indices
    return np.unique(frames).astype(int)
def axis_sort(S, axis=-1, index=False, value=None):
    """Sort the rows or columns of a 2-d array by their peak position.

    Parameters
    ----------
    S : np.ndarray [shape=(d, n)]
        Array to be sorted
    axis : int [scalar]
        The axis along which to compute the sorting values:
        ``axis=0`` sorts rows by peak column index,
        ``axis=1`` (or ``-1``) sorts columns by peak row index.
    index : boolean [scalar]
        If true, also return the permutation index array.
    value : function
        Function mapping each slice to its sort key position.
        Default: `np.argmax`.

    Returns
    -------
    S_sort : np.ndarray [shape=(d, n)]
        ``S`` with the columns or rows permuted into sorted order
    idx : np.ndarray (optional) [shape=(d,) or (n,)]
        If ``index == True``, the sorting index used to permute ``S``.

    Raises
    ------
    ParameterError
        If ``S.ndim != 2``
    """
    if value is None:
        value = np.argmax

    if S.ndim != 2:
        raise ParameterError("axis_sort is only defined for 2D arrays")

    # Locate each slice's key position along the complementary axis,
    # then sort slices by that position.
    other_axis = np.mod(1 - axis, S.ndim)
    order = np.argsort(value(S, axis=other_axis))

    selector = [slice(None)] * S.ndim
    selector[axis] = order

    permuted = S[tuple(selector)]
    if index:
        return permuted, order
    return permuted
@cache(level=40)
def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None):
    """Normalize an array along a chosen axis.

    Given a norm (described below) and a target axis, the input
    array is scaled so that::

        norm(S, axis=axis) == 1

    For example, ``axis=0`` normalizes each column of a 2-d array
    by aggregating over the rows (0-axis).
    Similarly, ``axis=1`` normalizes each row of a 2-d array.

    This function also supports thresholding small-norm slices:
    any slice (i.e., row or column) with norm below a specified
    ``threshold`` can be left un-normalized, set to all-zeros, or
    filled with uniform non-zero values that normalize to 1.

    Note: the semantics of this function differ from
    `scipy.linalg.norm` in two ways: multi-dimensional arrays
    are supported, but matrix-norms are not.

    Parameters
    ----------
    S : np.ndarray
        The matrix to normalize

    norm : {np.inf, -np.inf, 0, float > 0, None}
        - `np.inf`  : maximum absolute value
        - `-np.inf` : minimum absolute value
        - `0`       : number of non-zeros (the support)
        - float     : corresponding l_p norm
        - None      : no normalization is performed

    axis : int [scalar]
        Axis along which to compute the norm.

    threshold : number > 0 [optional]
        Only the columns (or rows) with norm at least ``threshold`` are
        normalized.  By default, the threshold is determined from
        the numerical precision of ``S.dtype``.

    fill : None or bool
        If None, columns (or rows) with norm below ``threshold`` are
        left as is.  If False, they are set to 0.  If True, they are
        filled uniformly such that the corresponding norm is 1.

        .. note:: ``fill=True`` is incompatible with ``norm=0`` because
            no uniform vector exists with l0 "norm" equal to 1.

    Returns
    -------
    S_norm : np.ndarray [shape=S.shape]
        Normalized array

    Raises
    ------
    ParameterError
        If ``norm`` is not among the valid types defined above

        If ``S`` is not finite

        If ``fill=True`` and ``norm=0``

    See Also
    --------
    scipy.linalg.norm

    Notes
    -----
    This function caches at level 40.

    Examples
    --------
    >>> S = np.vander(np.arange(-2.0, 2.0))
    >>> librosa.util.normalize(S)
    array([[-1.   ,  1.   , -1.   ,  1.   ],
           [-0.125,  0.25 , -0.5  ,  1.   ],
           [ 0.   ,  0.   ,  0.   ,  1.   ],
           [ 0.125,  0.25 ,  0.5  ,  1.   ]])
    """
    # Avoid div-by-zero
    if threshold is None:
        threshold = tiny(S)

    elif threshold <= 0:
        raise ParameterError(
            "threshold={} must be strictly " "positive".format(threshold)
        )

    if fill not in [None, False, True]:
        raise ParameterError("fill={} must be None or boolean".format(fill))

    if not np.all(np.isfinite(S)):
        raise ParameterError("Input must be finite")

    # All norms only depend on magnitude, let's do that first.
    # NOTE: use the builtin ``float`` (== float64) here; the ``np.float``
    # alias was deprecated in NumPy 1.20 and removed in NumPy 1.24, so
    # ``astype(np.float)`` raises AttributeError on modern NumPy.
    mag = np.abs(S).astype(float)

    # For max/min norms, filling with 1 works
    fill_norm = 1

    if norm == np.inf:
        length = np.max(mag, axis=axis, keepdims=True)

    elif norm == -np.inf:
        length = np.min(mag, axis=axis, keepdims=True)

    elif norm == 0:
        if fill is True:
            raise ParameterError("Cannot normalize with norm=0 and fill=True")

        length = np.sum(mag > 0, axis=axis, keepdims=True, dtype=mag.dtype)

    elif np.issubdtype(type(norm), np.number) and norm > 0:
        length = np.sum(mag ** norm, axis=axis, keepdims=True) ** (1.0 / norm)

        if axis is None:
            fill_norm = mag.size ** (-1.0 / norm)
        else:
            fill_norm = mag.shape[axis] ** (-1.0 / norm)

    elif norm is None:
        return S

    else:
        raise ParameterError("Unsupported norm: {}".format(repr(norm)))

    # indices where norm is below the threshold
    small_idx = length < threshold

    Snorm = np.empty_like(S)
    if fill is None:
        # Leave small indices un-normalized
        length[small_idx] = 1.0
        Snorm[:] = S / length

    elif fill:
        # If we have a non-zero fill value, we locate those entries by
        # doing a nan-divide.
        # If S was finite, then length is finite (except for small positions)
        length[small_idx] = np.nan
        Snorm[:] = S / length
        Snorm[np.isnan(Snorm)] = fill_norm
    else:
        # Set small values to zero by doing an inf-divide.
        # This is safe (by IEEE-754) as long as S is finite.
        length[small_idx] = np.inf
        Snorm[:] = S / length

    return Snorm
def localmax(x, axis=0):
    """Find local maxima in an array.

    An element ``x[i]`` is a local maximum if:

    - ``x[i] > x[i-1]``   (strict on the left)
    - ``x[i] >= x[i+1]``  (non-strict on the right)

    Because the left condition is strict, ``x[0]`` can never be a
    local maximum.

    Parameters
    ----------
    x : np.ndarray [shape=(d1,d2,...)]
        input vector or array
    axis : int
        axis along which to compute local maximality

    Returns
    -------
    m : np.ndarray [shape=x.shape, dtype=bool]
        indicator array of local maximality along ``axis``

    See Also
    --------
    localmin

    Examples
    --------
    >>> librosa.util.localmax(np.array([1, 0, 1, 2, -1, 0, -2, 1]))
    array([False, False, False,  True, False,  True, False,  True], dtype=bool)
    """
    # Edge-pad by one element on each side so every position has a
    # left and right neighbor.
    pad_widths = [(0, 0)] * x.ndim
    pad_widths[axis] = (1, 1)
    padded = np.pad(x, pad_widths, mode="edge")

    # Views of the left neighbors and right neighbors of each element
    left = [slice(None)] * x.ndim
    left[axis] = slice(0, -2)
    right = [slice(None)] * x.ndim
    right[axis] = slice(2, padded.shape[axis])

    return (x > padded[tuple(left)]) & (x >= padded[tuple(right)])
def localmin(x, axis=0):
    """Find local minima in an array.

    An element ``x[i]`` is a local minimum if:

    - ``x[i] < x[i-1]``   (strict on the left)
    - ``x[i] <= x[i+1]``  (non-strict on the right)

    Because the left condition is strict, ``x[0]`` can never be a
    local minimum.

    Parameters
    ----------
    x : np.ndarray [shape=(d1,d2,...)]
        input vector or array
    axis : int
        axis along which to compute local minimality

    Returns
    -------
    m : np.ndarray [shape=x.shape, dtype=bool]
        indicator array of local minimality along ``axis``

    See Also
    --------
    localmax

    Examples
    --------
    >>> librosa.util.localmin(np.array([1, 0, 1, 2, -1, 0, -2, 1]))
    array([False,  True, False, False,  True, False,  True, False])
    """
    # Edge-pad by one element on each side so every position has a
    # left and right neighbor.
    pad_widths = [(0, 0)] * x.ndim
    pad_widths[axis] = (1, 1)
    padded = np.pad(x, pad_widths, mode="edge")

    # Views of the left neighbors and right neighbors of each element
    left = [slice(None)] * x.ndim
    left[axis] = slice(0, -2)
    right = [slice(None)] * x.ndim
    right[axis] = slice(2, padded.shape[axis])

    return (x < padded[tuple(left)]) & (x <= padded[tuple(right)])
def peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
    """Uses a flexible heuristic to pick peaks in a signal.

    A sample n is selected as an peak if the corresponding ``x[n]``
    fulfills the following three conditions:

    1. ``x[n] == max(x[n - pre_max:n + post_max])``
    2. ``x[n] >= mean(x[n - pre_avg:n + post_avg]) + delta``
    3. ``n - previous_n > wait``

    where ``previous_n`` is the last sample picked as a peak (greedily).

    This implementation is based on [#]_ and [#]_.

    .. [#] Boeck, Sebastian, Florian Krebs, and Markus Schedl.
        "Evaluating the Online Capabilities of Onset Detection Methods." ISMIR.
        2012.

    .. [#] https://github.com/CPJKU/onset_detection/blob/master/onset_program.py

    Parameters
    ----------
    x : np.ndarray [shape=(n,)]
        input signal to peak picks from
    pre_max : int >= 0 [scalar]
        number of samples before ``n`` over which max is computed
    post_max : int >= 1 [scalar]
        number of samples after ``n`` over which max is computed
    pre_avg : int >= 0 [scalar]
        number of samples before ``n`` over which mean is computed
    post_avg : int >= 1 [scalar]
        number of samples after ``n`` over which mean is computed
    delta : float >= 0 [scalar]
        threshold offset for mean
    wait : int >= 0 [scalar]
        number of samples to wait after picking a peak

    Returns
    -------
    peaks : np.ndarray [shape=(n_peaks,), dtype=int]
        indices of peaks in ``x``

    Raises
    ------
    ParameterError
        If any input lies outside its defined range
    """
    if pre_max < 0:
        raise ParameterError("pre_max must be non-negative")
    if pre_avg < 0:
        raise ParameterError("pre_avg must be non-negative")
    if delta < 0:
        raise ParameterError("delta must be non-negative")
    if wait < 0:
        raise ParameterError("wait must be non-negative")

    if post_max <= 0:
        raise ParameterError("post_max must be positive")

    if post_avg <= 0:
        raise ParameterError("post_avg must be positive")

    if x.ndim != 1:
        raise ParameterError("input array must be one-dimensional")

    # Ensure valid index types
    pre_max = valid_int(pre_max, cast=np.ceil)
    post_max = valid_int(post_max, cast=np.ceil)
    pre_avg = valid_int(pre_avg, cast=np.ceil)
    post_avg = valid_int(post_avg, cast=np.ceil)
    wait = valid_int(wait, cast=np.ceil)

    # Get the maximum of the signal over a sliding window
    max_length = pre_max + post_max
    max_origin = np.ceil(0.5 * (pre_max - post_max))
    # Using mode='constant' and cval=x.min() effectively truncates
    # the sliding window at the boundaries.
    # NOTE: call scipy.ndimage directly -- the scipy.ndimage.filters
    # namespace has been deprecated and removed from SciPy.
    mov_max = scipy.ndimage.maximum_filter1d(
        x, int(max_length), mode="constant", origin=int(max_origin), cval=x.min()
    )

    # Get the mean of the signal over a sliding window
    avg_length = pre_avg + post_avg
    avg_origin = np.ceil(0.5 * (pre_avg - post_avg))
    # Here, there is no mode which results in the behavior we want,
    # so we'll correct below.
    mov_avg = scipy.ndimage.uniform_filter1d(
        x, int(avg_length), mode="nearest", origin=int(avg_origin)
    )

    # Correct sliding average at the beginning
    n = 0
    # Only need to correct in the range where the window needs to be truncated
    while n - pre_avg < 0 and n < x.shape[0]:
        # This just explicitly does mean(x[n - pre_avg:n + post_avg])
        # with truncation
        start = n - pre_avg
        start = start if start > 0 else 0
        mov_avg[n] = np.mean(x[start : n + post_avg])
        n += 1

    # Correct sliding average at the end
    n = x.shape[0] - post_avg
    # When post_avg > x.shape[0] (weird case), reset to 0
    n = n if n > 0 else 0
    while n < x.shape[0]:
        start = n - pre_avg
        start = start if start > 0 else 0
        mov_avg[n] = np.mean(x[start : n + post_avg])
        n += 1

    # First mask out all entries not equal to the local max
    detections = x * (x == mov_max)

    # Then mask out all entries less than the thresholded average
    detections = detections * (detections >= (mov_avg + delta))

    # Initialize peaks array, to be filled greedily
    peaks = []

    # Remove onsets which are close together in time
    last_onset = -np.inf

    for i in np.nonzero(detections)[0]:
        # Only report an onset if the "wait" samples was reported
        if i > last_onset + wait:
            peaks.append(i)

            # Save last reported onset
            last_onset = i

    return np.array(peaks)
@cache(level=40)
def sparsify_rows(x, quantile=0.01, dtype=None):
    """Return a row-sparse matrix approximating the input

    Parameters
    ----------
    x : np.ndarray [ndim <= 2]
        The input matrix to sparsify.
    quantile : float in [0, 1.0)
        Percentage of magnitude to discard in each row of ``x``
    dtype : np.dtype, optional
        The dtype of the output array.
        If not provided, then ``x.dtype`` will be used.

    Returns
    -------
    x_sparse : ``scipy.sparse.csr_matrix`` [shape=x.shape]
        Row-sparsified approximation of ``x``

        If ``x.ndim == 1``, then ``x`` is interpreted as a row vector,
        and ``x_sparse.shape == (1, len(x))``.

    Raises
    ------
    ParameterError
        If ``x.ndim > 2``

        If ``quantile`` lies outside ``[0, 1.0)``

    Notes
    -----
    This function caches at level 40.

    Examples
    --------
    >>> # Construct a Hann window to sparsify
    >>> x = scipy.signal.hann(32)
    >>> # Discard the bottom percentile
    >>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
    >>> x_sparse
    <1x32 sparse matrix of type '<type 'numpy.float64'>'
        with 26 stored elements in Compressed Sparse Row format>
    """
    # Promote 1-d inputs to a single-row matrix
    if x.ndim == 1:
        x = x.reshape((1, -1))

    elif x.ndim > 2:
        raise ParameterError(
            "Input must have 2 or fewer dimensions. "
            "Provided x.shape={}.".format(x.shape)
        )

    if not 0.0 <= quantile < 1:
        raise ParameterError("Invalid quantile {:.2f}".format(quantile))

    if dtype is None:
        dtype = x.dtype

    # LIL format supports efficient element-wise assignment; converted
    # to CSR at the end.
    x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=dtype)

    mags = np.abs(x)
    # Per-row total magnitude, kept 2-d for broadcasting below
    norms = np.sum(mags, axis=1, keepdims=True)

    # Sort magnitudes ascending within each row and accumulate the
    # fraction of each row's total mass covered so far.
    mag_sort = np.sort(mags, axis=1)
    cumulative_mag = np.cumsum(mag_sort / norms, axis=1)

    # ``argmin`` over the boolean array returns the first False, i.e.
    # the first sorted position whose cumulative mass reaches ``quantile``.
    # ``mag_sort[i, threshold_idx[i]]`` is then the per-row magnitude cutoff.
    threshold_idx = np.argmin(cumulative_mag < quantile, axis=1)

    for i, j in enumerate(threshold_idx):
        # Keep only entries at or above the row's cutoff magnitude
        idx = np.where(mags[i] >= mag_sort[i, j])
        x_sparse[i, idx] = x[i, idx]

    return x_sparse.tocsr()
def buf_to_float(x, n_bytes=2, dtype=np.float32):
    """Convert an integer buffer to floating point values.

    This is primarily useful when loading integer-valued wav data
    into numpy arrays.

    Parameters
    ----------
    x : np.ndarray [dtype=int]
        The integer-valued data buffer

    n_bytes : int [1, 2, 4]
        The number of bytes per sample in ``x``

    dtype : numeric type
        The target output type (default: 32-bit float)

    Returns
    -------
    x_float : np.ndarray [dtype=float]
        The input data buffer cast to floating point
    """
    # Little-endian signed integer layout of the requested width.
    fmt = "<i{:d}".format(n_bytes)

    # Full-scale magnitude of a signed n_bytes-wide sample,
    # e.g. 32768 for 16-bit audio.  Always a power of two, so
    # dividing by it is exact.
    peak = float(1 << (8 * n_bytes - 1))

    # Decode the raw bytes, cast to the target dtype, and normalize
    # into [-1, 1).
    samples = np.frombuffer(x, dtype=fmt).astype(dtype)
    return samples / peak
def index_to_slice(idx, idx_min=None, idx_max=None, step=None, pad=True):
    """Generate a slice array from an index array.

    Parameters
    ----------
    idx : list-like
        Array of index boundaries

    idx_min, idx_max : None or int
        Minimum and maximum allowed indices

    step : None or int
        Step size for each slice.  If `None`, then the default
        step of 1 is used.

    pad : boolean
        If `True`, pad ``idx`` to span the range ``idx_min:idx_max``.

    Returns
    -------
    slices : list of slice
        ``slices[i] = slice(idx[i], idx[i+1], step)``
        Additional slice objects may be added at the beginning or end,
        depending on whether ``pad==True`` and the supplied values for
        ``idx_min`` and ``idx_max``.

    See Also
    --------
    fix_frames
    """
    # Clip (and optionally pad) the boundary indices into the
    # allowed [idx_min, idx_max] range.
    boundaries = fix_frames(idx, idx_min, idx_max, pad=pad)

    # Pair consecutive boundaries as (start, end) half-open intervals.
    slices = []
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        slices.append(slice(start, end, step))
    return slices
@cache(level=40)
def sync(data, idx, aggregate=None, pad=True, axis=-1):
    """Synchronous aggregation of a multi-dimensional array between boundaries

    .. note::
        In order to ensure total coverage, boundary points may be added
        to ``idx``.

        If synchronizing a feature matrix against beat tracker output, ensure
        that frame index numbers are properly aligned and use the same hop length.

    Parameters
    ----------
    data : np.ndarray
        multi-dimensional array of features

    idx : iterable of ints or slices
        Either an ordered array of boundary indices, or
        an iterable collection of slice objects.

    aggregate : function
        aggregation function (default: `np.mean`)

    pad : boolean
        If `True`, ``idx`` is padded to span the full range
        ``[0, data.shape[axis]]``

    axis : int
        The axis along which to aggregate data

    Returns
    -------
    data_sync : ndarray
        ``data_sync`` will have the same dimension as ``data``, except that
        the ``axis`` coordinate will be reduced according to ``idx``.

    Raises
    ------
    ParameterError
        If the index set is not of consistent type (all slices or all integers)

    Notes
    -----
    This function caches at level 40.
    """
    agg_fn = np.mean if aggregate is None else aggregate

    shape = list(data.shape)

    # Accept either a homogeneous set of slices, or a homogeneous set
    # of integer boundaries (which are converted into slices).
    if all(isinstance(item, slice) for item in idx):
        slices = idx
    elif all(np.issubdtype(type(item), np.integer) for item in idx):
        slices = index_to_slice(np.asarray(idx), 0, shape[axis], pad=pad)
    else:
        raise ParameterError("Invalid index set: {}".format(idx))

    out_shape = shape[:]
    out_shape[axis] = len(slices)

    # Preserve the memory layout of the input.
    order = "F" if np.isfortran(data) else "C"
    data_sync = np.empty(out_shape, order=order, dtype=data.dtype)

    # Index templates: full slices everywhere except the target axis.
    source_index = [slice(None)] * data.ndim
    target_index = [slice(None)] * data_sync.ndim

    for pos, segment in enumerate(slices):
        source_index[axis] = segment
        target_index[axis] = pos
        data_sync[tuple(target_index)] = agg_fn(
            data[tuple(source_index)], axis=axis
        )

    return data_sync
def softmask(X, X_ref, power=1, split_zeros=False):
    """Robustly compute a soft-mask operation.

        ``M = X**power / (X**power + X_ref**power)``

    Parameters
    ----------
    X : np.ndarray
        The (non-negative) input array corresponding to the positive mask
        elements

    X_ref : np.ndarray
        The (non-negative) array of reference or background elements.
        Must have the same shape as ``X``.

    power : number > 0 or np.inf
        If finite, returns the soft mask computed in a numerically stable way.
        If infinite, returns a hard (binary) mask equivalent to ``X > X_ref``.
        Note: for hard masks, ties are always broken in favor of
        ``X_ref`` (``mask=0``).

    split_zeros : bool
        If `True`, entries where ``X`` and ``X_ref`` are both small
        (close to 0) will receive mask values of 0.5.
        Otherwise, the mask is set to 0 for these entries.

    Returns
    -------
    mask : np.ndarray, shape=X.shape
        The output mask array

    Raises
    ------
    ParameterError
        If ``X`` and ``X_ref`` have different shapes.
        If ``X`` or ``X_ref`` are negative anywhere.
        If ``power <= 0``
    """
    if X.shape != X_ref.shape:
        raise ParameterError("Shape mismatch: {}!={}".format(X.shape, X_ref.shape))

    if np.any(X < 0) or np.any(X_ref < 0):
        raise ParameterError("X and X_ref must be non-negative")

    if power <= 0:
        raise ParameterError("power must be strictly positive")

    # Promote integer inputs to float so the division below is well-defined.
    work_dtype = X.dtype if np.issubdtype(X.dtype, np.floating) else np.float32

    # Normalize by the element-wise maximum for numerical stability.
    denom = np.maximum(X, X_ref).astype(work_dtype)

    # Cells where both inputs underflow are handled separately;
    # set their denominator to 1 to keep the division safe.
    underflow = denom < np.finfo(work_dtype).tiny
    denom[underflow] = 1

    if not np.isfinite(power):
        # Infinite power degenerates to a hard (binary) mask,
        # with ties broken in favor of X_ref.
        return X > X_ref

    mask = (X / denom) ** power
    ref = (X_ref / denom) ** power

    ok = ~underflow
    mask[ok] /= mask[ok] + ref[ok]

    # Where both inputs were (near) zero, either split the mass
    # evenly or zero out the mask entirely.
    mask[underflow] = 0.5 if split_zeros else 0.0

    return mask
def tiny(x):
    """Compute the tiny-value corresponding to an input's data type.

    This is the smallest "usable" number representable in ``x.dtype``
    (e.g., float32), and is primarily useful for determining a threshold
    for numerical underflow in division or multiplication operations.

    Parameters
    ----------
    x : number or np.ndarray
        The array to compute the tiny-value for.
        All that matters here is ``x.dtype``

    Returns
    -------
    tiny_value : float
        The smallest positive usable number for the type of ``x``.
        If ``x`` is integer-typed, then the tiny value for ``np.float32``
        is returned instead.

    See Also
    --------
    numpy.finfo
    """
    # Inspect the dtype through an array view; only (complex) floating
    # types have a meaningful tiny value, so integers fall back to float32.
    dt = np.asarray(x).dtype

    if np.issubdtype(dt, np.floating) or np.issubdtype(dt, np.complexfloating):
        return np.finfo(dt).tiny

    return np.finfo(np.float32).tiny
def fill_off_diagonal(x, radius, value=0):
    """Sets all cells of a matrix to a given ``value``
    if they lie outside a constraint region.

    In this case, the constraint region is the Sakoe-Chiba band which
    runs with a fixed ``radius`` along the main diagonal.

    When ``x.shape[0] != x.shape[1]``, the radius will be expanded so
    that ``x[-1, -1] = 1`` always.

    ``x`` will be modified in place.

    Parameters
    ----------
    x : np.ndarray [shape=(N, M)]
        Input matrix, will be modified in place.

    radius : float
        The band radius (1/2 of the width) will be
        ``int(radius*min(x.shape))``

    value : int
        ``x[n, m] = value`` when ``(n, m)`` lies outside the band.
    """
    nx, ny = x.shape

    # Convert the proportional radius into a whole index count.
    # Cast to int explicitly: np.round returns a float, and the
    # triangle offsets below are integer diagonal indices.
    radius = int(np.round(radius * min(nx, ny)))

    # For non-square matrices, widen one side of the band so it
    # always reaches the far corner (x[-1, -1] stays inside).
    offset = abs(nx - ny)

    if nx < ny:
        idx_u = np.triu_indices_from(x, k=radius + offset)
        idx_l = np.tril_indices_from(x, k=-radius)
    else:
        idx_u = np.triu_indices_from(x, k=radius)
        idx_l = np.tril_indices_from(x, k=-radius - offset)

    # Blank out everything above and below the band, in place.
    x[idx_u] = value
    x[idx_l] = value
def cyclic_gradient(data, edge_order=1, axis=-1):
    """Estimate the gradient of a function over a uniformly sampled,
    periodic domain.

    This is essentially the same as `np.gradient`, except that edge effects
    are handled by wrapping the observations (i.e. assuming periodicity)
    rather than extrapolation.

    Parameters
    ----------
    data : np.ndarray
        The function values observed at uniformly spaced positions on
        a periodic domain

    edge_order : {1, 2}
        The order of the difference approximation used for estimating
        the gradient

    axis : int
        The axis along which gradients are calculated.

    Returns
    -------
    grad : np.ndarray like ``data``
        The gradient of ``data`` taken along the specified axis.

    See Also
    --------
    numpy.gradient
    """
    # Wrap-pad by `edge_order` samples on each side of the target axis,
    # so the finite differences at the boundary use periodic neighbors.
    pad_width = [(0, 0) for _ in range(data.ndim)]
    pad_width[axis] = (edge_order, edge_order)
    wrapped = np.pad(data, pad_width, mode="wrap")

    grad_full = np.gradient(wrapped, edge_order=edge_order, axis=axis)

    # Trim the padding back off to restore the original shape.
    trim = [slice(None)] * data.ndim
    trim[axis] = slice(edge_order, -edge_order)
    return grad_full[tuple(trim)]
@numba.jit(nopython=True, cache=True)
def __shear_dense(X, factor=+1, axis=-1):
    """Numba-accelerated shear for dense (ndarray) arrays.

    Column ``i`` is rolled by ``factor * i``; shearing along ``axis=0``
    is implemented by transposing, shearing columns, and transposing back.
    """
    if axis == 0:
        X = X.T

    out = np.empty_like(X)
    n_cols = X.shape[1]
    for col in range(n_cols):
        out[:, col] = np.roll(X[:, col], factor * col)

    if axis == 0:
        out = out.T

    return out
def __shear_sparse(X, factor=+1, axis=-1):
    """Fast shearing for sparse matrices.

    Shearing is performed using CSC array indices, and the result is
    converted back to whatever sparse format the data was originally
    provided in.
    """
    original_format = X.format

    if axis == 0:
        X = X.T

    # Work in CSC so that each column's nonzero row indices are a
    # contiguous run inside the `indices` array.
    sheared = X.tocsc(copy=True)

    # Per-nonzero shift amount: factor * column index, repeated once per
    # stored entry in that column (column sizes come from diff(indptr)).
    shifts = np.repeat(
        factor * np.arange(sheared.shape[1]), np.diff(sheared.indptr)
    )

    # Roll the row indices in place, wrapping modulo the column height.
    np.mod(sheared.indices + shifts, sheared.shape[0], out=sheared.indices)

    if axis == 0:
        sheared = sheared.T

    # Convert back to the caller's original storage format.
    return sheared.asformat(original_format)
def shear(X, factor=1, axis=-1):
    """Shear a matrix by a given factor.

    The column ``X[:, n]`` will be displaced (rolled) by ``factor * n``.

    This is primarily useful for converting between lag and recurrence
    representations: shearing with ``factor=-1`` converts the main diagonal
    to a horizontal.  Shearing with ``factor=1`` converts a horizontal to
    a diagonal.

    Parameters
    ----------
    X : np.ndarray [ndim=2] or scipy.sparse matrix
        The array to be sheared

    factor : integer
        The shear factor: ``X[:, n] -> np.roll(X[:, n], factor * n)``

    axis : integer
        The axis along which to shear

    Returns
    -------
    X_shear : same type as ``X``
        The sheared matrix

    Raises
    ------
    ParameterError
        If ``factor`` is not integer-valued.
    """
    if not np.issubdtype(type(factor), np.integer):
        raise ParameterError("factor={} must be integer-valued".format(factor))

    # Dispatch on sparsity: sparse inputs keep their storage format,
    # dense inputs go through the numba-accelerated path.
    shear_fn = (
        __shear_sparse if scipy.sparse.isspmatrix(X) else __shear_dense
    )
    return shear_fn(X, factor=factor, axis=axis)
def stack(arrays, axis=0):
    """Stack one or more arrays along a target axis.

    This function is similar to `np.stack`, except that memory contiguity
    is retained when stacking along the first dimension.

    This is useful when combining multiple monophonic audio signals into a
    multi-channel signal, or when stacking multiple feature representations
    to form a multi-dimensional array.

    Parameters
    ----------
    arrays : list
        one or more `np.ndarray`

    axis : integer
        The target axis along which to stack.  ``axis=0`` creates a new
        first axis, and ``axis=-1`` creates a new last axis.

    Returns
    -------
    arr_stack : np.ndarray
        The input arrays, stacked along the target dimension.

        If ``axis=0``, then ``arr_stack`` will be F-contiguous.
        Otherwise, ``arr_stack`` will be C-contiguous by default, as
        computed by `np.stack`.

    Raises
    ------
    ParameterError
        - If ``arrays`` do not all have the same shape
        - If no ``arrays`` are given

    See Also
    --------
    numpy.stack
    numpy.ndarray.flags
    frame
    """
    shapes = {arr.shape for arr in arrays}
    if len(shapes) > 1:
        raise ParameterError("all input arrays must have the same shape")
    elif len(shapes) < 1:
        raise ParameterError("at least one input array must be provided for stack")

    shape_in = shapes.pop()

    if axis != 0:
        return np.stack(arrays, axis=axis)
    else:
        # If axis is 0, enforce F-ordering so the result remains
        # contiguous along each input array's original (fast) axis.
        shape = tuple([len(arrays)] + list(shape_in))

        # Find the common dtype for all inputs.
        # np.result_type replaces np.find_common_type, which was
        # deprecated in NumPy 1.25 and removed in NumPy 2.0.
        dtype = np.result_type(*[arr.dtype for arr in arrays])

        # Allocate an empty array of the right shape and type,
        # then stack directly into the preallocated buffer.
        result = np.empty(shape, dtype=dtype, order="F")
        np.stack(arrays, axis=axis, out=result)

        return result
def dtype_r2c(d, default=np.complex64):
    """Find the complex numpy dtype corresponding to a real dtype.

    This is used to maintain numerical precision and memory footprint
    when constructing complex arrays from real-valued data
    (e.g. in a Fourier transform).

    A `float32` (single-precision) type maps to `complex64`,
    while a `float64` (double-precision) maps to `complex128`.

    Parameters
    ----------
    d : np.dtype
        The real-valued dtype to convert to complex.
        If ``d`` is a complex type already, it will be returned.

    default : np.dtype, optional
        The default complex target type, if ``d`` does not match a
        known dtype

    Returns
    -------
    d_c : np.dtype
        The complex dtype

    See Also
    --------
    dtype_c2r
    numpy.dtype

    Examples
    --------
    >>> librosa.util.dtype_r2c(np.float32)
    dtype('complex64')

    >>> librosa.util.dtype_r2c(np.int16)
    dtype('complex64')

    >>> librosa.util.dtype_r2c(np.complex128)
    dtype('complex128')
    """
    # NOTE: the deprecated ``np.float``/``np.complex`` aliases previously
    # used here were removed in NumPy 1.24; float64 (the builtin ``float``)
    # is covered directly by the complex128 entry below.
    mapping = {
        np.dtype(np.float32): np.complex64,
        np.dtype(np.float64): np.complex128,
    }

    # If we're given a complex type already, return it
    dt = np.dtype(d)
    if dt.kind == "c":
        return dt

    # Otherwise, try to map the dtype.
    # If no match is found, return the default.
    return np.dtype(mapping.get(dt, default))
def dtype_c2r(d, default=np.float32):
    """Find the real numpy dtype corresponding to a complex dtype.

    This is used to maintain numerical precision and memory footprint
    when constructing real arrays from complex-valued data
    (e.g. in an inverse Fourier transform).

    A `complex64` (single-precision) type maps to `float32`,
    while a `complex128` (double-precision) maps to `float64`.

    Parameters
    ----------
    d : np.dtype
        The complex-valued dtype to convert to real.
        If ``d`` is a real (float) type already, it will be returned.

    default : np.dtype, optional
        The default real target type, if ``d`` does not match a
        known dtype

    Returns
    -------
    d_r : np.dtype
        The real dtype

    See Also
    --------
    dtype_r2c
    numpy.dtype

    Examples
    --------
    >>> librosa.util.dtype_c2r(np.complex64)
    dtype('float32')

    >>> librosa.util.dtype_c2r(np.float32)
    dtype('float32')

    >>> librosa.util.dtype_c2r(np.int16)
    dtype('float32')

    >>> librosa.util.dtype_c2r(np.complex128)
    dtype('float64')
    """
    # NOTE: the deprecated ``np.complex``/``np.float`` aliases previously
    # used here were removed in NumPy 1.24; complex128 (the builtin
    # ``complex``) is covered directly by the float64 entry below.
    mapping = {
        np.dtype(np.complex64): np.float32,
        np.dtype(np.complex128): np.float64,
    }

    # If we're given a real type already, return it
    dt = np.dtype(d)
    if dt.kind == "f":
        return dt

    # Otherwise, try to map the dtype.
    # If no match is found, return the default.
    return np.dtype(mapping.get(dt, default))
| isc |
keerts/home-assistant | tests/components/mqtt/test_init.py | 3 | 14663 | """The tests for the MQTT component."""
from collections import namedtuple
import unittest
from unittest import mock
import socket
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.bootstrap import setup_component
import homeassistant.components.mqtt as mqtt
from homeassistant.const import (
EVENT_CALL_SERVICE, ATTR_DOMAIN, ATTR_SERVICE, EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP)
from tests.common import (
get_test_home_assistant, mock_mqtt_component, fire_mqtt_message)
# pylint: disable=invalid-name
# pylint: disable=invalid-name
class TestMQTT(unittest.TestCase):
    """Test the MQTT component's service calls and subscriptions.

    Uses a mocked MQTT client (no real broker connection).
    """

    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        # Replace the real MQTT client with a mock so no broker is needed.
        mock_mqtt_component(self.hass)
        self.calls = []

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    @callback
    def record_calls(self, *args):
        """Helper for recording calls."""
        self.calls.append(args)

    def test_client_starts_on_home_assistant_start(self):
        """Test if client starts on HA launch."""
        self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
        self.hass.block_till_done()
        self.assertTrue(mqtt.MQTT_CLIENT.start.called)

    def test_client_stops_on_home_assistant_start(self):
        """Test if client stops on HA shutdown (after starting on launch)."""
        self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
        self.hass.block_till_done()
        self.hass.bus.fire(EVENT_HOMEASSISTANT_STOP)
        self.hass.block_till_done()
        self.assertTrue(mqtt.MQTT_CLIENT.stop.called)

    @mock.patch('paho.mqtt.client.Client')
    def test_setup_fails_if_no_connect_broker(self, _):
        """Test for setup failure if connection to broker is missing."""
        test_broker_cfg = {mqtt.DOMAIN: {mqtt.CONF_BROKER: 'test-broker'}}

        # A socket error during client construction must fail setup.
        with mock.patch('homeassistant.components.mqtt.MQTT',
                        side_effect=socket.error()):
            self.hass.config.components = set()
            assert not setup_component(self.hass, mqtt.DOMAIN, test_broker_cfg)

        # Ensure that setup succeeds when no error is raised.
        self.hass.config.components = set()
        assert setup_component(self.hass, mqtt.DOMAIN, test_broker_cfg)

    @mock.patch('paho.mqtt.client.Client')
    def test_setup_embedded(self, _):
        """Test setting up embedded server with no config."""
        client_config = ('localhost', 1883, 'user', 'pass', None, '3.1.1')

        with mock.patch('homeassistant.components.mqtt.server.start',
                        return_value=(True, client_config)) as _start:
            self.hass.config.components = set()
            assert setup_component(self.hass, mqtt.DOMAIN,
                                   {mqtt.DOMAIN: {}})
            assert _start.call_count == 1

            # Test with `embedded: None`
            self.hass.config.components = set()
            assert setup_component(self.hass, mqtt.DOMAIN,
                                   {mqtt.DOMAIN: {'embedded': None}})
            assert _start.call_count == 2  # Another call

    def test_publish_calls_service(self):
        """Test the publishing of call to services."""
        self.hass.bus.listen_once(EVENT_CALL_SERVICE, self.record_calls)

        mqtt.publish(self.hass, 'test-topic', 'test-payload')

        self.hass.block_till_done()

        self.assertEqual(1, len(self.calls))
        self.assertEqual(
            'test-topic',
            self.calls[0][0].data['service_data'][mqtt.ATTR_TOPIC])
        self.assertEqual(
            'test-payload',
            self.calls[0][0].data['service_data'][mqtt.ATTR_PAYLOAD])

    def test_service_call_without_topic_does_not_publish(self):
        """Test the service call if topic is missing."""
        self.hass.bus.fire(EVENT_CALL_SERVICE, {
            ATTR_DOMAIN: mqtt.DOMAIN,
            ATTR_SERVICE: mqtt.SERVICE_PUBLISH
        })
        self.hass.block_till_done()
        self.assertTrue(not mqtt.MQTT_CLIENT.publish.called)

    def test_service_call_with_template_payload_renders_template(self):
        """Test the service call with rendered template.

        If 'payload_template' is provided and 'payload' is not, then render it.
        """
        mqtt.publish_template(self.hass, "test/topic", "{{ 1+1 }}")
        self.hass.block_till_done()
        self.assertTrue(mqtt.MQTT_CLIENT.publish.called)
        # The template "{{ 1+1 }}" should render to the string "2".
        self.assertEqual(mqtt.MQTT_CLIENT.publish.call_args[0][1], "2")

    def test_service_call_with_payload_doesnt_render_template(self):
        """Test the service call with unrendered template.

        If both 'payload' and 'payload_template' are provided then fail.
        """
        payload = "not a template"
        payload_template = "a template"
        self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, {
            mqtt.ATTR_TOPIC: "test/topic",
            mqtt.ATTR_PAYLOAD: payload,
            mqtt.ATTR_PAYLOAD_TEMPLATE: payload_template
        }, blocking=True)
        self.assertFalse(mqtt.MQTT_CLIENT.publish.called)

    def test_service_call_with_ascii_qos_retain_flags(self):
        """Test the service call with args that can be misinterpreted.

        Empty payload message and ascii formatted qos and retain flags.
        """
        self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, {
            mqtt.ATTR_TOPIC: "test/topic",
            mqtt.ATTR_PAYLOAD: "",
            mqtt.ATTR_QOS: '2',
            mqtt.ATTR_RETAIN: 'no'
        }, blocking=True)
        self.assertTrue(mqtt.MQTT_CLIENT.publish.called)
        # The string '2' should be coerced to int 2, and 'no' to False.
        self.assertEqual(mqtt.MQTT_CLIENT.publish.call_args[0][2], 2)
        self.assertFalse(mqtt.MQTT_CLIENT.publish.call_args[0][3])

    def test_subscribe_topic(self):
        """Test the subscription of a topic."""
        unsub = mqtt.subscribe(self.hass, 'test-topic', self.record_calls)

        fire_mqtt_message(self.hass, 'test-topic', 'test-payload')

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
        self.assertEqual('test-topic', self.calls[0][0])
        self.assertEqual('test-payload', self.calls[0][1])

        # After unsubscribing, further messages must not be delivered.
        unsub()

        fire_mqtt_message(self.hass, 'test-topic', 'test-payload')

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))

    def test_subscribe_topic_not_match(self):
        """Test if subscribed topic is not a match."""
        mqtt.subscribe(self.hass, 'test-topic', self.record_calls)

        fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload')

        self.hass.block_till_done()
        self.assertEqual(0, len(self.calls))

    def test_subscribe_topic_level_wildcard(self):
        """Test that the '+' wildcard matches a single topic level."""
        mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls)

        fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload')

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
        self.assertEqual('test-topic/bier/on', self.calls[0][0])
        self.assertEqual('test-payload', self.calls[0][1])

    def test_subscribe_topic_level_wildcard_no_subtree_match(self):
        """Test that '+' does not match when a trailing level is missing."""
        mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls)

        fire_mqtt_message(self.hass, 'test-topic/bier', 'test-payload')

        self.hass.block_till_done()
        self.assertEqual(0, len(self.calls))

    def test_subscribe_topic_subtree_wildcard_subtree_topic(self):
        """Test that the '#' wildcard matches nested subtree topics."""
        mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)

        fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload')

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
        self.assertEqual('test-topic/bier/on', self.calls[0][0])
        self.assertEqual('test-payload', self.calls[0][1])

    def test_subscribe_topic_subtree_wildcard_root_topic(self):
        """Test that the '#' wildcard also matches the root topic itself."""
        mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)

        fire_mqtt_message(self.hass, 'test-topic', 'test-payload')

        self.hass.block_till_done()
        self.assertEqual(1, len(self.calls))
        self.assertEqual('test-topic', self.calls[0][0])
        self.assertEqual('test-payload', self.calls[0][1])

    def test_subscribe_topic_subtree_wildcard_no_match(self):
        """Test that '#' does not match an unrelated root topic."""
        mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)

        fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload')

        self.hass.block_till_done()
        self.assertEqual(0, len(self.calls))
class TestMQTTCallbacks(unittest.TestCase):
"""Test the MQTT callbacks."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# mock_mqtt_component(self.hass)
with mock.patch('paho.mqtt.client.Client'):
self.hass.config.components = set()
assert setup_component(self.hass, mqtt.DOMAIN, {
mqtt.DOMAIN: {
mqtt.CONF_BROKER: 'mock-broker',
mqtt.CONF_BIRTH_MESSAGE: {mqtt.ATTR_TOPIC: 'birth',
mqtt.ATTR_PAYLOAD: 'birth'}
}
})
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_receiving_mqtt_message_fires_hass_event(self):
"""Test if receiving triggers an event."""
calls = []
@callback
def record(event):
"""Helper to record calls."""
calls.append(event)
self.hass.bus.listen_once(mqtt.EVENT_MQTT_MESSAGE_RECEIVED, record)
MQTTMessage = namedtuple('MQTTMessage', ['topic', 'qos', 'payload'])
message = MQTTMessage('test_topic', 1, 'Hello World!'.encode('utf-8'))
mqtt.MQTT_CLIENT._mqtt_on_message(None, {'hass': self.hass}, message)
self.hass.block_till_done()
self.assertEqual(1, len(calls))
last_event = calls[0]
self.assertEqual('Hello World!', last_event.data['payload'])
self.assertEqual(message.topic, last_event.data['topic'])
self.assertEqual(message.qos, last_event.data['qos'])
def test_mqtt_failed_connection_results_in_disconnect(self):
"""Test if connection failure leads to disconnect."""
for result_code in range(1, 6):
mqtt.MQTT_CLIENT._mqttc = mock.MagicMock()
mqtt.MQTT_CLIENT._mqtt_on_connect(None, {'topics': {}}, 0,
result_code)
self.assertTrue(mqtt.MQTT_CLIENT._mqttc.disconnect.called)
def test_mqtt_subscribes_topics_on_connect(self):
"""Test subscription to topic on connect."""
from collections import OrderedDict
prev_topics = OrderedDict()
prev_topics['topic/test'] = 1,
prev_topics['home/sensor'] = 2,
prev_topics['still/pending'] = None
mqtt.MQTT_CLIENT.topics = prev_topics
mqtt.MQTT_CLIENT.progress = {1: 'still/pending'}
# Return values for subscribe calls (rc, mid)
mqtt.MQTT_CLIENT._mqttc.subscribe.side_effect = ((0, 2), (0, 3))
mqtt.MQTT_CLIENT._mqtt_on_connect(None, None, 0, 0)
self.assertFalse(mqtt.MQTT_CLIENT._mqttc.disconnect.called)
expected = [(topic, qos) for topic, qos in prev_topics.items()
if qos is not None]
self.assertEqual(
expected,
[call[1] for call in mqtt.MQTT_CLIENT._mqttc.subscribe.mock_calls])
self.assertEqual({
1: 'still/pending',
2: 'topic/test',
3: 'home/sensor',
}, mqtt.MQTT_CLIENT.progress)
def test_mqtt_birth_message_on_connect(self): \
# pylint: disable=no-self-use
"""Test birth message on connect."""
mqtt.MQTT_CLIENT._mqtt_on_connect(None, None, 0, 0)
mqtt.MQTT_CLIENT._mqttc.publish.assert_called_with('birth', 'birth', 0,
False)
def test_mqtt_disconnect_tries_no_reconnect_on_stop(self):
"""Test the disconnect tries."""
mqtt.MQTT_CLIENT._mqtt_on_disconnect(None, None, 0)
self.assertFalse(mqtt.MQTT_CLIENT._mqttc.reconnect.called)
@mock.patch('homeassistant.components.mqtt.time.sleep')
def test_mqtt_disconnect_tries_reconnect(self, mock_sleep):
"""Test the re-connect tries."""
mqtt.MQTT_CLIENT.topics = {
'test/topic': 1,
'test/progress': None
}
mqtt.MQTT_CLIENT.progress = {
1: 'test/progress'
}
mqtt.MQTT_CLIENT._mqttc.reconnect.side_effect = [1, 1, 1, 0]
mqtt.MQTT_CLIENT._mqtt_on_disconnect(None, None, 1)
self.assertTrue(mqtt.MQTT_CLIENT._mqttc.reconnect.called)
self.assertEqual(4, len(mqtt.MQTT_CLIENT._mqttc.reconnect.mock_calls))
self.assertEqual([1, 2, 4],
[call[1][0] for call in mock_sleep.mock_calls])
self.assertEqual({'test/topic': 1}, mqtt.MQTT_CLIENT.topics)
self.assertEqual({}, mqtt.MQTT_CLIENT.progress)
def test_invalid_mqtt_topics(self):
    """Test invalid topics."""
    # '+' wildcard is not allowed in a publish topic; NUL is never allowed.
    self.assertRaises(vol.Invalid, mqtt.valid_publish_topic, 'bad+topic')
    self.assertRaises(vol.Invalid, mqtt.valid_subscribe_topic, 'bad\0one')
def test_receiving_non_utf8_message_gets_logged(self):
    """Test receiving a non utf8 encoded message."""
    calls = []

    @callback
    def record(event):
        """Helper to record calls."""
        calls.append(event)

    # A payload that is not valid UTF-8 text should be rejected and logged.
    payload = 0x9a
    topic = 'test_topic'
    self.hass.bus.listen_once(mqtt.EVENT_MQTT_MESSAGE_RECEIVED, record)
    MQTTMessage = namedtuple('MQTTMessage', ['topic', 'qos', 'payload'])
    message = MQTTMessage(topic, 1, payload)
    with self.assertLogs(level='ERROR') as test_handle:
        mqtt.MQTT_CLIENT._mqtt_on_message(
            None,
            {'hass': self.hass},
            message)
        self.hass.block_till_done()
        # The error is logged with the offending topic in the message.
        self.assertIn(
            "ERROR:homeassistant.components.mqtt:Illegal utf-8 unicode "
            "payload from MQTT topic: %s, Payload: " % topic,
            test_handle.output[0])
| apache-2.0 |
ducksboard/libsaas | test/test_googleoauth2.py | 4 | 1616 | import unittest
from libsaas.executors import test_executor
from libsaas.services import googleoauth2
from libsaas.port import urlencode
class GoogleOauth2TestCase(unittest.TestCase):
    """Verify the HTTP requests built by the Google OAuth2 service."""

    def setUp(self):
        self.executor = test_executor.use()
        self.executor.set_response(b'{}', 200, {})
        self.service = googleoauth2.GoogleOAuth2('id', 'secret')

    def expect(self, method=None, uri=None, params=None):
        """Assert properties of the most recently recorded request."""
        request = self.executor.request
        if method is not None:
            self.assertEqual(method, request.method)
        if uri is not None:
            self.assertEqual(request.uri, self.service.APIROOT + uri)
        if params is not None:
            self.assertEqual(request.params, params)

    def test_access_token(self):
        expected = {
            'client_id': 'id',
            'client_secret': 'secret',
            'grant_type': 'authorization_code',
            'code': 'code',
            'redirect_uri': 'uri',
        }
        self.service.access_token('code', 'uri')
        self.expect('POST', '/token', expected)

    def test_refresh_token(self):
        expected = {
            'client_id': 'id',
            'client_secret': 'secret',
            'grant_type': 'refresh_token',
            'refresh_token': 'token',
        }
        self.service.refresh_token('token')
        self.expect('POST', '/token', expected)

    def test_get_auth_url(self):
        auth_url = self.service.get_auth_url('code', 'foo', 'openid',
                                             openid_realm='bar')
        self.assertIn(urlencode({'openid.realm': 'bar'}), auth_url)
| mit |
frouty/odoo_oph | openerp/tools/amount_to_text_en.py | 441 | 5103 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from translate import _
_logger = logging.getLogger(__name__)
#-------------------------------------------------------------
#ENGLISH
#-------------------------------------------------------------
to_19 = ( 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six',
'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen',
'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen' )
tens = ( 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety')
denom = ( '',
'Thousand', 'Million', 'Billion', 'Trillion', 'Quadrillion',
'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Novemdecillion', 'Vigintillion' )
def _convert_nn(val):
"""convert a value < 100 to English.
"""
if val < 20:
return to_19[val]
for (dcap, dval) in ((k, 20 + (10 * v)) for (v, k) in enumerate(tens)):
if dval + 10 > val:
if val % 10:
return dcap + '-' + to_19[val % 10]
return dcap
def _convert_nnn(val):
    """Spell out a value below 1000 in English.

    Special-cased because it kicks off the < 100 path; calling it
    directly yields forms such as 'Forty-Five Hundred'.
    """
    hundreds, remainder = divmod(val, 100)
    parts = []
    if hundreds > 0:
        parts.append(to_19[hundreds] + ' Hundred')
    if remainder > 0:
        parts.append(_convert_nn(remainder))
    return ' '.join(parts)
def english_number(val):
    """Spell out a non-negative integer in English words, recursing over
    successive powers of 1000 (Thousand, Million, ...)."""
    if val < 100:
        return _convert_nn(val)
    if val < 1000:
        return _convert_nnn(val)
    # Find the smallest power of 1000 strictly greater than val.
    for power, threshold in ((v - 1, 1000 ** v) for v in range(len(denom))):
        if threshold > val:
            unit = 1000 ** power
            leading, remainder = divmod(val, unit)
            text = _convert_nnn(leading) + ' ' + denom[power]
            if remainder > 0:
                text = text + ', ' + english_number(remainder)
            return text
def amount_to_text(number, currency):
    """Render an amount as English words plus the currency name,
    e.g. 12.05 with 'euro' -> 'Twelve euro and Five Cents'."""
    whole, cents = ('%.2f' % number).split('.')
    units_name = currency
    start_word = english_number(int(whole))
    end_word = english_number(int(cents))
    cents_number = int(cents)
    cents_name = 'Cents' if cents_number > 1 else 'Cent'
    # 'and' joins the unit part and the cent part when both are present.
    conjunction = ('and'
                   if (start_word or units_name) and (end_word or cents_name)
                   else '')
    pieces = [start_word, units_name, conjunction, end_word, cents_name]
    return ' '.join(piece for piece in pieces if piece)
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
# Maps language code -> rendering function; only English is implemented.
_translate_funcs = {'en': amount_to_text}

#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='en', currency='euro'):
    """ Converts an integer to its textual representation, using the language set in the context if any.

        Example::
            1654: thousands six cent cinquante-quatre.
    """
    # `dict.has_key()` only exists on Python 2; the `in` operator works on
    # both Python 2 and 3 and is the idiomatic membership test.
    if lang not in _translate_funcs:
        _logger.warning(_("no translation function found for lang: '%s'"), lang)
        #TODO: (default should be en) same as above
        lang = 'en'
    return _translate_funcs[lang](abs(nbr), currency)
if __name__=='__main__':
from sys import argv
lang = 'nl'
if len(argv) < 2:
for i in range(1,200):
print i, ">>", int_to_text(i, lang)
for i in range(200,999999,139):
print i, ">>", int_to_text(i, lang)
else:
print int_to_text(int(argv[1]), lang)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andrewleech/script.module.raven | lib/raven/utils/http.py | 20 | 1847 | """
raven.utils.http
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
import ssl
import sys
from raven.conf import defaults
from raven.utils.compat import urllib2, httplib
from raven.utils.ssl_match_hostname import match_hostname
def urlopen(url, data=None, timeout=defaults.TIMEOUT, ca_certs=None,
            verify_ssl=False, assert_hostname=None):
    """Open *url*, optionally verifying the server's SSL certificate.

    :param ca_certs: path to the CA bundle used for verification.
    :param verify_ssl: install the certificate-validating HTTPS handler.
    :param assert_hostname: hostname to match against the peer certificate
        (falls back to the connection host when falsy).
    """

    class ValidHTTPSConnection(httplib.HTTPConnection):
        default_port = httplib.HTTPS_PORT

        def __init__(self, *args, **kwargs):
            httplib.HTTPConnection.__init__(self, *args, **kwargs)

        def connect(self):
            sock = socket.create_connection(
                address=(self.host, self.port),
                timeout=self.timeout,
            )
            if self._tunnel_host:
                self.sock = sock
                self._tunnel()

            self.sock = ssl.wrap_socket(
                sock, ca_certs=ca_certs, cert_reqs=ssl.CERT_REQUIRED)
            if assert_hostname is not None:
                # Bug fix: `assert_hostname` is a closure variable of
                # urlopen(), not an attribute of the connection object;
                # `self.assert_hostname` raised AttributeError here.
                match_hostname(self.sock.getpeercert(),
                               assert_hostname or self.host)

    class ValidHTTPSHandler(urllib2.HTTPSHandler):

        def https_open(self, req):
            return self.do_open(ValidHTTPSConnection, req)

    if verify_ssl:
        handlers = [ValidHTTPSHandler]
    else:
        handlers = []

    opener = urllib2.build_opener(*handlers)

    if sys.version_info < (2, 6):
        # Python < 2.6 has no per-call timeout argument; temporarily set
        # the process-wide default socket timeout instead.
        default_timeout = socket.getdefaulttimeout()
        socket.setdefaulttimeout(timeout)
        try:
            return opener.open(url, data)
        finally:
            socket.setdefaulttimeout(default_timeout)

    return opener.open(url, data, timeout)
| bsd-3-clause |
samueljackson92/tsp-solver | tspsolver/ga/test/crossover_test.py | 1 | 3989 | import unittest
import nose.tools
import numpy as np
from scipy.spatial import distance_matrix
from tspsolver.tsp_generator import TSPGenerator
from ..population_generation import SimplePopulationGenerator
from ..crossover import OnePointPMX, TwoPointPMX, OrderCrossover
class OnePointCrossoverTest(unittest.TestCase):
    """Tests for the one-point partially-matched crossover (PMX) operator."""

    def setUp(self):
        # Random TSP instance; individuals are permutations of city indices.
        self._num_points = 100
        self._pop_size = 20
        gen = TSPGenerator(self._num_points)
        self._data = gen.generate()
        self._distances = distance_matrix(self._data, self._data)
        popGen = SimplePopulationGenerator(self._pop_size)
        self._population = popGen.generate(self._distances[0])

    def test_crossover_for_chromosomes(self):
        onept_pmx = OnePointPMX()
        x = np.arange(10)
        y = x[::-1]
        c1, c2 = onept_pmx._crossover_for_chromosomes(x, y)
        # A child is a valid tour iff every city appears exactly once.
        nose.tools.assert_equal(np.unique(c1).size, c1.size)
        nose.tools.assert_equal(np.unique(c2).size, c2.size)

    def test_crossover(self):
        onept_pmx = OnePointPMX()
        new_pop = onept_pmx.crossover(self._population)
        # Population shape is preserved and every child is a permutation.
        nose.tools.assert_equal(new_pop.shape, self._population.shape)
        for row in new_pop:
            nose.tools.assert_equal(np.unique(row).size, row.size)

    def test_crossover_with_rog(self):
        # pcross=1.0 forces a crossover; with use_rog (presumably "random
        # offspring generation" -- confirm) identical parents must still
        # yield children that differ from them.
        onept_pmx = OnePointPMX(pcross=1.0, use_rog=True)
        pop = np.array([np.arange(10), np.arange(10)])
        new_pop = onept_pmx.crossover(pop)
        nose.tools.assert_equal(new_pop.shape, pop.shape)
        for row, new_row in zip(pop, new_pop):
            nose.tools.assert_equal(np.unique(new_row).size, new_row.size)
            print row, new_row
            nose.tools.assert_false(np.array_equal(row, new_row))
class TwoPointCrossoverTest(unittest.TestCase):
    """Tests for the two-point partially-matched crossover (PMX) operator."""

    def setUp(self):
        # Random TSP instance shared by the tests below.
        self._num_points = 100
        self._pop_size = 20
        gen = TSPGenerator(self._num_points)
        self._data = gen.generate()
        self._distances = distance_matrix(self._data, self._data)
        popGen = SimplePopulationGenerator(self._pop_size)
        self._population = popGen.generate(self._distances[0])

    def test_crossover(self):
        twopt_pmx = TwoPointPMX()
        new_pop = twopt_pmx.crossover(self._population)
        nose.tools.assert_equal(new_pop.shape, self._population.shape)
        for row in new_pop:
            # Each child must remain a valid permutation (no repeats).
            nose.tools.assert_equal(np.unique(row).size, row.size)

    def test_crossover_for_chromosomes(self):
        twopt_pmx = TwoPointPMX()
        x = np.arange(10)
        y = x[::-1]
        c1, c2 = twopt_pmx._crossover_for_chromosomes(x, y)
        nose.tools.assert_equal(np.unique(c1).size, c1.size)
        nose.tools.assert_equal(np.unique(c2).size, c2.size)
class OrderCrossoverTest(unittest.TestCase):
    """Tests for the order crossover (OX) operator."""

    def setUp(self):
        # Smaller instance (10 cities) than the PMX tests.
        self._num_points = 10
        self._pop_size = 20
        gen = TSPGenerator(self._num_points)
        self._data = gen.generate()
        self._distances = distance_matrix(self._data, self._data)
        popGen = SimplePopulationGenerator(self._pop_size)
        self._population = popGen.generate(self._distances[0])

    def test_crossover_single(self):
        # pcross=1.0 guarantees the pair is crossed; the child must be a
        # permutation of the same city set as the parent.
        xover = OrderCrossover(1.0)
        pop = np.array([[1, 2, 3, 4, 5, 6, 7], [7, 6, 5, 4, 3, 2, 1]])
        new_pop = xover.crossover(pop)
        np.testing.assert_array_equal(pop[0], np.sort(new_pop[0]))

    def test_crossover(self):
        xover = OrderCrossover()
        new_pop = xover.crossover(self._population)
        nose.tools.assert_equal(new_pop.shape, self._population.shape)
        for row in new_pop:
            # Every child must remain a valid permutation.
            nose.tools.assert_equal(np.unique(row).size, row.size)

    def test_crossover_for_chromosomes(self):
        xover = OrderCrossover()
        x = np.arange(10)
        y = x[::-1]
        c1, c2 = xover._crossover_for_chromosomes(x, y)
        nose.tools.assert_equal(np.unique(c1).size, c1.size)
        nose.tools.assert_equal(np.unique(c2).size, c2.size)
| mit |
kbrebanov/ansible | lib/ansible/modules/cloud/rackspace/rax_files.py | 33 | 11750 | #!/usr/bin/python
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_files
short_description: Manipulate Rackspace Cloud Files Containers
description:
- Manipulate Rackspace Cloud Files Containers
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing containers.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for container or metadata operations.
required: true
meta:
description:
- A hash of items to set as metadata values on a container
private:
description:
- Used to set a container as private, removing it from the CDN. B(Warning!)
Private containers, if previously made public, can have live objects
available until the TTL on cached objects expires
public:
description:
- Used to set a container as public, available via the Cloud Files CDN
region:
description:
- Region to create an instance in
default: DFW
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
ttl:
description:
- In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
Setting a TTL is only appropriate for containers that are public
type:
description:
- Type of object to do work on, i.e. metadata object or a container object
choices:
- file
- meta
default: file
web_error:
description:
- Sets an object to be presented as the HTTP error page when accessed by the CDN URL
web_index:
description:
- Sets an object to be presented as the HTTP index page when accessed by the CDN URL
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Containers"
hosts: local
gather_facts: no
tasks:
- name: "List all containers"
rax_files:
state: list
- name: "Create container called 'mycontainer'"
rax_files:
container: mycontainer
- name: "Create container 'mycontainer2' with metadata"
rax_files:
container: mycontainer2
meta:
key: value
file_for: someuser@example.com
- name: "Set a container's web index page"
rax_files:
container: mycontainer
web_index: index.html
- name: "Set a container's web error page"
rax_files:
container: mycontainer
web_error: error.html
- name: "Make container public"
rax_files:
container: mycontainer
public: yes
- name: "Make container public with a 24 hour TTL"
rax_files:
container: mycontainer
public: yes
ttl: 86400
- name: "Make container private"
rax_files:
container: mycontainer
private: yes
- name: "Test Cloud Files Containers Metadata Storage"
hosts: local
gather_facts: no
tasks:
- name: "Get mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
- name: "Set mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
meta:
uploaded_by: someuser@example.com
- name: "Remove mycontainer2 metadata"
rax_files:
container: "mycontainer2"
type: meta
state: absent
meta:
key: ""
file_for: ""
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError as e:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
EXIT_DICT = dict(success=True)
META_PREFIX = 'x-container-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer as e:
module.fail_json(msg=e.message)
def _fetch_meta(module, container):
    """Copy the container's metadata into EXIT_DICT['meta'], stripping the
    'x-container-meta-' prefix from each key."""
    EXIT_DICT['meta'] = {}
    try:
        for key, value in container.get_metadata().items():
            short_key = key.split(META_PREFIX)[-1]
            EXIT_DICT['meta'][short_key] = value
    except Exception as e:
        module.fail_json(msg=e.message)
def meta(cf, module, container_, state, meta_, clear_meta):
    """Set, remove, or report metadata on a Cloud Files container, then
    exit the module with the resulting state."""
    c = _get_container(module, cf, container_)

    if meta_ and state == 'present':
        # Apply the supplied metadata, optionally clearing existing keys.
        try:
            meta_set = c.set_metadata(meta_, clear=clear_meta)
        except Exception as e:
            module.fail_json(msg=e.message)
    elif meta_ and state == 'absent':
        # Remove only the supplied keys.
        remove_results = []
        for k, v in meta_.items():
            c.remove_metadata_key(k)
            remove_results.append(k)
        EXIT_DICT['deleted_meta_keys'] = remove_results
    elif state == 'absent':
        # No keys given: remove every metadata key on the container.
        remove_results = []
        for k, v in c.get_metadata().items():
            c.remove_metadata_key(k)
            remove_results.append(k)
        EXIT_DICT['deleted_meta_keys'] = remove_results

    _fetch_meta(module, c)
    # The mere existence of these local names signals that a change was
    # made above (deliberate locals() inspection -- do not reorder).
    _locals = locals().keys()

    EXIT_DICT['container'] = c.name
    if 'meta_set' in _locals or 'remove_results' in _locals:
        EXIT_DICT['changed'] = True

    module.exit_json(**EXIT_DICT)
def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
              private, web_index, web_error):
    """Create, delete, list, or mutate a Cloud Files container, then exit
    the module with the resulting state."""
    if public and private:
        module.fail_json(msg='container cannot be simultaneously '
                             'set to public and private')

    if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
        module.fail_json(msg='state cannot be omitted when setting/removing '
                             'attributes on a container')

    if state == 'list':
        # We don't care if attributes are specified, let's list containers
        EXIT_DICT['containers'] = cf.list_containers()
        module.exit_json(**EXIT_DICT)

    try:
        c = cf.get_container(container_)
    except pyrax.exc.NoSuchContainer as e:
        # Make the container if state=present, otherwise bomb out
        if state == 'present':
            try:
                c = cf.create_container(container_)
            except Exception as e:
                module.fail_json(msg=e.message)
            else:
                EXIT_DICT['changed'] = True
                EXIT_DICT['created'] = True
        else:
            module.fail_json(msg=e.message)
    else:
        # Successfully grabbed a container object
        # Delete if state is absent
        if state == 'absent':
            try:
                cont_deleted = c.delete()
            except Exception as e:
                module.fail_json(msg=e.message)
            else:
                EXIT_DICT['deleted'] = True

        if meta_:
            # Apply metadata; refresh EXIT_DICT['meta'] regardless of outcome.
            try:
                meta_set = c.set_metadata(meta_, clear=clear_meta)
            except Exception as e:
                module.fail_json(msg=e.message)
            finally:
                _fetch_meta(module, c)

        if ttl:
            # Container-wide CDN TTL (meaningful for public containers).
            try:
                c.cdn_ttl = ttl
            except Exception as e:
                module.fail_json(msg=e.message)
            else:
                EXIT_DICT['ttl'] = c.cdn_ttl

        if public:
            try:
                cont_public = c.make_public()
            except Exception as e:
                module.fail_json(msg=e.message)
            else:
                # Report every CDN URL variant for the now-public container.
                EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
                                                   ssl_url=c.cdn_ssl_uri,
                                                   streaming_url=c.cdn_streaming_uri,
                                                   ios_uri=c.cdn_ios_uri)

        if private:
            try:
                cont_private = c.make_private()
            except Exception as e:
                module.fail_json(msg=e.message)
            else:
                EXIT_DICT['set_private'] = True

        if web_index:
            try:
                cont_web_index = c.set_web_index_page(web_index)
            except Exception as e:
                module.fail_json(msg=e.message)
            else:
                EXIT_DICT['set_index'] = True
            finally:
                _fetch_meta(module, c)

        if web_error:
            try:
                cont_err_index = c.set_web_error_page(web_error)
            except Exception as e:
                module.fail_json(msg=e.message)
            else:
                EXIT_DICT['set_error'] = True
            finally:
                _fetch_meta(module, c)

    EXIT_DICT['container'] = c.name
    EXIT_DICT['objs_in_container'] = c.object_count
    EXIT_DICT['total_bytes'] = c.total_bytes

    # The existence of these local names signals that a change happened
    # above (deliberate locals() inspection -- do not rename or remove).
    _locals = locals().keys()
    if ('cont_deleted' in _locals
            or 'meta_set' in _locals
            or 'cont_public' in _locals
            or 'cont_private' in _locals
            or 'cont_web_index' in _locals
            or 'cont_err_index' in _locals):
        EXIT_DICT['changed'] = True

    module.exit_json(**EXIT_DICT)
def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
               private, web_index, web_error):
    """Route the request to the container or metadata handler."""
    client = pyrax.cloudfiles
    if client is None:
        module.fail_json(msg='Failed to instantiate client. This '
                         'typically indicates an invalid region or an '
                         'incorrectly capitalized region name.')

    if typ != "container":
        meta(client, module, container_, state, meta_, clear_meta)
    else:
        container(client, module, container_, state, meta_, clear_meta, ttl,
                  public, private, web_index, web_error)
def main():
    """Ansible entry point: validate arguments and dispatch to cloudfiles()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            container=dict(),
            state=dict(choices=['present', 'absent', 'list'],
                       default='present'),
            meta=dict(type='dict', default=dict()),
            clear_meta=dict(default=False, type='bool'),
            type=dict(choices=['container', 'meta'], default='container'),
            ttl=dict(type='int'),
            public=dict(default=False, type='bool'),
            private=dict(default=False, type='bool'),
            web_index=dict(),
            web_error=dict()
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    container_ = module.params.get('container')
    state = module.params.get('state')
    meta_ = module.params.get('meta')
    clear_meta = module.params.get('clear_meta')
    typ = module.params.get('type')
    ttl = module.params.get('ttl')
    public = module.params.get('public')
    private = module.params.get('private')
    web_index = module.params.get('web_index')
    web_error = module.params.get('web_error')

    # 'list' is the only state that works without a container name.
    if state in ['present', 'absent'] and not container_:
        module.fail_json(msg='please specify a container name')
    # clear_meta only makes sense when operating on metadata objects.
    if clear_meta and not typ == 'meta':
        module.fail_json(msg='clear_meta can only be used when setting '
                             'metadata')

    setup_rax_module(module, pyrax)

    cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
               private, web_index, web_error)


if __name__ == '__main__':
    main()
| gpl-3.0 |
YeoLab/anchor | anchor/simulate.py | 1 | 7366 |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import six
from .visualize import violinplot, MODALITY_ORDER, MODALITY_TO_COLOR, barplot
def add_noise(data, iteration_per_noise=100,
              noise_percentages=np.arange(0, 101, step=10), plot=True,
              violinplot_kws=None, figure_prefix='anchor_simulation'):
    """Perturb `data` with increasing fractions of uniform [0, 1) noise.

    For each percentage in `noise_percentages`, run `iteration_per_noise`
    iterations, each replacing a random subset of rows with uniform noise,
    and concatenate all noisy copies column-wise (columns renamed to
    ``<col>_noise<pct>_iter<n>``). Optionally save one violinplot per
    noise level.

    NOTE(review): `noise_ind` is drawn with ``size=noise_percentage``
    while the replacement array is sized from ``shape`` (a fraction of
    the rows); the two only agree when `data` has exactly 100 rows --
    confirm intent before reusing with other sizes.
    """
    data_dfs = []
    violinplot_kws = {} if violinplot_kws is None else violinplot_kws

    width = len(data.columns) * 0.75
    # Fade overlapping violins so repeated iterations remain readable.
    alpha = max(0.05, 1. / iteration_per_noise)

    for noise_percentage in noise_percentages:
        if plot:
            fig, ax = plt.subplots(figsize=(width, 3))
        for iteration in range(iteration_per_noise):
            # 0% noise is deterministic; a single iteration suffices.
            if iteration > 0 and noise_percentage == 0:
                continue
            noisy_data = data.copy()
            shape = (noisy_data.shape[0] * noise_percentage / 100,
                     noisy_data.shape[1])
            size = np.product(shape)
            noise_ind = np.random.choice(noisy_data.index,
                                         size=noise_percentage,
                                         replace=False)
            noisy_data.loc[noise_ind] = np.random.uniform(
                low=0., high=1., size=size).reshape(shape)
            # Tag each noisy copy's columns with the noise level/iteration.
            renamer = dict(
                (col, '{}_noise{}_iter{}'.format(
                    col, noise_percentage, iteration))
                for col in noisy_data.columns)
            renamed = noisy_data.rename(columns=renamer)
            data_dfs.append(renamed)
            if plot:
                noisy_data_tidy = noisy_data.unstack()
                noisy_data_tidy = noisy_data_tidy.reset_index()
                noisy_data_tidy = noisy_data_tidy.rename(
                    columns={'level_0': 'Feature ID',
                             'level_1': 'Sample ID',
                             0: '$\Psi$'})
                violinplot(x='Feature ID', y='$\Psi$',
                           data=noisy_data_tidy, ax=ax,
                           **violinplot_kws)
        if plot:
            if noise_percentage > 0:
                for c in ax.collections:
                    c.set_alpha(alpha)
            ax.set(ylim=(0, 1), title='{}% Uniform Noise'.format(
                noise_percentage), yticks=(0, 0.5, 1), ylabel='$\Psi$',
                xlabel='')
            plt.setp(ax.get_xticklabels(), rotation=90)
            sns.despine()
            fig.tight_layout()
            fig.savefig('{}_noise_percentage_{}.pdf'.format(figure_prefix,
                                                            noise_percentage))

    all_noisy_data = pd.concat(data_dfs, axis=1)
    return all_noisy_data
class ModalityEvaluator(object):
    """Value object bundling an estimator with its inputs and outputs."""

    def __init__(self, estimator, data, waypoints, fitted, predicted):
        self.estimator = estimator
        self.data = data
        self.waypoints = waypoints
        self.fitted = fitted
        self.predicted = predicted
def evaluate_estimator(estimator, data, waypoints=None, figure_prefix=''):
    """Fit `estimator` to `data`, save diagnostic figures and CSVs, and
    return a ModalityEvaluator bundling inputs and outputs.

    Artifacts saved under `figure_prefix`: violinplots of random events per
    predicted modality, a modality barplot, best/worst-fit violinplots,
    and the fitted/predicted tables as CSV.
    """
    #
    # estimator.violinplot(n=1e3)
    # fig = plt.gcf()
    # for ax in fig.axes:
    #     ax.set(yticks=[0, 0.5, 1], xlabel='')
    #     # xticklabels =
    #     # ax.set_xticklabels(fontsize=20)
    # fig.tight_layout()
    # sns.despine()
    # fig.savefig('{}_modality_parameterization.pdf'.format(figure_prefix))

    fitted = estimator.fit(data)
    predicted = estimator.predict(fitted)
    predicted.name = 'Predicted Modality'

    # Long-form table of per-modality scores for each feature.
    fitted_tidy = fitted.stack().reset_index()
    fitted_tidy = fitted_tidy.rename(
        columns={'level_1': 'Feature ID', 'level_0': "Modality",
                 0: estimator.score_name}, copy=False)

    predicted_tidy = predicted.to_frame().reset_index()
    predicted_tidy = predicted_tidy.rename(columns={'index': 'Feature ID'})
    # Keep only the winning modality's score for each feature.
    predicted_tidy = predicted_tidy.merge(
        fitted_tidy, left_on=['Feature ID', 'Predicted Modality'],
        right_on=['Feature ID', 'Modality'])

    # Make categorical so they are plotted in the correct order
    predicted_tidy['Predicted Modality'] = \
        pd.Categorical(predicted_tidy['Predicted Modality'],
                       categories=MODALITY_ORDER, ordered=True)
    predicted_tidy['Modality'] = \
        pd.Categorical(predicted_tidy['Modality'],
                       categories=MODALITY_ORDER, ordered=True)

    grouped = data.groupby(predicted, axis=1)

    # Violinplot a few randomly chosen events from each predicted modality.
    size = 5
    fig, axes = plt.subplots(figsize=(size*0.75, 8), nrows=len(grouped))
    for ax, (modality, df) in zip(axes, grouped):
        random_ids = np.random.choice(df.columns, replace=False, size=size)
        random_df = df[random_ids]
        tidy_random = random_df.stack().reset_index()
        tidy_random = tidy_random.rename(columns={'level_0': 'sample_id',
                                                  'level_1': 'event_id',
                                                  0: '$\Psi$'})
        sns.violinplot(x='event_id', y='$\Psi$', data=tidy_random,
                       color=MODALITY_TO_COLOR[modality], ax=ax,
                       inner=None, bw=0.2, scale='width')
        ax.set(ylim=(0, 1), yticks=(0, 0.5, 1), xticks=[], xlabel='',
               title=modality)
    sns.despine()
    fig.tight_layout()
    fig.savefig('{}_random_estimated_modalities.pdf'.format(figure_prefix))

    g = barplot(predicted_tidy, hue='Modality')
    g.savefig('{}_modalities_barplot.pdf'.format(figure_prefix))

    plot_best_worst_fits(predicted_tidy, data, modality_col='Modality',
                         score=estimator.score_name)
    fig = plt.gcf()
    fig.savefig('{}_best_worst_fit_violinplots.pdf'.format(figure_prefix))

    fitted.to_csv('{}_fitted.csv'.format(figure_prefix))
    predicted.to_csv('{}_predicted.csv'.format(figure_prefix))

    result = ModalityEvaluator(estimator, data, waypoints, fitted, predicted)
    return result
def plot_best_worst_fits(assignments_df, data, modality_col='Modality',
                         score='$\log_2 K$'):
    """Violinplots of the highest and lowest scoring of each modality"""
    ncols = 2
    nrows = len(assignments_df.groupby(modality_col).groups.keys())
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
                             figsize=(nrows*4, ncols*6))
    axes_iter = axes.flat

    fits = 'Highest', 'Lowest'

    for modality, df in assignments_df.groupby(modality_col):
        df = df.sort_values(score)
        color = MODALITY_TO_COLOR[modality]
        for fit in fits:
            # Take the ten best- or worst-scoring events of this modality.
            if fit == 'Highest':
                ids = df['Feature ID'][-10:]
            else:
                ids = df['Feature ID'][:10]
            fit_psi = data[ids]
            tidy_fit_psi = fit_psi.stack().reset_index()
            tidy_fit_psi = tidy_fit_psi.rename(columns={'level_0': 'Sample ID',
                                                        'level_1':
                                                        'Feature ID',
                                                        0: '$\Psi$'})
            if tidy_fit_psi.empty:
                continue
            ax = six.next(axes_iter)
            violinplot(x='Feature ID', y='$\Psi$', data=tidy_fit_psi,
                       color=color, ax=ax)
            ax.set(title='{} {} {}'.format(fit, score, modality), xticks=[])
    sns.despine()
    fig.tight_layout()
olemis/sqlalchemy | test/dialect/postgresql/test_compiler.py | 10 | 38258 | # coding: utf-8
from sqlalchemy.testing.assertions import AssertsCompiledSQL, is_, \
assert_raises
from sqlalchemy.testing import engines, fixtures
from sqlalchemy import testing
from sqlalchemy import Sequence, Table, Column, Integer, update, String,\
insert, func, MetaData, Enum, Index, and_, delete, select, cast, text, \
Text
from sqlalchemy.dialects.postgresql import ExcludeConstraint, array
from sqlalchemy import exc, schema
from sqlalchemy.dialects.postgresql import base as postgresql
from sqlalchemy.dialects.postgresql import TSRANGE
from sqlalchemy.orm import mapper, aliased, Session
from sqlalchemy.sql import table, column, operators
from sqlalchemy.util import u
class SequenceTest(fixtures.TestBase, AssertsCompiledSQL):
    """Checks PostgreSQL sequence-name formatting and long-name handling."""
    __prefer__ = 'postgresql'

    def test_format(self):
        # Schema-qualified and case-sensitive names must be quoted correctly.
        seq = Sequence('my_seq_no_schema')
        dialect = postgresql.PGDialect()
        assert dialect.identifier_preparer.format_sequence(seq) \
            == 'my_seq_no_schema'
        seq = Sequence('my_seq', schema='some_schema')
        assert dialect.identifier_preparer.format_sequence(seq) \
            == 'some_schema.my_seq'
        seq = Sequence('My_Seq', schema='Some_Schema')
        assert dialect.identifier_preparer.format_sequence(seq) \
            == '"Some_Schema"."My_Seq"'

    @testing.only_on('postgresql', 'foo')
    @testing.provide_metadata
    def test_reverse_eng_name(self):
        # Very long table/column names (truncated here to 57 chars) must
        # still round-trip through CREATE + INSERT with a usable PK.
        metadata = self.metadata
        engine = engines.testing_engine(options=dict(implicit_returning=False))
        for tname, cname in [
            ('tb1' * 30, 'abc'),
            ('tb2', 'abc' * 30),
            ('tb3' * 30, 'abc' * 30),
            ('tb4', 'abc'),
        ]:
            t = Table(tname[:57],
                      metadata,
                      Column(cname[:57], Integer, primary_key=True)
                      )
            t.create(engine)
            r = engine.execute(t.insert())
            assert r.inserted_primary_key == [1]
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = postgresql.dialect()
def test_update_returning(self):
    # UPDATE ... RETURNING with a column list, the whole table, and an
    # SQL expression (which gets an anonymous label).
    dialect = postgresql.dialect()
    table1 = table(
        'mytable',
        column('myid', Integer),
        column('name', String(128)),
        column('description', String(128)))
    u = update(
        table1,
        values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
    self.assert_compile(u,
                        'UPDATE mytable SET name=%(name)s '
                        'RETURNING mytable.myid, mytable.name',
                        dialect=dialect)
    u = update(table1, values=dict(name='foo')).returning(table1)
    self.assert_compile(u,
                        'UPDATE mytable SET name=%(name)s '
                        'RETURNING mytable.myid, mytable.name, '
                        'mytable.description', dialect=dialect)
    u = update(table1, values=dict(name='foo'
                                   )).returning(func.length(table1.c.name))
    self.assert_compile(
        u,
        'UPDATE mytable SET name=%(name)s '
        'RETURNING length(mytable.name) AS length_1',
        dialect=dialect)
def test_insert_returning(self):
    # INSERT ... RETURNING with a column list, the whole table, and an
    # SQL expression (which gets an anonymous label).
    dialect = postgresql.dialect()
    table1 = table('mytable',
                   column('myid', Integer),
                   column('name', String(128)),
                   column('description', String(128)),
                   )
    i = insert(
        table1,
        values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
    self.assert_compile(i,
                        'INSERT INTO mytable (name) VALUES '
                        '(%(name)s) RETURNING mytable.myid, '
                        'mytable.name', dialect=dialect)
    i = insert(table1, values=dict(name='foo')).returning(table1)
    self.assert_compile(i,
                        'INSERT INTO mytable (name) VALUES '
                        '(%(name)s) RETURNING mytable.myid, '
                        'mytable.name, mytable.description',
                        dialect=dialect)
    i = insert(table1, values=dict(name='foo'
                                   )).returning(func.length(table1.c.name))
    self.assert_compile(i,
                        'INSERT INTO mytable (name) VALUES '
                        '(%(name)s) RETURNING length(mytable.name) '
                        'AS length_1', dialect=dialect)
def test_create_drop_enum(self):
    # test escaping and unicode within CREATE TYPE for ENUM
    typ = postgresql.ENUM(
        "val1", "val2", "val's 3", u('méil'), name="myname")
    self.assert_compile(
        postgresql.CreateEnumType(typ),
        u("CREATE TYPE myname AS "
          "ENUM ('val1', 'val2', 'val''s 3', 'méil')"))

    # Mixed-case type names must be quoted in the DDL.
    typ = postgresql.ENUM(
        "val1", "val2", "val's 3", name="PleaseQuoteMe")
    self.assert_compile(postgresql.CreateEnumType(typ),
                        "CREATE TYPE \"PleaseQuoteMe\" AS ENUM "
                        "('val1', 'val2', 'val''s 3')"
                        )
def test_generic_enum(self):
    # Generic Enum renders native CREATE/DROP TYPE (honoring schema) and
    # falls back to VARCHAR + CHECK when native_enum=False.
    e1 = Enum('x', 'y', 'z', name='somename')
    e2 = Enum('x', 'y', 'z', name='somename', schema='someschema')
    self.assert_compile(postgresql.CreateEnumType(e1),
                        "CREATE TYPE somename AS ENUM ('x', 'y', 'z')"
                        )
    self.assert_compile(postgresql.CreateEnumType(e2),
                        "CREATE TYPE someschema.somename AS ENUM "
                        "('x', 'y', 'z')")
    self.assert_compile(postgresql.DropEnumType(e1),
                        'DROP TYPE somename')
    self.assert_compile(postgresql.DropEnumType(e2),
                        'DROP TYPE someschema.somename')
    t1 = Table('sometable', MetaData(), Column('somecolumn', e1))
    self.assert_compile(schema.CreateTable(t1),
                        'CREATE TABLE sometable (somecolumn '
                        'somename)')
    t1 = Table(
        'sometable',
        MetaData(),
        Column(
            'somecolumn',
            Enum(
                'x',
                'y',
                'z',
                native_enum=False)))
    self.assert_compile(schema.CreateTable(t1),
                        "CREATE TABLE sometable (somecolumn "
                        "VARCHAR(1), CHECK (somecolumn IN ('x', "
                        "'y', 'z')))")
def test_create_table_with_tablespace(self):
    # postgresql_tablespace emits a trailing TABLESPACE clause.
    m = MetaData()
    tbl = Table(
        'atable', m, Column("id", Integer),
        postgresql_tablespace='sometablespace')
    self.assert_compile(
        schema.CreateTable(tbl),
        "CREATE TABLE atable (id INTEGER) TABLESPACE sometablespace")
def test_create_table_with_tablespace_quoted(self):
    # testing quoting of tablespace name
    # ('table' is a reserved word, so it must be rendered quoted)
    m = MetaData()
    tbl = Table(
        'anothertable', m, Column("id", Integer),
        postgresql_tablespace='table')
    self.assert_compile(
        schema.CreateTable(tbl),
        'CREATE TABLE anothertable (id INTEGER) TABLESPACE "table"')
def test_create_table_inherits(self):
m = MetaData()
tbl = Table(
'atable', m, Column("id", Integer),
postgresql_inherits='i1')
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) INHERITS ( i1 )")
def test_create_table_inherits_tuple(self):
m = MetaData()
tbl = Table(
'atable', m, Column("id", Integer),
postgresql_inherits=('i1', 'i2'))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) INHERITS ( i1, i2 )")
def test_create_table_inherits_quoting(self):
m = MetaData()
tbl = Table(
'atable', m, Column("id", Integer),
postgresql_inherits=('Quote Me', 'quote Me Too'))
self.assert_compile(
schema.CreateTable(tbl),
'CREATE TABLE atable (id INTEGER) INHERITS '
'( "Quote Me", "quote Me Too" )')
def test_create_table_with_oids(self):
m = MetaData()
tbl = Table(
'atable', m, Column("id", Integer),
postgresql_with_oids=True, )
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) WITH OIDS")
tbl2 = Table(
'anothertable', m, Column("id", Integer),
postgresql_with_oids=False)
self.assert_compile(
schema.CreateTable(tbl2),
"CREATE TABLE anothertable (id INTEGER) WITHOUT OIDS")
def test_create_table_with_oncommit_option(self):
m = MetaData()
tbl = Table(
'atable', m, Column("id", Integer),
postgresql_on_commit="drop")
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) ON COMMIT DROP")
def test_create_table_with_multiple_options(self):
m = MetaData()
tbl = Table(
'atable', m, Column("id", Integer),
postgresql_tablespace='sometablespace',
postgresql_with_oids=False,
postgresql_on_commit="preserve_rows")
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) WITHOUT OIDS "
"ON COMMIT PRESERVE ROWS TABLESPACE sometablespace")
def test_create_partial_index(self):
    """postgresql_where produces a WHERE clause on CREATE INDEX, with
    numeric and string literals rendered inline."""
    m = MetaData()
    tbl = Table('testtbl', m, Column('data', Integer))
    idx = Index('test_idx1', tbl.c.data,
                postgresql_where=and_(tbl.c.data > 5, tbl.c.data
                                      < 10))
    # NOTE(review): the statement below is an exact duplicate of the one
    # above; it looks unintentional but is harmless.
    idx = Index('test_idx1', tbl.c.data,
                postgresql_where=and_(tbl.c.data > 5, tbl.c.data
                                      < 10))

    # test quoting and all that
    idx2 = Index('test_idx2', tbl.c.data,
                 postgresql_where=and_(tbl.c.data > 'a', tbl.c.data
                                       < "b's"))
    self.assert_compile(schema.CreateIndex(idx),
                        'CREATE INDEX test_idx1 ON testtbl (data) '
                        'WHERE data > 5 AND data < 10',
                        dialect=postgresql.dialect())
    self.assert_compile(schema.CreateIndex(idx2),
                        "CREATE INDEX test_idx2 ON testtbl (data) "
                        "WHERE data > 'a' AND data < 'b''s'",
                        dialect=postgresql.dialect())

def test_create_index_with_ops(self):
    """postgresql_ops attaches operator classes per column; the dict is
    keyed by the column key ('d2'), not the column name ('data2')."""
    m = MetaData()
    tbl = Table('testtbl', m,
                Column('data', String),
                Column('data2', Integer, key='d2'))
    idx = Index('test_idx1', tbl.c.data,
                postgresql_ops={'data': 'text_pattern_ops'})
    idx2 = Index('test_idx2', tbl.c.data, tbl.c.d2,
                 postgresql_ops={'data': 'text_pattern_ops',
                                 'd2': 'int4_ops'})
    self.assert_compile(schema.CreateIndex(idx),
                        'CREATE INDEX test_idx1 ON testtbl '
                        '(data text_pattern_ops)',
                        dialect=postgresql.dialect())
    self.assert_compile(schema.CreateIndex(idx2),
                        'CREATE INDEX test_idx2 ON testtbl '
                        '(data text_pattern_ops, data2 int4_ops)',
                        dialect=postgresql.dialect())

def test_create_index_with_text_or_composite(self):
    """Indexes mixing plain columns, text() fragments and SQL expressions;
    postgresql_ops applies to named columns and labeled expressions."""
    m = MetaData()
    tbl = Table('testtbl', m,
                Column('d1', String),
                Column('d2', Integer))
    idx = Index('test_idx1', text('x'))
    tbl.append_constraint(idx)
    idx2 = Index('test_idx2', text('y'), tbl.c.d2)
    idx3 = Index(
        'test_idx2', tbl.c.d1, text('y'), tbl.c.d2,
        postgresql_ops={'d1': 'x1', 'd2': 'x2'}
    )
    idx4 = Index(
        'test_idx2', tbl.c.d1, tbl.c.d2 > 5, text('q'),
        postgresql_ops={'d1': 'x1', 'd2': 'x2'}
    )
    idx5 = Index(
        'test_idx2', tbl.c.d1, (tbl.c.d2 > 5).label('g'), text('q'),
        postgresql_ops={'d1': 'x1', 'g': 'x2'}
    )
    self.assert_compile(
        schema.CreateIndex(idx),
        "CREATE INDEX test_idx1 ON testtbl (x)"
    )
    self.assert_compile(
        schema.CreateIndex(idx2),
        "CREATE INDEX test_idx2 ON testtbl (y, d2)"
    )
    self.assert_compile(
        schema.CreateIndex(idx3),
        "CREATE INDEX test_idx2 ON testtbl (d1 x1, y, d2 x2)"
    )
    # note that at the moment we do not expect the 'd2' op to
    # pick up on the "d2 > 5" expression
    self.assert_compile(
        schema.CreateIndex(idx4),
        "CREATE INDEX test_idx2 ON testtbl (d1 x1, (d2 > 5), q)"
    )
    # however it does work if we label!
    self.assert_compile(
        schema.CreateIndex(idx5),
        "CREATE INDEX test_idx2 ON testtbl (d1 x1, (d2 > 5) x2, q)"
    )

def test_create_index_with_using(self):
    """postgresql_using inserts a USING <method> clause."""
    m = MetaData()
    tbl = Table('testtbl', m, Column('data', String))
    idx1 = Index('test_idx1', tbl.c.data)
    idx2 = Index('test_idx2', tbl.c.data, postgresql_using='btree')
    idx3 = Index('test_idx3', tbl.c.data, postgresql_using='hash')
    self.assert_compile(schema.CreateIndex(idx1),
                        'CREATE INDEX test_idx1 ON testtbl '
                        '(data)',
                        dialect=postgresql.dialect())
    self.assert_compile(schema.CreateIndex(idx2),
                        'CREATE INDEX test_idx2 ON testtbl '
                        'USING btree (data)',
                        dialect=postgresql.dialect())
    self.assert_compile(schema.CreateIndex(idx3),
                        'CREATE INDEX test_idx3 ON testtbl '
                        'USING hash (data)',
                        dialect=postgresql.dialect())

def test_create_index_with_with(self):
    """postgresql_with renders storage parameters in a WITH (...) clause,
    after any USING clause."""
    m = MetaData()
    tbl = Table('testtbl', m, Column('data', String))
    idx1 = Index('test_idx1', tbl.c.data)
    idx2 = Index(
        'test_idx2', tbl.c.data, postgresql_with={"fillfactor": 50})
    idx3 = Index('test_idx3', tbl.c.data, postgresql_using="gist",
                 postgresql_with={"buffering": "off"})
    self.assert_compile(schema.CreateIndex(idx1),
                        'CREATE INDEX test_idx1 ON testtbl '
                        '(data)')
    self.assert_compile(schema.CreateIndex(idx2),
                        'CREATE INDEX test_idx2 ON testtbl '
                        '(data) '
                        'WITH (fillfactor = 50)')
    self.assert_compile(schema.CreateIndex(idx3),
                        'CREATE INDEX test_idx3 ON testtbl '
                        'USING gist (data) '
                        'WITH (buffering = off)')

def test_create_index_expr_gets_parens(self):
    """Expression indexes are wrapped in an extra level of parens."""
    m = MetaData()
    tbl = Table('testtbl', m, Column('x', Integer), Column('y', Integer))
    idx1 = Index('test_idx1', 5 / (tbl.c.x + tbl.c.y))
    self.assert_compile(
        schema.CreateIndex(idx1),
        "CREATE INDEX test_idx1 ON testtbl ((5 / (x + y)))"
    )

def test_create_index_literals(self):
    """Literals in index expressions render inline rather than as binds."""
    m = MetaData()
    tbl = Table('testtbl', m, Column('data', Integer))
    idx1 = Index('test_idx1', tbl.c.data + 5)
    self.assert_compile(
        schema.CreateIndex(idx1),
        "CREATE INDEX test_idx1 ON testtbl ((data + 5))"
    )

def test_create_index_concurrently(self):
    """postgresql_concurrently adds the CONCURRENTLY keyword."""
    m = MetaData()
    tbl = Table('testtbl', m, Column('data', Integer))
    idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True)
    self.assert_compile(
        schema.CreateIndex(idx1),
        "CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)"
    )
def test_exclude_constraint_min(self):
    """Minimal EXCLUDE constraint defaults to USING gist."""
    m = MetaData()
    tbl = Table('testtbl', m,
                Column('room', Integer, primary_key=True))
    cons = ExcludeConstraint(('room', '='))
    tbl.append_constraint(cons)
    self.assert_compile(schema.AddConstraint(cons),
                        'ALTER TABLE testtbl ADD EXCLUDE USING gist '
                        '(room WITH =)',
                        dialect=postgresql.dialect())

def test_exclude_constraint_full(self):
    """EXCLUDE with all options: name, using, where, deferrable,
    initially; elements may be given as Column objects or strings."""
    m = MetaData()
    room = Column('room', Integer, primary_key=True)
    tbl = Table('testtbl', m,
                room,
                Column('during', TSRANGE))
    # rebinding 'room' to a fresh, unbound Column: the constraint is
    # built against it before being attached to the table
    room = Column('room', Integer, primary_key=True)
    cons = ExcludeConstraint((room, '='), ('during', '&&'),
                             name='my_name',
                             using='gist',
                             where="room > 100",
                             deferrable=True,
                             initially='immediate')
    tbl.append_constraint(cons)
    self.assert_compile(schema.AddConstraint(cons),
                        'ALTER TABLE testtbl ADD CONSTRAINT my_name '
                        'EXCLUDE USING gist '
                        '(room WITH =, during WITH ''&&) WHERE '
                        '(room > 100) DEFERRABLE INITIALLY immediate',
                        dialect=postgresql.dialect())

def test_exclude_constraint_copy(self):
    """An ExcludeConstraint can be copied once bound to a table."""
    m = MetaData()
    cons = ExcludeConstraint(('room', '='))
    tbl = Table('testtbl', m,
                Column('room', Integer, primary_key=True),
                cons)
    # apparently you can't copy a ColumnCollectionConstraint until
    # after it has been bound to a table...
    cons_copy = cons.copy()
    tbl.append_constraint(cons_copy)
    self.assert_compile(schema.AddConstraint(cons_copy),
                        'ALTER TABLE testtbl ADD EXCLUDE USING gist '
                        '(room WITH =)')

def test_exclude_constraint_text(self):
    """text() elements are accepted as EXCLUDE expressions verbatim."""
    m = MetaData()
    cons = ExcludeConstraint((text('room::TEXT'), '='))
    Table(
        'testtbl', m,
        Column('room', String),
        cons)
    self.assert_compile(
        schema.AddConstraint(cons),
        'ALTER TABLE testtbl ADD EXCLUDE USING gist '
        '(room::TEXT WITH =)')

def test_exclude_constraint_cast(self):
    """cast() expressions compile inside EXCLUDE elements."""
    m = MetaData()
    tbl = Table(
        'testtbl', m,
        Column('room', String)
    )
    cons = ExcludeConstraint((cast(tbl.c.room, Text), '='))
    tbl.append_constraint(cons)
    self.assert_compile(
        schema.AddConstraint(cons),
        'ALTER TABLE testtbl ADD EXCLUDE USING gist '
        '(CAST(room AS TEXT) WITH =)'
    )

def test_exclude_constraint_cast_quote(self):
    """Quoting is preserved for mixed-case column names inside CAST."""
    m = MetaData()
    tbl = Table(
        'testtbl', m,
        Column('Room', String)
    )
    cons = ExcludeConstraint((cast(tbl.c.Room, Text), '='))
    tbl.append_constraint(cons)
    self.assert_compile(
        schema.AddConstraint(cons),
        'ALTER TABLE testtbl ADD EXCLUDE USING gist '
        '(CAST("Room" AS TEXT) WITH =)'
    )
def test_substring(self):
    """substring() compiles to PG's SUBSTRING(x FROM y [FOR z]) form."""
    self.assert_compile(func.substring('abc', 1, 2),
                        'SUBSTRING(%(substring_1)s FROM %(substring_2)s '
                        'FOR %(substring_3)s)')
    self.assert_compile(func.substring('abc', 1),
                        'SUBSTRING(%(substring_1)s FROM %(substring_2)s)')

def test_for_update(self):
    """with_for_update() variants: FOR UPDATE / FOR SHARE (read=True),
    NOWAIT, and OF <table>; OF always names the table even when given a
    column or a list of columns."""
    table1 = table('mytable',
                   column('myid'), column('name'), column('description'))

    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = %(myid_1)s FOR UPDATE")

    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(nowait=True),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = %(myid_1)s FOR UPDATE NOWAIT")

    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(read=True),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = %(myid_1)s FOR SHARE")

    self.assert_compile(
        table1.select(table1.c.myid == 7).
        with_for_update(read=True, nowait=True),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = %(myid_1)s FOR SHARE NOWAIT")

    self.assert_compile(
        table1.select(table1.c.myid == 7).
        with_for_update(of=table1.c.myid),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = %(myid_1)s "
        "FOR UPDATE OF mytable")

    self.assert_compile(
        table1.select(table1.c.myid == 7).
        with_for_update(read=True, nowait=True, of=table1),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = %(myid_1)s "
        "FOR SHARE OF mytable NOWAIT")

    self.assert_compile(
        table1.select(table1.c.myid == 7).
        with_for_update(read=True, nowait=True, of=table1.c.myid),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = %(myid_1)s "
        "FOR SHARE OF mytable NOWAIT")

    self.assert_compile(
        table1.select(table1.c.myid == 7).
        with_for_update(read=True, nowait=True,
                        of=[table1.c.myid, table1.c.name]),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = %(myid_1)s "
        "FOR SHARE OF mytable NOWAIT")

    # aliased table: OF clause uses the alias name
    ta = table1.alias()
    self.assert_compile(
        ta.select(ta.c.myid == 7).
        with_for_update(of=[ta.c.myid, ta.c.name]),
        "SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
        "FROM mytable AS mytable_1 "
        "WHERE mytable_1.myid = %(myid_1)s FOR UPDATE OF mytable_1"
    )

def test_reserved_words(self):
    """PG-specific reserved words (e.g. "variadic") are quoted."""
    table = Table("pg_table", MetaData(),
                  Column("col1", Integer),
                  Column("variadic", Integer))
    x = select([table.c.col1, table.c.variadic])
    self.assert_compile(
        x,
        '''SELECT pg_table.col1, pg_table."variadic" FROM pg_table''')
def test_array(self):
    """ARRAY type: casts, indexing/slicing (including chained access),
    containment (@>, <@, &&) and ANY/ALL comparisons."""
    c = Column('x', postgresql.ARRAY(Integer))
    self.assert_compile(
        cast(c, postgresql.ARRAY(Integer)),
        "CAST(x AS INTEGER[])"
    )
    self.assert_compile(
        c[5],
        "x[%(x_1)s]",
        checkparams={'x_1': 5}
    )
    self.assert_compile(
        c[5:7],
        "x[%(x_1)s:%(x_2)s]",
        checkparams={'x_2': 7, 'x_1': 5}
    )
    self.assert_compile(
        c[5:7][2:3],
        "x[%(x_1)s:%(x_2)s][%(param_1)s:%(param_2)s]",
        checkparams={'x_2': 7, 'x_1': 5, 'param_1': 2, 'param_2': 3}
    )
    self.assert_compile(
        c[5:7][3],
        "x[%(x_1)s:%(x_2)s][%(param_1)s]",
        checkparams={'x_2': 7, 'x_1': 5, 'param_1': 3}
    )
    self.assert_compile(
        c.contains([1]),
        'x @> %(x_1)s',
        checkparams={'x_1': [1]}
    )
    self.assert_compile(
        c.contained_by([2]),
        'x <@ %(x_1)s',
        checkparams={'x_1': [2]}
    )
    self.assert_compile(
        c.overlap([3]),
        'x && %(x_1)s',
        checkparams={'x_1': [3]}
    )
    self.assert_compile(
        postgresql.Any(4, c),
        '%(param_1)s = ANY (x)',
        checkparams={'param_1': 4}
    )
    self.assert_compile(
        c.any(5, operator=operators.ne),
        '%(param_1)s != ANY (x)',
        checkparams={'param_1': 5}
    )
    self.assert_compile(
        postgresql.All(6, c, operator=operators.gt),
        '%(param_1)s > ALL (x)',
        checkparams={'param_1': 6}
    )
    self.assert_compile(
        c.all(7, operator=operators.lt),
        '%(param_1)s < ALL (x)',
        checkparams={'param_1': 7}
    )

def _test_array_zero_indexes(self, zero_indexes):
    """Helper: with zero_indexes=True, Python-side indexes are shifted
    by +1 in the bound parameters to match PG's one-based arrays."""
    c = Column('x', postgresql.ARRAY(Integer, zero_indexes=zero_indexes))

    add_one = 1 if zero_indexes else 0

    self.assert_compile(
        cast(c, postgresql.ARRAY(Integer, zero_indexes=zero_indexes)),
        "CAST(x AS INTEGER[])"
    )
    self.assert_compile(
        c[5],
        "x[%(x_1)s]",
        checkparams={'x_1': 5 + add_one}
    )
    self.assert_compile(
        c[5:7],
        "x[%(x_1)s:%(x_2)s]",
        checkparams={'x_2': 7 + add_one, 'x_1': 5 + add_one}
    )
    self.assert_compile(
        c[5:7][2:3],
        "x[%(x_1)s:%(x_2)s][%(param_1)s:%(param_2)s]",
        checkparams={'x_2': 7 + add_one, 'x_1': 5 + add_one,
                     'param_1': 2 + add_one, 'param_2': 3 + add_one}
    )
    self.assert_compile(
        c[5:7][3],
        "x[%(x_1)s:%(x_2)s][%(param_1)s]",
        checkparams={'x_2': 7 + add_one, 'x_1': 5 + add_one,
                     'param_1': 3 + add_one}
    )

def test_array_zero_indexes_true(self):
    self._test_array_zero_indexes(True)

def test_array_zero_indexes_false(self):
    self._test_array_zero_indexes(False)

def test_array_literal_type(self):
    """postgresql.array() literals report ARRAY type affinity with the
    correct (or explicitly overridden) item type."""
    is_(postgresql.array([1, 2]).type._type_affinity, postgresql.ARRAY)
    is_(postgresql.array([1, 2]).type.item_type._type_affinity, Integer)
    is_(postgresql.array([1, 2], type_=String).
        type.item_type._type_affinity, String)

def test_array_literal(self):
    """array literals render as ARRAY[...] with one bind per element."""
    self.assert_compile(
        func.array_dims(postgresql.array([1, 2]) +
                        postgresql.array([3, 4, 5])),
        "array_dims(ARRAY[%(param_1)s, %(param_2)s] || "
        "ARRAY[%(param_3)s, %(param_4)s, %(param_5)s])",
        checkparams={'param_5': 5, 'param_4': 4, 'param_1': 1,
                     'param_3': 3, 'param_2': 2}
    )

def test_array_literal_compare(self):
    """Comparing an array literal to a plain list coerces the list."""
    self.assert_compile(
        postgresql.array([1, 2]) == [3, 4, 5],
        "ARRAY[%(param_1)s, %(param_2)s] = "
        "ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]",
        checkparams={'param_5': 5, 'param_4': 4, 'param_1': 1,
                     'param_3': 3, 'param_2': 2}
    )

def test_array_literal_insert(self):
    """array literals compile inside INSERT ... VALUES."""
    m = MetaData()
    t = Table('t', m, Column('data', postgresql.ARRAY(Integer)))
    self.assert_compile(
        t.insert().values(data=array([1, 2, 3])),
        "INSERT INTO t (data) VALUES (ARRAY[%(param_1)s, "
        "%(param_2)s, %(param_3)s])"
    )

def test_update_array_element(self):
    """UPDATE can target a single array element by index."""
    m = MetaData()
    t = Table('t', m, Column('data', postgresql.ARRAY(Integer)))
    self.assert_compile(
        t.update().values({t.c.data[5]: 1}),
        "UPDATE t SET data[%(data_1)s]=%(param_1)s",
        checkparams={'data_1': 5, 'param_1': 1}
    )

def test_update_array_slice(self):
    """UPDATE can target an array slice."""
    m = MetaData()
    t = Table('t', m, Column('data', postgresql.ARRAY(Integer)))
    self.assert_compile(
        t.update().values({t.c.data[2:5]: 2}),
        "UPDATE t SET data[%(data_1)s:%(data_2)s]=%(param_1)s",
        checkparams={'param_1': 2, 'data_2': 5, 'data_1': 2}
    )
def test_from_only(self):
    """The 'ONLY' hint disables inheritance traversal for the hinted
    table in SELECT, UPDATE and DELETE; an unrecognized hint raises
    CompileError."""
    m = MetaData()
    tbl1 = Table('testtbl1', m, Column('id', Integer))
    tbl2 = Table('testtbl2', m, Column('id', Integer))

    stmt = tbl1.select().with_hint(tbl1, 'ONLY', 'postgresql')
    expected = 'SELECT testtbl1.id FROM ONLY testtbl1'
    self.assert_compile(stmt, expected)

    # the hint follows the alias through to the FROM clause
    talias1 = tbl1.alias('foo')
    stmt = talias1.select().with_hint(talias1, 'ONLY', 'postgresql')
    expected = 'SELECT foo.id FROM ONLY testtbl1 AS foo'
    self.assert_compile(stmt, expected)

    # only the hinted table gets the ONLY prefix
    stmt = select([tbl1, tbl2]).with_hint(tbl1, 'ONLY', 'postgresql')
    expected = ('SELECT testtbl1.id, testtbl2.id FROM ONLY testtbl1, '
                'testtbl2')
    self.assert_compile(stmt, expected)

    stmt = select([tbl1, tbl2]).with_hint(tbl2, 'ONLY', 'postgresql')
    expected = ('SELECT testtbl1.id, testtbl2.id FROM testtbl1, ONLY '
                'testtbl2')
    self.assert_compile(stmt, expected)

    # hints can be stacked for multiple tables
    stmt = select([tbl1, tbl2])
    stmt = stmt.with_hint(tbl1, 'ONLY', 'postgresql')
    stmt = stmt.with_hint(tbl2, 'ONLY', 'postgresql')
    expected = ('SELECT testtbl1.id, testtbl2.id FROM ONLY testtbl1, '
                'ONLY testtbl2')
    self.assert_compile(stmt, expected)

    stmt = update(tbl1, values=dict(id=1))
    stmt = stmt.with_hint('ONLY', dialect_name='postgresql')
    expected = 'UPDATE ONLY testtbl1 SET id=%(id)s'
    self.assert_compile(stmt, expected)

    stmt = delete(tbl1).with_hint(
        'ONLY', selectable=tbl1, dialect_name='postgresql')
    expected = 'DELETE FROM ONLY testtbl1'
    self.assert_compile(stmt, expected)

    # schema-qualified tables keep the qualification after ONLY
    tbl3 = Table('testtbl3', m, Column('id', Integer), schema='testschema')
    stmt = tbl3.select().with_hint(tbl3, 'ONLY', 'postgresql')
    expected = 'SELECT testschema.testtbl3.id FROM '\
               'ONLY testschema.testtbl3'
    self.assert_compile(stmt, expected)

    # an unrecognized hint text is rejected at compile time
    assert_raises(
        exc.CompileError,
        tbl3.select().with_hint(tbl3, "FAKE", "postgresql").compile,
        dialect=postgresql.dialect()
    )
class DistinctOnTest(fixtures.TestBase, AssertsCompiledSQL):
    """Test 'DISTINCT' with SQL expression language and orm.Query with
    an emphasis on PG's 'DISTINCT ON' syntax.

    """
    __dialect__ = postgresql.dialect()

    def setup(self):
        # fresh three-column table for each test
        self.table = Table('t', MetaData(),
                           Column('id', Integer, primary_key=True),
                           Column('a', String),
                           Column('b', String),
                           )

    def test_plain_generative(self):
        """distinct() with no arguments renders plain DISTINCT."""
        self.assert_compile(
            select([self.table]).distinct(),
            "SELECT DISTINCT t.id, t.a, t.b FROM t"
        )

    def test_on_columns_generative(self):
        """distinct(col) renders DISTINCT ON (col)."""
        self.assert_compile(
            select([self.table]).distinct(self.table.c.a),
            "SELECT DISTINCT ON (t.a) t.id, t.a, t.b FROM t"
        )

    def test_on_columns_generative_multi_call(self):
        """chained distinct() calls accumulate ON columns."""
        self.assert_compile(
            select([self.table]).distinct(self.table.c.a).
            distinct(self.table.c.b),
            "SELECT DISTINCT ON (t.a, t.b) t.id, t.a, t.b FROM t"
        )

    def test_plain_inline(self):
        """distinct=True keyword form renders plain DISTINCT."""
        self.assert_compile(
            select([self.table], distinct=True),
            "SELECT DISTINCT t.id, t.a, t.b FROM t"
        )

    def test_on_columns_inline_list(self):
        """distinct=<list of columns> keyword form renders DISTINCT ON."""
        self.assert_compile(
            select([self.table],
                   distinct=[self.table.c.a, self.table.c.b]).
            order_by(self.table.c.a, self.table.c.b),
            "SELECT DISTINCT ON (t.a, t.b) t.id, "
            "t.a, t.b FROM t ORDER BY t.a, t.b"
        )

    def test_on_columns_inline_scalar(self):
        """distinct=<single column> keyword form renders DISTINCT ON."""
        self.assert_compile(
            select([self.table], distinct=self.table.c.a),
            "SELECT DISTINCT ON (t.a) t.id, t.a, t.b FROM t"
        )

    def test_query_plain(self):
        """orm.Query.distinct() with no arguments."""
        sess = Session()
        self.assert_compile(
            sess.query(self.table).distinct(),
            "SELECT DISTINCT t.id AS t_id, t.a AS t_a, "
            "t.b AS t_b FROM t"
        )

    def test_query_on_columns(self):
        """orm.Query.distinct(col) renders DISTINCT ON."""
        sess = Session()
        self.assert_compile(
            sess.query(self.table).distinct(self.table.c.a),
            "SELECT DISTINCT ON (t.a) t.id AS t_id, t.a AS t_a, "
            "t.b AS t_b FROM t"
        )

    def test_query_on_columns_multi_call(self):
        """chained Query.distinct() calls accumulate ON columns."""
        sess = Session()
        self.assert_compile(
            sess.query(self.table).distinct(self.table.c.a).
            distinct(self.table.c.b),
            "SELECT DISTINCT ON (t.a, t.b) t.id AS t_id, t.a AS t_a, "
            "t.b AS t_b FROM t"
        )

    def test_query_on_columns_subquery(self):
        """DISTINCT ON columns are adapted into a from_self() subquery."""
        sess = Session()

        class Foo(object):
            pass
        mapper(Foo, self.table)
        sess = Session()
        self.assert_compile(
            sess.query(Foo).from_self().distinct(Foo.a, Foo.b),
            "SELECT DISTINCT ON (anon_1.t_a, anon_1.t_b) anon_1.t_id "
            "AS anon_1_t_id, anon_1.t_a AS anon_1_t_a, anon_1.t_b "
            "AS anon_1_t_b FROM (SELECT t.id AS t_id, t.a AS t_a, "
            "t.b AS t_b FROM t) AS anon_1"
        )

    def test_query_distinct_on_aliased(self):
        """DISTINCT ON columns are adapted against an aliased() entity."""
        class Foo(object):
            pass
        mapper(Foo, self.table)
        a1 = aliased(Foo)
        sess = Session()
        self.assert_compile(
            sess.query(a1).distinct(a1.a),
            "SELECT DISTINCT ON (t_1.a) t_1.id AS t_1_id, "
            "t_1.a AS t_1_a, t_1.b AS t_1_b FROM t AS t_1"
        )

    def test_distinct_on_subquery_anon(self):
        """DISTINCT ON works against columns of an anonymous subquery."""
        sq = select([self.table]).alias()
        q = select([self.table.c.id, sq.c.id]).\
            distinct(sq.c.id).\
            where(self.table.c.id == sq.c.id)

        self.assert_compile(
            q,
            "SELECT DISTINCT ON (anon_1.id) t.id, anon_1.id "
            "FROM t, (SELECT t.id AS id, t.a AS a, t.b "
            "AS b FROM t) AS anon_1 WHERE t.id = anon_1.id"
        )

    def test_distinct_on_subquery_named(self):
        """DISTINCT ON works against columns of a named subquery."""
        sq = select([self.table]).alias('sq')
        q = select([self.table.c.id, sq.c.id]).\
            distinct(sq.c.id).\
            where(self.table.c.id == sq.c.id)
        self.assert_compile(
            q,
            "SELECT DISTINCT ON (sq.id) t.id, sq.id "
            "FROM t, (SELECT t.id AS id, t.a AS a, "
            "t.b AS b FROM t) AS sq WHERE t.id = sq.id"
        )
class FullTextSearchTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for full text searching: Column.match() compiles to the
    PG @@ / to_tsquery form, with optional regconfig and explicit
    to_tsvector() expressions.

    """
    __dialect__ = postgresql.dialect()

    def setup(self):
        # bound Table plus a lightweight table() construct; the tests
        # below compile against the latter
        self.table = Table('t', MetaData(),
                           Column('id', Integer, primary_key=True),
                           Column('title', String),
                           Column('body', String),
                           )
        self.table_alt = table('mytable',
                               column('id', Integer),
                               column('title', String(128)),
                               column('body', String(128)))

    def _raise_query(self, q):
        """
        useful for debugging. just do...
        self._raise_query(q)
        """
        c = q.compile(dialect=postgresql.dialect())
        raise ValueError(c)

    def test_match_basic(self):
        """col.match(x) -> col @@ to_tsquery(:param)."""
        s = select([self.table_alt.c.id])\
            .where(self.table_alt.c.title.match('somestring'))
        self.assert_compile(s,
                            'SELECT mytable.id '
                            'FROM mytable '
                            'WHERE mytable.title @@ to_tsquery(%(title_1)s)')

    def test_match_regconfig(self):
        """postgresql_regconfig is rendered as the first to_tsquery arg."""
        s = select([self.table_alt.c.id]).where(
            self.table_alt.c.title.match(
                'somestring',
                postgresql_regconfig='english')
        )
        self.assert_compile(
            s, 'SELECT mytable.id '
            'FROM mytable '
            """WHERE mytable.title @@ to_tsquery('english', %(title_1)s)""")

    def test_match_tsvector(self):
        """match() applied to an explicit to_tsvector() expression."""
        s = select([self.table_alt.c.id]).where(
            func.to_tsvector(self.table_alt.c.title)
            .match('somestring')
        )
        self.assert_compile(
            s, 'SELECT mytable.id '
            'FROM mytable '
            'WHERE to_tsvector(mytable.title) '
            '@@ to_tsquery(%(to_tsvector_1)s)')

    def test_match_tsvectorconfig(self):
        """to_tsvector() with its own regconfig argument."""
        s = select([self.table_alt.c.id]).where(
            func.to_tsvector('english', self.table_alt.c.title)
            .match('somestring')
        )
        self.assert_compile(
            s, 'SELECT mytable.id '
            'FROM mytable '
            'WHERE to_tsvector(%(to_tsvector_1)s, mytable.title) @@ '
            'to_tsquery(%(to_tsvector_2)s)')

    def test_match_tsvectorconfig_regconfig(self):
        """regconfig on both the to_tsvector() and the match() side."""
        s = select([self.table_alt.c.id]).where(
            func.to_tsvector('english', self.table_alt.c.title)
            .match('somestring', postgresql_regconfig='english')
        )
        self.assert_compile(
            s, 'SELECT mytable.id '
            'FROM mytable '
            'WHERE to_tsvector(%(to_tsvector_1)s, mytable.title) @@ '
            """to_tsquery('english', %(to_tsvector_2)s)""")
| mit |
vmware/nsxramlclient | tests/vdnConfig.py | 1 | 5003 | # coding=utf-8
#
# Copyright © 2015 VMware, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
__author__ = 'yfauser'

from tests.config import *
from nsxramlclient.client import NsxClient
import time

# Shared NSX Manager API session used by every test in this module;
# credentials and the RAML spec path come from tests.config (star import).
client_session = NsxClient(nsxraml_file, nsxmanager, nsx_username, nsx_password, debug=True)
def test_segment_pools():
    """Exercise VXLAN segment-ID pool CRUD against the NSX Manager API.

    Network side effects only: lists pools, creates one, updates it,
    reads it back, then deletes it. The sleeps give the manager time to
    settle between write operations.
    """
    ### Test Segment ID Pool Operations
    # Get all configured Segment Pools
    get_segment_resp = client_session.read('vdnSegmentPools')
    client_session.view_response(get_segment_resp)

    # Add a Segment Pool
    segments_create_body = client_session.extract_resource_body_example('vdnSegmentPools', 'create')
    client_session.view_body_dict(segments_create_body)
    segments_create_body['segmentRange']['begin'] = '11002'
    segments_create_body['segmentRange']['end'] = '11003'
    segments_create_body['segmentRange']['name'] = 'legacy'
    create_response = client_session.create('vdnSegmentPools', request_body_dict=segments_create_body)
    client_session.view_response(create_response)
    time.sleep(5)

    # Update the new Segment Pool:
    update_segment_body = client_session.extract_resource_body_example('vdnSegmentPool', 'update')
    update_segment_body['segmentRange']['name'] = 'PythonTest'
    update_segment_body['segmentRange']['end'] = '11005'
    client_session.update('vdnSegmentPool', uri_parameters={'segmentPoolId': create_response['objectId']},
                          request_body_dict=update_segment_body)
    time.sleep(5)

    # Display a specific Segment pool (the new one)
    specific_segement_resp = client_session.read('vdnSegmentPool', uri_parameters={'segmentPoolId':
                                                 create_response['objectId']})
    client_session.view_response(specific_segement_resp)
    time.sleep(5)

    # Delete new Segment Pool
    client_session.delete('vdnSegmentPool', uri_parameters={'segmentPoolId': create_response['objectId']})
def test_mcast_pools():
    """Exercise multicast address range CRUD against the NSX Manager API.

    Network side effects only: creates a range, lists all ranges,
    updates, reads back and finally deletes the created range.
    """
    ### Test Multicast Pool Operations
    # Add a multicast Pool
    mcastpool_create_body = client_session.extract_resource_body_example('vdnMulticastPools', 'create')
    client_session.view_body_dict(mcastpool_create_body)
    mcastpool_create_body['multicastRange']['desc'] = 'Test'
    mcastpool_create_body['multicastRange']['begin'] = '235.0.0.0'
    mcastpool_create_body['multicastRange']['end'] = '235.1.1.1'
    mcastpool_create_body['multicastRange']['name'] = 'legacy'
    create_response = client_session.create('vdnMulticastPools', request_body_dict=mcastpool_create_body)
    client_session.view_response(create_response)

    # Get all configured Multicast Pools
    get_mcast_pools = client_session.read('vdnMulticastPools')
    client_session.view_response(get_mcast_pools)
    time.sleep(5)

    # Update the newly created mcast pool
    mcastpool_update_body = client_session.extract_resource_body_example('vdnMulticastPool', 'update')
    mcastpool_update_body['multicastRange']['end'] = '235.3.1.1'
    mcastpool_update_body['multicastRange']['name'] = 'Python'
    update_response = client_session.update('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId':
                                            create_response['objectId']},
                                            request_body_dict=mcastpool_update_body)
    client_session.view_response(update_response)

    # display a specific Multicast Pool
    get_mcast_pool = client_session.read('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId':
                                         create_response['objectId']})
    client_session.view_response(get_mcast_pool)

    # Delete new mcast pool
    client_session.delete('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId': create_response['objectId']})
#test_segment_pools()
#test_mcast_pools()
| mit |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/whoosh/analysis/intraword.py | 92 | 18991 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import re
from collections import deque
from whoosh.compat import u, text_type
from whoosh.compat import xrange
from whoosh.analysis.filters import Filter
class CompoundWordFilter(Filter):
    """Given a set of words (or any object with a ``__contains__`` method),
    break any tokens in the stream that are composites of words in the word
    set into their individual parts.

    Given the correct set of words, this filter can break apart run-together
    words and trademarks (e.g. "turbosquid", "applescript"). It can also be
    useful for agglutinative languages such as German.

    The ``keep_compound`` argument lets you decide whether to keep the
    compound word in the token stream along with the word segments.

    >>> cwf = CompoundWordFilter(wordset, keep_compound=True)
    >>> analyzer = RegexTokenizer(r"\S+") | cwf
    >>> [t.text for t in analyzer("I do not like greeneggs and ham")]
    ["I", "do", "not", "like", "greeneggs", "green", "eggs", "and", "ham"]
    >>> cwf.keep_compound = False
    >>> [t.text for t in analyzer("I do not like greeneggs and ham")]
    ["I", "do", "not", "like", "green", "eggs", "and", "ham"]
    """

    def __init__(self, wordset, keep_compound=True):
        """
        :param wordset: an object with a ``__contains__`` method, such as a
            set, containing strings to look for inside the tokens.
        :param keep_compound: if True (the default), the original compound
            token will be retained in the stream before the subwords.
        """

        self.wordset = wordset
        self.keep_compound = keep_compound

    def subwords(self, s, memo):
        """Return the list of wordset words that ``s`` decomposes into, or
        None if no complete decomposition exists.

        ``memo`` caches both successful and failed decompositions so each
        distinct suffix is explored at most once per call to ``__call__``.
        """
        if s in self.wordset:
            return [s]
        if s in memo:
            return memo[s]

        for i in xrange(1, len(s)):
            prefix = s[:i]
            if prefix in self.wordset:
                suffix = s[i:]
                suffix_subs = self.subwords(suffix, memo)
                if suffix_subs:
                    result = [prefix] + suffix_subs
                    memo[s] = result
                    return result

        # Fix: memoize failures as well. The original cached only
        # successes, so unsplittable suffixes were re-explored on every
        # decomposition path, giving exponential worst-case behavior.
        memo[s] = None
        return None

    def __call__(self, tokens):
        keep_compound = self.keep_compound
        memo = {}
        subwords = self.subwords

        for t in tokens:
            subs = subwords(t.text, memo)
            if subs:
                if len(subs) > 1 and keep_compound:
                    # emit the original compound token first
                    yield t
                # then one token per subword, reusing the same mutable
                # Token object as is conventional for whoosh filters
                for subword in subs:
                    t.text = subword
                    yield t
            else:
                yield t
class BiWordFilter(Filter):
    """Merges adjacent tokens into "bi-word" tokens, so that for example::

        "the", "sign", "of", "four"

    becomes::

        "the-sign", "sign-of", "of-four"

    This can be used to create fields for pseudo-phrase searching, where if
    all the terms match the document probably contains the phrase, but the
    searching is faster than actually doing a phrase search on individual word
    terms.

    The ``BiWordFilter`` is much faster than using the otherwise equivalent
    ``ShingleFilter(2)``.
    """

    def __init__(self, sep="-"):
        # sep: string inserted between the two merged token texts.
        self.sep = sep

    def __call__(self, tokens):
        sep = self.sep
        prev_text = None
        prev_startchar = None
        prev_pos = None
        atleastone = False
        # Fix: pre-bind so an empty token stream doesn't raise
        # UnboundLocalError at the single-token fallback below.
        token = None

        for token in tokens:
            # Save the original text of this token
            text = token.text

            # Save the original position
            positions = token.positions
            if positions:
                ps = token.pos

            # Save the original start char
            chars = token.chars
            if chars:
                sc = token.startchar

            if prev_text is not None:
                # Use the pos and startchar from the previous token
                if positions:
                    token.pos = prev_pos
                if chars:
                    token.startchar = prev_startchar

                # Join the previous token text and the current token text to
                # form the biword token
                token.text = "".join((prev_text, sep, text))
                yield token
                atleastone = True

            # Save the originals and the new "previous" values
            prev_text = text
            if chars:
                prev_startchar = sc
            if positions:
                prev_pos = ps

        # If no bi-words were emitted, that is, the token stream only had
        # a single token, then emit that single token (and nothing for an
        # empty stream).
        if not atleastone and token is not None:
            yield token
class ShingleFilter(Filter):
    """Merges runs of ``size`` adjacent (non-stopped) tokens into single
    multi-word "shingle" tokens joined by ``sep``, so that for example::

        "better", "a", "witty", "fool", "than", "a", "foolish", "wit"

    with ``ShingleFilter(3, ' ')`` becomes::

        'better a witty', 'a witty fool', 'witty fool than', 'fool than a',
        'than a foolish', 'a foolish wit'

    This can be used to create fields for pseudo-phrase searching, where if
    all the terms match the document probably contains the phrase, but the
    searching is faster than actually doing a phrase search on individual word
    terms.

    For two-word shingles, prefer the functionally equivalent (and faster)
    ``BiWordFilter``.
    """

    def __init__(self, size=2, sep="-"):
        # :param size: number of adjacent tokens merged into each shingle.
        # :param sep: string joining the merged token texts.
        self.size = size
        self.sep = sep

    def __call__(self, tokens):
        window = deque()
        emitted = False

        def emit():
            # Reuse the oldest buffered copy as the shingle token, joining
            # all buffered texts and extending its end char to the newest.
            shingle = window[0]
            shingle.text = self.sep.join([tok.text for tok in window])
            if shingle.chars:
                shingle.endchar = window[-1].endchar
            return shingle

        for token in tokens:
            if token.stopped:
                continue
            window.append(token.copy())
            if len(window) == self.size:
                emitted = True
                yield emit()
                window.popleft()

        # If no shingles were emitted (the stream had fewer than ``size``
        # tokens), emit a single token merging whatever tokens there were.
        if window and not emitted:
            yield emit()
class IntraWordFilter(Filter):
    """Splits words into subwords and performs optional transformations on
    subword groups. This filter is functionally based on yonik's
    WordDelimiterFilter in Solr, but shares no code with it.

    * Split on intra-word delimiters, e.g. `Wi-Fi` -> `Wi`, `Fi`.
    * When splitwords=True, split on case transitions,
      e.g. `PowerShot` -> `Power`, `Shot`.
    * When splitnums=True, split on letter-number transitions,
      e.g. `SD500` -> `SD`, `500`.
    * Leading and trailing delimiter characters are ignored.
    * Trailing possessive "'s" removed from subwords,
      e.g. `O'Neil's` -> `O`, `Neil`.

    The mergewords and mergenums arguments turn on merging of subwords.

    When the merge arguments are false, subwords are not merged.

    * `PowerShot` -> `0`:`Power`, `1`:`Shot` (where `0` and `1` are token
      positions).

    When one or both of the merge arguments are true, consecutive runs of
    alphabetic and/or numeric subwords are merged into an additional token with
    the same position as the last sub-word.

    * `PowerShot` -> `0`:`Power`, `1`:`Shot`, `1`:`PowerShot`

    * `A's+B's&C's` -> `0`:`A`, `1`:`B`, `2`:`C`, `2`:`ABC`

    * `Super-Duper-XL500-42-AutoCoder!` -> `0`:`Super`, `1`:`Duper`, `2`:`XL`,
      `2`:`SuperDuperXL`,
      `3`:`500`, `4`:`42`, `4`:`50042`, `5`:`Auto`, `6`:`Coder`,
      `6`:`AutoCoder`

    When using this filter you should use a tokenizer that only splits on
    whitespace, so the tokenizer does not remove intra-word delimiters before
    this filter can see them, and put this filter before any use of
    LowercaseFilter.

    >>> rt = RegexTokenizer(r"\\S+")
    >>> iwf = IntraWordFilter()
    >>> lcf = LowercaseFilter()
    >>> analyzer = rt | iwf | lcf

    One use for this filter is to help match different written representations
    of a concept. For example, if the source text contained `wi-fi`, you
    probably want `wifi`, `WiFi`, `wi-fi`, etc. to match. One way of doing this
    is to specify mergewords=True and/or mergenums=True in the analyzer used
    for indexing, and mergewords=False / mergenums=False in the analyzer used
    for querying.

    >>> iwf_i = IntraWordFilter(mergewords=True, mergenums=True)
    >>> iwf_q = IntraWordFilter(mergewords=False, mergenums=False)
    >>> iwf = MultiFilter(index=iwf_i, query=iwf_q)
    >>> analyzer = RegexTokenizer(r"\\S+") | iwf | LowercaseFilter()

    (See :class:`MultiFilter`.)
    """

    is_morph = True

    __inittypes__ = dict(delims=text_type, splitwords=bool, splitnums=bool,
                         mergewords=bool, mergenums=bool)

    def __init__(self, delims=u("-_'\"()!@#$%^&*[]{}<>\|;:,./?`~=+"),
                 splitwords=True, splitnums=True,
                 mergewords=False, mergenums=False):
        """
        :param delims: a string of delimiter characters.
        :param splitwords: if True, split at case transitions,
            e.g. `PowerShot` -> `Power`, `Shot`
        :param splitnums: if True, split at letter-number transitions,
            e.g. `SD500` -> `SD`, `500`
        :param mergewords: merge consecutive runs of alphabetic subwords into
            an additional token with the same position as the last subword.
        :param mergenums: merge consecutive runs of numeric subwords into an
            additional token with the same position as the last subword.
        """

        from whoosh.support.unicode import digits, lowercase, uppercase

        self.delims = re.escape(delims)

        # Expression for text between delimiter characters
        self.between = re.compile(u("[^%s]+") % (self.delims,), re.UNICODE)
        # Expression for removing "'s" from the end of sub-words
        dispat = u("(?<=[%s%s])'[Ss](?=$|[%s])") % (lowercase, uppercase,
                                                    self.delims)
        self.possessive = re.compile(dispat, re.UNICODE)

        # Expression for finding case and letter-number transitions
        lower2upper = u("[%s][%s]") % (lowercase, uppercase)
        letter2digit = u("[%s%s][%s]") % (lowercase, uppercase, digits)
        digit2letter = u("[%s][%s%s]") % (digits, lowercase, uppercase)
        if splitwords and splitnums:
            splitpat = u("(%s|%s|%s)") % (lower2upper, letter2digit,
                                          digit2letter)
            self.boundary = re.compile(splitpat, re.UNICODE)
        elif splitwords:
            self.boundary = re.compile(text_type(lower2upper), re.UNICODE)
        elif splitnums:
            numpat = u("(%s|%s)") % (letter2digit, digit2letter)
            self.boundary = re.compile(numpat, re.UNICODE)

        self.splitting = splitwords or splitnums
        self.mergewords = mergewords
        self.mergenums = mergenums

    def __eq__(self, other):
        return other and self.__class__ is other.__class__\
            and self.__dict__ == other.__dict__

    def _split(self, string):
        bound = self.boundary

        # Yields (startchar, endchar) pairs for each indexable substring in
        # the given string, e.g. "WikiWord" -> (0, 4), (4, 8)

        # Whether we're splitting on transitions (case changes, letter -> num,
        # num -> letter, etc.)
        splitting = self.splitting

        # Make a list (dispos, for "dispossessed") of (startchar, endchar)
        # pairs for runs of text between "'s"
        if "'" in string:
            # Split on possessive 's
            dispos = []
            prev = 0
            for match in self.possessive.finditer(string):
                dispos.append((prev, match.start()))
                prev = match.end()

            # If there's leftover text after the last "'s", add it too
            if prev < len(string):
                dispos.append((prev, len(string)))
        else:
            # Shortcut if there's no apostrophe in the string
            dispos = ((0, len(string)),)

        # For each run between 's
        for sc, ec in dispos:
            # Split on boundary characters
            for part_match in self.between.finditer(string, sc, ec):
                part_start = part_match.start()
                part_end = part_match.end()

                if splitting:
                    # The point to start splitting at
                    prev = part_start
                    # Find transitions (e.g. "iW" or "a0")
                    for bmatch in bound.finditer(string, part_start, part_end):
                        # The point in the middle of the transition
                        pivot = bmatch.start() + 1
                        # Yield from the previous match to the transition
                        yield (prev, pivot)
                        # Make the transition the new starting point
                        prev = pivot

                    # If there's leftover text at the end, yield it too
                    if prev < part_end:
                        yield (prev, part_end)
                else:
                    # Not splitting on transitions, just yield the part
                    yield (part_start, part_end)

    def _merge(self, parts):
        mergewords = self.mergewords
        mergenums = self.mergenums

        # Current type (1=alpha, 2=digit)
        last = 0
        # Where to insert a merged term in the original list
        insertat = 0
        # Buffer for parts to merge
        buf = []

        def insert_item(buf, at, newpos):
            # Build a merged (text, pos, startchar, endchar) tuple from the
            # buffered sub-parts and insert it into ``parts`` at index ``at``.
            newtext = "".join(item[0] for item in buf)
            newsc = buf[0][2]  # start char of first item in buffer
            newec = buf[-1][3]  # end char of last item in buffer
            # BUGFIX: honor the ``at`` parameter instead of silently using
            # the enclosing ``insertat`` variable.  The two values coincide
            # for every current call site, so behavior is unchanged, but
            # ignoring an argument was a latent defect.
            parts.insert(at, (newtext, newpos, newsc, newec))

        # Iterate on a copy of the parts list so we can modify the original as
        # we go
        for item in list(parts):
            # item = (text, pos, startchar, endchar)
            text = item[0]
            pos = item[1]

            # Set the type of this part
            if text.isalpha():
                this = 1
            elif text.isdigit():
                this = 2
            else:
                this = None

            # Is this the same type as the previous part?
            if (buf and (this == last == 1 and mergewords)
                or (this == last == 2 and mergenums)):
                # This part is the same type as the previous. Add it to the
                # buffer of parts to merge.
                buf.append(item)
            else:
                # This part is different than the previous.
                if len(buf) > 1:
                    # If the buffer has at least two parts in it, merge them
                    # and add them to the original list of parts.
                    insert_item(buf, insertat, pos - 1)
                    insertat += 1

                # Reset the buffer
                buf = [item]
                last = this

            insertat += 1

        # If there are parts left in the buffer at the end, merge them and add
        # them to the original list.
        if len(buf) > 1:
            insert_item(buf, len(parts), pos)

    def __call__(self, tokens):
        mergewords = self.mergewords
        mergenums = self.mergenums

        # This filter renumbers tokens as it expands them. New position
        # counter.
        newpos = None
        for t in tokens:
            text = t.text

            # If this is the first token we've seen, use it to set the new
            # position counter
            if newpos is None:
                if t.positions:
                    newpos = t.pos
                else:
                    # Token doesn't have positions, just use 0
                    newpos = 0

            if ((text.isalpha() and (text.islower() or text.isupper()))
                or text.isdigit()):
                # Short-circuit the common cases of no delimiters, no case
                # transitions, only digits, etc.
                t.pos = newpos
                yield t
                newpos += 1
            else:
                # Split the token text on delimiters, word and/or number
                # boundaries into a list of (text, pos, startchar, endchar)
                # tuples
                ranges = self._split(text)
                parts = [(text[sc:ec], i + newpos, sc, ec)
                         for i, (sc, ec) in enumerate(ranges)]

                # Did the split yield more than one part?
                if len(parts) > 1:
                    # If the options are set, merge consecutive runs of all-
                    # letters and/or all-numbers.
                    if mergewords or mergenums:
                        self._merge(parts)

                # Yield tokens for the parts
                chars = t.chars
                if chars:
                    base = t.startchar
                for text, pos, startchar, endchar in parts:
                    t.text = text
                    t.pos = pos
                    if t.chars:
                        t.startchar = base + startchar
                        t.endchar = base + endchar
                    yield t

                if parts:
                    # Set the new position counter based on the last part
                    newpos = parts[-1][1] + 1
| apache-2.0 |
waytai/odoo | addons/l10n_pe/__init__.py | 2120 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
boto/botoflow | botoflow/history_events/event_bases.py | 2 | 1634 | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
class EventBase(object):
    """Common base for decoded history events.

    Stores the event id, the event timestamp and the raw attribute
    mapping, and renders all three in ``repr`` for debugging.
    """

    def __init__(self, event_id, datetime, attributes):
        """
        :param event_id: event id
        :type event_id: int
        :param datetime: datetime of event
        :type datetime: datetime.datetime
        :param attributes: event attributes
        :type attributes: dict
        """
        self.id = event_id
        self.datetime = datetime
        self.attributes = attributes

    def __repr__(self):
        return "<%s id=%s, time=%s, attributes=%s>" % (
            type(self).__name__, self.id, self.datetime, self.attributes)
class ActivityEventBase(EventBase):
    """Base class for activity-task history events."""
    pass


class ChildWorkflowEventBase(EventBase):
    """Base class for child-workflow history events."""
    pass


class DecisionEventBase(EventBase):
    """
    To be used as a mixin with events that represent decisions
    """
    pass


class DecisionTaskEventBase(EventBase):
    """Base class for decision-task history events."""
    pass


class ExternalWorkflowEventBase(EventBase):
    """Base class for external-workflow history events."""
    pass


class MarkerEventBase(EventBase):
    """Base class for marker history events."""
    pass


class TimerEventBase(EventBase):
    """Base class for timer history events."""
    pass


class WorkflowEventBase(EventBase):
    """Base class for workflow-execution history events."""
    pass
| apache-2.0 |
bjzhang/xen | tools/python/xen/xend/server/BlktapController.py | 26 | 10719 | # Copyright (c) 2005, XenSource Ltd.
import string, re, os
from xen.xend.server.blkif import BlkifController
from xen.xend.XendLogging import log
from xen.util.xpopen import xPopen3
phantomDev = 0;
phantomId = 0;
blktap1_disk_types = [
'aio',
'sync',
'vmdk',
'ram',
'qcow',
'qcow2',
'ioemu',
]
blktap2_disk_types = [
'aio',
'ram',
'qcow',
'vhd',
'remus',
]
blktap_disk_types = blktap1_disk_types + blktap2_disk_types
def doexec(args, inputtext=None):
    """Execute a subprocess, then return its return code, stdout and stderr.

    :param args: argument list for the child process.
    :param inputtext: optional data written to the child's stdin.
    :returns: (rc, stdout, stderr) where stdout/stderr are the child's
        still-open file-like objects (callers read and close them).
    """
    proc = xPopen3(args, True)
    # PEP 8: compare against None with 'is not', not '!='.
    if inputtext is not None:
        proc.tochild.write(inputtext)
    stdout = proc.fromchild
    stderr = proc.childerr
    rc = proc.wait()
    return (rc, stdout, stderr)
# blktap1 device controller
class BlktapController(BlkifController):
    """Device controller for legacy blktap1 ("tap") virtual block devices.

    Extends BlkifController; for HVM guests it additionally creates a
    "phantom" vbd in dom0 so the device model can access the image.
    """
    def __init__(self, vm):
        BlkifController.__init__(self, vm)

    def frontendRoot(self):
        """@see DevController#frontendRoot"""
        return "%s/device/vbd" % self.vm.getDomainPath()

    def getDeviceDetails(self, config):
        # Build on the generic vbd details, then (for HVM guests) pick a
        # free xvd* name and create a phantom vbd in dom0 backed by the
        # same image.
        (devid, back, front) = BlkifController.getDeviceDetails(self, config)
        phantomDevid = 0
        wrapped = False

        # Best effort: domains without an image section simply get "".
        try:
            imagetype = self.vm.info['image']['type']
        except:
            imagetype = ""

        if imagetype == 'hvm':
            tdevname = back['dev']
            index = ['c', 'd', 'e', 'f', 'g', 'h', 'i', \
                     'j', 'l', 'm', 'n', 'o', 'p']
            # Walk xvd<letter><1..15> names until one has no existing
            # device node; module-level phantomDev/phantomId remember the
            # last position across calls.
            while True:
                global phantomDev
                global phantomId
                import os, stat

                phantomId = phantomId + 1
                if phantomId == 16:
                    if index[phantomDev] == index[-1]:
                        if wrapped:
                            # NOTE(review): VmError is not imported in this
                            # module, so taking this path would itself raise
                            # NameError — TODO confirm the intended import.
                            raise VmError(" No loopback block \
                                devices are available. ")
                        wrapped = True
                        phantomDev = 0
                    else:
                        phantomDev = phantomDev + 1
                    phantomId = 1
                devname = 'xvd%s%d' % (index[phantomDev], phantomId)
                try:
                    info = os.stat('/dev/%s' % devname)
                except:
                    # No such device node yet -> the name is free.
                    break

            vbd = { 'mode': 'w', 'device': devname }
            fn = 'tap:%s' % back['params']

            # recurse ... by creating the vbd, then fallthrough
            # and finish creating the original device

            from xen.xend import XendDomain
            dom0 = XendDomain.instance().privilegedDomain()
            phantomDevid = dom0.create_phantom_vbd_with_vdi(vbd, fn)
            # we need to wait for this device at a higher level
            # the vbd that gets created will have a link to us
            # and will let them do it there

        # add a hook to point to the phantom device,
        # root path is always the same (dom0 tap)
        if phantomDevid != 0:
            front['phantom_vbd'] = '/local/domain/0/backend/tap/0/%s' \
                                   % str(phantomDevid)

        return (devid, back, front)
class Blktap2Controller(BlktapController):
    """Device controller for blktap2 ("tap2") devices.

    blktap2 devices are exposed to the guest through blkback on top of a
    dom0 /dev/xen/blktap-2/tapdev* node managed via tap-ctl; unsupported
    disk types fall back to the legacy blktap1 controller.
    """

    def __init__(self, vm):
        BlktapController.__init__(self, vm)

    def backendPath(self, backdom, devid):
        """Backend xenstore path: blktap2 devices use the 'vbd' backend
        class (blkback); legacy blktap uses 'tap'."""
        if self.deviceClass == 'tap2':
            deviceClass = 'vbd'
        else:
            deviceClass = 'tap'
        return "%s/backend/%s/%s/%d" % (backdom.getDomainPath(),
                                        deviceClass,
                                        self.vm.getDomid(), devid)

    def getDeviceDetails(self, config):
        (devid, back, front) = BlktapController.getDeviceDetails(self, config)
        if self.deviceClass == 'tap2':
            # since blktap2 uses blkback as a backend the 'params' field
            # contains the path to the blktap2 device
            # (/dev/xen/blktap-2/tapdev*). As well, we need to store the
            # params used to create the blktap2 device
            # (tap:tapdisk:<driver>:/<image-path>)
            tapdisk_uname = config.get('tapdisk_uname', '')
            (_, tapdisk_params) = string.split(tapdisk_uname, ':', 1)
            back['tapdisk-params'] = tapdisk_params

        return (devid, back, front)

    def getDeviceConfiguration(self, devid, transaction = None):
        # this is a blktap2 device, so we need to overwrite the 'params'
        # field with the actual blktap2 parameters. (the vbd parameters are
        # of little use to us)
        config = BlktapController.getDeviceConfiguration(self, devid,
                                                         transaction)
        if transaction is None:
            tapdisk_params = self.readBackend(devid, 'tapdisk-params')
        else:
            tapdisk_params = self.readBackendTxn(transaction, devid,
                                                 'tapdisk-params')
        if tapdisk_params:
            config['uname'] = 'tap:' + tapdisk_params

        return config

    def createDevice(self, config):
        """Create a tapdisk device for ``config['uname']`` and wire a
        blkback vbd on top of it; delegates deprecated disk types to the
        legacy blktap1 controller."""
        uname = config.get('uname', '')
        try:
            # Four-field form: "tap:<subtype>:<driver>:<path>".
            (typ, subtyp, params, file) = string.split(uname, ':', 3)
            if subtyp not in ('tapdisk', 'ioemu'):
                raise ValueError('invalid subtype')
        except ValueError:
            # Fewer than four fields (or unknown subtype): fall back to the
            # three-field form "tap:<driver>:<path>".  (Previously a bare
            # 'except:' that also hid unrelated errors.)
            (typ, params, file) = string.split(uname, ':', 2)
            subtyp = 'tapdisk'

        # BUGFIX: the original test was "typ in ('tap')", which is a
        # substring test against the string 'tap' (the parentheses do not
        # make a tuple) and therefore also matched '', 't', 'a', 'p', 'ta'
        # and 'ap'.  Compare for equality instead.
        if typ == 'tap':
            if subtyp in ('tapdisk', 'ioemu'):
                if params not in blktap2_disk_types or \
                        TapdiskController.check():
                    # pass this device off to BlktapController
                    log.warn('WARNING: using deprecated blktap module')
                    self.deviceClass = 'tap'
                    devid = BlktapController.createDevice(self, config)
                    self.deviceClass = 'tap2'
                    return devid

        device = TapdiskController.create(params, file)

        # modify the configuration to create a blkback for the underlying
        # blktap2 device. Note: we need to preserve the original tapdisk
        # uname (it is used during save/restore and for managed domains).
        config.update({'tapdisk_uname' : uname})
        config.update({'uname' : 'phy:' + device.rstrip()})

        devid = BlkifController.createDevice(self, config)
        config.update({'uname' : uname})
        config.pop('tapdisk_uname')

        return devid

    # This function is called from a thread when the
    # domain is detached from the disk.
    def finishDeviceCleanup(self, backpath, path):
        """Perform any device specific cleanup

        @backpath backend xenstore path.
        @path frontend device path
        """

        # Figure out what we're going to wait on.
        self.waitForBackend_destroy(backpath)

        TapdiskController.destroy(path)
class TapdiskException(Exception):
    """Raised when a 'tap-ctl' invocation exits with a non-zero status."""
    pass
class TapdiskController(object):
    '''class which encapsulates all tapdisk control operations'''

    # External control binary and the device-node prefix it manages.
    TAP_CTL = 'tap-ctl'
    TAP_DEV = '/dev/xen/blktap-2/tapdev'

    class Tapdisk(object):
        # Plain record describing one row of 'tap-ctl list' output.
        def __init__(self, pid=None, minor=-1, state=None,
                     dtype='', image=None, device=None):
            self.pid = pid
            self.minor = minor
            self.state = state
            self.dtype = dtype
            self.image = image
            self.device = device

        def __str__(self):
            return 'image=%s pid=%s minor=%s state=%s type=%s device=%s' \
                % (self.image, self.pid, self.minor, self.state, self.dtype,
                   self.device)

    @staticmethod
    def exc(*args):
        """Run 'tap-ctl <args>' and return its stripped stdout; raise
        TapdiskException if the command exits non-zero."""
        rc, stdout, stderr = doexec([TapdiskController.TAP_CTL] + list(args))
        out, err = stdout.read().strip(), stderr.read().strip()
        stdout.close()
        stderr.close()
        if rc:
            raise TapdiskException('%s failed (%s %s %s)' % \
                                   (args, rc, out, err))
        return out

    @staticmethod
    def check():
        """Return 0 if the blktap2 userspace tools respond to
        'tap-ctl check', -1 otherwise (Python 2 except syntax)."""
        try:
            TapdiskController.exc('check')
            return 0
        except Exception, e:
            log.warn("tapdisk2 check failed: %s" % e)
            return -1

    @staticmethod
    def list():
        """Parse 'tap-ctl list' output into a list of Tapdisk records."""
        tapdisks = []

        _list = TapdiskController.exc('list')
        if not _list: return []

        for line in _list.splitlines():
            tapdisk = TapdiskController.Tapdisk()

            # Since 'tap-ctl list' does not escape blanks in the path,
            # hard-code the current format using 4 pairs to prevent
            # splitting the path
            for pair in line.split(None, 3):
                key, value = pair.split('=', 1)
                if key == 'pid':
                    tapdisk.pid = value
                elif key == 'minor':
                    tapdisk.minor = int(value)
                    if tapdisk.minor >= 0:
                        # Derive the device node from the minor number.
                        tapdisk.device = '%s%s' % \
                            (TapdiskController.TAP_DEV, tapdisk.minor)
                elif key == 'state':
                    tapdisk.state = value
                elif key == 'args' and value.find(':') != -1:
                    tapdisk.dtype, tapdisk.image = value.split(':', 1)

            tapdisks.append(tapdisk)

        return tapdisks

    @staticmethod
    def fromDevice(device):
        """Return the Tapdisk record backing a tapdev node, or None if the
        node is not a tapdev or no unique match is found."""
        if device.startswith(TapdiskController.TAP_DEV):
            minor = os.minor(os.stat(device).st_rdev)

            tapdisks = filter(lambda x: x.minor == minor,
                              TapdiskController.list())

            if len(tapdisks) == 1:
                return tapdisks[0]

        return None

    @staticmethod
    def create(dtype, image):
        """Create a tapdisk for '<dtype>:<image>'; returns the new device
        path printed by tap-ctl."""
        return TapdiskController.exc('create', '-a%s:%s' % (dtype, image))

    @staticmethod
    def destroy(device):
        """Destroy the tapdisk behind ``device`` (or just free its minor if
        no process is attached)."""
        tapdisk = TapdiskController.fromDevice(device)
        if tapdisk:
            if tapdisk.pid:
                TapdiskController.exc('destroy',
                                      '-p%s' % tapdisk.pid,
                                      '-m%s' % tapdisk.minor)
            else:
                TapdiskController.exc('free', '-m%s' % tapdisk.minor)

    @staticmethod
    def pause(device):
        """Pause I/O on a running tapdisk (no-op if not found/running)."""
        tapdisk = TapdiskController.fromDevice(device)
        if tapdisk and tapdisk.pid:
            TapdiskController.exc('pause',
                                  '-p%s' % tapdisk.pid,
                                  '-m%s' % tapdisk.minor)

    @staticmethod
    def unpause(device):
        """Resume I/O on a paused tapdisk (no-op if not found/running)."""
        tapdisk = TapdiskController.fromDevice(device)
        if tapdisk and tapdisk.pid:
            TapdiskController.exc('unpause',
                                  '-p%s' % tapdisk.pid,
                                  '-m%s' % tapdisk.minor)
| gpl-2.0 |
SuperTaiyaki/paifu-tools | paifu.py | 1 | 3158 | import sys
import tenhou
# Build the list of tile image paths, indexed by tile *type*:
# 0-8 man (characters), 9-17 pin (circles), 18-26 sou (bamboo),
# 27-34 honor tiles.
tilelist = []
for x in range(0, 9):
#	tilelist.append(Image(source="%sm.gif" % (x+1)))
	tilelist.append("images/%sm.gif" % (x+1))
for x in range(0, 9):
#	tilelist.append(Image(source="%sp.gif" % (x+1)))
	tilelist.append("images/%sp.gif" % (x+1))
for x in range(0, 9):
#	tilelist.append(Image(source="%ss.gif" % (x+1)))
	tilelist.append("images/%ss.gif" % (x+1))
for x in range(0, 8):
#	tilelist.append(Image(source="%sz.gif" % (x+1)))
	tilelist.append("images/%sz.gif" % (x+1))

img_arrow = "images/arrow.gif"
img_blank = "images/blank.gif"

# Sentinel "tile" values used in the draw/discard grids below.
TILE_ARROW = -2   # tsumogiri marker (discarded the tile just drawn)
TILE_BLANK = -1   # padding so grid columns stay aligned
def tile_image(tile):
    """Return the image path for a tile id, or a special image for the
    TILE_ARROW / TILE_BLANK sentinels.

    Tile ids number each physical tile individually; integer-dividing by 4
    collapses the four copies of each tile type onto one image.
    """
    # TODO(review): red 5s ("4, 13, 22?" per the original note) are not
    # given distinct images yet.
    if tile == TILE_ARROW:
        return img_arrow
    elif tile == TILE_BLANK:
        return img_blank
    else:
        # BUGFIX: use floor division so the index stays an int on both
        # Python 2 and Python 3 ('/' yields a float on Python 3).
        return tilelist[tile // 4]
# Don't mix in all the debug noise from the tenhou module
f = open("paifu.html", "w")
def output(x):
f.write(x)
iter = tenhou.run_log(sys.stdin)
iter.next() # Start the game
game = iter.next()
draws = [[], [], [], []]
discards = [[], [], [], []]
events = [[], [], [], []]
initial_hands = [list(player.hand) for player in game.players]
player = -1
if game.event != 'draw':
print "ERROR: First event isn't a draw! NFI what's going on. Event: %s" % game.event
exit
initial_hands[game.player].append(game.tile)
draws[game.player].append(TILE_BLANK)
player = game.player
# Wonder if there's a cleaner way to do this...
map(lambda x: x.sort(), initial_hands)
def fill_blanks(new_player):
    """Advance the global turn pointer to ``new_player``, appending a blank
    tile to the draw and discard rows of every skipped player so the grid
    columns stay aligned (used when a call skips over players)."""
    global player
    player = (player + 1) % 4
    while player != new_player:
        print "Stuffing blank into player %s" % player
        draws[player].append(TILE_BLANK)
        discards[player].append(TILE_BLANK)
        player = (player + 1) % 4
# Main loop goes here
# Waiting for 'deal' doesn't work - there needs to be a trigger on agari, to
# capture finishing hands
while game.event != 'deal':
    game = iter.next()
    print "Player: %s" % player
    if game.event == 'draw':
        draws[game.player].append(game.tile)
        player = game.player
    elif game.event == 'discard':
        discards[game.player].append(game.tile)
        # Discarding the tile just drawn (tsumogiri) is shown as an arrow.
        if discards[game.player][-1] == draws[game.player][-1]:
            draws[game.player][-1] = TILE_ARROW
        player = game.player
    elif game.event == 'pon' or game.event == 'chi' or game.event == 'kan':
        print "Previous player: %s" % player
        print "Event player: %s" % game.player
        # Pad the players skipped by the call so columns stay aligned.
        fill_blanks(game.player)
        player = game.player
        # The called tile is attached to the caller's draw row.
        draws[game.player].append(game.tile)
        print game.tile
        print ("Tile %s attached to player %s's draws" %
                (tenhou.tile_decode(game.tile), game.player))

# Emit the HTML table: one row group per player with the initial hand,
# draws and discards rendered as tile images.
output("""<html>
<head></head>
<body>
<table>""")
for player in range(0,4):
    output("<tr>")
    output("<td>Initial hand:</td><td> ")
    for tile in initial_hands[player]:
        output('<img src="%s" />\n' % tile_image(tile))
    output("</td></tr><td>Draws: </td><td>")
    for tile in draws[player]:
        output('<img src="%s" />\n' % tile_image(tile))
    output("</td></tr><tr><td>Discards:</td><td>")
    for tile in discards[player]:
        output('<img src="%s" />\n' % tile_image(tile))
    output("</td></tr><tr><td colspan='2'><hr /></td></tr>\n")

# NOTE(review): the output file 'f' is never explicitly closed; this
# relies on interpreter exit to flush the buffer.
output("""</table></body></html>""")
| gpl-3.0 |
gdkar/pyglet | pyglet/lib.py | 20 | 12493 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Functions for loading dynamic libraries.
These extend and correct ctypes functions.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os
import re
import sys
import ctypes
import ctypes.util
import pyglet
# Cached debug flags, read once at import time.
_debug_lib = pyglet.options['debug_lib']
_debug_trace = pyglet.options['debug_trace']

# When enabled, shared libraries next to the running script (and in its
# ./lib subdirectory) are searched before the system locations.
if pyglet.options['search_local_libs']:
    script_path = pyglet.resource.get_script_home()
    _local_lib_paths = [script_path, os.path.join(script_path, 'lib'),]
else:
    _local_lib_paths = None
class _TraceFunction(object):
    """Transparent proxy around a loaded library function.

    ``str()`` yields the wrapped function's name; calls and all attribute
    reads/writes are forwarded to the wrapped function.
    """

    def __init__(self, func):
        # Bypass our own __setattr__ so '_func' lands in the instance
        # dict instead of being forwarded to the (not yet set) function.
        object.__setattr__(self, '_func', func)

    def __str__(self):
        return self._func.__name__

    def __call__(self, *args, **kwargs):
        return self._func(*args, **kwargs)

    def __getattr__(self, name):
        return getattr(self._func, name)

    def __setattr__(self, name, value):
        setattr(self._func, name, value)
class _TraceLibrary(object):
    """Wraps a loaded ctypes library so every function retrieved from it is
    returned as a _TraceFunction (enabled by pyglet.options['debug_trace'])."""
    def __init__(self, library):
        self._library = library
        print library

    def __getattr__(self, name):
        # Forward the lookup to the real library and wrap the result.
        func = getattr(self._library, name)
        f = _TraceFunction(func)
        return f
class LibraryLoader(object):
    """Generic dynamic-library loader; platform subclasses refine the
    search strategy."""

    # Substrings of the OSError message that indicate a plain "library not
    # found" (as opposed to a genuine load failure) on each platform.
    darwin_not_found_error = "image not found"
    linux_not_found_error = "No such file or directory"

    def load_library(self, *names, **kwargs):
        '''Find and load a library.

        More than one name can be specified, they will be tried in order.
        Platform-specific library names (given as kwargs) are tried first.

        Raises ImportError if library is not found.
        '''
        if 'framework' in kwargs and self.platform == 'darwin':
            return self.load_framework(kwargs['framework'])

        if not names:
            raise ImportError("No library name specified")

        platform_names = kwargs.get(self.platform, [])
        # Python 2: accept a single (byte or unicode) string as shorthand
        # for a one-element list.
        if type(platform_names) in (str, unicode):
            platform_names = [platform_names]
        elif type(platform_names) is tuple:
            platform_names = list(platform_names)

        if self.platform.startswith('linux'):
            for name in names:
                libname = self.find_library(name)
                platform_names.append(libname or 'lib%s.so' % name)

        platform_names.extend(names)
        for name in platform_names:
            try:
                lib = ctypes.cdll.LoadLibrary(name)
                if _debug_lib:
                    print name
                if _debug_trace:
                    lib = _TraceLibrary(lib)
                return lib
            except OSError, o:
                # Re-raise anything that is not a plain "not found"
                # (win32 error 126 = module not found).
                if ((self.platform == "win32" and o.winerror != 126) or
                    (self.platform.startswith("linux") and
                     self.linux_not_found_error not in o.args[0]) or
                    (self.platform == "darwin" and
                     self.darwin_not_found_error not in o.args[0])):
                    print "Unexpected error loading library %s: %s" % (name, str(o))
                    raise

                # Fall back to an explicit filesystem search.
                path = self.find_library(name)
                if path:
                    try:
                        lib = ctypes.cdll.LoadLibrary(path)
                        if _debug_lib:
                            print path
                        if _debug_trace:
                            lib = _TraceLibrary(lib)
                        return lib
                    except OSError:
                        pass
        raise ImportError('Library "%s" not found.' % names[0])

    find_library = lambda self, name: ctypes.util.find_library(name)

    platform = pyglet.compat_platform
    # this is only for library loading, don't include it in pyglet.platform
    if platform == 'cygwin':
        platform = 'win32'

    def load_framework(self, path):
        # Overridden by MachOLibraryLoader; frameworks exist only on OS X.
        raise RuntimeError("Can't load framework on this platform.")
class MachOLibraryLoader(LibraryLoader):
    """Library loader implementing the Mach-O (OS X) dylib and framework
    search rules."""

    def __init__(self):
        # Seed the search paths from the environment; script-local library
        # directories get first priority when enabled.
        if 'LD_LIBRARY_PATH' in os.environ:
            self.ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':')
        else:
            self.ld_library_path = []

        if _local_lib_paths:
            # search first for local libs
            self.ld_library_path = _local_lib_paths + self.ld_library_path
            os.environ['LD_LIBRARY_PATH'] = ':'.join(self.ld_library_path)

        if 'DYLD_LIBRARY_PATH' in os.environ:
            self.dyld_library_path = os.environ['DYLD_LIBRARY_PATH'].split(':')
        else:
            self.dyld_library_path = []

        if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
            self.dyld_fallback_library_path = \
                os.environ['DYLD_FALLBACK_LIBRARY_PATH'].split(':')
        else:
            self.dyld_fallback_library_path = [
                os.path.expanduser('~/lib'),
                '/usr/local/lib',
                '/usr/lib']

    def find_library(self, path):
        '''Implements the dylib search as specified in Apple documentation:

        http://developer.apple.com/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryUsageGuidelines.html

        Before commencing the standard search, the method first checks
        the bundle's ``Frameworks`` directory if the application is running
        within a bundle (OS X .app).
        '''

        libname = os.path.basename(path)
        search_path = []

        # Bare names get the conventional dylib filename.
        if '.' not in libname:
            libname = 'lib' + libname + '.dylib'

        # py2app support
        if (hasattr(sys, 'frozen') and sys.frozen == 'macosx_app' and
            'RESOURCEPATH' in os.environ):
            search_path.append(os.path.join(
                os.environ['RESOURCEPATH'],
                '..',
                'Frameworks',
                libname))

        # pyinstaller.py sets sys.frozen to True, and puts dylibs in
        # Contents/MacOS, which path pyinstaller puts in sys._MEIPASS
        if (hasattr(sys, 'frozen') and hasattr(sys, '_MEIPASS') and
            sys.frozen == True and pyglet.compat_platform == 'darwin'):
            search_path.append(os.path.join(sys._MEIPASS, libname))

        # Names containing '/' consult DYLD_LIBRARY_PATH, the path itself,
        # then the fallback path; bare names also try LD_LIBRARY_PATH first.
        if '/' in path:
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_library_path])
            search_path.append(path)
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_fallback_library_path])
        else:
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.ld_library_path])
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_library_path])
            search_path.append(path)
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_fallback_library_path])

        for path in search_path:
            if os.path.exists(path):
                return path

        return None

    def find_framework(self, path):
        '''Implement runtime framework search as described by:

        http://developer.apple.com/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/FrameworkBinding.html
        '''

        # e.g. path == '/System/Library/Frameworks/OpenGL.framework'
        #      name == 'OpenGL'
        # return '/System/Library/Frameworks/OpenGL.framework/OpenGL'
        name = os.path.splitext(os.path.split(path)[1])[0]

        realpath = os.path.join(path, name)
        if os.path.exists(realpath):
            return realpath

        for dir in ('/Library/Frameworks',
                    '/System/Library/Frameworks'):
            realpath = os.path.join(dir, '%s.framework' % name, name)
            if os.path.exists(realpath):
                return realpath

        return None

    def load_framework(self, path):
        # Resolve the framework's binary and load it via ctypes.
        realpath = self.find_framework(path)
        if realpath:
            lib = ctypes.cdll.LoadLibrary(realpath)
            if _debug_lib:
                print realpath
            if _debug_trace:
                lib = _TraceLibrary(lib)
            return lib

        raise ImportError("Can't find framework %s." % path)
class LinuxLibraryLoader(LibraryLoader):
    """Library loader emulating the ld.so search rules on Linux, with an
    optional cache of script-local libraries searched first."""

    _ld_so_cache = None
    _local_libs_cache = None

    def _find_libs(self, directories):
        """Scan ``directories`` and map both filenames ('libfoo.so.1') and
        bare library names ('foo') to the first matching path found."""
        cache = {}
        pattern = re.compile('lib(.*)\.so(?:$|\.)')
        for directory in directories:
            try:
                entries = os.listdir(directory)
            except OSError:
                continue
            for entry in entries:
                match = pattern.match(entry)
                if match is None:
                    continue
                full_path = os.path.join(directory, entry)
                # First hit wins for both the filename and the bare name.
                cache.setdefault(entry, full_path)
                cache.setdefault(match.group(1), full_path)
        return cache

    def _create_ld_so_cache(self):
        # Recreate the search path followed by ld.so: LD_LIBRARY_PATH,
        # then the entries of /etc/ld.so.conf, then /lib and /usr/lib.
        # This is slow to build and not fully accurate (ld.so.cache is
        # ignored, and DT_RPATH/DT_RUNPATH are assumed absent); it is only
        # a fallback for distros without /sbin/ldconfig.
        directories = []
        env_path = os.environ.get('LD_LIBRARY_PATH')
        if env_path is not None:
            directories.extend(env_path.split(':'))

        try:
            with open('/etc/ld.so.conf') as fid:
                directories.extend(line.strip() for line in fid)
        except IOError:
            pass

        directories.extend(['/lib', '/usr/lib'])

        self._ld_so_cache = self._find_libs(directories)

    def find_library(self, path):
        # Script-local libraries take precedence when enabled.
        if _local_lib_paths:
            if not self._local_libs_cache:
                self._local_libs_cache = self._find_libs(_local_lib_paths)
            if path in self._local_libs_cache:
                return self._local_libs_cache[path]

        # ctypes tries ldconfig, gcc and objdump.  If none of those are
        # present, fall back to our own emulation of the ld-linux.so
        # search path as described in its man page.
        found = ctypes.util.find_library(path)
        if found:
            return found

        if self._ld_so_cache is None:
            self._create_ld_so_cache()

        return self._ld_so_cache.get(path)
if pyglet.compat_platform == 'darwin':
loader = MachOLibraryLoader()
elif pyglet.compat_platform.startswith('linux'):
loader = LinuxLibraryLoader()
else:
loader = LibraryLoader()
load_library = loader.load_library
| bsd-3-clause |
lpostema/gb | mpg.py | 1 | 5041 | import xml.etree.ElementTree as ET
import os, shutil
import datetime
import openpyxl
tree = ET.parse('Meeting Plan Generator.xml')
root = tree.getroot()
from PyPDF2.PyPDF2.pdf import PdfFileReader
while 1:
today = datetime.date.today()
daysToNextWednesday = datetime.timedelta((2 - datetime.date.weekday(today)) % 7)
nextWednesday = today + daysToNextWednesday
dateString = input("What date do you want to create a plan for? [YYYY-MM-DD format] (Press Enter for " + str(nextWednesday) + ")")
if dateString == "":
break
try:
nextWednesday = datetime.datetime.strptime(dateString, '%Y-%m-%d')
except ValueError:
print("Invalid date. Please use YYYY-MM-DD format.")
continue
break
nextWednesdayMonth = str(nextWednesday.month) if (nextWednesday.month >= 10) else '0' + str(nextWednesday.month)
nextWednesdayDay = str(nextWednesday.day) if (nextWednesday.day >=10) else '0' + str(nextWednesday.day)
wednesdaysDate = str(nextWednesday.year) + '-' + nextWednesdayMonth + '-' + nextWednesdayDay
lessonFile = open('..\\2015-16 Gopher Buddies Bible Study.pdf', 'rb')
lessonPDF = PdfFileReader(lessonFile)
""" TOCpage = lessonPDF.getPage(3)
TOCstring = TOCpage.extractText()
print(TOCstring)"""
shutil.copyfile('Meeting Planner.xlsx', wednesdaysDate + '\Meeting Planner.xlsx')
yearwb = openpyxl.load_workbook('..\\2015 Lesson Plan.xlsx', data_only=True )
yearwb_formula = openpyxl.load_workbook('..\\2015 Lesson Plan.xlsx', data_only=False)
calendarws = yearwb.get_sheet_by_name("Calendar")
calendarws_formula = yearwb_formula.get_sheet_by_name("Calendar")
versesws = yearwb.get_sheet_by_name("Verses")
meetingDates = tuple(calendarws.iter_rows('A2:A39'))
for meetingDate in meetingDates:
if meetingDate[0].value.date() == nextWednesday:
break
meetingDate = meetingDate[0]
nextWednesdayRow = meetingDate.row
nextWednesdayLesson = calendarws[('C' + str(nextWednesdayRow))].value
#find lesson in pdf
for i in range(15, 363):
pdfpage = lessonPDF.getPage(i)
pdfstring = pdfpage.extractText()
if nextWednesdayLesson in pdfstring:
lessonStartPage = i+1
break
#find range of pages
pdfstring = pdfstring[:pdfstring.rfind('\n')] #string page number off
pdfLessonString = pdfstring[pdfstring.rfind('\n'):]
for i in range(lessonStartPage+1, 363):
pdfpage = lessonPDF.getPage(i)
pdfstring = pdfpage.extractText()
if pdfLessonString not in pdfstring:
lessonEndPage = i
break
for plugin in root.findall('plugin'):
for filename in plugin.findall('filelist'):
for file in filename.iter():
if 'name' in file.attrib.keys():
startIndex = file.attrib['name'].find('change-date')
if startIndex != -1:
newString = file.attrib['name'][:startIndex]
newString += wednesdaysDate
newString += '\\Meeting Planner.pdf'
file.set('name', newString)
if 'pageselection' in file.attrib.keys():
startIndex = file.attrib['pageselection'].find('change-page')
if startIndex != -1:
newString = str(lessonStartPage) + '-' + str(lessonEndPage)
file.set('pageselection', newString)
for destination in plugin.findall('destination'):
#print(destination.tag, destination.attrib)
if 'value' in destination.attrib.keys():
startIndex = destination.attrib['value'].find('change-date')
if startIndex != -1:
newString = destination.attrib['value'][:startIndex]
newString += wednesdaysDate
newString += '\\Full Meeting Plan.pdf'
destination.set('value', newString)
try:
os.mkdir(wednesdaysDate)
#print("Directory does not exist yet.")
except FileExistsError:
print("Directory already exists.")
tree.write(wednesdaysDate + '\Meeting Plan Generator 2.xml')
plannerwb = openpyxl.load_workbook(wednesdaysDate + '\\Meeting Planner.xlsx')
meetingws = plannerwb.get_sheet_by_name('Meeting Overview')
meetingws['E10'] = nextWednesdayLesson
meetingws['G1'] = nextWednesday
# find theme
nextWednesdayThemeRow = nextWednesdayRow
nextWednesdayTheme = calendarws['D' + str(nextWednesdayThemeRow)].value
while(nextWednesdayTheme == None):
nextWednesdayThemeRow -= 1
nextWednesdayTheme = calendarws['D' + str(nextWednesdayThemeRow)].value
# put verse in cell 'c7'
verseRow = calendarws_formula['E' + str(nextWednesdayThemeRow)].value[(calendarws_formula['E' + str(nextWednesdayThemeRow)].value).index('B')+1:]
meetingws['C7'] = versesws['E' + verseRow].value
# put verse song in cell 'd8'
meetingws['D8'] = versesws['D' + verseRow].value
# make number format different for date
meetingws['G1'].number_format = '[$-409]mmmm\\ d\\,\\ yyyy;@'
lessonFile.close()
try:
plannerwb.save(wednesdaysDate + '\\Meeting Planner2.xlsx')
except:
print("Please close the meeting worksheet. ")
| mit |
yl565/statsmodels | statsmodels/base/optimizer.py | 12 | 23025 | """
Functions that are general enough to use for any model fitting. The idea is
to untie these from LikelihoodModel so that they may be re-used generally.
"""
from __future__ import print_function
import distutils.version
from scipy import __version__ as scipy_version
import numpy as np
from scipy import optimize
def _check_method(method, methods):
if method not in methods:
message = "Unknown fit method %s" % method
raise ValueError(message)
class Optimizer(object):
def _fit(self, objective, gradient, start_params, fargs, kwargs,
hessian=None, method='newton', maxiter=100, full_output=True,
disp=True, callback=None, retall=False):
"""
Fit function for any model with an objective function.
Parameters
----------
start_params : array-like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
method : str {'newton','nm','bfgs','powell','cg','ncg','basinhopping'}
Method can be 'newton' for Newton-Raphson, 'nm' for Nelder-Mead,
'bfgs' for Broyden-Fletcher-Goldfarb-Shanno, 'powell' for modified
Powell's method, 'cg' for conjugate gradient, 'ncg' for Newton-
conjugate gradient or 'basinhopping' for global basin-hopping
solver, if available. `method` determines which solver from
scipy.optimize is used. The explicit arguments in `fit` are passed
to the solver, with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports..
maxiter : int
The maximum number of iterations to perform.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool
Set to True to print convergence messages.
fargs : tuple
Extra arguments passed to the likelihood function, i.e.,
loglike(x,*args)
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
Returns
-------
xopt : array
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
optim_settings : dict
A dictionary that contains the parameters passed to the solver.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for the solvers (available in Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
Relative error in loglike(params) for acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : integer
The number of basin hopping iterations.
niter_success : integer
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : integer
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
"""
#TODO: generalize the regularization stuff
# Extract kwargs specific to fit_regularized calling fit
extra_fit_funcs = kwargs.setdefault('extra_fit_funcs', dict())
methods = ['newton', 'nm', 'bfgs', 'lbfgs', 'powell', 'cg', 'ncg',
'basinhopping']
methods += extra_fit_funcs.keys()
method = method.lower()
_check_method(method, methods)
fit_funcs = {
'newton': _fit_newton,
'nm': _fit_nm, # Nelder-Mead
'bfgs': _fit_bfgs,
'lbfgs': _fit_lbfgs,
'cg': _fit_cg,
'ncg': _fit_ncg,
'powell': _fit_powell,
'basinhopping': _fit_basinhopping,
}
#NOTE: fit_regularized checks the methods for these but it should be
# moved up probably
if extra_fit_funcs:
fit_funcs.update(extra_fit_funcs)
func = fit_funcs[method]
xopt, retvals = func(objective, gradient, start_params, fargs, kwargs,
disp=disp, maxiter=maxiter, callback=callback,
retall=retall, full_output=full_output,
hess=hessian)
optim_settings = {'optimizer': method, 'start_params': start_params,
'maxiter': maxiter, 'full_output': full_output,
'disp': disp, 'fargs': fargs, 'callback': callback,
'retall': retall}
optim_settings.update(kwargs)
# set as attributes or return?
return xopt, retvals, optim_settings
def _fit_constrained(self, params):
"""
TODO: how to add constraints?
Something like
sm.add_constraint(Model, func)
or
model_instance.add_constraint(func)
model_instance.add_constraint("x1 + x2 = 2")
result = model_instance.fit()
"""
pass
def _fit_regularized(self, params):
#TODO: code won't necessarily be general here. 3 options.
# 1) setup for scipy.optimize.fmin_sqlsqp
# 2) setup for cvxopt
# 3) setup for openopt
pass
########################################
# Helper functions to fit
def _fit_newton(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None, ridge_factor=1e-10):
tol = kwargs.setdefault('tol', 1e-8)
iterations = 0
oldparams = np.inf
newparams = np.asarray(start_params)
if retall:
history = [oldparams, newparams]
while (iterations < maxiter and np.any(np.abs(newparams -
oldparams) > tol)):
H = np.asarray(hess(newparams))
# regularize Hessian, not clear what ridge factor should be
# keyword option with absolute default 1e-10, see #1847
if not np.all(ridge_factor == 0):
H[np.diag_indices(H.shape[0])] += ridge_factor
oldparams = newparams
newparams = oldparams - np.dot(np.linalg.inv(H),
score(oldparams))
if retall:
history.append(newparams)
if callback is not None:
callback(newparams)
iterations += 1
fval = f(newparams, *fargs) # this is the negative likelihood
if iterations == maxiter:
warnflag = 1
if disp:
print("Warning: Maximum number of iterations has been "
"exceeded.")
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
else:
warnflag = 0
if disp:
print("Optimization terminated successfully.")
print(" Current function value: %f" % fval)
print(" Iterations %d" % iterations)
if full_output:
(xopt, fopt, niter,
gopt, hopt) = (newparams, f(newparams, *fargs),
iterations, score(newparams),
hess(newparams))
converged = not warnflag
retvals = {'fopt': fopt, 'iterations': niter, 'score': gopt,
'Hessian': hopt, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': history})
else:
xopt = newparams
retvals = None
return xopt, retvals
def _fit_bfgs(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
norm = kwargs.setdefault('norm', np.Inf)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_bfgs(f, start_params, score, args=fargs,
gtol=gtol, norm=norm, epsilon=epsilon,
maxiter=maxiter, full_output=full_output,
disp=disp, retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, gopt, Hinv, fcalls, gcalls, warnflag = retvals
else:
(xopt, fopt, gopt, Hinv, fcalls,
gcalls, warnflag, allvecs) = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'gopt': gopt, 'Hinv': Hinv,
'fcalls': fcalls, 'gcalls': gcalls, 'warnflag':
warnflag, 'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals
def _fit_lbfgs(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
"""
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
Notes
-----
Within the mle part of statsmodels, the log likelihood function and
its gradient with respect to the parameters do not have notationally
consistent sign.
"""
# Use unconstrained optimization by default.
bounds = kwargs.setdefault('bounds', [(None, None)] * len(start_params))
kwargs.setdefault('iprint', 0)
# Pass the following keyword argument names through to fmin_l_bfgs_b
# if they are present in kwargs, otherwise use the fmin_l_bfgs_b
# default values.
names = ('m', 'pgtol', 'factr', 'maxfun', 'epsilon', 'approx_grad')
extra_kwargs = dict((x, kwargs[x]) for x in names if x in kwargs)
# Extract values for the options related to the gradient.
approx_grad = kwargs.get('approx_grad', False)
loglike_and_score = kwargs.get('loglike_and_score', None)
epsilon = kwargs.get('epsilon', None)
# The approx_grad flag has superpowers nullifying the score function arg.
if approx_grad:
score = None
# Choose among three options for dealing with the gradient (the gradient
# of a log likelihood function with respect to its parameters
# is more specifically called the score in statistics terminology).
# The first option is to use the finite-differences
# approximation that is built into the fmin_l_bfgs_b optimizer.
# The second option is to use the provided score function.
# The third option is to use the score component of a provided
# function that simultaneously evaluates the log likelihood and score.
if epsilon and not approx_grad:
raise ValueError('a finite-differences epsilon was provided '
'even though we are not using approx_grad')
if approx_grad and loglike_and_score:
raise ValueError('gradient approximation was requested '
'even though an analytic loglike_and_score function '
'was given')
if loglike_and_score:
func = lambda p, *a : tuple(-x for x in loglike_and_score(p, *a))
elif score:
func = f
extra_kwargs['fprime'] = score
elif approx_grad:
func = f
# Customize the fmin_l_bfgs_b call according to the scipy version.
# Old scipy does not support maxiter and callback.
scipy_version_curr = distutils.version.LooseVersion(scipy_version)
scipy_version_12 = distutils.version.LooseVersion('0.12.0')
if scipy_version_curr < scipy_version_12:
retvals = optimize.fmin_l_bfgs_b(func, start_params, args=fargs,
bounds=bounds, disp=disp,
**extra_kwargs)
else:
retvals = optimize.fmin_l_bfgs_b(func, start_params, maxiter=maxiter,
callback=callback, args=fargs,
bounds=bounds, disp=disp,
**extra_kwargs)
if full_output:
xopt, fopt, d = retvals
# The warnflag is
# 0 if converged
# 1 if too many function evaluations or too many iterations
# 2 if stopped for another reason, given in d['task']
warnflag = d['warnflag']
converged = (warnflag == 0)
gopt = d['grad']
fcalls = d['funcalls']
retvals = {'fopt': fopt, 'gopt': gopt, 'fcalls': fcalls,
'warnflag': warnflag, 'converged': converged}
else:
xopt = retvals[0]
retvals = None
return xopt, retvals
def _fit_nm(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
xtol = kwargs.setdefault('xtol', 0.0001)
ftol = kwargs.setdefault('ftol', 0.0001)
maxfun = kwargs.setdefault('maxfun', None)
retvals = optimize.fmin(f, start_params, args=fargs, xtol=xtol,
ftol=ftol, maxiter=maxiter, maxfun=maxfun,
full_output=full_output, disp=disp, retall=retall,
callback=callback)
if full_output:
if not retall:
xopt, fopt, niter, fcalls, warnflag = retvals
else:
xopt, fopt, niter, fcalls, warnflag, allvecs = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'iterations': niter,
'fcalls': fcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals
def _fit_cg(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
norm = kwargs.setdefault('norm', np.Inf)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_cg(f, start_params, score, gtol=gtol, norm=norm,
epsilon=epsilon, maxiter=maxiter,
full_output=full_output, disp=disp,
retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, fcalls, gcalls, warnflag = retvals
else:
xopt, fopt, fcalls, gcalls, warnflag, allvecs = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
'warnflag': warnflag, 'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals
def _fit_ncg(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
fhess_p = kwargs.setdefault('fhess_p', None)
avextol = kwargs.setdefault('avextol', 1.0000000000000001e-05)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_ncg(f, start_params, score, fhess_p=fhess_p,
fhess=hess, args=fargs, avextol=avextol,
epsilon=epsilon, maxiter=maxiter,
full_output=full_output, disp=disp,
retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, fcalls, gcalls, hcalls, warnflag = retvals
else:
xopt, fopt, fcalls, gcalls, hcalls, warnflag, allvecs =\
retvals
converged = not warnflag
retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
'hcalls': hcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals
def _fit_powell(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
xtol = kwargs.setdefault('xtol', 0.0001)
ftol = kwargs.setdefault('ftol', 0.0001)
maxfun = kwargs.setdefault('maxfun', None)
start_direc = kwargs.setdefault('start_direc', None)
retvals = optimize.fmin_powell(f, start_params, args=fargs, xtol=xtol,
ftol=ftol, maxiter=maxiter, maxfun=maxfun,
full_output=full_output, disp=disp,
retall=retall, callback=callback,
direc=start_direc)
if full_output:
if not retall:
xopt, fopt, direc, niter, fcalls, warnflag = retvals
else:
xopt, fopt, direc, niter, fcalls, warnflag, allvecs =\
retvals
converged = not warnflag
retvals = {'fopt': fopt, 'direc': direc, 'iterations': niter,
'fcalls': fcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals
def _fit_basinhopping(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
if not 'basinhopping' in vars(optimize):
msg = 'basinhopping solver is not available, use e.g. bfgs instead!'
raise ValueError(msg)
from copy import copy
kwargs = copy(kwargs)
niter = kwargs.setdefault('niter', 100)
niter_success = kwargs.setdefault('niter_success', None)
T = kwargs.setdefault('T', 1.0)
stepsize = kwargs.setdefault('stepsize', 0.5)
interval = kwargs.setdefault('interval', 50)
minimizer_kwargs = kwargs.get('minimizer', {})
minimizer_kwargs['args'] = fargs
minimizer_kwargs['jac'] = score
method = minimizer_kwargs.get('method', None)
if method and method != 'L-BFGS-B': # l_bfgs_b doesn't take a hessian
minimizer_kwargs['hess'] = hess
retvals = optimize.basinhopping(f, start_params,
minimizer_kwargs=minimizer_kwargs,
niter=niter, niter_success=niter_success,
T=T, stepsize=stepsize, disp=disp,
callback=callback, interval=interval)
if full_output:
xopt, fopt, niter, fcalls = map(lambda x : getattr(retvals, x),
['x', 'fun', 'nit', 'nfev'])
converged = 'completed successfully' in retvals.message[0]
retvals = {'fopt': fopt, 'iterations': niter,
'fcalls': fcalls, 'converged': converged}
else:
xopt = retvals.x
retvals = None
return xopt, retvals
| bsd-3-clause |
City-of-Turku/living-environment | assignments/migrations/0001_initial.py | 1 | 11292 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-05 08:31
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import djgeojson.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Assignment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True, verbose_name='name')),
('header', models.CharField(max_length=255, null=True, verbose_name='header')),
('description', ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='description')),
('area', djgeojson.fields.GeometryField(verbose_name='area')),
('status', models.IntegerField(choices=[(0, 'Open'), (1, 'Closed')], default=0, verbose_name='status')),
('budget', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='budget')),
('slug', models.SlugField(help_text='The user-friendly URL identifier ex. www.example.com/runosmaen-koulu', max_length=80, unique=True)),
],
options={
'verbose_name_plural': 'Assignments',
'verbose_name': 'Assignment',
},
),
migrations.CreateModel(
name='BudgetingTarget',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='name')),
('unit_price', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='price')),
('reference_amount', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='reference amount')),
('min_amount', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='min amount')),
('max_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='max amount')),
('icon', models.FileField(blank=True, default='target/icons/default.png', upload_to='target/icons/', verbose_name='icon')),
],
options={
'verbose_name_plural': 'budget targets',
'verbose_name': 'budget target',
},
),
migrations.CreateModel(
name='BudgetingTargetAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
('point', djgeojson.fields.PointField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='OpenTextAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.TextField()),
],
),
migrations.CreateModel(
name='School',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('assignment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='schools', to='assignments.Assignment')),
],
options={
'verbose_name_plural': 'schools',
'verbose_name': 'school',
},
),
migrations.CreateModel(
name='SchoolClass',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
options={
'verbose_name_plural': 'classes',
'verbose_name': 'class',
},
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('description', ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='description')),
('video', models.URLField(blank=True, null=True)),
('order_number', models.IntegerField(default=0, help_text='Order in which sections are shown', verbose_name='order number')),
('assignment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sections', to='assignments.Assignment', verbose_name='Assignment')),
],
options={
'verbose_name_plural': 'Sections',
'verbose_name': 'Section',
'ordering': ['order_number', 'title'],
},
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='submissions', to='assignments.School')),
('school_class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='submissions', to='assignments.SchoolClass')),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_number', models.IntegerField(default=0, help_text='Order in which tasks are shown', verbose_name='order number')),
],
options={
'base_manager_name': 'base_objects',
'ordering': ['order_number'],
},
managers=[
('objects', django.db.models.manager.Manager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='BudgetingTask',
fields=[
('task_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='assignments.Task')),
('name', models.CharField(max_length=255, verbose_name='name')),
('unit', models.IntegerField(choices=[(0, 'ha'), (1, 'pcs')], default=0, verbose_name='unit')),
('amount_of_consumption', models.DecimalField(decimal_places=2, default=0, help_text='Number of units required to be spent on the task', max_digits=10, verbose_name='amount of consumption')),
('targets', models.ManyToManyField(related_name='budgeting_tasks', to='assignments.BudgetingTarget', verbose_name='budget targets')),
],
options={
'verbose_name_plural': 'budgeting tasks',
'verbose_name': 'budgeting task',
},
bases=('assignments.task',),
managers=[
('objects', django.db.models.manager.Manager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='OpenTextTask',
fields=[
('task_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='assignments.Task')),
('question', models.TextField(verbose_name='question')),
],
options={
'verbose_name_plural': 'open text tasks',
'verbose_name': 'open text task',
},
bases=('assignments.task',),
managers=[
('objects', django.db.models.manager.Manager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='VoluntarySignupTask',
fields=[
('task_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='assignments.Task')),
('name', models.CharField(max_length=255, verbose_name='name')),
],
options={
'verbose_name_plural': 'voluntary signup tasks',
'verbose_name': 'voluntary signup task',
},
bases=('assignments.task',),
managers=[
('objects', django.db.models.manager.Manager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.AddField(
model_name='task',
name='polymorphic_ctype',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_assignments.task_set+', to='contenttypes.ContentType'),
),
migrations.AddField(
model_name='task',
name='section',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='assignments.Section'),
),
migrations.AddField(
model_name='school',
name='classes',
field=models.ManyToManyField(related_name='schools', to='assignments.SchoolClass'),
),
migrations.AddField(
model_name='opentextanswer',
name='submission',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='open_text_answers', to='assignments.Submission'),
),
migrations.AddField(
model_name='budgetingtargetanswer',
name='submission',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='budgeting_answers', to='assignments.Submission'),
),
migrations.AddField(
model_name='budgetingtargetanswer',
name='target',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='budgeting_answers', to='assignments.BudgetingTarget'),
),
migrations.AddField(
model_name='opentextanswer',
name='task',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='open_text_answers', to='assignments.OpenTextTask'),
),
migrations.AddField(
model_name='budgetingtargetanswer',
name='task',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='budgeting_answers', to='assignments.BudgetingTask'),
),
]
| mit |
BitTract/LibertyCoin | contrib/bitrpc/bitrpc.py | 2348 | 7835 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | mit |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/python/Lib/OpenGL/raw/GL/__init__.py | 3 | 183316 | # -*- coding: iso-8859-1 -*-
"""Raw (C-style) API for OpenGL.GL
Automatically generated by the generateraw script, do not edit!
"""
from OpenGL.raw.GL.constants import *
from ctypes import *
from OpenGL import platform, arrays
from OpenGL.constant import Constant
from OpenGL import constants as GLconstants
GLvoid = GLconstants.GLvoid
from OpenGL.constants import GL_BYTE
from OpenGL.constants import GL_DOUBLE
from OpenGL.constants import GL_FALSE
from OpenGL.constants import GL_FLOAT
from OpenGL.constants import GL_INT
from OpenGL.constants import GL_SHORT
from OpenGL.constants import GL_TRUE
from OpenGL.constants import GL_UNSIGNED_BYTE
from OpenGL.constants import GL_UNSIGNED_INT
from OpenGL.constants import GL_UNSIGNED_SHORT
from OpenGL.constants import GLenum
from OpenGL.constants import GLfloat
from OpenGL.constants import GLclampf
from OpenGL.constants import GLboolean
from OpenGL.constants import GLsizei
from OpenGL.constants import GLuint
from OpenGL.constants import GLint
from OpenGL.constants import GLubyte
from OpenGL.constants import GLbitfield
from OpenGL.constants import GLclampd
from OpenGL.constants import GLdouble
from OpenGL.constants import GLbyte
from OpenGL.constants import GLshort
from OpenGL.constants import GLushort
GL_BLEND_EQUATION_RGB = GL_BLEND_EQUATION # alias
GL_BLEND_EQUATION_RGB_EXT = GL_BLEND_EQUATION # alias
GL_CURRENT_FOG_COORD = GL_CURRENT_FOG_COORDINATE # alias
GL_FOG_COORD = GL_FOG_COORDINATE # alias
GL_FOG_COORD_ARRAY = GL_FOG_COORDINATE_ARRAY # alias
GL_FOG_COORD_ARRAY_BUFFER_BINDING = GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING # alias
GL_FOG_COORD_ARRAY_POINTER = GL_FOG_COORDINATE_ARRAY_POINTER # alias
GL_FOG_COORD_ARRAY_STRIDE = GL_FOG_COORDINATE_ARRAY_STRIDE # alias
GL_FOG_COORD_ARRAY_TYPE = GL_FOG_COORDINATE_ARRAY_TYPE # alias
GL_FOG_COORD_SRC = GL_FOG_COORDINATE_SOURCE # alias
GL_LINE_WIDTH_GRANULARITY = GL_SMOOTH_LINE_WIDTH_GRANULARITY # alias
GL_LINE_WIDTH_RANGE = GL_SMOOTH_LINE_WIDTH_RANGE # alias
GL_LOGIC_OP = GL_INDEX_LOGIC_OP # alias
GL_MODELVIEW0_EXT = GL_MODELVIEW # alias
GL_MODELVIEW0_MATRIX_EXT = GL_MODELVIEW_MATRIX # alias
GL_MODELVIEW0_STACK_DEPTH_EXT = GL_MODELVIEW_STACK_DEPTH # alias
GL_OFFSET_TEXTURE_2D_BIAS_NV = GL_OFFSET_TEXTURE_BIAS_NV # alias
GL_OFFSET_TEXTURE_2D_MATRIX_NV = GL_OFFSET_TEXTURE_MATRIX_NV # alias
GL_OFFSET_TEXTURE_2D_SCALE_NV = GL_OFFSET_TEXTURE_SCALE_NV # alias
GL_POINT_SIZE_GRANULARITY = GL_SMOOTH_POINT_SIZE_GRANULARITY # alias
GL_POINT_SIZE_RANGE = GL_SMOOTH_POINT_SIZE_RANGE # alias
GL_SRC0_ALPHA = GL_SOURCE0_ALPHA # alias
GL_SRC0_RGB = GL_SOURCE0_RGB # alias
GL_SRC1_ALPHA = GL_SOURCE1_ALPHA # alias
GL_SRC1_RGB = GL_SOURCE1_RGB # alias
GL_SRC2_ALPHA = GL_SOURCE2_ALPHA # alias
GL_SRC2_RGB = GL_SOURCE2_RGB # alias
GL_TEXTURE_COMPONENTS = GL_TEXTURE_INTERNAL_FORMAT # alias
# /usr/include/GL/gl.h 1291
glAccum = platform.createBaseFunction(
'glAccum', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLfloat],
doc='glAccum( GLenum(op), GLfloat(value) ) -> None',
argNames=('op', 'value'),
)
# /usr/include/GL/gl.h 1292
glAlphaFunc = platform.createBaseFunction(
'glAlphaFunc', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLclampf],
doc='glAlphaFunc( GLenum(func), GLclampf(ref) ) -> None',
argNames=('func', 'ref'),
)
# /usr/include/GL/gl.h 1293
glAreTexturesResident = platform.createBaseFunction(
'glAreTexturesResident', dll=platform.GL, resultType=GLboolean,
argTypes=[GLsizei,arrays.GLuintArray,POINTER(GLboolean)],
doc='glAreTexturesResident( GLsizei(n), arrays.GLuintArray(textures), POINTER(GLboolean)(residences) ) -> GLboolean',
argNames=('n', 'textures', 'residences'),
)
# /usr/include/GL/gl.h 1294
glArrayElement = platform.createBaseFunction(
'glArrayElement', dll=platform.GL, resultType=None,
argTypes=[GLint],
doc='glArrayElement( GLint(i) ) -> None',
argNames=('i',),
)
# /usr/include/GL/gl.h 1295
glBegin = platform.createBaseFunction(
'glBegin', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glBegin( GLenum(mode) ) -> None',
argNames=('mode',),
)
# /usr/include/GL/gl.h 1296
glBindTexture = platform.createBaseFunction(
'glBindTexture', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLuint],
doc='glBindTexture( GLenum(target), GLuint(texture) ) -> None',
argNames=('target', 'texture'),
)
# /usr/include/GL/gl.h 1297
glBitmap = platform.createBaseFunction(
'glBitmap', dll=platform.GL, resultType=None,
argTypes=[GLsizei,GLsizei,GLfloat,GLfloat,GLfloat,GLfloat,arrays.GLubyteArray],
doc='glBitmap( GLsizei(width), GLsizei(height), GLfloat(xorig), GLfloat(yorig), GLfloat(xmove), GLfloat(ymove), arrays.GLubyteArray(bitmap) ) -> None',
argNames=('width', 'height', 'xorig', 'yorig', 'xmove', 'ymove', 'bitmap'),
)
# /usr/include/GL/gl.h 1298
glBlendFunc = platform.createBaseFunction(
'glBlendFunc', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum],
doc='glBlendFunc( GLenum(sfactor), GLenum(dfactor) ) -> None',
argNames=('sfactor', 'dfactor'),
)
# /usr/include/GL/gl.h 1299
glCallList = platform.createBaseFunction(
'glCallList', dll=platform.GL, resultType=None,
argTypes=[GLuint],
doc='glCallList( GLuint(list) ) -> None',
argNames=('list',),
)
GLvoid = None
# /usr/include/GL/gl.h 1300
glCallLists = platform.createBaseFunction(
'glCallLists', dll=platform.GL, resultType=None,
argTypes=[GLsizei,GLenum,POINTER(GLvoid)],
doc='glCallLists( GLsizei(n), GLenum(type), POINTER(GLvoid)(lists) ) -> None',
argNames=('n', 'type', 'lists'),
)
# /usr/include/GL/gl.h 1301
glClear = platform.createBaseFunction(
'glClear', dll=platform.GL, resultType=None,
argTypes=[GLbitfield],
doc='glClear( GLbitfield(mask) ) -> None',
argNames=('mask',),
)
# /usr/include/GL/gl.h 1302
glClearAccum = platform.createBaseFunction(
'glClearAccum', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat,GLfloat],
doc='glClearAccum( GLfloat(red), GLfloat(green), GLfloat(blue), GLfloat(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1303
glClearColor = platform.createBaseFunction(
'glClearColor', dll=platform.GL, resultType=None,
argTypes=[GLclampf,GLclampf,GLclampf,GLclampf],
doc='glClearColor( GLclampf(red), GLclampf(green), GLclampf(blue), GLclampf(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1304
glClearDepth = platform.createBaseFunction(
'glClearDepth', dll=platform.GL, resultType=None,
argTypes=[GLclampd],
doc='glClearDepth( GLclampd(depth) ) -> None',
argNames=('depth',),
)
# /usr/include/GL/gl.h 1305
glClearIndex = platform.createBaseFunction(
'glClearIndex', dll=platform.GL, resultType=None,
argTypes=[GLfloat],
doc='glClearIndex( GLfloat(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1306
glClearStencil = platform.createBaseFunction(
'glClearStencil', dll=platform.GL, resultType=None,
argTypes=[GLint],
doc='glClearStencil( GLint(s) ) -> None',
argNames=('s',),
)
# /usr/include/GL/gl.h 1307
glClipPlane = platform.createBaseFunction(
'glClipPlane', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLdoubleArray],
doc='glClipPlane( GLenum(plane), arrays.GLdoubleArray(equation) ) -> None',
argNames=('plane', 'equation'),
)
# /usr/include/GL/gl.h 1308
glColor3b = platform.createBaseFunction(
'glColor3b', dll=platform.GL, resultType=None,
argTypes=[GLbyte,GLbyte,GLbyte],
doc='glColor3b( GLbyte(red), GLbyte(green), GLbyte(blue) ) -> None',
argNames=('red', 'green', 'blue'),
)
# /usr/include/GL/gl.h 1309
glColor3bv = platform.createBaseFunction(
'glColor3bv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLbyteArray],
doc='glColor3bv( arrays.GLbyteArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1310
glColor3d = platform.createBaseFunction(
'glColor3d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble],
doc='glColor3d( GLdouble(red), GLdouble(green), GLdouble(blue) ) -> None',
argNames=('red', 'green', 'blue'),
)
# /usr/include/GL/gl.h 1311
glColor3dv = platform.createBaseFunction(
'glColor3dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glColor3dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1312
glColor3f = platform.createBaseFunction(
'glColor3f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat],
doc='glColor3f( GLfloat(red), GLfloat(green), GLfloat(blue) ) -> None',
argNames=('red', 'green', 'blue'),
)
# /usr/include/GL/gl.h 1313
glColor3fv = platform.createBaseFunction(
'glColor3fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glColor3fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1314
glColor3i = platform.createBaseFunction(
'glColor3i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLint],
doc='glColor3i( GLint(red), GLint(green), GLint(blue) ) -> None',
argNames=('red', 'green', 'blue'),
)
# /usr/include/GL/gl.h 1315
glColor3iv = platform.createBaseFunction(
'glColor3iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glColor3iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1316
glColor3s = platform.createBaseFunction(
'glColor3s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort,GLshort],
doc='glColor3s( GLshort(red), GLshort(green), GLshort(blue) ) -> None',
argNames=('red', 'green', 'blue'),
)
# /usr/include/GL/gl.h 1317
glColor3sv = platform.createBaseFunction(
'glColor3sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glColor3sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1318
glColor3ub = platform.createBaseFunction(
'glColor3ub', dll=platform.GL, resultType=None,
argTypes=[GLubyte,GLubyte,GLubyte],
doc='glColor3ub( GLubyte(red), GLubyte(green), GLubyte(blue) ) -> None',
argNames=('red', 'green', 'blue'),
)
# /usr/include/GL/gl.h 1319
glColor3ubv = platform.createBaseFunction(
'glColor3ubv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLubyteArray],
doc='glColor3ubv( arrays.GLubyteArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1320
glColor3ui = platform.createBaseFunction(
'glColor3ui', dll=platform.GL, resultType=None,
argTypes=[GLuint,GLuint,GLuint],
doc='glColor3ui( GLuint(red), GLuint(green), GLuint(blue) ) -> None',
argNames=('red', 'green', 'blue'),
)
# /usr/include/GL/gl.h 1321
glColor3uiv = platform.createBaseFunction(
'glColor3uiv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLuintArray],
doc='glColor3uiv( arrays.GLuintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1322
glColor3us = platform.createBaseFunction(
'glColor3us', dll=platform.GL, resultType=None,
argTypes=[GLushort,GLushort,GLushort],
doc='glColor3us( GLushort(red), GLushort(green), GLushort(blue) ) -> None',
argNames=('red', 'green', 'blue'),
)
# /usr/include/GL/gl.h 1323
glColor3usv = platform.createBaseFunction(
'glColor3usv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLushortArray],
doc='glColor3usv( arrays.GLushortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1324
glColor4b = platform.createBaseFunction(
'glColor4b', dll=platform.GL, resultType=None,
argTypes=[GLbyte,GLbyte,GLbyte,GLbyte],
doc='glColor4b( GLbyte(red), GLbyte(green), GLbyte(blue), GLbyte(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1325
glColor4bv = platform.createBaseFunction(
'glColor4bv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLbyteArray],
doc='glColor4bv( arrays.GLbyteArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1326
glColor4d = platform.createBaseFunction(
'glColor4d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble,GLdouble],
doc='glColor4d( GLdouble(red), GLdouble(green), GLdouble(blue), GLdouble(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1327
glColor4dv = platform.createBaseFunction(
'glColor4dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glColor4dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1328
glColor4f = platform.createBaseFunction(
'glColor4f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat,GLfloat],
doc='glColor4f( GLfloat(red), GLfloat(green), GLfloat(blue), GLfloat(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1329
glColor4fv = platform.createBaseFunction(
'glColor4fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glColor4fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1330
glColor4i = platform.createBaseFunction(
'glColor4i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLint,GLint],
doc='glColor4i( GLint(red), GLint(green), GLint(blue), GLint(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1331
glColor4iv = platform.createBaseFunction(
'glColor4iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glColor4iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1332
glColor4s = platform.createBaseFunction(
'glColor4s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort,GLshort,GLshort],
doc='glColor4s( GLshort(red), GLshort(green), GLshort(blue), GLshort(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1333
glColor4sv = platform.createBaseFunction(
'glColor4sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glColor4sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1334
glColor4ub = platform.createBaseFunction(
'glColor4ub', dll=platform.GL, resultType=None,
argTypes=[GLubyte,GLubyte,GLubyte,GLubyte],
doc='glColor4ub( GLubyte(red), GLubyte(green), GLubyte(blue), GLubyte(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1335
glColor4ubv = platform.createBaseFunction(
'glColor4ubv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLubyteArray],
doc='glColor4ubv( arrays.GLubyteArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1336
glColor4ui = platform.createBaseFunction(
'glColor4ui', dll=platform.GL, resultType=None,
argTypes=[GLuint,GLuint,GLuint,GLuint],
doc='glColor4ui( GLuint(red), GLuint(green), GLuint(blue), GLuint(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1337
glColor4uiv = platform.createBaseFunction(
'glColor4uiv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLuintArray],
doc='glColor4uiv( arrays.GLuintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1338
glColor4us = platform.createBaseFunction(
'glColor4us', dll=platform.GL, resultType=None,
argTypes=[GLushort,GLushort,GLushort,GLushort],
doc='glColor4us( GLushort(red), GLushort(green), GLushort(blue), GLushort(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1339
glColor4usv = platform.createBaseFunction(
'glColor4usv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLushortArray],
doc='glColor4usv( arrays.GLushortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1340
glColorMask = platform.createBaseFunction(
'glColorMask', dll=platform.GL, resultType=None,
argTypes=[GLboolean,GLboolean,GLboolean,GLboolean],
doc='glColorMask( GLboolean(red), GLboolean(green), GLboolean(blue), GLboolean(alpha) ) -> None',
argNames=('red', 'green', 'blue', 'alpha'),
)
# /usr/include/GL/gl.h 1341
glColorMaterial = platform.createBaseFunction(
'glColorMaterial', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum],
doc='glColorMaterial( GLenum(face), GLenum(mode) ) -> None',
argNames=('face', 'mode'),
)
# /usr/include/GL/gl.h 1342
glColorPointer = platform.createBaseFunction(
'glColorPointer', dll=platform.GL, resultType=None,
argTypes=[GLint,GLenum,GLsizei,POINTER(GLvoid)],
doc='glColorPointer( GLint(size), GLenum(type), GLsizei(stride), POINTER(GLvoid)(pointer) ) -> None',
argNames=('size', 'type', 'stride', 'pointer'),
)
# /usr/include/GL/gl.h 1343
glCopyPixels = platform.createBaseFunction(
'glCopyPixels', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLsizei,GLsizei,GLenum],
doc='glCopyPixels( GLint(x), GLint(y), GLsizei(width), GLsizei(height), GLenum(type) ) -> None',
argNames=('x', 'y', 'width', 'height', 'type'),
)
# /usr/include/GL/gl.h 1344
glCopyTexImage1D = platform.createBaseFunction(
'glCopyTexImage1D', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLenum,GLint,GLint,GLsizei,GLint],
doc='glCopyTexImage1D( GLenum(target), GLint(level), GLenum(internalFormat), GLint(x), GLint(y), GLsizei(width), GLint(border) ) -> None',
argNames=('target', 'level', 'internalFormat', 'x', 'y', 'width', 'border'),
)
# /usr/include/GL/gl.h 1345
glCopyTexImage2D = platform.createBaseFunction(
'glCopyTexImage2D', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLenum,GLint,GLint,GLsizei,GLsizei,GLint],
doc='glCopyTexImage2D( GLenum(target), GLint(level), GLenum(internalFormat), GLint(x), GLint(y), GLsizei(width), GLsizei(height), GLint(border) ) -> None',
argNames=('target', 'level', 'internalFormat', 'x', 'y', 'width', 'height', 'border'),
)
# /usr/include/GL/gl.h 1346
glCopyTexSubImage1D = platform.createBaseFunction(
'glCopyTexSubImage1D', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLint,GLint,GLint,GLsizei],
doc='glCopyTexSubImage1D( GLenum(target), GLint(level), GLint(xoffset), GLint(x), GLint(y), GLsizei(width) ) -> None',
argNames=('target', 'level', 'xoffset', 'x', 'y', 'width'),
)
# /usr/include/GL/gl.h 1347
glCopyTexSubImage2D = platform.createBaseFunction(
'glCopyTexSubImage2D', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLint,GLint,GLint,GLint,GLsizei,GLsizei],
doc='glCopyTexSubImage2D( GLenum(target), GLint(level), GLint(xoffset), GLint(yoffset), GLint(x), GLint(y), GLsizei(width), GLsizei(height) ) -> None',
argNames=('target', 'level', 'xoffset', 'yoffset', 'x', 'y', 'width', 'height'),
)
# /usr/include/GL/gl.h 1348
glCullFace = platform.createBaseFunction(
'glCullFace', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glCullFace( GLenum(mode) ) -> None',
argNames=('mode',),
)
# /usr/include/GL/gl.h 1349
glDeleteLists = platform.createBaseFunction(
'glDeleteLists', dll=platform.GL, resultType=None,
argTypes=[GLuint,GLsizei],
doc='glDeleteLists( GLuint(list), GLsizei(range) ) -> None',
argNames=('list', 'range'),
)
# /usr/include/GL/gl.h 1350
glDeleteTextures = platform.createBaseFunction(
'glDeleteTextures', dll=platform.GL, resultType=None,
argTypes=[GLsizei,arrays.GLuintArray],
doc='glDeleteTextures( GLsizei(n), arrays.GLuintArray(textures) ) -> None',
argNames=('n', 'textures'),
)
# /usr/include/GL/gl.h 1351
glDepthFunc = platform.createBaseFunction(
'glDepthFunc', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glDepthFunc( GLenum(func) ) -> None',
argNames=('func',),
)
# /usr/include/GL/gl.h 1352
glDepthMask = platform.createBaseFunction(
'glDepthMask', dll=platform.GL, resultType=None,
argTypes=[GLboolean],
doc='glDepthMask( GLboolean(flag) ) -> None',
argNames=('flag',),
)
# /usr/include/GL/gl.h 1353
glDepthRange = platform.createBaseFunction(
'glDepthRange', dll=platform.GL, resultType=None,
argTypes=[GLclampd,GLclampd],
doc='glDepthRange( GLclampd(zNear), GLclampd(zFar) ) -> None',
argNames=('zNear', 'zFar'),
)
# /usr/include/GL/gl.h 1354
glDisable = platform.createBaseFunction(
'glDisable', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glDisable( GLenum(cap) ) -> None',
argNames=('cap',),
)
# /usr/include/GL/gl.h 1355
glDisableClientState = platform.createBaseFunction(
'glDisableClientState', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glDisableClientState( GLenum(array) ) -> None',
argNames=('array',),
)
# /usr/include/GL/gl.h 1356
glDrawArrays = platform.createBaseFunction(
'glDrawArrays', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLsizei],
doc='glDrawArrays( GLenum(mode), GLint(first), GLsizei(count) ) -> None',
argNames=('mode', 'first', 'count'),
)
# /usr/include/GL/gl.h 1357
glDrawBuffer = platform.createBaseFunction(
'glDrawBuffer', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glDrawBuffer( GLenum(mode) ) -> None',
argNames=('mode',),
)
# /usr/include/GL/gl.h 1358
glDrawElements = platform.createBaseFunction(
'glDrawElements', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLsizei,GLenum,POINTER(GLvoid)],
doc='glDrawElements( GLenum(mode), GLsizei(count), GLenum(type), POINTER(GLvoid)(indices) ) -> None',
argNames=('mode', 'count', 'type', 'indices'),
)
# /usr/include/GL/gl.h 1359
glDrawPixels = platform.createBaseFunction(
'glDrawPixels', dll=platform.GL, resultType=None,
argTypes=[GLsizei,GLsizei,GLenum,GLenum,POINTER(GLvoid)],
doc='glDrawPixels( GLsizei(width), GLsizei(height), GLenum(format), GLenum(type), POINTER(GLvoid)(pixels) ) -> None',
argNames=('width', 'height', 'format', 'type', 'pixels'),
)
# /usr/include/GL/gl.h 1360
glEdgeFlag = platform.createBaseFunction(
'glEdgeFlag', dll=platform.GL, resultType=None,
argTypes=[GLboolean],
doc='glEdgeFlag( GLboolean(flag) ) -> None',
argNames=('flag',),
)
# /usr/include/GL/gl.h 1361
glEdgeFlagPointer = platform.createBaseFunction(
'glEdgeFlagPointer', dll=platform.GL, resultType=None,
argTypes=[GLsizei,POINTER(GLvoid)],
doc='glEdgeFlagPointer( GLsizei(stride), POINTER(GLvoid)(pointer) ) -> None',
argNames=('stride', 'pointer'),
)
# /usr/include/GL/gl.h 1362
glEdgeFlagv = platform.createBaseFunction(
'glEdgeFlagv', dll=platform.GL, resultType=None,
argTypes=[POINTER(GLboolean)],
doc='glEdgeFlagv( POINTER(GLboolean)(flag) ) -> None',
argNames=('flag',),
)
# /usr/include/GL/gl.h 1363
glEnable = platform.createBaseFunction(
'glEnable', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glEnable( GLenum(cap) ) -> None',
argNames=('cap',),
)
# /usr/include/GL/gl.h 1364
glEnableClientState = platform.createBaseFunction(
'glEnableClientState', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glEnableClientState( GLenum(array) ) -> None',
argNames=('array',),
)
# /usr/include/GL/gl.h 1365
glEnd = platform.createBaseFunction(
'glEnd', dll=platform.GL, resultType=None,
argTypes=[],
doc='glEnd( ) -> None',
argNames=(),
)
# /usr/include/GL/gl.h 1366
glEndList = platform.createBaseFunction(
'glEndList', dll=platform.GL, resultType=None,
argTypes=[],
doc='glEndList( ) -> None',
argNames=(),
)
# /usr/include/GL/gl.h 1367
glEvalCoord1d = platform.createBaseFunction(
'glEvalCoord1d', dll=platform.GL, resultType=None,
argTypes=[GLdouble],
doc='glEvalCoord1d( GLdouble(u) ) -> None',
argNames=('u',),
)
# --- Auto-generated ctypes bindings: evaluator / feedback / fog / frustum ---
# NOTE(review): this section appears machine-generated from /usr/include/GL/gl.h
# (each `# ... gl.h NNNN` comment records the source header line) — regenerate
# rather than hand-edit. Each entry binds one GL entry point through
# platform.createBaseFunction with its ctypes result/argument types, a doc
# string, and argument names.
# /usr/include/GL/gl.h 1368
glEvalCoord1dv = platform.createBaseFunction(
'glEvalCoord1dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glEvalCoord1dv( arrays.GLdoubleArray(u) ) -> None',
argNames=('u',),
)
# /usr/include/GL/gl.h 1369
glEvalCoord1f = platform.createBaseFunction(
'glEvalCoord1f', dll=platform.GL, resultType=None,
argTypes=[GLfloat],
doc='glEvalCoord1f( GLfloat(u) ) -> None',
argNames=('u',),
)
# /usr/include/GL/gl.h 1370
glEvalCoord1fv = platform.createBaseFunction(
'glEvalCoord1fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glEvalCoord1fv( arrays.GLfloatArray(u) ) -> None',
argNames=('u',),
)
# /usr/include/GL/gl.h 1371
glEvalCoord2d = platform.createBaseFunction(
'glEvalCoord2d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble],
doc='glEvalCoord2d( GLdouble(u), GLdouble(v) ) -> None',
argNames=('u', 'v'),
)
# /usr/include/GL/gl.h 1372
glEvalCoord2dv = platform.createBaseFunction(
'glEvalCoord2dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glEvalCoord2dv( arrays.GLdoubleArray(u) ) -> None',
argNames=('u',),
)
# /usr/include/GL/gl.h 1373
glEvalCoord2f = platform.createBaseFunction(
'glEvalCoord2f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat],
doc='glEvalCoord2f( GLfloat(u), GLfloat(v) ) -> None',
argNames=('u', 'v'),
)
# /usr/include/GL/gl.h 1374
glEvalCoord2fv = platform.createBaseFunction(
'glEvalCoord2fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glEvalCoord2fv( arrays.GLfloatArray(u) ) -> None',
argNames=('u',),
)
# /usr/include/GL/gl.h 1375
glEvalMesh1 = platform.createBaseFunction(
'glEvalMesh1', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLint],
doc='glEvalMesh1( GLenum(mode), GLint(i1), GLint(i2) ) -> None',
argNames=('mode', 'i1', 'i2'),
)
# /usr/include/GL/gl.h 1376
glEvalMesh2 = platform.createBaseFunction(
'glEvalMesh2', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLint,GLint,GLint],
doc='glEvalMesh2( GLenum(mode), GLint(i1), GLint(i2), GLint(j1), GLint(j2) ) -> None',
argNames=('mode', 'i1', 'i2', 'j1', 'j2'),
)
# /usr/include/GL/gl.h 1377
glEvalPoint1 = platform.createBaseFunction(
'glEvalPoint1', dll=platform.GL, resultType=None,
argTypes=[GLint],
doc='glEvalPoint1( GLint(i) ) -> None',
argNames=('i',),
)
# /usr/include/GL/gl.h 1378
glEvalPoint2 = platform.createBaseFunction(
'glEvalPoint2', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint],
doc='glEvalPoint2( GLint(i), GLint(j) ) -> None',
argNames=('i', 'j'),
)
# /usr/include/GL/gl.h 1379
glFeedbackBuffer = platform.createBaseFunction(
'glFeedbackBuffer', dll=platform.GL, resultType=None,
argTypes=[GLsizei,GLenum,arrays.GLfloatArray],
doc='glFeedbackBuffer( GLsizei(size), GLenum(type), arrays.GLfloatArray(buffer) ) -> None',
argNames=('size', 'type', 'buffer'),
)
# /usr/include/GL/gl.h 1380
glFinish = platform.createBaseFunction(
'glFinish', dll=platform.GL, resultType=None,
argTypes=[],
doc='glFinish( ) -> None',
argNames=(),
)
# /usr/include/GL/gl.h 1381
glFlush = platform.createBaseFunction(
'glFlush', dll=platform.GL, resultType=None,
argTypes=[],
doc='glFlush( ) -> None',
argNames=(),
)
# /usr/include/GL/gl.h 1382
glFogf = platform.createBaseFunction(
'glFogf', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLfloat],
doc='glFogf( GLenum(pname), GLfloat(param) ) -> None',
argNames=('pname', 'param'),
)
# /usr/include/GL/gl.h 1383
glFogfv = platform.createBaseFunction(
'glFogfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLfloatArray],
doc='glFogfv( GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('pname', 'params'),
)
# /usr/include/GL/gl.h 1384
glFogi = platform.createBaseFunction(
'glFogi', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint],
doc='glFogi( GLenum(pname), GLint(param) ) -> None',
argNames=('pname', 'param'),
)
# /usr/include/GL/gl.h 1385
glFogiv = platform.createBaseFunction(
'glFogiv', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLintArray],
doc='glFogiv( GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('pname', 'params'),
)
# /usr/include/GL/gl.h 1386
glFrontFace = platform.createBaseFunction(
'glFrontFace', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glFrontFace( GLenum(mode) ) -> None',
argNames=('mode',),
)
# /usr/include/GL/gl.h 1387
glFrustum = platform.createBaseFunction(
'glFrustum', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble,GLdouble,GLdouble,GLdouble],
doc='glFrustum( GLdouble(left), GLdouble(right), GLdouble(bottom), GLdouble(top), GLdouble(zNear), GLdouble(zFar) ) -> None',
argNames=('left', 'right', 'bottom', 'top', 'zNear', 'zFar'),
)
# --- Auto-generated ctypes bindings: glGen* / glGet* state queries ---
# NOTE(review): machine-generated section — regenerate rather than hand-edit.
# Query wrappers; the glGet* entries fill caller-supplied arrays/pointers
# (out parameters) and return None, except glGenLists/glGetError/glGetString
# which return a value directly.
# /usr/include/GL/gl.h 1388
glGenLists = platform.createBaseFunction(
'glGenLists', dll=platform.GL, resultType=GLuint,
argTypes=[GLsizei],
doc='glGenLists( GLsizei(range) ) -> GLuint',
argNames=('range',),
)
# /usr/include/GL/gl.h 1389
glGenTextures = platform.createBaseFunction(
'glGenTextures', dll=platform.GL, resultType=None,
argTypes=[GLsizei,arrays.GLuintArray],
doc='glGenTextures( GLsizei(n), arrays.GLuintArray(textures) ) -> None',
argNames=('n', 'textures'),
)
# /usr/include/GL/gl.h 1390
glGetBooleanv = platform.createBaseFunction(
'glGetBooleanv', dll=platform.GL, resultType=None,
argTypes=[GLenum,POINTER(GLboolean)],
doc='glGetBooleanv( GLenum(pname), POINTER(GLboolean)(params) ) -> None',
argNames=('pname', 'params'),
)
# /usr/include/GL/gl.h 1391
glGetClipPlane = platform.createBaseFunction(
'glGetClipPlane', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLdoubleArray],
doc='glGetClipPlane( GLenum(plane), arrays.GLdoubleArray(equation) ) -> None',
argNames=('plane', 'equation'),
)
# /usr/include/GL/gl.h 1392
glGetDoublev = platform.createBaseFunction(
'glGetDoublev', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLdoubleArray],
doc='glGetDoublev( GLenum(pname), arrays.GLdoubleArray(params) ) -> None',
argNames=('pname', 'params'),
)
# /usr/include/GL/gl.h 1393
glGetError = platform.createBaseFunction(
'glGetError', dll=platform.GL, resultType=GLenum,
argTypes=[],
doc='glGetError( ) -> GLenum',
argNames=(),
)
# /usr/include/GL/gl.h 1394
glGetFloatv = platform.createBaseFunction(
'glGetFloatv', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLfloatArray],
doc='glGetFloatv( GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('pname', 'params'),
)
# /usr/include/GL/gl.h 1395
glGetIntegerv = platform.createBaseFunction(
'glGetIntegerv', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLintArray],
doc='glGetIntegerv( GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('pname', 'params'),
)
# /usr/include/GL/gl.h 1396
glGetLightfv = platform.createBaseFunction(
'glGetLightfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glGetLightfv( GLenum(light), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('light', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1397
glGetLightiv = platform.createBaseFunction(
'glGetLightiv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glGetLightiv( GLenum(light), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('light', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1398
glGetMapdv = platform.createBaseFunction(
'glGetMapdv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLdoubleArray],
doc='glGetMapdv( GLenum(target), GLenum(query), arrays.GLdoubleArray(v) ) -> None',
argNames=('target', 'query', 'v'),
)
# /usr/include/GL/gl.h 1399
glGetMapfv = platform.createBaseFunction(
'glGetMapfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glGetMapfv( GLenum(target), GLenum(query), arrays.GLfloatArray(v) ) -> None',
argNames=('target', 'query', 'v'),
)
# /usr/include/GL/gl.h 1400
glGetMapiv = platform.createBaseFunction(
'glGetMapiv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glGetMapiv( GLenum(target), GLenum(query), arrays.GLintArray(v) ) -> None',
argNames=('target', 'query', 'v'),
)
# /usr/include/GL/gl.h 1401
glGetMaterialfv = platform.createBaseFunction(
'glGetMaterialfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glGetMaterialfv( GLenum(face), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('face', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1402
glGetMaterialiv = platform.createBaseFunction(
'glGetMaterialiv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glGetMaterialiv( GLenum(face), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('face', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1403
glGetPixelMapfv = platform.createBaseFunction(
'glGetPixelMapfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLfloatArray],
doc='glGetPixelMapfv( GLenum(map), arrays.GLfloatArray(values) ) -> None',
argNames=('map', 'values'),
)
# /usr/include/GL/gl.h 1404
glGetPixelMapuiv = platform.createBaseFunction(
'glGetPixelMapuiv', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLuintArray],
doc='glGetPixelMapuiv( GLenum(map), arrays.GLuintArray(values) ) -> None',
argNames=('map', 'values'),
)
# /usr/include/GL/gl.h 1405
glGetPixelMapusv = platform.createBaseFunction(
'glGetPixelMapusv', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLushortArray],
doc='glGetPixelMapusv( GLenum(map), arrays.GLushortArray(values) ) -> None',
argNames=('map', 'values'),
)
# /usr/include/GL/gl.h 1406
glGetPointerv = platform.createBaseFunction(
'glGetPointerv', dll=platform.GL, resultType=None,
argTypes=[GLenum,POINTER(POINTER(GLvoid))],
doc='glGetPointerv( GLenum(pname), POINTER(POINTER(GLvoid))(params) ) -> None',
argNames=('pname', 'params'),
)
# /usr/include/GL/gl.h 1407
glGetPolygonStipple = platform.createBaseFunction(
'glGetPolygonStipple', dll=platform.GL, resultType=None,
argTypes=[arrays.GLubyteArray],
doc='glGetPolygonStipple( arrays.GLubyteArray(mask) ) -> None',
argNames=('mask',),
)
# /usr/include/GL/gl.h 1408
glGetString = platform.createBaseFunction(
'glGetString', dll=platform.GL, resultType=POINTER(GLubyte),
argTypes=[GLenum],
doc='glGetString( GLenum(name) ) -> POINTER(GLubyte)',
argNames=('name',),
)
# /usr/include/GL/gl.h 1409
glGetTexEnvfv = platform.createBaseFunction(
'glGetTexEnvfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glGetTexEnvfv( GLenum(target), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('target', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1410
glGetTexEnviv = platform.createBaseFunction(
'glGetTexEnviv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glGetTexEnviv( GLenum(target), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('target', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1411
glGetTexGendv = platform.createBaseFunction(
'glGetTexGendv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLdoubleArray],
doc='glGetTexGendv( GLenum(coord), GLenum(pname), arrays.GLdoubleArray(params) ) -> None',
argNames=('coord', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1412
glGetTexGenfv = platform.createBaseFunction(
'glGetTexGenfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glGetTexGenfv( GLenum(coord), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('coord', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1413
glGetTexGeniv = platform.createBaseFunction(
'glGetTexGeniv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glGetTexGeniv( GLenum(coord), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('coord', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1414
glGetTexImage = platform.createBaseFunction(
'glGetTexImage', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLenum,GLenum,POINTER(GLvoid)],
doc='glGetTexImage( GLenum(target), GLint(level), GLenum(format), GLenum(type), POINTER(GLvoid)(pixels) ) -> None',
argNames=('target', 'level', 'format', 'type', 'pixels'),
)
# /usr/include/GL/gl.h 1415
glGetTexLevelParameterfv = platform.createBaseFunction(
'glGetTexLevelParameterfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLenum,arrays.GLfloatArray],
doc='glGetTexLevelParameterfv( GLenum(target), GLint(level), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('target', 'level', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1416
glGetTexLevelParameteriv = platform.createBaseFunction(
'glGetTexLevelParameteriv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLenum,arrays.GLintArray],
doc='glGetTexLevelParameteriv( GLenum(target), GLint(level), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('target', 'level', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1417
glGetTexParameterfv = platform.createBaseFunction(
'glGetTexParameterfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glGetTexParameterfv( GLenum(target), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('target', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1418
glGetTexParameteriv = platform.createBaseFunction(
'glGetTexParameteriv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glGetTexParameteriv( GLenum(target), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('target', 'pname', 'params'),
)
# --- Auto-generated ctypes bindings: glHint / color-index (glIndex*) family ---
# NOTE(review): machine-generated section — regenerate rather than hand-edit.
# /usr/include/GL/gl.h 1419
glHint = platform.createBaseFunction(
'glHint', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum],
doc='glHint( GLenum(target), GLenum(mode) ) -> None',
argNames=('target', 'mode'),
)
# /usr/include/GL/gl.h 1420
glIndexMask = platform.createBaseFunction(
'glIndexMask', dll=platform.GL, resultType=None,
argTypes=[GLuint],
doc='glIndexMask( GLuint(mask) ) -> None',
argNames=('mask',),
)
# /usr/include/GL/gl.h 1421
glIndexPointer = platform.createBaseFunction(
'glIndexPointer', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLsizei,POINTER(GLvoid)],
doc='glIndexPointer( GLenum(type), GLsizei(stride), POINTER(GLvoid)(pointer) ) -> None',
argNames=('type', 'stride', 'pointer'),
)
# /usr/include/GL/gl.h 1422
glIndexd = platform.createBaseFunction(
'glIndexd', dll=platform.GL, resultType=None,
argTypes=[GLdouble],
doc='glIndexd( GLdouble(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1423
glIndexdv = platform.createBaseFunction(
'glIndexdv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glIndexdv( arrays.GLdoubleArray(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1424
glIndexf = platform.createBaseFunction(
'glIndexf', dll=platform.GL, resultType=None,
argTypes=[GLfloat],
doc='glIndexf( GLfloat(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1425
glIndexfv = platform.createBaseFunction(
'glIndexfv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glIndexfv( arrays.GLfloatArray(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1426
glIndexi = platform.createBaseFunction(
'glIndexi', dll=platform.GL, resultType=None,
argTypes=[GLint],
doc='glIndexi( GLint(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1427
glIndexiv = platform.createBaseFunction(
'glIndexiv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glIndexiv( arrays.GLintArray(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1428
glIndexs = platform.createBaseFunction(
'glIndexs', dll=platform.GL, resultType=None,
argTypes=[GLshort],
doc='glIndexs( GLshort(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1429
glIndexsv = platform.createBaseFunction(
'glIndexsv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glIndexsv( arrays.GLshortArray(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1430
glIndexub = platform.createBaseFunction(
'glIndexub', dll=platform.GL, resultType=None,
argTypes=[GLubyte],
doc='glIndexub( GLubyte(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1431
glIndexubv = platform.createBaseFunction(
'glIndexubv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLubyteArray],
doc='glIndexubv( arrays.GLubyteArray(c) ) -> None',
argNames=('c',),
)
# /usr/include/GL/gl.h 1432
glInitNames = platform.createBaseFunction(
'glInitNames', dll=platform.GL, resultType=None,
argTypes=[],
doc='glInitNames( ) -> None',
argNames=(),
)
# --- Auto-generated ctypes bindings: glIs* predicates, lighting, lines, ---
# --- display-list/matrix state, and evaluator map definitions (glMap*) ---
# NOTE(review): machine-generated section — regenerate rather than hand-edit.
# /usr/include/GL/gl.h 1433
glInterleavedArrays = platform.createBaseFunction(
'glInterleavedArrays', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLsizei,POINTER(GLvoid)],
doc='glInterleavedArrays( GLenum(format), GLsizei(stride), POINTER(GLvoid)(pointer) ) -> None',
argNames=('format', 'stride', 'pointer'),
)
# /usr/include/GL/gl.h 1434
glIsEnabled = platform.createBaseFunction(
'glIsEnabled', dll=platform.GL, resultType=GLboolean,
argTypes=[GLenum],
doc='glIsEnabled( GLenum(cap) ) -> GLboolean',
argNames=('cap',),
)
# /usr/include/GL/gl.h 1435
glIsList = platform.createBaseFunction(
'glIsList', dll=platform.GL, resultType=GLboolean,
argTypes=[GLuint],
doc='glIsList( GLuint(list) ) -> GLboolean',
argNames=('list',),
)
# /usr/include/GL/gl.h 1436
glIsTexture = platform.createBaseFunction(
'glIsTexture', dll=platform.GL, resultType=GLboolean,
argTypes=[GLuint],
doc='glIsTexture( GLuint(texture) ) -> GLboolean',
argNames=('texture',),
)
# /usr/include/GL/gl.h 1437
glLightModelf = platform.createBaseFunction(
'glLightModelf', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLfloat],
doc='glLightModelf( GLenum(pname), GLfloat(param) ) -> None',
argNames=('pname', 'param'),
)
# /usr/include/GL/gl.h 1438
glLightModelfv = platform.createBaseFunction(
'glLightModelfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLfloatArray],
doc='glLightModelfv( GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('pname', 'params'),
)
# /usr/include/GL/gl.h 1439
glLightModeli = platform.createBaseFunction(
'glLightModeli', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint],
doc='glLightModeli( GLenum(pname), GLint(param) ) -> None',
argNames=('pname', 'param'),
)
# /usr/include/GL/gl.h 1440
glLightModeliv = platform.createBaseFunction(
'glLightModeliv', dll=platform.GL, resultType=None,
argTypes=[GLenum,arrays.GLintArray],
doc='glLightModeliv( GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('pname', 'params'),
)
# /usr/include/GL/gl.h 1441
glLightf = platform.createBaseFunction(
'glLightf', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLfloat],
doc='glLightf( GLenum(light), GLenum(pname), GLfloat(param) ) -> None',
argNames=('light', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1442
glLightfv = platform.createBaseFunction(
'glLightfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glLightfv( GLenum(light), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('light', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1443
glLighti = platform.createBaseFunction(
'glLighti', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLint],
doc='glLighti( GLenum(light), GLenum(pname), GLint(param) ) -> None',
argNames=('light', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1444
glLightiv = platform.createBaseFunction(
'glLightiv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glLightiv( GLenum(light), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('light', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1445
glLineStipple = platform.createBaseFunction(
'glLineStipple', dll=platform.GL, resultType=None,
argTypes=[GLint,GLushort],
doc='glLineStipple( GLint(factor), GLushort(pattern) ) -> None',
argNames=('factor', 'pattern'),
)
# /usr/include/GL/gl.h 1446
glLineWidth = platform.createBaseFunction(
'glLineWidth', dll=platform.GL, resultType=None,
argTypes=[GLfloat],
doc='glLineWidth( GLfloat(width) ) -> None',
argNames=('width',),
)
# /usr/include/GL/gl.h 1447
glListBase = platform.createBaseFunction(
'glListBase', dll=platform.GL, resultType=None,
argTypes=[GLuint],
doc='glListBase( GLuint(base) ) -> None',
argNames=('base',),
)
# /usr/include/GL/gl.h 1448
glLoadIdentity = platform.createBaseFunction(
'glLoadIdentity', dll=platform.GL, resultType=None,
argTypes=[],
doc='glLoadIdentity( ) -> None',
argNames=(),
)
# /usr/include/GL/gl.h 1449
glLoadMatrixd = platform.createBaseFunction(
'glLoadMatrixd', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glLoadMatrixd( arrays.GLdoubleArray(m) ) -> None',
argNames=('m',),
)
# /usr/include/GL/gl.h 1450
glLoadMatrixf = platform.createBaseFunction(
'glLoadMatrixf', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glLoadMatrixf( arrays.GLfloatArray(m) ) -> None',
argNames=('m',),
)
# /usr/include/GL/gl.h 1451
glLoadName = platform.createBaseFunction(
'glLoadName', dll=platform.GL, resultType=None,
argTypes=[GLuint],
doc='glLoadName( GLuint(name) ) -> None',
argNames=('name',),
)
# /usr/include/GL/gl.h 1452
glLogicOp = platform.createBaseFunction(
'glLogicOp', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glLogicOp( GLenum(opcode) ) -> None',
argNames=('opcode',),
)
# /usr/include/GL/gl.h 1453
glMap1d = platform.createBaseFunction(
'glMap1d', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLdouble,GLdouble,GLint,GLint,arrays.GLdoubleArray],
doc='glMap1d( GLenum(target), GLdouble(u1), GLdouble(u2), GLint(stride), GLint(order), arrays.GLdoubleArray(points) ) -> None',
argNames=('target', 'u1', 'u2', 'stride', 'order', 'points'),
)
# /usr/include/GL/gl.h 1454
glMap1f = platform.createBaseFunction(
'glMap1f', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLfloat,GLfloat,GLint,GLint,arrays.GLfloatArray],
doc='glMap1f( GLenum(target), GLfloat(u1), GLfloat(u2), GLint(stride), GLint(order), arrays.GLfloatArray(points) ) -> None',
argNames=('target', 'u1', 'u2', 'stride', 'order', 'points'),
)
# /usr/include/GL/gl.h 1455
glMap2d = platform.createBaseFunction(
'glMap2d', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLdouble,GLdouble,GLint,GLint,GLdouble,GLdouble,GLint,GLint,arrays.GLdoubleArray],
doc='glMap2d( GLenum(target), GLdouble(u1), GLdouble(u2), GLint(ustride), GLint(uorder), GLdouble(v1), GLdouble(v2), GLint(vstride), GLint(vorder), arrays.GLdoubleArray(points) ) -> None',
argNames=('target', 'u1', 'u2', 'ustride', 'uorder', 'v1', 'v2', 'vstride', 'vorder', 'points'),
)
# /usr/include/GL/gl.h 1456
glMap2f = platform.createBaseFunction(
'glMap2f', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLfloat,GLfloat,GLint,GLint,GLfloat,GLfloat,GLint,GLint,arrays.GLfloatArray],
doc='glMap2f( GLenum(target), GLfloat(u1), GLfloat(u2), GLint(ustride), GLint(uorder), GLfloat(v1), GLfloat(v2), GLint(vstride), GLint(vorder), arrays.GLfloatArray(points) ) -> None',
argNames=('target', 'u1', 'u2', 'ustride', 'uorder', 'v1', 'v2', 'vstride', 'vorder', 'points'),
)
# /usr/include/GL/gl.h 1457
glMapGrid1d = platform.createBaseFunction(
'glMapGrid1d', dll=platform.GL, resultType=None,
argTypes=[GLint,GLdouble,GLdouble],
doc='glMapGrid1d( GLint(un), GLdouble(u1), GLdouble(u2) ) -> None',
argNames=('un', 'u1', 'u2'),
)
# /usr/include/GL/gl.h 1458
glMapGrid1f = platform.createBaseFunction(
'glMapGrid1f', dll=platform.GL, resultType=None,
argTypes=[GLint,GLfloat,GLfloat],
doc='glMapGrid1f( GLint(un), GLfloat(u1), GLfloat(u2) ) -> None',
argNames=('un', 'u1', 'u2'),
)
# /usr/include/GL/gl.h 1459
glMapGrid2d = platform.createBaseFunction(
'glMapGrid2d', dll=platform.GL, resultType=None,
argTypes=[GLint,GLdouble,GLdouble,GLint,GLdouble,GLdouble],
doc='glMapGrid2d( GLint(un), GLdouble(u1), GLdouble(u2), GLint(vn), GLdouble(v1), GLdouble(v2) ) -> None',
argNames=('un', 'u1', 'u2', 'vn', 'v1', 'v2'),
)
# /usr/include/GL/gl.h 1460
glMapGrid2f = platform.createBaseFunction(
'glMapGrid2f', dll=platform.GL, resultType=None,
argTypes=[GLint,GLfloat,GLfloat,GLint,GLfloat,GLfloat],
doc='glMapGrid2f( GLint(un), GLfloat(u1), GLfloat(u2), GLint(vn), GLfloat(v1), GLfloat(v2) ) -> None',
argNames=('un', 'u1', 'u2', 'vn', 'v1', 'v2'),
)
)
# /usr/include/GL/gl.h 1461
glMaterialf = platform.createBaseFunction(
'glMaterialf', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLfloat],
doc='glMaterialf( GLenum(face), GLenum(pname), GLfloat(param) ) -> None',
argNames=('face', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1462
glMaterialfv = platform.createBaseFunction(
'glMaterialfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glMaterialfv( GLenum(face), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('face', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1463
glMateriali = platform.createBaseFunction(
'glMateriali', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLint],
doc='glMateriali( GLenum(face), GLenum(pname), GLint(param) ) -> None',
argNames=('face', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1464
glMaterialiv = platform.createBaseFunction(
'glMaterialiv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glMaterialiv( GLenum(face), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('face', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1465
glMatrixMode = platform.createBaseFunction(
'glMatrixMode', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glMatrixMode( GLenum(mode) ) -> None',
argNames=('mode',),
)
# /usr/include/GL/gl.h 1466
glMultMatrixd = platform.createBaseFunction(
'glMultMatrixd', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glMultMatrixd( arrays.GLdoubleArray(m) ) -> None',
argNames=('m',),
)
# /usr/include/GL/gl.h 1467
glMultMatrixf = platform.createBaseFunction(
'glMultMatrixf', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glMultMatrixf( arrays.GLfloatArray(m) ) -> None',
argNames=('m',),
)
# /usr/include/GL/gl.h 1468
glNewList = platform.createBaseFunction(
'glNewList', dll=platform.GL, resultType=None,
argTypes=[GLuint,GLenum],
doc='glNewList( GLuint(list), GLenum(mode) ) -> None',
argNames=('list', 'mode'),
)
# /usr/include/GL/gl.h 1469
glNormal3b = platform.createBaseFunction(
'glNormal3b', dll=platform.GL, resultType=None,
argTypes=[GLbyte,GLbyte,GLbyte],
doc='glNormal3b( GLbyte(nx), GLbyte(ny), GLbyte(nz) ) -> None',
argNames=('nx', 'ny', 'nz'),
)
# /usr/include/GL/gl.h 1470
glNormal3bv = platform.createBaseFunction(
'glNormal3bv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLbyteArray],
doc='glNormal3bv( arrays.GLbyteArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1471
glNormal3d = platform.createBaseFunction(
'glNormal3d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble],
doc='glNormal3d( GLdouble(nx), GLdouble(ny), GLdouble(nz) ) -> None',
argNames=('nx', 'ny', 'nz'),
)
# /usr/include/GL/gl.h 1472
glNormal3dv = platform.createBaseFunction(
'glNormal3dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glNormal3dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1473
glNormal3f = platform.createBaseFunction(
'glNormal3f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat],
doc='glNormal3f( GLfloat(nx), GLfloat(ny), GLfloat(nz) ) -> None',
argNames=('nx', 'ny', 'nz'),
)
# /usr/include/GL/gl.h 1474
glNormal3fv = platform.createBaseFunction(
'glNormal3fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glNormal3fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1475
glNormal3i = platform.createBaseFunction(
'glNormal3i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLint],
doc='glNormal3i( GLint(nx), GLint(ny), GLint(nz) ) -> None',
argNames=('nx', 'ny', 'nz'),
)
# /usr/include/GL/gl.h 1476
glNormal3iv = platform.createBaseFunction(
'glNormal3iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glNormal3iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1477
glNormal3s = platform.createBaseFunction(
'glNormal3s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort,GLshort],
doc='glNormal3s( GLshort(nx), GLshort(ny), GLshort(nz) ) -> None',
argNames=('nx', 'ny', 'nz'),
)
# /usr/include/GL/gl.h 1478
glNormal3sv = platform.createBaseFunction(
'glNormal3sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glNormal3sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1479
glNormalPointer = platform.createBaseFunction(
'glNormalPointer', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLsizei,POINTER(GLvoid)],
doc='glNormalPointer( GLenum(type), GLsizei(stride), POINTER(GLvoid)(pointer) ) -> None',
argNames=('type', 'stride', 'pointer'),
)
# --- Auto-generated ctypes bindings: projection, pixel transfer, polygon, ---
# --- attribute/matrix/name stacks, and raster position (glRasterPos2*) ---
# NOTE(review): machine-generated section — regenerate rather than hand-edit.
# /usr/include/GL/gl.h 1480
glOrtho = platform.createBaseFunction(
'glOrtho', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble,GLdouble,GLdouble,GLdouble],
doc='glOrtho( GLdouble(left), GLdouble(right), GLdouble(bottom), GLdouble(top), GLdouble(zNear), GLdouble(zFar) ) -> None',
argNames=('left', 'right', 'bottom', 'top', 'zNear', 'zFar'),
)
# /usr/include/GL/gl.h 1481
glPassThrough = platform.createBaseFunction(
'glPassThrough', dll=platform.GL, resultType=None,
argTypes=[GLfloat],
doc='glPassThrough( GLfloat(token) ) -> None',
argNames=('token',),
)
# /usr/include/GL/gl.h 1482
glPixelMapfv = platform.createBaseFunction(
'glPixelMapfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLsizei,arrays.GLfloatArray],
doc='glPixelMapfv( GLenum(map), GLsizei(mapsize), arrays.GLfloatArray(values) ) -> None',
argNames=('map', 'mapsize', 'values'),
)
# /usr/include/GL/gl.h 1483
glPixelMapuiv = platform.createBaseFunction(
'glPixelMapuiv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLsizei,arrays.GLuintArray],
doc='glPixelMapuiv( GLenum(map), GLsizei(mapsize), arrays.GLuintArray(values) ) -> None',
argNames=('map', 'mapsize', 'values'),
)
# /usr/include/GL/gl.h 1484
glPixelMapusv = platform.createBaseFunction(
'glPixelMapusv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLsizei,arrays.GLushortArray],
doc='glPixelMapusv( GLenum(map), GLsizei(mapsize), arrays.GLushortArray(values) ) -> None',
argNames=('map', 'mapsize', 'values'),
)
# /usr/include/GL/gl.h 1485
glPixelStoref = platform.createBaseFunction(
'glPixelStoref', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLfloat],
doc='glPixelStoref( GLenum(pname), GLfloat(param) ) -> None',
argNames=('pname', 'param'),
)
# /usr/include/GL/gl.h 1486
glPixelStorei = platform.createBaseFunction(
'glPixelStorei', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint],
doc='glPixelStorei( GLenum(pname), GLint(param) ) -> None',
argNames=('pname', 'param'),
)
# /usr/include/GL/gl.h 1487
glPixelTransferf = platform.createBaseFunction(
'glPixelTransferf', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLfloat],
doc='glPixelTransferf( GLenum(pname), GLfloat(param) ) -> None',
argNames=('pname', 'param'),
)
# /usr/include/GL/gl.h 1488
glPixelTransferi = platform.createBaseFunction(
'glPixelTransferi', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint],
doc='glPixelTransferi( GLenum(pname), GLint(param) ) -> None',
argNames=('pname', 'param'),
)
# /usr/include/GL/gl.h 1489
glPixelZoom = platform.createBaseFunction(
'glPixelZoom', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat],
doc='glPixelZoom( GLfloat(xfactor), GLfloat(yfactor) ) -> None',
argNames=('xfactor', 'yfactor'),
)
# /usr/include/GL/gl.h 1490
glPointSize = platform.createBaseFunction(
'glPointSize', dll=platform.GL, resultType=None,
argTypes=[GLfloat],
doc='glPointSize( GLfloat(size) ) -> None',
argNames=('size',),
)
# /usr/include/GL/gl.h 1491
glPolygonMode = platform.createBaseFunction(
'glPolygonMode', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum],
doc='glPolygonMode( GLenum(face), GLenum(mode) ) -> None',
argNames=('face', 'mode'),
)
# /usr/include/GL/gl.h 1492
glPolygonOffset = platform.createBaseFunction(
'glPolygonOffset', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat],
doc='glPolygonOffset( GLfloat(factor), GLfloat(units) ) -> None',
argNames=('factor', 'units'),
)
# /usr/include/GL/gl.h 1493
glPolygonStipple = platform.createBaseFunction(
'glPolygonStipple', dll=platform.GL, resultType=None,
argTypes=[arrays.GLubyteArray],
doc='glPolygonStipple( arrays.GLubyteArray(mask) ) -> None',
argNames=('mask',),
)
# /usr/include/GL/gl.h 1494
glPopAttrib = platform.createBaseFunction(
'glPopAttrib', dll=platform.GL, resultType=None,
argTypes=[],
doc='glPopAttrib( ) -> None',
argNames=(),
)
# /usr/include/GL/gl.h 1495
glPopClientAttrib = platform.createBaseFunction(
'glPopClientAttrib', dll=platform.GL, resultType=None,
argTypes=[],
doc='glPopClientAttrib( ) -> None',
argNames=(),
)
# /usr/include/GL/gl.h 1496
glPopMatrix = platform.createBaseFunction(
'glPopMatrix', dll=platform.GL, resultType=None,
argTypes=[],
doc='glPopMatrix( ) -> None',
argNames=(),
)
# /usr/include/GL/gl.h 1497
glPopName = platform.createBaseFunction(
'glPopName', dll=platform.GL, resultType=None,
argTypes=[],
doc='glPopName( ) -> None',
argNames=(),
)
# /usr/include/GL/gl.h 1498
glPrioritizeTextures = platform.createBaseFunction(
'glPrioritizeTextures', dll=platform.GL, resultType=None,
argTypes=[GLsizei,arrays.GLuintArray,arrays.GLclampfArray],
doc='glPrioritizeTextures( GLsizei(n), arrays.GLuintArray(textures), arrays.GLclampfArray(priorities) ) -> None',
argNames=('n', 'textures', 'priorities'),
)
# /usr/include/GL/gl.h 1499
glPushAttrib = platform.createBaseFunction(
'glPushAttrib', dll=platform.GL, resultType=None,
argTypes=[GLbitfield],
doc='glPushAttrib( GLbitfield(mask) ) -> None',
argNames=('mask',),
)
# /usr/include/GL/gl.h 1500
glPushClientAttrib = platform.createBaseFunction(
'glPushClientAttrib', dll=platform.GL, resultType=None,
argTypes=[GLbitfield],
doc='glPushClientAttrib( GLbitfield(mask) ) -> None',
argNames=('mask',),
)
# /usr/include/GL/gl.h 1501
glPushMatrix = platform.createBaseFunction(
'glPushMatrix', dll=platform.GL, resultType=None,
argTypes=[],
doc='glPushMatrix( ) -> None',
argNames=(),
)
# /usr/include/GL/gl.h 1502
glPushName = platform.createBaseFunction(
'glPushName', dll=platform.GL, resultType=None,
argTypes=[GLuint],
doc='glPushName( GLuint(name) ) -> None',
argNames=('name',),
)
# /usr/include/GL/gl.h 1503
glRasterPos2d = platform.createBaseFunction(
'glRasterPos2d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble],
doc='glRasterPos2d( GLdouble(x), GLdouble(y) ) -> None',
argNames=('x', 'y'),
)
# /usr/include/GL/gl.h 1504
glRasterPos2dv = platform.createBaseFunction(
'glRasterPos2dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glRasterPos2dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1505
glRasterPos2f = platform.createBaseFunction(
'glRasterPos2f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat],
doc='glRasterPos2f( GLfloat(x), GLfloat(y) ) -> None',
argNames=('x', 'y'),
)
# /usr/include/GL/gl.h 1506
glRasterPos2fv = platform.createBaseFunction(
'glRasterPos2fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glRasterPos2fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1507
glRasterPos2i = platform.createBaseFunction(
'glRasterPos2i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint],
doc='glRasterPos2i( GLint(x), GLint(y) ) -> None',
argNames=('x', 'y'),
)
# /usr/include/GL/gl.h 1508
glRasterPos2iv = platform.createBaseFunction(
'glRasterPos2iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glRasterPos2iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1509
glRasterPos2s = platform.createBaseFunction(
'glRasterPos2s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort],
doc='glRasterPos2s( GLshort(x), GLshort(y) ) -> None',
argNames=('x', 'y'),
)
# /usr/include/GL/gl.h 1510
glRasterPos2sv = platform.createBaseFunction(
'glRasterPos2sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glRasterPos2sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1511
glRasterPos3d = platform.createBaseFunction(
'glRasterPos3d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble],
doc='glRasterPos3d( GLdouble(x), GLdouble(y), GLdouble(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1512
glRasterPos3dv = platform.createBaseFunction(
'glRasterPos3dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glRasterPos3dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1513
glRasterPos3f = platform.createBaseFunction(
'glRasterPos3f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat],
doc='glRasterPos3f( GLfloat(x), GLfloat(y), GLfloat(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1514
glRasterPos3fv = platform.createBaseFunction(
'glRasterPos3fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glRasterPos3fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1515
glRasterPos3i = platform.createBaseFunction(
'glRasterPos3i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLint],
doc='glRasterPos3i( GLint(x), GLint(y), GLint(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1516
glRasterPos3iv = platform.createBaseFunction(
'glRasterPos3iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glRasterPos3iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1517
glRasterPos3s = platform.createBaseFunction(
'glRasterPos3s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort,GLshort],
doc='glRasterPos3s( GLshort(x), GLshort(y), GLshort(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1518
glRasterPos3sv = platform.createBaseFunction(
'glRasterPos3sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glRasterPos3sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1519
glRasterPos4d = platform.createBaseFunction(
'glRasterPos4d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble,GLdouble],
doc='glRasterPos4d( GLdouble(x), GLdouble(y), GLdouble(z), GLdouble(w) ) -> None',
argNames=('x', 'y', 'z', 'w'),
)
# /usr/include/GL/gl.h 1520
glRasterPos4dv = platform.createBaseFunction(
'glRasterPos4dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glRasterPos4dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1521
glRasterPos4f = platform.createBaseFunction(
'glRasterPos4f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat,GLfloat],
doc='glRasterPos4f( GLfloat(x), GLfloat(y), GLfloat(z), GLfloat(w) ) -> None',
argNames=('x', 'y', 'z', 'w'),
)
# /usr/include/GL/gl.h 1522
glRasterPos4fv = platform.createBaseFunction(
'glRasterPos4fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glRasterPos4fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1523
glRasterPos4i = platform.createBaseFunction(
'glRasterPos4i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLint,GLint],
doc='glRasterPos4i( GLint(x), GLint(y), GLint(z), GLint(w) ) -> None',
argNames=('x', 'y', 'z', 'w'),
)
# /usr/include/GL/gl.h 1524
glRasterPos4iv = platform.createBaseFunction(
'glRasterPos4iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glRasterPos4iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1525
glRasterPos4s = platform.createBaseFunction(
'glRasterPos4s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort,GLshort,GLshort],
doc='glRasterPos4s( GLshort(x), GLshort(y), GLshort(z), GLshort(w) ) -> None',
argNames=('x', 'y', 'z', 'w'),
)
# /usr/include/GL/gl.h 1526
glRasterPos4sv = platform.createBaseFunction(
'glRasterPos4sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glRasterPos4sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1527
glReadBuffer = platform.createBaseFunction(
'glReadBuffer', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glReadBuffer( GLenum(mode) ) -> None',
argNames=('mode',),
)
# /usr/include/GL/gl.h 1528
glReadPixels = platform.createBaseFunction(
'glReadPixels', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLsizei,GLsizei,GLenum,GLenum,POINTER(GLvoid)],
doc='glReadPixels( GLint(x), GLint(y), GLsizei(width), GLsizei(height), GLenum(format), GLenum(type), POINTER(GLvoid)(pixels) ) -> None',
argNames=('x', 'y', 'width', 'height', 'format', 'type', 'pixels'),
)
# /usr/include/GL/gl.h 1529
glRectd = platform.createBaseFunction(
'glRectd', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble,GLdouble],
doc='glRectd( GLdouble(x1), GLdouble(y1), GLdouble(x2), GLdouble(y2) ) -> None',
argNames=('x1', 'y1', 'x2', 'y2'),
)
# /usr/include/GL/gl.h 1530
glRectdv = platform.createBaseFunction(
'glRectdv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray,arrays.GLdoubleArray],
doc='glRectdv( arrays.GLdoubleArray(v1), arrays.GLdoubleArray(v2) ) -> None',
argNames=('v1', 'v2'),
)
# /usr/include/GL/gl.h 1531
glRectf = platform.createBaseFunction(
'glRectf', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat,GLfloat],
doc='glRectf( GLfloat(x1), GLfloat(y1), GLfloat(x2), GLfloat(y2) ) -> None',
argNames=('x1', 'y1', 'x2', 'y2'),
)
# /usr/include/GL/gl.h 1532
glRectfv = platform.createBaseFunction(
'glRectfv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray,arrays.GLfloatArray],
doc='glRectfv( arrays.GLfloatArray(v1), arrays.GLfloatArray(v2) ) -> None',
argNames=('v1', 'v2'),
)
# /usr/include/GL/gl.h 1533
glRecti = platform.createBaseFunction(
'glRecti', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLint,GLint],
doc='glRecti( GLint(x1), GLint(y1), GLint(x2), GLint(y2) ) -> None',
argNames=('x1', 'y1', 'x2', 'y2'),
)
# /usr/include/GL/gl.h 1534
glRectiv = platform.createBaseFunction(
'glRectiv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray,arrays.GLintArray],
doc='glRectiv( arrays.GLintArray(v1), arrays.GLintArray(v2) ) -> None',
argNames=('v1', 'v2'),
)
# /usr/include/GL/gl.h 1535
glRects = platform.createBaseFunction(
'glRects', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort,GLshort,GLshort],
doc='glRects( GLshort(x1), GLshort(y1), GLshort(x2), GLshort(y2) ) -> None',
argNames=('x1', 'y1', 'x2', 'y2'),
)
# /usr/include/GL/gl.h 1536
glRectsv = platform.createBaseFunction(
'glRectsv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray,arrays.GLshortArray],
doc='glRectsv( arrays.GLshortArray(v1), arrays.GLshortArray(v2) ) -> None',
argNames=('v1', 'v2'),
)
# /usr/include/GL/gl.h 1537
glRenderMode = platform.createBaseFunction(
'glRenderMode', dll=platform.GL, resultType=GLint,
argTypes=[GLenum],
doc='glRenderMode( GLenum(mode) ) -> GLint',
argNames=('mode',),
)
# /usr/include/GL/gl.h 1538
glRotated = platform.createBaseFunction(
'glRotated', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble,GLdouble],
doc='glRotated( GLdouble(angle), GLdouble(x), GLdouble(y), GLdouble(z) ) -> None',
argNames=('angle', 'x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1539
glRotatef = platform.createBaseFunction(
'glRotatef', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat,GLfloat],
doc='glRotatef( GLfloat(angle), GLfloat(x), GLfloat(y), GLfloat(z) ) -> None',
argNames=('angle', 'x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1540
glScaled = platform.createBaseFunction(
'glScaled', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble],
doc='glScaled( GLdouble(x), GLdouble(y), GLdouble(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1541
glScalef = platform.createBaseFunction(
'glScalef', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat],
doc='glScalef( GLfloat(x), GLfloat(y), GLfloat(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1542
glScissor = platform.createBaseFunction(
'glScissor', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLsizei,GLsizei],
doc='glScissor( GLint(x), GLint(y), GLsizei(width), GLsizei(height) ) -> None',
argNames=('x', 'y', 'width', 'height'),
)
# /usr/include/GL/gl.h 1543
glSelectBuffer = platform.createBaseFunction(
'glSelectBuffer', dll=platform.GL, resultType=None,
argTypes=[GLsizei,arrays.GLuintArray],
doc='glSelectBuffer( GLsizei(size), arrays.GLuintArray(buffer) ) -> None',
argNames=('size', 'buffer'),
)
# /usr/include/GL/gl.h 1544
glShadeModel = platform.createBaseFunction(
'glShadeModel', dll=platform.GL, resultType=None,
argTypes=[GLenum],
doc='glShadeModel( GLenum(mode) ) -> None',
argNames=('mode',),
)
# /usr/include/GL/gl.h 1545
glStencilFunc = platform.createBaseFunction(
'glStencilFunc', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLuint],
doc='glStencilFunc( GLenum(func), GLint(ref), GLuint(mask) ) -> None',
argNames=('func', 'ref', 'mask'),
)
# /usr/include/GL/gl.h 1546
glStencilMask = platform.createBaseFunction(
'glStencilMask', dll=platform.GL, resultType=None,
argTypes=[GLuint],
doc='glStencilMask( GLuint(mask) ) -> None',
argNames=('mask',),
)
# /usr/include/GL/gl.h 1547
glStencilOp = platform.createBaseFunction(
'glStencilOp', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLenum],
doc='glStencilOp( GLenum(fail), GLenum(zfail), GLenum(zpass) ) -> None',
argNames=('fail', 'zfail', 'zpass'),
)
# /usr/include/GL/gl.h 1548
glTexCoord1d = platform.createBaseFunction(
'glTexCoord1d', dll=platform.GL, resultType=None,
argTypes=[GLdouble],
doc='glTexCoord1d( GLdouble(s) ) -> None',
argNames=('s',),
)
# /usr/include/GL/gl.h 1549
glTexCoord1dv = platform.createBaseFunction(
'glTexCoord1dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glTexCoord1dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1550
glTexCoord1f = platform.createBaseFunction(
'glTexCoord1f', dll=platform.GL, resultType=None,
argTypes=[GLfloat],
doc='glTexCoord1f( GLfloat(s) ) -> None',
argNames=('s',),
)
# /usr/include/GL/gl.h 1551
glTexCoord1fv = platform.createBaseFunction(
'glTexCoord1fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glTexCoord1fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1552
glTexCoord1i = platform.createBaseFunction(
'glTexCoord1i', dll=platform.GL, resultType=None,
argTypes=[GLint],
doc='glTexCoord1i( GLint(s) ) -> None',
argNames=('s',),
)
# /usr/include/GL/gl.h 1553
glTexCoord1iv = platform.createBaseFunction(
'glTexCoord1iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glTexCoord1iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1554
glTexCoord1s = platform.createBaseFunction(
'glTexCoord1s', dll=platform.GL, resultType=None,
argTypes=[GLshort],
doc='glTexCoord1s( GLshort(s) ) -> None',
argNames=('s',),
)
# /usr/include/GL/gl.h 1555
glTexCoord1sv = platform.createBaseFunction(
'glTexCoord1sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glTexCoord1sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1556
glTexCoord2d = platform.createBaseFunction(
'glTexCoord2d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble],
doc='glTexCoord2d( GLdouble(s), GLdouble(t) ) -> None',
argNames=('s', 't'),
)
# /usr/include/GL/gl.h 1557
glTexCoord2dv = platform.createBaseFunction(
'glTexCoord2dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glTexCoord2dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1558
glTexCoord2f = platform.createBaseFunction(
'glTexCoord2f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat],
doc='glTexCoord2f( GLfloat(s), GLfloat(t) ) -> None',
argNames=('s', 't'),
)
# /usr/include/GL/gl.h 1559
glTexCoord2fv = platform.createBaseFunction(
'glTexCoord2fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glTexCoord2fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1560
glTexCoord2i = platform.createBaseFunction(
'glTexCoord2i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint],
doc='glTexCoord2i( GLint(s), GLint(t) ) -> None',
argNames=('s', 't'),
)
# /usr/include/GL/gl.h 1561
glTexCoord2iv = platform.createBaseFunction(
'glTexCoord2iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glTexCoord2iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1562
glTexCoord2s = platform.createBaseFunction(
'glTexCoord2s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort],
doc='glTexCoord2s( GLshort(s), GLshort(t) ) -> None',
argNames=('s', 't'),
)
# /usr/include/GL/gl.h 1563
glTexCoord2sv = platform.createBaseFunction(
'glTexCoord2sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glTexCoord2sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1564
glTexCoord3d = platform.createBaseFunction(
'glTexCoord3d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble],
doc='glTexCoord3d( GLdouble(s), GLdouble(t), GLdouble(r) ) -> None',
argNames=('s', 't', 'r'),
)
# /usr/include/GL/gl.h 1565
glTexCoord3dv = platform.createBaseFunction(
'glTexCoord3dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glTexCoord3dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1566
glTexCoord3f = platform.createBaseFunction(
'glTexCoord3f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat],
doc='glTexCoord3f( GLfloat(s), GLfloat(t), GLfloat(r) ) -> None',
argNames=('s', 't', 'r'),
)
# /usr/include/GL/gl.h 1567
glTexCoord3fv = platform.createBaseFunction(
'glTexCoord3fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glTexCoord3fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1568
glTexCoord3i = platform.createBaseFunction(
'glTexCoord3i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLint],
doc='glTexCoord3i( GLint(s), GLint(t), GLint(r) ) -> None',
argNames=('s', 't', 'r'),
)
# /usr/include/GL/gl.h 1569
glTexCoord3iv = platform.createBaseFunction(
'glTexCoord3iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glTexCoord3iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1570
glTexCoord3s = platform.createBaseFunction(
'glTexCoord3s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort,GLshort],
doc='glTexCoord3s( GLshort(s), GLshort(t), GLshort(r) ) -> None',
argNames=('s', 't', 'r'),
)
# /usr/include/GL/gl.h 1571
glTexCoord3sv = platform.createBaseFunction(
'glTexCoord3sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glTexCoord3sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1572
glTexCoord4d = platform.createBaseFunction(
'glTexCoord4d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble,GLdouble],
doc='glTexCoord4d( GLdouble(s), GLdouble(t), GLdouble(r), GLdouble(q) ) -> None',
argNames=('s', 't', 'r', 'q'),
)
# /usr/include/GL/gl.h 1573
glTexCoord4dv = platform.createBaseFunction(
'glTexCoord4dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glTexCoord4dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1574
glTexCoord4f = platform.createBaseFunction(
'glTexCoord4f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat,GLfloat],
doc='glTexCoord4f( GLfloat(s), GLfloat(t), GLfloat(r), GLfloat(q) ) -> None',
argNames=('s', 't', 'r', 'q'),
)
# /usr/include/GL/gl.h 1575
glTexCoord4fv = platform.createBaseFunction(
'glTexCoord4fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glTexCoord4fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1576
glTexCoord4i = platform.createBaseFunction(
'glTexCoord4i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLint,GLint],
doc='glTexCoord4i( GLint(s), GLint(t), GLint(r), GLint(q) ) -> None',
argNames=('s', 't', 'r', 'q'),
)
# /usr/include/GL/gl.h 1577
glTexCoord4iv = platform.createBaseFunction(
'glTexCoord4iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glTexCoord4iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1578
glTexCoord4s = platform.createBaseFunction(
'glTexCoord4s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort,GLshort,GLshort],
doc='glTexCoord4s( GLshort(s), GLshort(t), GLshort(r), GLshort(q) ) -> None',
argNames=('s', 't', 'r', 'q'),
)
# /usr/include/GL/gl.h 1579
glTexCoord4sv = platform.createBaseFunction(
'glTexCoord4sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glTexCoord4sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1580
glTexCoordPointer = platform.createBaseFunction(
'glTexCoordPointer', dll=platform.GL, resultType=None,
argTypes=[GLint,GLenum,GLsizei,POINTER(GLvoid)],
doc='glTexCoordPointer( GLint(size), GLenum(type), GLsizei(stride), POINTER(GLvoid)(pointer) ) -> None',
argNames=('size', 'type', 'stride', 'pointer'),
)
# /usr/include/GL/gl.h 1581
glTexEnvf = platform.createBaseFunction(
'glTexEnvf', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLfloat],
doc='glTexEnvf( GLenum(target), GLenum(pname), GLfloat(param) ) -> None',
argNames=('target', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1582
glTexEnvfv = platform.createBaseFunction(
'glTexEnvfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glTexEnvfv( GLenum(target), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('target', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1583
glTexEnvi = platform.createBaseFunction(
'glTexEnvi', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLint],
doc='glTexEnvi( GLenum(target), GLenum(pname), GLint(param) ) -> None',
argNames=('target', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1584
glTexEnviv = platform.createBaseFunction(
'glTexEnviv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glTexEnviv( GLenum(target), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('target', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1585
glTexGend = platform.createBaseFunction(
'glTexGend', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLdouble],
doc='glTexGend( GLenum(coord), GLenum(pname), GLdouble(param) ) -> None',
argNames=('coord', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1586
glTexGendv = platform.createBaseFunction(
'glTexGendv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLdoubleArray],
doc='glTexGendv( GLenum(coord), GLenum(pname), arrays.GLdoubleArray(params) ) -> None',
argNames=('coord', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1587
glTexGenf = platform.createBaseFunction(
'glTexGenf', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLfloat],
doc='glTexGenf( GLenum(coord), GLenum(pname), GLfloat(param) ) -> None',
argNames=('coord', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1588
glTexGenfv = platform.createBaseFunction(
'glTexGenfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glTexGenfv( GLenum(coord), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('coord', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1589
glTexGeni = platform.createBaseFunction(
'glTexGeni', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLint],
doc='glTexGeni( GLenum(coord), GLenum(pname), GLint(param) ) -> None',
argNames=('coord', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1590
glTexGeniv = platform.createBaseFunction(
'glTexGeniv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glTexGeniv( GLenum(coord), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('coord', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1591
glTexImage1D = platform.createBaseFunction(
'glTexImage1D', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLint,GLsizei,GLint,GLenum,GLenum,POINTER(GLvoid)],
doc='glTexImage1D( GLenum(target), GLint(level), GLint(internalformat), GLsizei(width), GLint(border), GLenum(format), GLenum(type), POINTER(GLvoid)(pixels) ) -> None',
argNames=('target', 'level', 'internalformat', 'width', 'border', 'format', 'type', 'pixels'),
)
# /usr/include/GL/gl.h 1592
glTexImage2D = platform.createBaseFunction(
'glTexImage2D', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLint,GLsizei,GLsizei,GLint,GLenum,GLenum,POINTER(GLvoid)],
doc='glTexImage2D( GLenum(target), GLint(level), GLint(internalformat), GLsizei(width), GLsizei(height), GLint(border), GLenum(format), GLenum(type), POINTER(GLvoid)(pixels) ) -> None',
argNames=('target', 'level', 'internalformat', 'width', 'height', 'border', 'format', 'type', 'pixels'),
)
# /usr/include/GL/gl.h 1593
glTexParameterf = platform.createBaseFunction(
'glTexParameterf', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLfloat],
doc='glTexParameterf( GLenum(target), GLenum(pname), GLfloat(param) ) -> None',
argNames=('target', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1594
glTexParameterfv = platform.createBaseFunction(
'glTexParameterfv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLfloatArray],
doc='glTexParameterfv( GLenum(target), GLenum(pname), arrays.GLfloatArray(params) ) -> None',
argNames=('target', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1595
glTexParameteri = platform.createBaseFunction(
'glTexParameteri', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,GLint],
doc='glTexParameteri( GLenum(target), GLenum(pname), GLint(param) ) -> None',
argNames=('target', 'pname', 'param'),
)
# /usr/include/GL/gl.h 1596
glTexParameteriv = platform.createBaseFunction(
'glTexParameteriv', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLenum,arrays.GLintArray],
doc='glTexParameteriv( GLenum(target), GLenum(pname), arrays.GLintArray(params) ) -> None',
argNames=('target', 'pname', 'params'),
)
# /usr/include/GL/gl.h 1597
glTexSubImage1D = platform.createBaseFunction(
'glTexSubImage1D', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLint,GLsizei,GLenum,GLenum,POINTER(GLvoid)],
doc='glTexSubImage1D( GLenum(target), GLint(level), GLint(xoffset), GLsizei(width), GLenum(format), GLenum(type), POINTER(GLvoid)(pixels) ) -> None',
argNames=('target', 'level', 'xoffset', 'width', 'format', 'type', 'pixels'),
)
# /usr/include/GL/gl.h 1598
glTexSubImage2D = platform.createBaseFunction(
'glTexSubImage2D', dll=platform.GL, resultType=None,
argTypes=[GLenum,GLint,GLint,GLint,GLsizei,GLsizei,GLenum,GLenum,POINTER(GLvoid)],
doc='glTexSubImage2D( GLenum(target), GLint(level), GLint(xoffset), GLint(yoffset), GLsizei(width), GLsizei(height), GLenum(format), GLenum(type), POINTER(GLvoid)(pixels) ) -> None',
argNames=('target', 'level', 'xoffset', 'yoffset', 'width', 'height', 'format', 'type', 'pixels'),
)
# /usr/include/GL/gl.h 1599
glTranslated = platform.createBaseFunction(
'glTranslated', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble],
doc='glTranslated( GLdouble(x), GLdouble(y), GLdouble(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1600
glTranslatef = platform.createBaseFunction(
'glTranslatef', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat],
doc='glTranslatef( GLfloat(x), GLfloat(y), GLfloat(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1601
glVertex2d = platform.createBaseFunction(
'glVertex2d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble],
doc='glVertex2d( GLdouble(x), GLdouble(y) ) -> None',
argNames=('x', 'y'),
)
# /usr/include/GL/gl.h 1602
glVertex2dv = platform.createBaseFunction(
'glVertex2dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glVertex2dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1603
glVertex2f = platform.createBaseFunction(
'glVertex2f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat],
doc='glVertex2f( GLfloat(x), GLfloat(y) ) -> None',
argNames=('x', 'y'),
)
# /usr/include/GL/gl.h 1604
glVertex2fv = platform.createBaseFunction(
'glVertex2fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glVertex2fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1605
glVertex2i = platform.createBaseFunction(
'glVertex2i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint],
doc='glVertex2i( GLint(x), GLint(y) ) -> None',
argNames=('x', 'y'),
)
# /usr/include/GL/gl.h 1606
glVertex2iv = platform.createBaseFunction(
'glVertex2iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glVertex2iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1607
glVertex2s = platform.createBaseFunction(
'glVertex2s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort],
doc='glVertex2s( GLshort(x), GLshort(y) ) -> None',
argNames=('x', 'y'),
)
# /usr/include/GL/gl.h 1608
glVertex2sv = platform.createBaseFunction(
'glVertex2sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glVertex2sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1609
glVertex3d = platform.createBaseFunction(
'glVertex3d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble],
doc='glVertex3d( GLdouble(x), GLdouble(y), GLdouble(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1610
glVertex3dv = platform.createBaseFunction(
'glVertex3dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glVertex3dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1611
glVertex3f = platform.createBaseFunction(
'glVertex3f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat],
doc='glVertex3f( GLfloat(x), GLfloat(y), GLfloat(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1612
glVertex3fv = platform.createBaseFunction(
'glVertex3fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glVertex3fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1613
glVertex3i = platform.createBaseFunction(
'glVertex3i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLint],
doc='glVertex3i( GLint(x), GLint(y), GLint(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1614
glVertex3iv = platform.createBaseFunction(
'glVertex3iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glVertex3iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1615
glVertex3s = platform.createBaseFunction(
'glVertex3s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort,GLshort],
doc='glVertex3s( GLshort(x), GLshort(y), GLshort(z) ) -> None',
argNames=('x', 'y', 'z'),
)
# /usr/include/GL/gl.h 1616
glVertex3sv = platform.createBaseFunction(
'glVertex3sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glVertex3sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1617
glVertex4d = platform.createBaseFunction(
'glVertex4d', dll=platform.GL, resultType=None,
argTypes=[GLdouble,GLdouble,GLdouble,GLdouble],
doc='glVertex4d( GLdouble(x), GLdouble(y), GLdouble(z), GLdouble(w) ) -> None',
argNames=('x', 'y', 'z', 'w'),
)
# /usr/include/GL/gl.h 1618
glVertex4dv = platform.createBaseFunction(
'glVertex4dv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLdoubleArray],
doc='glVertex4dv( arrays.GLdoubleArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1619
glVertex4f = platform.createBaseFunction(
'glVertex4f', dll=platform.GL, resultType=None,
argTypes=[GLfloat,GLfloat,GLfloat,GLfloat],
doc='glVertex4f( GLfloat(x), GLfloat(y), GLfloat(z), GLfloat(w) ) -> None',
argNames=('x', 'y', 'z', 'w'),
)
# /usr/include/GL/gl.h 1620
glVertex4fv = platform.createBaseFunction(
'glVertex4fv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLfloatArray],
doc='glVertex4fv( arrays.GLfloatArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1621
glVertex4i = platform.createBaseFunction(
'glVertex4i', dll=platform.GL, resultType=None,
argTypes=[GLint,GLint,GLint,GLint],
doc='glVertex4i( GLint(x), GLint(y), GLint(z), GLint(w) ) -> None',
argNames=('x', 'y', 'z', 'w'),
)
# /usr/include/GL/gl.h 1622
glVertex4iv = platform.createBaseFunction(
'glVertex4iv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLintArray],
doc='glVertex4iv( arrays.GLintArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1623
glVertex4s = platform.createBaseFunction(
'glVertex4s', dll=platform.GL, resultType=None,
argTypes=[GLshort,GLshort,GLshort,GLshort],
doc='glVertex4s( GLshort(x), GLshort(y), GLshort(z), GLshort(w) ) -> None',
argNames=('x', 'y', 'z', 'w'),
)
# /usr/include/GL/gl.h 1624
glVertex4sv = platform.createBaseFunction(
'glVertex4sv', dll=platform.GL, resultType=None,
argTypes=[arrays.GLshortArray],
doc='glVertex4sv( arrays.GLshortArray(v) ) -> None',
argNames=('v',),
)
# /usr/include/GL/gl.h 1625
glVertexPointer = platform.createBaseFunction(
'glVertexPointer', dll=platform.GL, resultType=None,
argTypes=[GLint,GLenum,GLsizei,POINTER(GLvoid)],
doc='glVertexPointer( GLint(size), GLenum(type), GLsizei(stride), POINTER(GLvoid)(pointer) ) -> None',
argNames=('size', 'type', 'stride', 'pointer'),
)
# /usr/include/GL/gl.h 1626
# Generated ctypes binding for the OpenGL entry point glViewport
# (set the viewport rectangle in window coordinates).
glViewport = platform.createBaseFunction(
    'glViewport',
    dll=platform.GL,
    resultType=None,
    argTypes=[GLint, GLint, GLsizei, GLsizei],
    argNames=('x', 'y', 'width', 'height'),
    doc='glViewport( GLint(x), GLint(y), GLsizei(width), GLsizei(height) ) -> None',
)
__all__ = [
'GL_1PASS_EXT',
'GL_1PASS_SGIS',
'GL_2D',
'GL_2PASS_0_EXT',
'GL_2PASS_0_SGIS',
'GL_2PASS_1_EXT',
'GL_2PASS_1_SGIS',
'GL_2X_BIT_ATI',
'GL_2_BYTES',
'GL_3D',
'GL_3D_COLOR',
'GL_3D_COLOR_TEXTURE',
'GL_3_BYTES',
'GL_422_AVERAGE_EXT',
'GL_422_EXT',
'GL_422_REV_AVERAGE_EXT',
'GL_422_REV_EXT',
'GL_4D_COLOR_TEXTURE',
'GL_4PASS_0_EXT',
'GL_4PASS_0_SGIS',
'GL_4PASS_1_EXT',
'GL_4PASS_1_SGIS',
'GL_4PASS_2_EXT',
'GL_4PASS_2_SGIS',
'GL_4PASS_3_EXT',
'GL_4PASS_3_SGIS',
'GL_4X_BIT_ATI',
'GL_4_BYTES',
'GL_8X_BIT_ATI',
'GL_ABGR_EXT',
'GL_ACCUM',
'GL_ACCUM_ALPHA_BITS',
'GL_ACCUM_BLUE_BITS',
'GL_ACCUM_BUFFER_BIT',
'GL_ACCUM_CLEAR_VALUE',
'GL_ACCUM_GREEN_BITS',
'GL_ACCUM_RED_BITS',
'GL_ACTIVE_ATTRIBUTES',
'GL_ACTIVE_ATTRIBUTE_MAX_LENGTH',
'GL_ACTIVE_STENCIL_FACE_EXT',
'GL_ACTIVE_TEXTURE',
'GL_ACTIVE_TEXTURE_ARB',
'GL_ACTIVE_UNIFORMS',
'GL_ACTIVE_UNIFORM_MAX_LENGTH',
'GL_ACTIVE_VERTEX_UNITS_ARB',
'GL_ADD',
'GL_ADD_ATI',
'GL_ADD_SIGNED',
'GL_ADD_SIGNED_ARB',
'GL_ADD_SIGNED_EXT',
'GL_ALIASED_LINE_WIDTH_RANGE',
'GL_ALIASED_POINT_SIZE_RANGE',
'GL_ALLOW_DRAW_FRG_HINT_PGI',
'GL_ALLOW_DRAW_MEM_HINT_PGI',
'GL_ALLOW_DRAW_OBJ_HINT_PGI',
'GL_ALLOW_DRAW_WIN_HINT_PGI',
'GL_ALL_ATTRIB_BITS',
'GL_ALL_COMPLETED_NV',
'GL_ALPHA',
'GL_ALPHA12',
'GL_ALPHA12_EXT',
'GL_ALPHA16',
'GL_ALPHA16F_ARB',
'GL_ALPHA16_EXT',
'GL_ALPHA32F_ARB',
'GL_ALPHA4',
'GL_ALPHA4_EXT',
'GL_ALPHA8',
'GL_ALPHA8_EXT',
'GL_ALPHA_BIAS',
'GL_ALPHA_BITS',
'GL_ALPHA_FLOAT16_ATI',
'GL_ALPHA_FLOAT32_ATI',
'GL_ALPHA_MAX_CLAMP_INGR',
'GL_ALPHA_MAX_SGIX',
'GL_ALPHA_MIN_CLAMP_INGR',
'GL_ALPHA_MIN_SGIX',
'GL_ALPHA_SCALE',
'GL_ALPHA_TEST',
'GL_ALPHA_TEST_FUNC',
'GL_ALPHA_TEST_REF',
'GL_ALWAYS',
'GL_ALWAYS_FAST_HINT_PGI',
'GL_ALWAYS_SOFT_HINT_PGI',
'GL_AMBIENT',
'GL_AMBIENT_AND_DIFFUSE',
'GL_AND',
'GL_AND_INVERTED',
'GL_AND_REVERSE',
'GL_ARRAY_BUFFER',
'GL_ARRAY_BUFFER_ARB',
'GL_ARRAY_BUFFER_BINDING',
'GL_ARRAY_BUFFER_BINDING_ARB',
'GL_ARRAY_ELEMENT_LOCK_COUNT_EXT',
'GL_ARRAY_ELEMENT_LOCK_FIRST_EXT',
'GL_ARRAY_OBJECT_BUFFER_ATI',
'GL_ARRAY_OBJECT_OFFSET_ATI',
'GL_ASYNC_DRAW_PIXELS_SGIX',
'GL_ASYNC_HISTOGRAM_SGIX',
'GL_ASYNC_MARKER_SGIX',
'GL_ASYNC_READ_PIXELS_SGIX',
'GL_ASYNC_TEX_IMAGE_SGIX',
'GL_ATTACHED_SHADERS',
'GL_ATTENUATION_EXT',
'GL_ATTRIB_ARRAY_POINTER_NV',
'GL_ATTRIB_ARRAY_SIZE_NV',
'GL_ATTRIB_ARRAY_STRIDE_NV',
'GL_ATTRIB_ARRAY_TYPE_NV',
'GL_ATTRIB_STACK_DEPTH',
'GL_AUTO_NORMAL',
'GL_AUX0',
'GL_AUX1',
'GL_AUX2',
'GL_AUX3',
'GL_AUX_BUFFERS',
'GL_AVERAGE_EXT',
'GL_AVERAGE_HP',
'GL_BACK',
'GL_BACK_LEFT',
'GL_BACK_NORMALS_HINT_PGI',
'GL_BACK_RIGHT',
'GL_BGR',
'GL_BGRA',
'GL_BGRA_EXT',
'GL_BGR_EXT',
'GL_BIAS_BIT_ATI',
'GL_BIAS_BY_NEGATIVE_ONE_HALF_NV',
'GL_BINORMAL_ARRAY_EXT',
'GL_BINORMAL_ARRAY_POINTER_EXT',
'GL_BINORMAL_ARRAY_STRIDE_EXT',
'GL_BINORMAL_ARRAY_TYPE_EXT',
'GL_BITMAP',
'GL_BITMAP_TOKEN',
'GL_BLEND',
'GL_BLEND_COLOR',
'GL_BLEND_COLOR_EXT',
'GL_BLEND_DST',
'GL_BLEND_DST_ALPHA',
'GL_BLEND_DST_ALPHA_EXT',
'GL_BLEND_DST_RGB',
'GL_BLEND_DST_RGB_EXT',
'GL_BLEND_EQUATION',
'GL_BLEND_EQUATION_ALPHA',
'GL_BLEND_EQUATION_ALPHA_EXT',
'GL_BLEND_EQUATION_EXT',
'GL_BLEND_EQUATION_RGB',
'GL_BLEND_EQUATION_RGB_EXT',
'GL_BLEND_SRC',
'GL_BLEND_SRC_ALPHA',
'GL_BLEND_SRC_ALPHA_EXT',
'GL_BLEND_SRC_RGB',
'GL_BLEND_SRC_RGB_EXT',
'GL_BLUE',
'GL_BLUE_BIAS',
'GL_BLUE_BITS',
'GL_BLUE_BIT_ATI',
'GL_BLUE_MAX_CLAMP_INGR',
'GL_BLUE_MIN_CLAMP_INGR',
'GL_BLUE_SCALE',
'GL_BOOL',
'GL_BOOL_ARB',
'GL_BOOL_VEC2',
'GL_BOOL_VEC2_ARB',
'GL_BOOL_VEC3',
'GL_BOOL_VEC3_ARB',
'GL_BOOL_VEC4',
'GL_BOOL_VEC4_ARB',
'GL_BUFFER_ACCESS',
'GL_BUFFER_ACCESS_ARB',
'GL_BUFFER_MAPPED',
'GL_BUFFER_MAPPED_ARB',
'GL_BUFFER_MAP_POINTER',
'GL_BUFFER_MAP_POINTER_ARB',
'GL_BUFFER_SIZE',
'GL_BUFFER_SIZE_ARB',
'GL_BUFFER_USAGE',
'GL_BUFFER_USAGE_ARB',
'GL_BUMP_ENVMAP_ATI',
'GL_BUMP_NUM_TEX_UNITS_ATI',
'GL_BUMP_ROT_MATRIX_ATI',
'GL_BUMP_ROT_MATRIX_SIZE_ATI',
'GL_BUMP_TARGET_ATI',
'GL_BUMP_TEX_UNITS_ATI',
'GL_BYTE',
'GL_C3F_V3F',
'GL_C4F_N3F_V3F',
'GL_C4UB_V2F',
'GL_C4UB_V3F',
'GL_CALLIGRAPHIC_FRAGMENT_SGIX',
'GL_CCW',
'GL_CLAMP',
'GL_CLAMP_FRAGMENT_COLOR_ARB',
'GL_CLAMP_READ_COLOR_ARB',
'GL_CLAMP_TO_BORDER',
'GL_CLAMP_TO_BORDER_ARB',
'GL_CLAMP_TO_BORDER_SGIS',
'GL_CLAMP_TO_EDGE',
'GL_CLAMP_TO_EDGE_SGIS',
'GL_CLAMP_VERTEX_COLOR_ARB',
'GL_CLEAR',
'GL_CLIENT_ACTIVE_TEXTURE',
'GL_CLIENT_ACTIVE_TEXTURE_ARB',
'GL_CLIENT_ALL_ATTRIB_BITS',
'GL_CLIENT_ATTRIB_STACK_DEPTH',
'GL_CLIENT_PIXEL_STORE_BIT',
'GL_CLIENT_VERTEX_ARRAY_BIT',
'GL_CLIP_FAR_HINT_PGI',
'GL_CLIP_NEAR_HINT_PGI',
'GL_CLIP_PLANE0',
'GL_CLIP_PLANE1',
'GL_CLIP_PLANE2',
'GL_CLIP_PLANE3',
'GL_CLIP_PLANE4',
'GL_CLIP_PLANE5',
'GL_CLIP_VOLUME_CLIPPING_HINT_EXT',
'GL_CMYKA_EXT',
'GL_CMYK_EXT',
'GL_CND0_ATI',
'GL_CND_ATI',
'GL_COEFF',
'GL_COLOR',
'GL_COLOR3_BIT_PGI',
'GL_COLOR4_BIT_PGI',
'GL_COLOR_ALPHA_PAIRING_ATI',
'GL_COLOR_ARRAY',
'GL_COLOR_ARRAY_BUFFER_BINDING',
'GL_COLOR_ARRAY_BUFFER_BINDING_ARB',
'GL_COLOR_ARRAY_COUNT_EXT',
'GL_COLOR_ARRAY_EXT',
'GL_COLOR_ARRAY_LIST_IBM',
'GL_COLOR_ARRAY_LIST_STRIDE_IBM',
'GL_COLOR_ARRAY_PARALLEL_POINTERS_INTEL',
'GL_COLOR_ARRAY_POINTER',
'GL_COLOR_ARRAY_POINTER_EXT',
'GL_COLOR_ARRAY_SIZE',
'GL_COLOR_ARRAY_SIZE_EXT',
'GL_COLOR_ARRAY_STRIDE',
'GL_COLOR_ARRAY_STRIDE_EXT',
'GL_COLOR_ARRAY_TYPE',
'GL_COLOR_ARRAY_TYPE_EXT',
'GL_COLOR_ATTACHMENT0_EXT',
'GL_COLOR_ATTACHMENT10_EXT',
'GL_COLOR_ATTACHMENT11_EXT',
'GL_COLOR_ATTACHMENT12_EXT',
'GL_COLOR_ATTACHMENT13_EXT',
'GL_COLOR_ATTACHMENT14_EXT',
'GL_COLOR_ATTACHMENT15_EXT',
'GL_COLOR_ATTACHMENT1_EXT',
'GL_COLOR_ATTACHMENT2_EXT',
'GL_COLOR_ATTACHMENT3_EXT',
'GL_COLOR_ATTACHMENT4_EXT',
'GL_COLOR_ATTACHMENT5_EXT',
'GL_COLOR_ATTACHMENT6_EXT',
'GL_COLOR_ATTACHMENT7_EXT',
'GL_COLOR_ATTACHMENT8_EXT',
'GL_COLOR_ATTACHMENT9_EXT',
'GL_COLOR_BUFFER_BIT',
'GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI',
'GL_COLOR_CLEAR_VALUE',
'GL_COLOR_INDEX',
'GL_COLOR_INDEX12_EXT',
'GL_COLOR_INDEX16_EXT',
'GL_COLOR_INDEX1_EXT',
'GL_COLOR_INDEX2_EXT',
'GL_COLOR_INDEX4_EXT',
'GL_COLOR_INDEX8_EXT',
'GL_COLOR_INDEXES',
'GL_COLOR_LOGIC_OP',
'GL_COLOR_MATERIAL',
'GL_COLOR_MATERIAL_FACE',
'GL_COLOR_MATERIAL_PARAMETER',
'GL_COLOR_MATRIX',
'GL_COLOR_MATRIX_SGI',
'GL_COLOR_MATRIX_STACK_DEPTH',
'GL_COLOR_MATRIX_STACK_DEPTH_SGI',
'GL_COLOR_SUM',
'GL_COLOR_SUM_ARB',
'GL_COLOR_SUM_CLAMP_NV',
'GL_COLOR_SUM_EXT',
'GL_COLOR_TABLE',
'GL_COLOR_TABLE_ALPHA_SIZE',
'GL_COLOR_TABLE_ALPHA_SIZE_SGI',
'GL_COLOR_TABLE_BIAS',
'GL_COLOR_TABLE_BIAS_SGI',
'GL_COLOR_TABLE_BLUE_SIZE',
'GL_COLOR_TABLE_BLUE_SIZE_SGI',
'GL_COLOR_TABLE_FORMAT',
'GL_COLOR_TABLE_FORMAT_SGI',
'GL_COLOR_TABLE_GREEN_SIZE',
'GL_COLOR_TABLE_GREEN_SIZE_SGI',
'GL_COLOR_TABLE_INTENSITY_SIZE',
'GL_COLOR_TABLE_INTENSITY_SIZE_SGI',
'GL_COLOR_TABLE_LUMINANCE_SIZE',
'GL_COLOR_TABLE_LUMINANCE_SIZE_SGI',
'GL_COLOR_TABLE_RED_SIZE',
'GL_COLOR_TABLE_RED_SIZE_SGI',
'GL_COLOR_TABLE_SCALE',
'GL_COLOR_TABLE_SCALE_SGI',
'GL_COLOR_TABLE_SGI',
'GL_COLOR_TABLE_WIDTH',
'GL_COLOR_TABLE_WIDTH_SGI',
'GL_COLOR_WRITEMASK',
'GL_COMBINE',
'GL_COMBINE4_NV',
'GL_COMBINER0_NV',
'GL_COMBINER1_NV',
'GL_COMBINER2_NV',
'GL_COMBINER3_NV',
'GL_COMBINER4_NV',
'GL_COMBINER5_NV',
'GL_COMBINER6_NV',
'GL_COMBINER7_NV',
'GL_COMBINER_AB_DOT_PRODUCT_NV',
'GL_COMBINER_AB_OUTPUT_NV',
'GL_COMBINER_BIAS_NV',
'GL_COMBINER_CD_DOT_PRODUCT_NV',
'GL_COMBINER_CD_OUTPUT_NV',
'GL_COMBINER_COMPONENT_USAGE_NV',
'GL_COMBINER_INPUT_NV',
'GL_COMBINER_MAPPING_NV',
'GL_COMBINER_MUX_SUM_NV',
'GL_COMBINER_SCALE_NV',
'GL_COMBINER_SUM_OUTPUT_NV',
'GL_COMBINE_ALPHA',
'GL_COMBINE_ALPHA_ARB',
'GL_COMBINE_ALPHA_EXT',
'GL_COMBINE_ARB',
'GL_COMBINE_EXT',
'GL_COMBINE_RGB',
'GL_COMBINE_RGB_ARB',
'GL_COMBINE_RGB_EXT',
'GL_COMPARE_R_TO_TEXTURE',
'GL_COMPARE_R_TO_TEXTURE_ARB',
'GL_COMPILE',
'GL_COMPILE_AND_EXECUTE',
'GL_COMPILE_STATUS',
'GL_COMPRESSED_ALPHA',
'GL_COMPRESSED_ALPHA_ARB',
'GL_COMPRESSED_INTENSITY',
'GL_COMPRESSED_INTENSITY_ARB',
'GL_COMPRESSED_LUMINANCE',
'GL_COMPRESSED_LUMINANCE_ALPHA',
'GL_COMPRESSED_LUMINANCE_ALPHA_ARB',
'GL_COMPRESSED_LUMINANCE_ARB',
'GL_COMPRESSED_RGB',
'GL_COMPRESSED_RGBA',
'GL_COMPRESSED_RGBA_ARB',
'GL_COMPRESSED_RGBA_FXT1_3DFX',
'GL_COMPRESSED_RGBA_S3TC_DXT1_EXT',
'GL_COMPRESSED_RGBA_S3TC_DXT3_EXT',
'GL_COMPRESSED_RGBA_S3TC_DXT5_EXT',
'GL_COMPRESSED_RGB_ARB',
'GL_COMPRESSED_RGB_FXT1_3DFX',
'GL_COMPRESSED_RGB_S3TC_DXT1_EXT',
'GL_COMPRESSED_TEXTURE_FORMATS',
'GL_COMPRESSED_TEXTURE_FORMATS_ARB',
'GL_COMP_BIT_ATI',
'GL_CONSERVE_MEMORY_HINT_PGI',
'GL_CONSTANT',
'GL_CONSTANT_ALPHA',
'GL_CONSTANT_ALPHA_EXT',
'GL_CONSTANT_ARB',
'GL_CONSTANT_ATTENUATION',
'GL_CONSTANT_BORDER',
'GL_CONSTANT_BORDER_HP',
'GL_CONSTANT_COLOR',
'GL_CONSTANT_COLOR0_NV',
'GL_CONSTANT_COLOR1_NV',
'GL_CONSTANT_COLOR_EXT',
'GL_CONSTANT_EXT',
'GL_CONST_EYE_NV',
'GL_CONVOLUTION_1D',
'GL_CONVOLUTION_1D_EXT',
'GL_CONVOLUTION_2D',
'GL_CONVOLUTION_2D_EXT',
'GL_CONVOLUTION_BORDER_COLOR',
'GL_CONVOLUTION_BORDER_COLOR_HP',
'GL_CONVOLUTION_BORDER_MODE',
'GL_CONVOLUTION_BORDER_MODE_EXT',
'GL_CONVOLUTION_FILTER_BIAS',
'GL_CONVOLUTION_FILTER_BIAS_EXT',
'GL_CONVOLUTION_FILTER_SCALE',
'GL_CONVOLUTION_FILTER_SCALE_EXT',
'GL_CONVOLUTION_FORMAT',
'GL_CONVOLUTION_FORMAT_EXT',
'GL_CONVOLUTION_HEIGHT',
'GL_CONVOLUTION_HEIGHT_EXT',
'GL_CONVOLUTION_HINT_SGIX',
'GL_CONVOLUTION_WIDTH',
'GL_CONVOLUTION_WIDTH_EXT',
'GL_CON_0_ATI',
'GL_CON_10_ATI',
'GL_CON_11_ATI',
'GL_CON_12_ATI',
'GL_CON_13_ATI',
'GL_CON_14_ATI',
'GL_CON_15_ATI',
'GL_CON_16_ATI',
'GL_CON_17_ATI',
'GL_CON_18_ATI',
'GL_CON_19_ATI',
'GL_CON_1_ATI',
'GL_CON_20_ATI',
'GL_CON_21_ATI',
'GL_CON_22_ATI',
'GL_CON_23_ATI',
'GL_CON_24_ATI',
'GL_CON_25_ATI',
'GL_CON_26_ATI',
'GL_CON_27_ATI',
'GL_CON_28_ATI',
'GL_CON_29_ATI',
'GL_CON_2_ATI',
'GL_CON_30_ATI',
'GL_CON_31_ATI',
'GL_CON_3_ATI',
'GL_CON_4_ATI',
'GL_CON_5_ATI',
'GL_CON_6_ATI',
'GL_CON_7_ATI',
'GL_CON_8_ATI',
'GL_CON_9_ATI',
'GL_COORD_REPLACE',
'GL_COORD_REPLACE_ARB',
'GL_COORD_REPLACE_NV',
'GL_COPY',
'GL_COPY_INVERTED',
'GL_COPY_PIXEL_TOKEN',
'GL_CUBIC_EXT',
'GL_CUBIC_HP',
'GL_CULL_FACE',
'GL_CULL_FACE_MODE',
'GL_CULL_FRAGMENT_NV',
'GL_CULL_MODES_NV',
'GL_CULL_VERTEX_EXT',
'GL_CULL_VERTEX_EYE_POSITION_EXT',
'GL_CULL_VERTEX_IBM',
'GL_CULL_VERTEX_OBJECT_POSITION_EXT',
'GL_CURRENT_ATTRIB_NV',
'GL_CURRENT_BINORMAL_EXT',
'GL_CURRENT_BIT',
'GL_CURRENT_COLOR',
'GL_CURRENT_FOG_COORD',
'GL_CURRENT_FOG_COORDINATE',
'GL_CURRENT_FOG_COORDINATE_EXT',
'GL_CURRENT_INDEX',
'GL_CURRENT_MATRIX_ARB',
'GL_CURRENT_MATRIX_INDEX_ARB',
'GL_CURRENT_MATRIX_NV',
'GL_CURRENT_MATRIX_STACK_DEPTH_ARB',
'GL_CURRENT_MATRIX_STACK_DEPTH_NV',
'GL_CURRENT_NORMAL',
'GL_CURRENT_OCCLUSION_QUERY_ID_NV',
'GL_CURRENT_PALETTE_MATRIX_ARB',
'GL_CURRENT_PROGRAM',
'GL_CURRENT_QUERY',
'GL_CURRENT_QUERY_ARB',
'GL_CURRENT_RASTER_COLOR',
'GL_CURRENT_RASTER_DISTANCE',
'GL_CURRENT_RASTER_INDEX',
'GL_CURRENT_RASTER_NORMAL_SGIX',
'GL_CURRENT_RASTER_POSITION',
'GL_CURRENT_RASTER_POSITION_VALID',
'GL_CURRENT_RASTER_TEXTURE_COORDS',
'GL_CURRENT_SECONDARY_COLOR',
'GL_CURRENT_SECONDARY_COLOR_EXT',
'GL_CURRENT_TANGENT_EXT',
'GL_CURRENT_TEXTURE_COORDS',
'GL_CURRENT_VERTEX_ATTRIB',
'GL_CURRENT_VERTEX_ATTRIB_ARB',
'GL_CURRENT_VERTEX_EXT',
'GL_CURRENT_VERTEX_WEIGHT_EXT',
'GL_CURRENT_WEIGHT_ARB',
'GL_CW',
'GL_DECAL',
'GL_DECR',
'GL_DECR_WRAP',
'GL_DECR_WRAP_EXT',
'GL_DEFORMATIONS_MASK_SGIX',
'GL_DELETE_STATUS',
'GL_DEPENDENT_AR_TEXTURE_2D_NV',
'GL_DEPENDENT_GB_TEXTURE_2D_NV',
'GL_DEPENDENT_HILO_TEXTURE_2D_NV',
'GL_DEPENDENT_RGB_TEXTURE_3D_NV',
'GL_DEPENDENT_RGB_TEXTURE_CUBE_MAP_NV',
'GL_DEPTH',
'GL_DEPTH_ATTACHMENT_EXT',
'GL_DEPTH_BIAS',
'GL_DEPTH_BITS',
'GL_DEPTH_BOUNDS_EXT',
'GL_DEPTH_BOUNDS_TEST_EXT',
'GL_DEPTH_BUFFER_BIT',
'GL_DEPTH_CLAMP_NV',
'GL_DEPTH_CLEAR_VALUE',
'GL_DEPTH_COMPONENT',
'GL_DEPTH_COMPONENT16',
'GL_DEPTH_COMPONENT16_ARB',
'GL_DEPTH_COMPONENT16_SGIX',
'GL_DEPTH_COMPONENT24',
'GL_DEPTH_COMPONENT24_ARB',
'GL_DEPTH_COMPONENT24_SGIX',
'GL_DEPTH_COMPONENT32',
'GL_DEPTH_COMPONENT32_ARB',
'GL_DEPTH_COMPONENT32_SGIX',
'GL_DEPTH_FUNC',
'GL_DEPTH_PASS_INSTRUMENT_COUNTERS_SGIX',
'GL_DEPTH_PASS_INSTRUMENT_MAX_SGIX',
'GL_DEPTH_PASS_INSTRUMENT_SGIX',
'GL_DEPTH_RANGE',
'GL_DEPTH_SCALE',
'GL_DEPTH_STENCIL_NV',
'GL_DEPTH_STENCIL_TO_BGRA_NV',
'GL_DEPTH_STENCIL_TO_RGBA_NV',
'GL_DEPTH_TEST',
'GL_DEPTH_TEXTURE_MODE',
'GL_DEPTH_TEXTURE_MODE_ARB',
'GL_DEPTH_WRITEMASK',
'GL_DETAIL_TEXTURE_2D_BINDING_SGIS',
'GL_DETAIL_TEXTURE_2D_SGIS',
'GL_DETAIL_TEXTURE_FUNC_POINTS_SGIS',
'GL_DETAIL_TEXTURE_LEVEL_SGIS',
'GL_DETAIL_TEXTURE_MODE_SGIS',
'GL_DIFFUSE',
'GL_DISCARD_ATI',
'GL_DISCARD_NV',
'GL_DISTANCE_ATTENUATION_EXT',
'GL_DISTANCE_ATTENUATION_SGIS',
'GL_DITHER',
'GL_DOMAIN',
'GL_DONT_CARE',
'GL_DOT2_ADD_ATI',
'GL_DOT3_ATI',
'GL_DOT3_RGB',
'GL_DOT3_RGBA',
'GL_DOT3_RGBA_ARB',
'GL_DOT3_RGBA_EXT',
'GL_DOT3_RGB_ARB',
'GL_DOT3_RGB_EXT',
'GL_DOT4_ATI',
'GL_DOT_PRODUCT_AFFINE_DEPTH_REPLACE_NV',
'GL_DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV',
'GL_DOT_PRODUCT_DEPTH_REPLACE_NV',
'GL_DOT_PRODUCT_DIFFUSE_CUBE_MAP_NV',
'GL_DOT_PRODUCT_NV',
'GL_DOT_PRODUCT_PASS_THROUGH_NV',
'GL_DOT_PRODUCT_REFLECT_CUBE_MAP_NV',
'GL_DOT_PRODUCT_TEXTURE_1D_NV',
'GL_DOT_PRODUCT_TEXTURE_2D_NV',
'GL_DOT_PRODUCT_TEXTURE_3D_NV',
'GL_DOT_PRODUCT_TEXTURE_CUBE_MAP_NV',
'GL_DOT_PRODUCT_TEXTURE_RECTANGLE_NV',
'GL_DOUBLE',
'GL_DOUBLEBUFFER',
'GL_DOUBLE_EXT',
'GL_DRAW_BUFFER',
'GL_DRAW_BUFFER0',
'GL_DRAW_BUFFER0_ARB',
'GL_DRAW_BUFFER0_ATI',
'GL_DRAW_BUFFER1',
'GL_DRAW_BUFFER10',
'GL_DRAW_BUFFER10_ARB',
'GL_DRAW_BUFFER10_ATI',
'GL_DRAW_BUFFER11',
'GL_DRAW_BUFFER11_ARB',
'GL_DRAW_BUFFER11_ATI',
'GL_DRAW_BUFFER12',
'GL_DRAW_BUFFER12_ARB',
'GL_DRAW_BUFFER12_ATI',
'GL_DRAW_BUFFER13',
'GL_DRAW_BUFFER13_ARB',
'GL_DRAW_BUFFER13_ATI',
'GL_DRAW_BUFFER14',
'GL_DRAW_BUFFER14_ARB',
'GL_DRAW_BUFFER14_ATI',
'GL_DRAW_BUFFER15',
'GL_DRAW_BUFFER15_ARB',
'GL_DRAW_BUFFER15_ATI',
'GL_DRAW_BUFFER1_ARB',
'GL_DRAW_BUFFER1_ATI',
'GL_DRAW_BUFFER2',
'GL_DRAW_BUFFER2_ARB',
'GL_DRAW_BUFFER2_ATI',
'GL_DRAW_BUFFER3',
'GL_DRAW_BUFFER3_ARB',
'GL_DRAW_BUFFER3_ATI',
'GL_DRAW_BUFFER4',
'GL_DRAW_BUFFER4_ARB',
'GL_DRAW_BUFFER4_ATI',
'GL_DRAW_BUFFER5',
'GL_DRAW_BUFFER5_ARB',
'GL_DRAW_BUFFER5_ATI',
'GL_DRAW_BUFFER6',
'GL_DRAW_BUFFER6_ARB',
'GL_DRAW_BUFFER6_ATI',
'GL_DRAW_BUFFER7',
'GL_DRAW_BUFFER7_ARB',
'GL_DRAW_BUFFER7_ATI',
'GL_DRAW_BUFFER8',
'GL_DRAW_BUFFER8_ARB',
'GL_DRAW_BUFFER8_ATI',
'GL_DRAW_BUFFER9',
'GL_DRAW_BUFFER9_ARB',
'GL_DRAW_BUFFER9_ATI',
'GL_DRAW_PIXELS_APPLE',
'GL_DRAW_PIXEL_TOKEN',
'GL_DSDT8_MAG8_INTENSITY8_NV',
'GL_DSDT8_MAG8_NV',
'GL_DSDT8_NV',
'GL_DSDT_MAG_INTENSITY_NV',
'GL_DSDT_MAG_NV',
'GL_DSDT_MAG_VIB_NV',
'GL_DSDT_NV',
'GL_DST_ALPHA',
'GL_DST_COLOR',
'GL_DS_BIAS_NV',
'GL_DS_SCALE_NV',
'GL_DT_BIAS_NV',
'GL_DT_SCALE_NV',
'GL_DU8DV8_ATI',
'GL_DUAL_ALPHA12_SGIS',
'GL_DUAL_ALPHA16_SGIS',
'GL_DUAL_ALPHA4_SGIS',
'GL_DUAL_ALPHA8_SGIS',
'GL_DUAL_INTENSITY12_SGIS',
'GL_DUAL_INTENSITY16_SGIS',
'GL_DUAL_INTENSITY4_SGIS',
'GL_DUAL_INTENSITY8_SGIS',
'GL_DUAL_LUMINANCE12_SGIS',
'GL_DUAL_LUMINANCE16_SGIS',
'GL_DUAL_LUMINANCE4_SGIS',
'GL_DUAL_LUMINANCE8_SGIS',
'GL_DUAL_LUMINANCE_ALPHA4_SGIS',
'GL_DUAL_LUMINANCE_ALPHA8_SGIS',
'GL_DUAL_TEXTURE_SELECT_SGIS',
'GL_DUDV_ATI',
'GL_DYNAMIC_ATI',
'GL_DYNAMIC_COPY',
'GL_DYNAMIC_COPY_ARB',
'GL_DYNAMIC_DRAW',
'GL_DYNAMIC_DRAW_ARB',
'GL_DYNAMIC_READ',
'GL_DYNAMIC_READ_ARB',
'GL_EDGEFLAG_BIT_PGI',
'GL_EDGE_FLAG',
'GL_EDGE_FLAG_ARRAY',
'GL_EDGE_FLAG_ARRAY_BUFFER_BINDING',
'GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB',
'GL_EDGE_FLAG_ARRAY_COUNT_EXT',
'GL_EDGE_FLAG_ARRAY_EXT',
'GL_EDGE_FLAG_ARRAY_LIST_IBM',
'GL_EDGE_FLAG_ARRAY_LIST_STRIDE_IBM',
'GL_EDGE_FLAG_ARRAY_POINTER',
'GL_EDGE_FLAG_ARRAY_POINTER_EXT',
'GL_EDGE_FLAG_ARRAY_STRIDE',
'GL_EDGE_FLAG_ARRAY_STRIDE_EXT',
'GL_EIGHTH_BIT_ATI',
'GL_ELEMENT_ARRAY_APPLE',
'GL_ELEMENT_ARRAY_ATI',
'GL_ELEMENT_ARRAY_BUFFER',
'GL_ELEMENT_ARRAY_BUFFER_ARB',
'GL_ELEMENT_ARRAY_BUFFER_BINDING',
'GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB',
'GL_ELEMENT_ARRAY_POINTER_APPLE',
'GL_ELEMENT_ARRAY_POINTER_ATI',
'GL_ELEMENT_ARRAY_TYPE_APPLE',
'GL_ELEMENT_ARRAY_TYPE_ATI',
'GL_EMBOSS_CONSTANT_NV',
'GL_EMBOSS_LIGHT_NV',
'GL_EMBOSS_MAP_NV',
'GL_EMISSION',
'GL_ENABLE_BIT',
'GL_EQUAL',
'GL_EQUIV',
'GL_EVAL_2D_NV',
'GL_EVAL_BIT',
'GL_EVAL_FRACTIONAL_TESSELLATION_NV',
'GL_EVAL_TRIANGULAR_2D_NV',
'GL_EVAL_VERTEX_ATTRIB0_NV',
'GL_EVAL_VERTEX_ATTRIB10_NV',
'GL_EVAL_VERTEX_ATTRIB11_NV',
'GL_EVAL_VERTEX_ATTRIB12_NV',
'GL_EVAL_VERTEX_ATTRIB13_NV',
'GL_EVAL_VERTEX_ATTRIB14_NV',
'GL_EVAL_VERTEX_ATTRIB15_NV',
'GL_EVAL_VERTEX_ATTRIB1_NV',
'GL_EVAL_VERTEX_ATTRIB2_NV',
'GL_EVAL_VERTEX_ATTRIB3_NV',
'GL_EVAL_VERTEX_ATTRIB4_NV',
'GL_EVAL_VERTEX_ATTRIB5_NV',
'GL_EVAL_VERTEX_ATTRIB6_NV',
'GL_EVAL_VERTEX_ATTRIB7_NV',
'GL_EVAL_VERTEX_ATTRIB8_NV',
'GL_EVAL_VERTEX_ATTRIB9_NV',
'GL_EXP',
'GL_EXP2',
'GL_EXPAND_NEGATE_NV',
'GL_EXPAND_NORMAL_NV',
'GL_EXTENSIONS',
'GL_EYE_DISTANCE_TO_LINE_SGIS',
'GL_EYE_DISTANCE_TO_POINT_SGIS',
'GL_EYE_LINEAR',
'GL_EYE_LINE_SGIS',
'GL_EYE_PLANE',
'GL_EYE_PLANE_ABSOLUTE_NV',
'GL_EYE_POINT_SGIS',
'GL_EYE_RADIAL_NV',
'GL_E_TIMES_F_NV',
'GL_FALSE',
'GL_FASTEST',
'GL_FEEDBACK',
'GL_FEEDBACK_BUFFER_POINTER',
'GL_FEEDBACK_BUFFER_SIZE',
'GL_FEEDBACK_BUFFER_TYPE',
'GL_FENCE_APPLE',
'GL_FENCE_CONDITION_NV',
'GL_FENCE_STATUS_NV',
'GL_FILL',
'GL_FILTER4_SGIS',
'GL_FIXED_ONLY_ARB',
'GL_FLAT',
'GL_FLOAT',
'GL_FLOAT_CLEAR_COLOR_VALUE_NV',
'GL_FLOAT_MAT2',
'GL_FLOAT_MAT2_ARB',
'GL_FLOAT_MAT3',
'GL_FLOAT_MAT3_ARB',
'GL_FLOAT_MAT4',
'GL_FLOAT_MAT4_ARB',
'GL_FLOAT_R16_NV',
'GL_FLOAT_R32_NV',
'GL_FLOAT_RG16_NV',
'GL_FLOAT_RG32_NV',
'GL_FLOAT_RGB16_NV',
'GL_FLOAT_RGB32_NV',
'GL_FLOAT_RGBA16_NV',
'GL_FLOAT_RGBA32_NV',
'GL_FLOAT_RGBA_MODE_NV',
'GL_FLOAT_RGBA_NV',
'GL_FLOAT_RGB_NV',
'GL_FLOAT_RG_NV',
'GL_FLOAT_R_NV',
'GL_FLOAT_VEC2',
'GL_FLOAT_VEC2_ARB',
'GL_FLOAT_VEC3',
'GL_FLOAT_VEC3_ARB',
'GL_FLOAT_VEC4',
'GL_FLOAT_VEC4_ARB',
'GL_FOG',
'GL_FOG_BIT',
'GL_FOG_COLOR',
'GL_FOG_COORD',
'GL_FOG_COORDINATE',
'GL_FOG_COORDINATE_ARRAY',
'GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING',
'GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB',
'GL_FOG_COORDINATE_ARRAY_EXT',
'GL_FOG_COORDINATE_ARRAY_LIST_IBM',
'GL_FOG_COORDINATE_ARRAY_LIST_STRIDE_IBM',
'GL_FOG_COORDINATE_ARRAY_POINTER',
'GL_FOG_COORDINATE_ARRAY_POINTER_EXT',
'GL_FOG_COORDINATE_ARRAY_STRIDE',
'GL_FOG_COORDINATE_ARRAY_STRIDE_EXT',
'GL_FOG_COORDINATE_ARRAY_TYPE',
'GL_FOG_COORDINATE_ARRAY_TYPE_EXT',
'GL_FOG_COORDINATE_EXT',
'GL_FOG_COORDINATE_SOURCE',
'GL_FOG_COORDINATE_SOURCE_EXT',
'GL_FOG_COORD_ARRAY',
'GL_FOG_COORD_ARRAY_BUFFER_BINDING',
'GL_FOG_COORD_ARRAY_POINTER',
'GL_FOG_COORD_ARRAY_STRIDE',
'GL_FOG_COORD_ARRAY_TYPE',
'GL_FOG_COORD_SRC',
'GL_FOG_DENSITY',
'GL_FOG_DISTANCE_MODE_NV',
'GL_FOG_END',
'GL_FOG_FUNC_POINTS_SGIS',
'GL_FOG_FUNC_SGIS',
'GL_FOG_HINT',
'GL_FOG_INDEX',
'GL_FOG_MODE',
'GL_FOG_OFFSET_SGIX',
'GL_FOG_OFFSET_VALUE_SGIX',
'GL_FOG_SCALE_SGIX',
'GL_FOG_SCALE_VALUE_SGIX',
'GL_FOG_SPECULAR_TEXTURE_WIN',
'GL_FOG_START',
'GL_FORCE_BLUE_TO_ONE_NV',
'GL_FORMAT_SUBSAMPLE_244_244_OML',
'GL_FORMAT_SUBSAMPLE_24_24_OML',
'GL_FRAGMENT_COLOR_EXT',
'GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX',
'GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX',
'GL_FRAGMENT_COLOR_MATERIAL_SGIX',
'GL_FRAGMENT_DEPTH',
'GL_FRAGMENT_DEPTH_EXT',
'GL_FRAGMENT_LIGHT0_SGIX',
'GL_FRAGMENT_LIGHT1_SGIX',
'GL_FRAGMENT_LIGHT2_SGIX',
'GL_FRAGMENT_LIGHT3_SGIX',
'GL_FRAGMENT_LIGHT4_SGIX',
'GL_FRAGMENT_LIGHT5_SGIX',
'GL_FRAGMENT_LIGHT6_SGIX',
'GL_FRAGMENT_LIGHT7_SGIX',
'GL_FRAGMENT_LIGHTING_SGIX',
'GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX',
'GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX',
'GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX',
'GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX',
'GL_FRAGMENT_MATERIAL_EXT',
'GL_FRAGMENT_NORMAL_EXT',
'GL_FRAGMENT_PROGRAM_ARB',
'GL_FRAGMENT_PROGRAM_BINDING_NV',
'GL_FRAGMENT_PROGRAM_NV',
'GL_FRAGMENT_SHADER',
'GL_FRAGMENT_SHADER_ARB',
'GL_FRAGMENT_SHADER_ATI',
'GL_FRAGMENT_SHADER_DERIVATIVE_HINT',
'GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB',
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT',
'GL_FRAMEBUFFER_BINDING_EXT',
'GL_FRAMEBUFFER_COMPLETE_EXT',
'GL_FRAMEBUFFER_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_DUPLICATE_ATTACHMENT_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT',
'GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT',
'GL_FRAMEBUFFER_UNSUPPORTED_EXT',
'GL_FRAMEZOOM_FACTOR_SGIX',
'GL_FRAMEZOOM_SGIX',
'GL_FRONT',
'GL_FRONT_AND_BACK',
'GL_FRONT_FACE',
'GL_FRONT_LEFT',
'GL_FRONT_RIGHT',
'GL_FULL_RANGE_EXT',
'GL_FULL_STIPPLE_HINT_PGI',
'GL_FUNC_ADD',
'GL_FUNC_ADD_EXT',
'GL_FUNC_REVERSE_SUBTRACT',
'GL_FUNC_REVERSE_SUBTRACT_EXT',
'GL_FUNC_SUBTRACT',
'GL_FUNC_SUBTRACT_EXT',
'GL_GENERATE_MIPMAP',
'GL_GENERATE_MIPMAP_HINT',
'GL_GENERATE_MIPMAP_HINT_SGIS',
'GL_GENERATE_MIPMAP_SGIS',
'GL_GEOMETRY_DEFORMATION_BIT_SGIX',
'GL_GEOMETRY_DEFORMATION_SGIX',
'GL_GEQUAL',
'GL_GLEXT_VERSION',
'GL_GLOBAL_ALPHA_FACTOR_SUN',
'GL_GLOBAL_ALPHA_SUN',
'GL_GREATER',
'GL_GREEN',
'GL_GREEN_BIAS',
'GL_GREEN_BITS',
'GL_GREEN_BIT_ATI',
'GL_GREEN_MAX_CLAMP_INGR',
'GL_GREEN_MIN_CLAMP_INGR',
'GL_GREEN_SCALE',
'GL_HALF_BIAS_NEGATE_NV',
'GL_HALF_BIAS_NORMAL_NV',
'GL_HALF_BIT_ATI',
'GL_HALF_FLOAT_ARB',
'GL_HALF_FLOAT_NV',
'GL_HILO16_NV',
'GL_HILO8_NV',
'GL_HILO_NV',
'GL_HINT_BIT',
'GL_HISTOGRAM',
'GL_HISTOGRAM_ALPHA_SIZE',
'GL_HISTOGRAM_ALPHA_SIZE_EXT',
'GL_HISTOGRAM_BLUE_SIZE',
'GL_HISTOGRAM_BLUE_SIZE_EXT',
'GL_HISTOGRAM_EXT',
'GL_HISTOGRAM_FORMAT',
'GL_HISTOGRAM_FORMAT_EXT',
'GL_HISTOGRAM_GREEN_SIZE',
'GL_HISTOGRAM_GREEN_SIZE_EXT',
'GL_HISTOGRAM_LUMINANCE_SIZE',
'GL_HISTOGRAM_LUMINANCE_SIZE_EXT',
'GL_HISTOGRAM_RED_SIZE',
'GL_HISTOGRAM_RED_SIZE_EXT',
'GL_HISTOGRAM_SINK',
'GL_HISTOGRAM_SINK_EXT',
'GL_HISTOGRAM_WIDTH',
'GL_HISTOGRAM_WIDTH_EXT',
'GL_HI_BIAS_NV',
'GL_HI_SCALE_NV',
'GL_IDENTITY_NV',
'GL_IGNORE_BORDER_HP',
'GL_IMAGE_CUBIC_WEIGHT_HP',
'GL_IMAGE_MAG_FILTER_HP',
'GL_IMAGE_MIN_FILTER_HP',
'GL_IMAGE_ROTATE_ANGLE_HP',
'GL_IMAGE_ROTATE_ORIGIN_X_HP',
'GL_IMAGE_ROTATE_ORIGIN_Y_HP',
'GL_IMAGE_SCALE_X_HP',
'GL_IMAGE_SCALE_Y_HP',
'GL_IMAGE_TRANSFORM_2D_HP',
'GL_IMAGE_TRANSLATE_X_HP',
'GL_IMAGE_TRANSLATE_Y_HP',
'GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES',
'GL_IMPLEMENTATION_COLOR_READ_TYPE_OES',
'GL_INCR',
'GL_INCR_WRAP',
'GL_INCR_WRAP_EXT',
'GL_INDEX_ARRAY',
'GL_INDEX_ARRAY_BUFFER_BINDING',
'GL_INDEX_ARRAY_BUFFER_BINDING_ARB',
'GL_INDEX_ARRAY_COUNT_EXT',
'GL_INDEX_ARRAY_EXT',
'GL_INDEX_ARRAY_LIST_IBM',
'GL_INDEX_ARRAY_LIST_STRIDE_IBM',
'GL_INDEX_ARRAY_POINTER',
'GL_INDEX_ARRAY_POINTER_EXT',
'GL_INDEX_ARRAY_STRIDE',
'GL_INDEX_ARRAY_STRIDE_EXT',
'GL_INDEX_ARRAY_TYPE',
'GL_INDEX_ARRAY_TYPE_EXT',
'GL_INDEX_BITS',
'GL_INDEX_BIT_PGI',
'GL_INDEX_CLEAR_VALUE',
'GL_INDEX_LOGIC_OP',
'GL_INDEX_MATERIAL_EXT',
'GL_INDEX_MATERIAL_FACE_EXT',
'GL_INDEX_MATERIAL_PARAMETER_EXT',
'GL_INDEX_MODE',
'GL_INDEX_OFFSET',
'GL_INDEX_SHIFT',
'GL_INDEX_TEST_EXT',
'GL_INDEX_TEST_FUNC_EXT',
'GL_INDEX_TEST_REF_EXT',
'GL_INDEX_WRITEMASK',
'GL_INFO_LOG_LENGTH',
'GL_INSTRUMENT_BUFFER_POINTER_SGIX',
'GL_INSTRUMENT_MEASUREMENTS_SGIX',
'GL_INT',
'GL_INTENSITY',
'GL_INTENSITY12',
'GL_INTENSITY12_EXT',
'GL_INTENSITY16',
'GL_INTENSITY16F_ARB',
'GL_INTENSITY16_EXT',
'GL_INTENSITY32F_ARB',
'GL_INTENSITY4',
'GL_INTENSITY4_EXT',
'GL_INTENSITY8',
'GL_INTENSITY8_EXT',
'GL_INTENSITY_EXT',
'GL_INTENSITY_FLOAT16_ATI',
'GL_INTENSITY_FLOAT32_ATI',
'GL_INTERLACE_OML',
'GL_INTERLACE_READ_INGR',
'GL_INTERLACE_READ_OML',
'GL_INTERLACE_SGIX',
'GL_INTERPOLATE',
'GL_INTERPOLATE_ARB',
'GL_INTERPOLATE_EXT',
'GL_INT_VEC2',
'GL_INT_VEC2_ARB',
'GL_INT_VEC3',
'GL_INT_VEC3_ARB',
'GL_INT_VEC4',
'GL_INT_VEC4_ARB',
'GL_INVALID_ENUM',
'GL_INVALID_FRAMEBUFFER_OPERATION_EXT',
'GL_INVALID_OPERATION',
'GL_INVALID_VALUE',
'GL_INVARIANT_DATATYPE_EXT',
'GL_INVARIANT_EXT',
'GL_INVARIANT_VALUE_EXT',
'GL_INVERSE_NV',
'GL_INVERSE_TRANSPOSE_NV',
'GL_INVERT',
'GL_INVERTED_SCREEN_W_REND',
'GL_IR_INSTRUMENT1_SGIX',
'GL_IUI_N3F_V2F_EXT',
'GL_IUI_N3F_V3F_EXT',
'GL_IUI_V2F_EXT',
'GL_IUI_V3F_EXT',
'GL_KEEP',
'GL_LEFT',
'GL_LEQUAL',
'GL_LERP_ATI',
'GL_LESS',
'GL_LIGHT0',
'GL_LIGHT1',
'GL_LIGHT2',
'GL_LIGHT3',
'GL_LIGHT4',
'GL_LIGHT5',
'GL_LIGHT6',
'GL_LIGHT7',
'GL_LIGHTING',
'GL_LIGHTING_BIT',
'GL_LIGHT_ENV_MODE_SGIX',
'GL_LIGHT_MODEL_AMBIENT',
'GL_LIGHT_MODEL_COLOR_CONTROL',
'GL_LIGHT_MODEL_COLOR_CONTROL_EXT',
'GL_LIGHT_MODEL_LOCAL_VIEWER',
'GL_LIGHT_MODEL_SPECULAR_VECTOR_APPLE',
'GL_LIGHT_MODEL_TWO_SIDE',
'GL_LINE',
'GL_LINEAR',
'GL_LINEAR_ATTENUATION',
'GL_LINEAR_CLIPMAP_LINEAR_SGIX',
'GL_LINEAR_CLIPMAP_NEAREST_SGIX',
'GL_LINEAR_DETAIL_ALPHA_SGIS',
'GL_LINEAR_DETAIL_COLOR_SGIS',
'GL_LINEAR_DETAIL_SGIS',
'GL_LINEAR_MIPMAP_LINEAR',
'GL_LINEAR_MIPMAP_NEAREST',
'GL_LINEAR_SHARPEN_ALPHA_SGIS',
'GL_LINEAR_SHARPEN_COLOR_SGIS',
'GL_LINEAR_SHARPEN_SGIS',
'GL_LINES',
'GL_LINE_BIT',
'GL_LINE_LOOP',
'GL_LINE_RESET_TOKEN',
'GL_LINE_SMOOTH',
'GL_LINE_SMOOTH_HINT',
'GL_LINE_STIPPLE',
'GL_LINE_STIPPLE_PATTERN',
'GL_LINE_STIPPLE_REPEAT',
'GL_LINE_STRIP',
'GL_LINE_TOKEN',
'GL_LINE_WIDTH',
'GL_LINE_WIDTH_GRANULARITY',
'GL_LINE_WIDTH_RANGE',
'GL_LINK_STATUS',
'GL_LIST_BASE',
'GL_LIST_BIT',
'GL_LIST_INDEX',
'GL_LIST_MODE',
'GL_LIST_PRIORITY_SGIX',
'GL_LOAD',
'GL_LOCAL_CONSTANT_DATATYPE_EXT',
'GL_LOCAL_CONSTANT_EXT',
'GL_LOCAL_CONSTANT_VALUE_EXT',
'GL_LOCAL_EXT',
'GL_LOGIC_OP',
'GL_LOGIC_OP_MODE',
'GL_LOWER_LEFT',
'GL_LO_BIAS_NV',
'GL_LO_SCALE_NV',
'GL_LUMINANCE',
'GL_LUMINANCE12',
'GL_LUMINANCE12_ALPHA12',
'GL_LUMINANCE12_ALPHA12_EXT',
'GL_LUMINANCE12_ALPHA4',
'GL_LUMINANCE12_ALPHA4_EXT',
'GL_LUMINANCE12_EXT',
'GL_LUMINANCE16',
'GL_LUMINANCE16F_ARB',
'GL_LUMINANCE16_ALPHA16',
'GL_LUMINANCE16_ALPHA16_EXT',
'GL_LUMINANCE16_EXT',
'GL_LUMINANCE32F_ARB',
'GL_LUMINANCE4',
'GL_LUMINANCE4_ALPHA4',
'GL_LUMINANCE4_ALPHA4_EXT',
'GL_LUMINANCE4_EXT',
'GL_LUMINANCE6_ALPHA2',
'GL_LUMINANCE6_ALPHA2_EXT',
'GL_LUMINANCE8',
'GL_LUMINANCE8_ALPHA8',
'GL_LUMINANCE8_ALPHA8_EXT',
'GL_LUMINANCE8_EXT',
'GL_LUMINANCE_ALPHA',
'GL_LUMINANCE_ALPHA16F_ARB',
'GL_LUMINANCE_ALPHA32F_ARB',
'GL_LUMINANCE_ALPHA_FLOAT16_ATI',
'GL_LUMINANCE_ALPHA_FLOAT32_ATI',
'GL_LUMINANCE_FLOAT16_ATI',
'GL_LUMINANCE_FLOAT32_ATI',
'GL_MAD_ATI',
'GL_MAGNITUDE_BIAS_NV',
'GL_MAGNITUDE_SCALE_NV',
'GL_MAP1_BINORMAL_EXT',
'GL_MAP1_COLOR_4',
'GL_MAP1_GRID_DOMAIN',
'GL_MAP1_GRID_SEGMENTS',
'GL_MAP1_INDEX',
'GL_MAP1_NORMAL',
'GL_MAP1_TANGENT_EXT',
'GL_MAP1_TEXTURE_COORD_1',
'GL_MAP1_TEXTURE_COORD_2',
'GL_MAP1_TEXTURE_COORD_3',
'GL_MAP1_TEXTURE_COORD_4',
'GL_MAP1_VERTEX_3',
'GL_MAP1_VERTEX_4',
'GL_MAP1_VERTEX_ATTRIB0_4_NV',
'GL_MAP1_VERTEX_ATTRIB10_4_NV',
'GL_MAP1_VERTEX_ATTRIB11_4_NV',
'GL_MAP1_VERTEX_ATTRIB12_4_NV',
'GL_MAP1_VERTEX_ATTRIB13_4_NV',
'GL_MAP1_VERTEX_ATTRIB14_4_NV',
'GL_MAP1_VERTEX_ATTRIB15_4_NV',
'GL_MAP1_VERTEX_ATTRIB1_4_NV',
'GL_MAP1_VERTEX_ATTRIB2_4_NV',
'GL_MAP1_VERTEX_ATTRIB3_4_NV',
'GL_MAP1_VERTEX_ATTRIB4_4_NV',
'GL_MAP1_VERTEX_ATTRIB5_4_NV',
'GL_MAP1_VERTEX_ATTRIB6_4_NV',
'GL_MAP1_VERTEX_ATTRIB7_4_NV',
'GL_MAP1_VERTEX_ATTRIB8_4_NV',
'GL_MAP1_VERTEX_ATTRIB9_4_NV',
'GL_MAP2_BINORMAL_EXT',
'GL_MAP2_COLOR_4',
'GL_MAP2_GRID_DOMAIN',
'GL_MAP2_GRID_SEGMENTS',
'GL_MAP2_INDEX',
'GL_MAP2_NORMAL',
'GL_MAP2_TANGENT_EXT',
'GL_MAP2_TEXTURE_COORD_1',
'GL_MAP2_TEXTURE_COORD_2',
'GL_MAP2_TEXTURE_COORD_3',
'GL_MAP2_TEXTURE_COORD_4',
'GL_MAP2_VERTEX_3',
'GL_MAP2_VERTEX_4',
'GL_MAP2_VERTEX_ATTRIB0_4_NV',
'GL_MAP2_VERTEX_ATTRIB10_4_NV',
'GL_MAP2_VERTEX_ATTRIB11_4_NV',
'GL_MAP2_VERTEX_ATTRIB12_4_NV',
'GL_MAP2_VERTEX_ATTRIB13_4_NV',
'GL_MAP2_VERTEX_ATTRIB14_4_NV',
'GL_MAP2_VERTEX_ATTRIB15_4_NV',
'GL_MAP2_VERTEX_ATTRIB1_4_NV',
'GL_MAP2_VERTEX_ATTRIB2_4_NV',
'GL_MAP2_VERTEX_ATTRIB3_4_NV',
'GL_MAP2_VERTEX_ATTRIB4_4_NV',
'GL_MAP2_VERTEX_ATTRIB5_4_NV',
'GL_MAP2_VERTEX_ATTRIB6_4_NV',
'GL_MAP2_VERTEX_ATTRIB7_4_NV',
'GL_MAP2_VERTEX_ATTRIB8_4_NV',
'GL_MAP2_VERTEX_ATTRIB9_4_NV',
'GL_MAP_ATTRIB_U_ORDER_NV',
'GL_MAP_ATTRIB_V_ORDER_NV',
'GL_MAP_COLOR',
'GL_MAP_STENCIL',
'GL_MAP_TESSELLATION_NV',
'GL_MATERIAL_SIDE_HINT_PGI',
'GL_MATRIX0_ARB',
'GL_MATRIX0_NV',
'GL_MATRIX10_ARB',
'GL_MATRIX11_ARB',
'GL_MATRIX12_ARB',
'GL_MATRIX13_ARB',
'GL_MATRIX14_ARB',
'GL_MATRIX15_ARB',
'GL_MATRIX16_ARB',
'GL_MATRIX17_ARB',
'GL_MATRIX18_ARB',
'GL_MATRIX19_ARB',
'GL_MATRIX1_ARB',
'GL_MATRIX1_NV',
'GL_MATRIX20_ARB',
'GL_MATRIX21_ARB',
'GL_MATRIX22_ARB',
'GL_MATRIX23_ARB',
'GL_MATRIX24_ARB',
'GL_MATRIX25_ARB',
'GL_MATRIX26_ARB',
'GL_MATRIX27_ARB',
'GL_MATRIX28_ARB',
'GL_MATRIX29_ARB',
'GL_MATRIX2_ARB',
'GL_MATRIX2_NV',
'GL_MATRIX30_ARB',
'GL_MATRIX31_ARB',
'GL_MATRIX3_ARB',
'GL_MATRIX3_NV',
'GL_MATRIX4_ARB',
'GL_MATRIX4_NV',
'GL_MATRIX5_ARB',
'GL_MATRIX5_NV',
'GL_MATRIX6_ARB',
'GL_MATRIX6_NV',
'GL_MATRIX7_ARB',
'GL_MATRIX7_NV',
'GL_MATRIX8_ARB',
'GL_MATRIX9_ARB',
'GL_MATRIX_EXT',
'GL_MATRIX_INDEX_ARRAY_ARB',
'GL_MATRIX_INDEX_ARRAY_POINTER_ARB',
'GL_MATRIX_INDEX_ARRAY_SIZE_ARB',
'GL_MATRIX_INDEX_ARRAY_STRIDE_ARB',
'GL_MATRIX_INDEX_ARRAY_TYPE_ARB',
'GL_MATRIX_MODE',
'GL_MATRIX_PALETTE_ARB',
'GL_MAT_AMBIENT_AND_DIFFUSE_BIT_PGI',
'GL_MAT_AMBIENT_BIT_PGI',
'GL_MAT_COLOR_INDEXES_BIT_PGI',
'GL_MAT_DIFFUSE_BIT_PGI',
'GL_MAT_EMISSION_BIT_PGI',
'GL_MAT_SHININESS_BIT_PGI',
'GL_MAT_SPECULAR_BIT_PGI',
'GL_MAX',
'GL_MAX_3D_TEXTURE_SIZE',
'GL_MAX_3D_TEXTURE_SIZE_EXT',
'GL_MAX_4D_TEXTURE_SIZE_SGIS',
'GL_MAX_ACTIVE_LIGHTS_SGIX',
'GL_MAX_ASYNC_DRAW_PIXELS_SGIX',
'GL_MAX_ASYNC_HISTOGRAM_SGIX',
'GL_MAX_ASYNC_READ_PIXELS_SGIX',
'GL_MAX_ASYNC_TEX_IMAGE_SGIX',
'GL_MAX_ATTRIB_STACK_DEPTH',
'GL_MAX_CLIENT_ATTRIB_STACK_DEPTH',
'GL_MAX_CLIPMAP_DEPTH_SGIX',
'GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX',
'GL_MAX_CLIP_PLANES',
'GL_MAX_COLOR_ATTACHMENTS_EXT',
'GL_MAX_COLOR_MATRIX_STACK_DEPTH',
'GL_MAX_COLOR_MATRIX_STACK_DEPTH_SGI',
'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS',
'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB',
'GL_MAX_CONVOLUTION_HEIGHT',
'GL_MAX_CONVOLUTION_HEIGHT_EXT',
'GL_MAX_CONVOLUTION_WIDTH',
'GL_MAX_CONVOLUTION_WIDTH_EXT',
'GL_MAX_CUBE_MAP_TEXTURE_SIZE',
'GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB',
'GL_MAX_CUBE_MAP_TEXTURE_SIZE_EXT',
'GL_MAX_DEFORMATION_ORDER_SGIX',
'GL_MAX_DRAW_BUFFERS',
'GL_MAX_DRAW_BUFFERS_ARB',
'GL_MAX_DRAW_BUFFERS_ATI',
'GL_MAX_ELEMENTS_INDICES',
'GL_MAX_ELEMENTS_INDICES_EXT',
'GL_MAX_ELEMENTS_VERTICES',
'GL_MAX_ELEMENTS_VERTICES_EXT',
'GL_MAX_EVAL_ORDER',
'GL_MAX_EXT',
'GL_MAX_FOG_FUNC_POINTS_SGIS',
'GL_MAX_FRAGMENT_LIGHTS_SGIX',
'GL_MAX_FRAGMENT_PROGRAM_LOCAL_PARAMETERS_NV',
'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS',
'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB',
'GL_MAX_FRAMEZOOM_FACTOR_SGIX',
'GL_MAX_GENERAL_COMBINERS_NV',
'GL_MAX_LIGHTS',
'GL_MAX_LIST_NESTING',
'GL_MAX_MAP_TESSELLATION_NV',
'GL_MAX_MATRIX_PALETTE_STACK_DEPTH_ARB',
'GL_MAX_MODELVIEW_STACK_DEPTH',
'GL_MAX_NAME_STACK_DEPTH',
'GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT',
'GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT',
'GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT',
'GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',
'GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT',
'GL_MAX_PALETTE_MATRICES_ARB',
'GL_MAX_PIXEL_MAP_TABLE',
'GL_MAX_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT',
'GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI',
'GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB',
'GL_MAX_PROGRAM_ALU_INSTRUCTIONS_ARB',
'GL_MAX_PROGRAM_ATTRIBS_ARB',
'GL_MAX_PROGRAM_CALL_DEPTH_NV',
'GL_MAX_PROGRAM_ENV_PARAMETERS_ARB',
'GL_MAX_PROGRAM_EXEC_INSTRUCTIONS_NV',
'GL_MAX_PROGRAM_IF_DEPTH_NV',
'GL_MAX_PROGRAM_INSTRUCTIONS_ARB',
'GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB',
'GL_MAX_PROGRAM_LOOP_COUNT_NV',
'GL_MAX_PROGRAM_LOOP_DEPTH_NV',
'GL_MAX_PROGRAM_MATRICES_ARB',
'GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB',
'GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB',
'GL_MAX_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB',
'GL_MAX_PROGRAM_NATIVE_ATTRIBS_ARB',
'GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB',
'GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB',
'GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB',
'GL_MAX_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB',
'GL_MAX_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB',
'GL_MAX_PROGRAM_PARAMETERS_ARB',
'GL_MAX_PROGRAM_TEMPORARIES_ARB',
'GL_MAX_PROGRAM_TEX_INDIRECTIONS_ARB',
'GL_MAX_PROGRAM_TEX_INSTRUCTIONS_ARB',
'GL_MAX_PROJECTION_STACK_DEPTH',
'GL_MAX_RATIONAL_EVAL_ORDER_NV',
'GL_MAX_RECTANGLE_TEXTURE_SIZE_ARB',
'GL_MAX_RECTANGLE_TEXTURE_SIZE_NV',
'GL_MAX_RENDERBUFFER_SIZE_EXT',
'GL_MAX_SHININESS_NV',
'GL_MAX_SPOT_EXPONENT_NV',
'GL_MAX_TEXTURE_COORDS',
'GL_MAX_TEXTURE_COORDS_ARB',
'GL_MAX_TEXTURE_COORDS_NV',
'GL_MAX_TEXTURE_IMAGE_UNITS',
'GL_MAX_TEXTURE_IMAGE_UNITS_ARB',
'GL_MAX_TEXTURE_IMAGE_UNITS_NV',
'GL_MAX_TEXTURE_LOD_BIAS',
'GL_MAX_TEXTURE_LOD_BIAS_EXT',
'GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT',
'GL_MAX_TEXTURE_SIZE',
'GL_MAX_TEXTURE_STACK_DEPTH',
'GL_MAX_TEXTURE_UNITS',
'GL_MAX_TEXTURE_UNITS_ARB',
'GL_MAX_TRACK_MATRICES_NV',
'GL_MAX_TRACK_MATRIX_STACK_DEPTH_NV',
'GL_MAX_VARYING_FLOATS',
'GL_MAX_VARYING_FLOATS_ARB',
'GL_MAX_VERTEX_ARRAY_RANGE_ELEMENT_NV',
'GL_MAX_VERTEX_ATTRIBS',
'GL_MAX_VERTEX_ATTRIBS_ARB',
'GL_MAX_VERTEX_HINT_PGI',
'GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT',
'GL_MAX_VERTEX_SHADER_INVARIANTS_EXT',
'GL_MAX_VERTEX_SHADER_LOCALS_EXT',
'GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',
'GL_MAX_VERTEX_SHADER_VARIANTS_EXT',
'GL_MAX_VERTEX_STREAMS_ATI',
'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS',
'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB',
'GL_MAX_VERTEX_UNIFORM_COMPONENTS',
'GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB',
'GL_MAX_VERTEX_UNITS_ARB',
'GL_MAX_VIEWPORT_DIMS',
'GL_MIN',
'GL_MINMAX',
'GL_MINMAX_EXT',
'GL_MINMAX_FORMAT',
'GL_MINMAX_FORMAT_EXT',
'GL_MINMAX_SINK',
'GL_MINMAX_SINK_EXT',
'GL_MIN_EXT',
'GL_MIRRORED_REPEAT',
'GL_MIRRORED_REPEAT_ARB',
'GL_MIRRORED_REPEAT_IBM',
'GL_MIRROR_CLAMP_ATI',
'GL_MIRROR_CLAMP_EXT',
'GL_MIRROR_CLAMP_TO_BORDER_EXT',
'GL_MIRROR_CLAMP_TO_EDGE_ATI',
'GL_MIRROR_CLAMP_TO_EDGE_EXT',
'GL_MODELVIEW',
'GL_MODELVIEW0_ARB',
'GL_MODELVIEW0_EXT',
'GL_MODELVIEW0_MATRIX_EXT',
'GL_MODELVIEW0_STACK_DEPTH_EXT',
'GL_MODELVIEW10_ARB',
'GL_MODELVIEW11_ARB',
'GL_MODELVIEW12_ARB',
'GL_MODELVIEW13_ARB',
'GL_MODELVIEW14_ARB',
'GL_MODELVIEW15_ARB',
'GL_MODELVIEW16_ARB',
'GL_MODELVIEW17_ARB',
'GL_MODELVIEW18_ARB',
'GL_MODELVIEW19_ARB',
'GL_MODELVIEW1_ARB',
'GL_MODELVIEW1_EXT',
'GL_MODELVIEW1_MATRIX_EXT',
'GL_MODELVIEW1_STACK_DEPTH_EXT',
'GL_MODELVIEW20_ARB',
'GL_MODELVIEW21_ARB',
'GL_MODELVIEW22_ARB',
'GL_MODELVIEW23_ARB',
'GL_MODELVIEW24_ARB',
'GL_MODELVIEW25_ARB',
'GL_MODELVIEW26_ARB',
'GL_MODELVIEW27_ARB',
'GL_MODELVIEW28_ARB',
'GL_MODELVIEW29_ARB',
'GL_MODELVIEW2_ARB',
'GL_MODELVIEW30_ARB',
'GL_MODELVIEW31_ARB',
'GL_MODELVIEW3_ARB',
'GL_MODELVIEW4_ARB',
'GL_MODELVIEW5_ARB',
'GL_MODELVIEW6_ARB',
'GL_MODELVIEW7_ARB',
'GL_MODELVIEW8_ARB',
'GL_MODELVIEW9_ARB',
'GL_MODELVIEW_MATRIX',
'GL_MODELVIEW_PROJECTION_NV',
'GL_MODELVIEW_STACK_DEPTH',
'GL_MODULATE',
'GL_MODULATE_ADD_ATI',
'GL_MODULATE_SIGNED_ADD_ATI',
'GL_MODULATE_SUBTRACT_ATI',
'GL_MOV_ATI',
'GL_MULT',
'GL_MULTISAMPLE',
'GL_MULTISAMPLE_3DFX',
'GL_MULTISAMPLE_ARB',
'GL_MULTISAMPLE_BIT',
'GL_MULTISAMPLE_BIT_3DFX',
'GL_MULTISAMPLE_BIT_ARB',
'GL_MULTISAMPLE_BIT_EXT',
'GL_MULTISAMPLE_EXT',
'GL_MULTISAMPLE_FILTER_HINT_NV',
'GL_MULTISAMPLE_SGIS',
'GL_MUL_ATI',
'GL_MVP_MATRIX_EXT',
'GL_N3F_V3F',
'GL_NAME_STACK_DEPTH',
'GL_NAND',
'GL_NATIVE_GRAPHICS_BEGIN_HINT_PGI',
'GL_NATIVE_GRAPHICS_END_HINT_PGI',
'GL_NATIVE_GRAPHICS_HANDLE_PGI',
'GL_NEAREST',
'GL_NEAREST_CLIPMAP_LINEAR_SGIX',
'GL_NEAREST_CLIPMAP_NEAREST_SGIX',
'GL_NEAREST_MIPMAP_LINEAR',
'GL_NEAREST_MIPMAP_NEAREST',
'GL_NEGATE_BIT_ATI',
'GL_NEGATIVE_ONE_EXT',
'GL_NEGATIVE_W_EXT',
'GL_NEGATIVE_X_EXT',
'GL_NEGATIVE_Y_EXT',
'GL_NEGATIVE_Z_EXT',
'GL_NEVER',
'GL_NICEST',
'GL_NONE',
'GL_NOOP',
'GL_NOR',
'GL_NORMALIZE',
'GL_NORMALIZED_RANGE_EXT',
'GL_NORMAL_ARRAY',
'GL_NORMAL_ARRAY_BUFFER_BINDING',
'GL_NORMAL_ARRAY_BUFFER_BINDING_ARB',
'GL_NORMAL_ARRAY_COUNT_EXT',
'GL_NORMAL_ARRAY_EXT',
'GL_NORMAL_ARRAY_LIST_IBM',
'GL_NORMAL_ARRAY_LIST_STRIDE_IBM',
'GL_NORMAL_ARRAY_PARALLEL_POINTERS_INTEL',
'GL_NORMAL_ARRAY_POINTER',
'GL_NORMAL_ARRAY_POINTER_EXT',
'GL_NORMAL_ARRAY_STRIDE',
'GL_NORMAL_ARRAY_STRIDE_EXT',
'GL_NORMAL_ARRAY_TYPE',
'GL_NORMAL_ARRAY_TYPE_EXT',
'GL_NORMAL_BIT_PGI',
'GL_NORMAL_MAP',
'GL_NORMAL_MAP_ARB',
'GL_NORMAL_MAP_EXT',
'GL_NORMAL_MAP_NV',
'GL_NOTEQUAL',
'GL_NO_ERROR',
'GL_NUM_COMPRESSED_TEXTURE_FORMATS',
'GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB',
'GL_NUM_FRAGMENT_CONSTANTS_ATI',
'GL_NUM_FRAGMENT_REGISTERS_ATI',
'GL_NUM_GENERAL_COMBINERS_NV',
'GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI',
'GL_NUM_INSTRUCTIONS_PER_PASS_ATI',
'GL_NUM_INSTRUCTIONS_TOTAL_ATI',
'GL_NUM_LOOPBACK_COMPONENTS_ATI',
'GL_NUM_PASSES_ATI',
'GL_OBJECT_ACTIVE_ATTRIBUTES_ARB',
'GL_OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB',
'GL_OBJECT_ACTIVE_UNIFORMS_ARB',
'GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH_ARB',
'GL_OBJECT_ATTACHED_OBJECTS_ARB',
'GL_OBJECT_BUFFER_SIZE_ATI',
'GL_OBJECT_BUFFER_USAGE_ATI',
'GL_OBJECT_COMPILE_STATUS_ARB',
'GL_OBJECT_DELETE_STATUS_ARB',
'GL_OBJECT_DISTANCE_TO_LINE_SGIS',
'GL_OBJECT_DISTANCE_TO_POINT_SGIS',
'GL_OBJECT_INFO_LOG_LENGTH_ARB',
'GL_OBJECT_LINEAR',
'GL_OBJECT_LINE_SGIS',
'GL_OBJECT_LINK_STATUS_ARB',
'GL_OBJECT_PLANE',
'GL_OBJECT_POINT_SGIS',
'GL_OBJECT_SHADER_SOURCE_LENGTH_ARB',
'GL_OBJECT_SUBTYPE_ARB',
'GL_OBJECT_TYPE_ARB',
'GL_OBJECT_VALIDATE_STATUS_ARB',
'GL_OCCLUSION_TEST_HP',
'GL_OCCLUSION_TEST_RESULT_HP',
'GL_OFFSET_HILO_PROJECTIVE_TEXTURE_2D_NV',
'GL_OFFSET_HILO_PROJECTIVE_TEXTURE_RECTANGLE_NV',
'GL_OFFSET_HILO_TEXTURE_2D_NV',
'GL_OFFSET_HILO_TEXTURE_RECTANGLE_NV',
'GL_OFFSET_PROJECTIVE_TEXTURE_2D_NV',
'GL_OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV',
'GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV',
'GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_SCALE_NV',
'GL_OFFSET_TEXTURE_2D_BIAS_NV',
'GL_OFFSET_TEXTURE_2D_MATRIX_NV',
'GL_OFFSET_TEXTURE_2D_NV',
'GL_OFFSET_TEXTURE_2D_SCALE_NV',
'GL_OFFSET_TEXTURE_BIAS_NV',
'GL_OFFSET_TEXTURE_MATRIX_NV',
'GL_OFFSET_TEXTURE_RECTANGLE_NV',
'GL_OFFSET_TEXTURE_RECTANGLE_SCALE_NV',
'GL_OFFSET_TEXTURE_SCALE_NV',
'GL_ONE',
'GL_ONE_EXT',
'GL_ONE_MINUS_CONSTANT_ALPHA',
'GL_ONE_MINUS_CONSTANT_ALPHA_EXT',
'GL_ONE_MINUS_CONSTANT_COLOR',
'GL_ONE_MINUS_CONSTANT_COLOR_EXT',
'GL_ONE_MINUS_DST_ALPHA',
'GL_ONE_MINUS_DST_COLOR',
'GL_ONE_MINUS_SRC_ALPHA',
'GL_ONE_MINUS_SRC_COLOR',
'GL_OPERAND0_ALPHA',
'GL_OPERAND0_ALPHA_ARB',
'GL_OPERAND0_ALPHA_EXT',
'GL_OPERAND0_RGB',
'GL_OPERAND0_RGB_ARB',
'GL_OPERAND0_RGB_EXT',
'GL_OPERAND1_ALPHA',
'GL_OPERAND1_ALPHA_ARB',
'GL_OPERAND1_ALPHA_EXT',
'GL_OPERAND1_RGB',
'GL_OPERAND1_RGB_ARB',
'GL_OPERAND1_RGB_EXT',
'GL_OPERAND2_ALPHA',
'GL_OPERAND2_ALPHA_ARB',
'GL_OPERAND2_ALPHA_EXT',
'GL_OPERAND2_RGB',
'GL_OPERAND2_RGB_ARB',
'GL_OPERAND2_RGB_EXT',
'GL_OPERAND3_ALPHA_NV',
'GL_OPERAND3_RGB_NV',
'GL_OP_ADD_EXT',
'GL_OP_CLAMP_EXT',
'GL_OP_CROSS_PRODUCT_EXT',
'GL_OP_DOT3_EXT',
'GL_OP_DOT4_EXT',
'GL_OP_EXP_BASE_2_EXT',
'GL_OP_FLOOR_EXT',
'GL_OP_FRAC_EXT',
'GL_OP_INDEX_EXT',
'GL_OP_LOG_BASE_2_EXT',
'GL_OP_MADD_EXT',
'GL_OP_MAX_EXT',
'GL_OP_MIN_EXT',
'GL_OP_MOV_EXT',
'GL_OP_MULTIPLY_MATRIX_EXT',
'GL_OP_MUL_EXT',
'GL_OP_NEGATE_EXT',
'GL_OP_POWER_EXT',
'GL_OP_RECIP_EXT',
'GL_OP_RECIP_SQRT_EXT',
'GL_OP_ROUND_EXT',
'GL_OP_SET_GE_EXT',
'GL_OP_SET_LT_EXT',
'GL_OP_SUB_EXT',
'GL_OR',
'GL_ORDER',
'GL_OR_INVERTED',
'GL_OR_REVERSE',
'GL_OUTPUT_COLOR0_EXT',
'GL_OUTPUT_COLOR1_EXT',
'GL_OUTPUT_FOG_EXT',
'GL_OUTPUT_TEXTURE_COORD0_EXT',
'GL_OUTPUT_TEXTURE_COORD10_EXT',
'GL_OUTPUT_TEXTURE_COORD11_EXT',
'GL_OUTPUT_TEXTURE_COORD12_EXT',
'GL_OUTPUT_TEXTURE_COORD13_EXT',
'GL_OUTPUT_TEXTURE_COORD14_EXT',
'GL_OUTPUT_TEXTURE_COORD15_EXT',
'GL_OUTPUT_TEXTURE_COORD16_EXT',
'GL_OUTPUT_TEXTURE_COORD17_EXT',
'GL_OUTPUT_TEXTURE_COORD18_EXT',
'GL_OUTPUT_TEXTURE_COORD19_EXT',
'GL_OUTPUT_TEXTURE_COORD1_EXT',
'GL_OUTPUT_TEXTURE_COORD20_EXT',
'GL_OUTPUT_TEXTURE_COORD21_EXT',
'GL_OUTPUT_TEXTURE_COORD22_EXT',
'GL_OUTPUT_TEXTURE_COORD23_EXT',
'GL_OUTPUT_TEXTURE_COORD24_EXT',
'GL_OUTPUT_TEXTURE_COORD25_EXT',
'GL_OUTPUT_TEXTURE_COORD26_EXT',
'GL_OUTPUT_TEXTURE_COORD27_EXT',
'GL_OUTPUT_TEXTURE_COORD28_EXT',
'GL_OUTPUT_TEXTURE_COORD29_EXT',
'GL_OUTPUT_TEXTURE_COORD2_EXT',
'GL_OUTPUT_TEXTURE_COORD30_EXT',
'GL_OUTPUT_TEXTURE_COORD31_EXT',
'GL_OUTPUT_TEXTURE_COORD3_EXT',
'GL_OUTPUT_TEXTURE_COORD4_EXT',
'GL_OUTPUT_TEXTURE_COORD5_EXT',
'GL_OUTPUT_TEXTURE_COORD6_EXT',
'GL_OUTPUT_TEXTURE_COORD7_EXT',
'GL_OUTPUT_TEXTURE_COORD8_EXT',
'GL_OUTPUT_TEXTURE_COORD9_EXT',
'GL_OUTPUT_VERTEX_EXT',
'GL_OUT_OF_MEMORY',
'GL_PACK_ALIGNMENT',
'GL_PACK_CMYK_HINT_EXT',
'GL_PACK_IMAGE_DEPTH_SGIS',
'GL_PACK_IMAGE_HEIGHT',
'GL_PACK_IMAGE_HEIGHT_EXT',
'GL_PACK_INVERT_MESA',
'GL_PACK_LSB_FIRST',
'GL_PACK_RESAMPLE_OML',
'GL_PACK_RESAMPLE_SGIX',
'GL_PACK_ROW_LENGTH',
'GL_PACK_SKIP_IMAGES',
'GL_PACK_SKIP_IMAGES_EXT',
'GL_PACK_SKIP_PIXELS',
'GL_PACK_SKIP_ROWS',
'GL_PACK_SKIP_VOLUMES_SGIS',
'GL_PACK_SUBSAMPLE_RATE_SGIX',
'GL_PACK_SWAP_BYTES',
'GL_PARALLEL_ARRAYS_INTEL',
'GL_PASS_THROUGH_NV',
'GL_PASS_THROUGH_TOKEN',
'GL_PERSPECTIVE_CORRECTION_HINT',
'GL_PERTURB_EXT',
'GL_PER_STAGE_CONSTANTS_NV',
'GL_PHONG_HINT_WIN',
'GL_PHONG_WIN',
'GL_PIXEL_COUNTER_BITS_NV',
'GL_PIXEL_COUNT_AVAILABLE_NV',
'GL_PIXEL_COUNT_NV',
'GL_PIXEL_CUBIC_WEIGHT_EXT',
'GL_PIXEL_FRAGMENT_ALPHA_SOURCE_SGIS',
'GL_PIXEL_FRAGMENT_RGB_SOURCE_SGIS',
'GL_PIXEL_GROUP_COLOR_SGIS',
'GL_PIXEL_MAG_FILTER_EXT',
'GL_PIXEL_MAP_A_TO_A',
'GL_PIXEL_MAP_A_TO_A_SIZE',
'GL_PIXEL_MAP_B_TO_B',
'GL_PIXEL_MAP_B_TO_B_SIZE',
'GL_PIXEL_MAP_G_TO_G',
'GL_PIXEL_MAP_G_TO_G_SIZE',
'GL_PIXEL_MAP_I_TO_A',
'GL_PIXEL_MAP_I_TO_A_SIZE',
'GL_PIXEL_MAP_I_TO_B',
'GL_PIXEL_MAP_I_TO_B_SIZE',
'GL_PIXEL_MAP_I_TO_G',
'GL_PIXEL_MAP_I_TO_G_SIZE',
'GL_PIXEL_MAP_I_TO_I',
'GL_PIXEL_MAP_I_TO_I_SIZE',
'GL_PIXEL_MAP_I_TO_R',
'GL_PIXEL_MAP_I_TO_R_SIZE',
'GL_PIXEL_MAP_R_TO_R',
'GL_PIXEL_MAP_R_TO_R_SIZE',
'GL_PIXEL_MAP_S_TO_S',
'GL_PIXEL_MAP_S_TO_S_SIZE',
'GL_PIXEL_MIN_FILTER_EXT',
'GL_PIXEL_MODE_BIT',
'GL_PIXEL_PACK_BUFFER_ARB',
'GL_PIXEL_PACK_BUFFER_BINDING_ARB',
'GL_PIXEL_PACK_BUFFER_BINDING_EXT',
'GL_PIXEL_PACK_BUFFER_EXT',
'GL_PIXEL_SUBSAMPLE_2424_SGIX',
'GL_PIXEL_SUBSAMPLE_4242_SGIX',
'GL_PIXEL_SUBSAMPLE_4444_SGIX',
'GL_PIXEL_TEXTURE_SGIS',
'GL_PIXEL_TEX_GEN_ALPHA_LS_SGIX',
'GL_PIXEL_TEX_GEN_ALPHA_MS_SGIX',
'GL_PIXEL_TEX_GEN_ALPHA_NO_REPLACE_SGIX',
'GL_PIXEL_TEX_GEN_ALPHA_REPLACE_SGIX',
'GL_PIXEL_TEX_GEN_MODE_SGIX',
'GL_PIXEL_TEX_GEN_Q_CEILING_SGIX',
'GL_PIXEL_TEX_GEN_Q_FLOOR_SGIX',
'GL_PIXEL_TEX_GEN_Q_ROUND_SGIX',
'GL_PIXEL_TEX_GEN_SGIX',
'GL_PIXEL_TILE_BEST_ALIGNMENT_SGIX',
'GL_PIXEL_TILE_CACHE_INCREMENT_SGIX',
'GL_PIXEL_TILE_CACHE_SIZE_SGIX',
'GL_PIXEL_TILE_GRID_DEPTH_SGIX',
'GL_PIXEL_TILE_GRID_HEIGHT_SGIX',
'GL_PIXEL_TILE_GRID_WIDTH_SGIX',
'GL_PIXEL_TILE_HEIGHT_SGIX',
'GL_PIXEL_TILE_WIDTH_SGIX',
'GL_PIXEL_TRANSFORM_2D_EXT',
'GL_PIXEL_TRANSFORM_2D_MATRIX_EXT',
'GL_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT',
'GL_PIXEL_UNPACK_BUFFER_ARB',
'GL_PIXEL_UNPACK_BUFFER_BINDING_ARB',
'GL_PIXEL_UNPACK_BUFFER_BINDING_EXT',
'GL_PIXEL_UNPACK_BUFFER_EXT',
'GL_PN_TRIANGLES_ATI',
'GL_PN_TRIANGLES_NORMAL_MODE_ATI',
'GL_PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI',
'GL_PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI',
'GL_PN_TRIANGLES_POINT_MODE_ATI',
'GL_PN_TRIANGLES_POINT_MODE_CUBIC_ATI',
'GL_PN_TRIANGLES_POINT_MODE_LINEAR_ATI',
'GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI',
'GL_POINT',
'GL_POINTS',
'GL_POINT_BIT',
'GL_POINT_DISTANCE_ATTENUATION',
'GL_POINT_DISTANCE_ATTENUATION_ARB',
'GL_POINT_FADE_THRESHOLD_SIZE',
'GL_POINT_FADE_THRESHOLD_SIZE_ARB',
'GL_POINT_FADE_THRESHOLD_SIZE_EXT',
'GL_POINT_FADE_THRESHOLD_SIZE_SGIS',
'GL_POINT_SIZE',
'GL_POINT_SIZE_GRANULARITY',
'GL_POINT_SIZE_MAX',
'GL_POINT_SIZE_MAX_ARB',
'GL_POINT_SIZE_MAX_EXT',
'GL_POINT_SIZE_MAX_SGIS',
'GL_POINT_SIZE_MIN',
'GL_POINT_SIZE_MIN_ARB',
'GL_POINT_SIZE_MIN_EXT',
'GL_POINT_SIZE_MIN_SGIS',
'GL_POINT_SIZE_RANGE',
'GL_POINT_SMOOTH',
'GL_POINT_SMOOTH_HINT',
'GL_POINT_SPRITE',
'GL_POINT_SPRITE_ARB',
'GL_POINT_SPRITE_COORD_ORIGIN',
'GL_POINT_SPRITE_NV',
'GL_POINT_SPRITE_R_MODE_NV',
'GL_POINT_TOKEN',
'GL_POLYGON',
'GL_POLYGON_BIT',
'GL_POLYGON_MODE',
'GL_POLYGON_OFFSET_BIAS_EXT',
'GL_POLYGON_OFFSET_EXT',
'GL_POLYGON_OFFSET_FACTOR',
'GL_POLYGON_OFFSET_FACTOR_EXT',
'GL_POLYGON_OFFSET_FILL',
'GL_POLYGON_OFFSET_LINE',
'GL_POLYGON_OFFSET_POINT',
'GL_POLYGON_OFFSET_UNITS',
'GL_POLYGON_SMOOTH',
'GL_POLYGON_SMOOTH_HINT',
'GL_POLYGON_STIPPLE',
'GL_POLYGON_STIPPLE_BIT',
'GL_POLYGON_TOKEN',
'GL_POSITION',
'GL_POST_COLOR_MATRIX_ALPHA_BIAS',
'GL_POST_COLOR_MATRIX_ALPHA_BIAS_SGI',
'GL_POST_COLOR_MATRIX_ALPHA_SCALE',
'GL_POST_COLOR_MATRIX_ALPHA_SCALE_SGI',
'GL_POST_COLOR_MATRIX_BLUE_BIAS',
'GL_POST_COLOR_MATRIX_BLUE_BIAS_SGI',
'GL_POST_COLOR_MATRIX_BLUE_SCALE',
'GL_POST_COLOR_MATRIX_BLUE_SCALE_SGI',
'GL_POST_COLOR_MATRIX_COLOR_TABLE',
'GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI',
'GL_POST_COLOR_MATRIX_GREEN_BIAS',
'GL_POST_COLOR_MATRIX_GREEN_BIAS_SGI',
'GL_POST_COLOR_MATRIX_GREEN_SCALE',
'GL_POST_COLOR_MATRIX_GREEN_SCALE_SGI',
'GL_POST_COLOR_MATRIX_RED_BIAS',
'GL_POST_COLOR_MATRIX_RED_BIAS_SGI',
'GL_POST_COLOR_MATRIX_RED_SCALE',
'GL_POST_COLOR_MATRIX_RED_SCALE_SGI',
'GL_POST_CONVOLUTION_ALPHA_BIAS',
'GL_POST_CONVOLUTION_ALPHA_BIAS_EXT',
'GL_POST_CONVOLUTION_ALPHA_SCALE',
'GL_POST_CONVOLUTION_ALPHA_SCALE_EXT',
'GL_POST_CONVOLUTION_BLUE_BIAS',
'GL_POST_CONVOLUTION_BLUE_BIAS_EXT',
'GL_POST_CONVOLUTION_BLUE_SCALE',
'GL_POST_CONVOLUTION_BLUE_SCALE_EXT',
'GL_POST_CONVOLUTION_COLOR_TABLE',
'GL_POST_CONVOLUTION_COLOR_TABLE_SGI',
'GL_POST_CONVOLUTION_GREEN_BIAS',
'GL_POST_CONVOLUTION_GREEN_BIAS_EXT',
'GL_POST_CONVOLUTION_GREEN_SCALE',
'GL_POST_CONVOLUTION_GREEN_SCALE_EXT',
'GL_POST_CONVOLUTION_RED_BIAS',
'GL_POST_CONVOLUTION_RED_BIAS_EXT',
'GL_POST_CONVOLUTION_RED_SCALE',
'GL_POST_CONVOLUTION_RED_SCALE_EXT',
'GL_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP',
'GL_POST_TEXTURE_FILTER_BIAS_RANGE_SGIX',
'GL_POST_TEXTURE_FILTER_BIAS_SGIX',
'GL_POST_TEXTURE_FILTER_SCALE_RANGE_SGIX',
'GL_POST_TEXTURE_FILTER_SCALE_SGIX',
'GL_PREFER_DOUBLEBUFFER_HINT_PGI',
'GL_PRESERVE_ATI',
'GL_PREVIOUS',
'GL_PREVIOUS_ARB',
'GL_PREVIOUS_EXT',
'GL_PREVIOUS_TEXTURE_INPUT_NV',
'GL_PRIMARY_COLOR',
'GL_PRIMARY_COLOR_ARB',
'GL_PRIMARY_COLOR_EXT',
'GL_PRIMARY_COLOR_NV',
'GL_PRIMITIVE_RESTART_INDEX_NV',
'GL_PRIMITIVE_RESTART_NV',
'GL_PROGRAM_ADDRESS_REGISTERS_ARB',
'GL_PROGRAM_ALU_INSTRUCTIONS_ARB',
'GL_PROGRAM_ATTRIBS_ARB',
'GL_PROGRAM_BINDING_ARB',
'GL_PROGRAM_ERROR_POSITION_ARB',
'GL_PROGRAM_ERROR_POSITION_NV',
'GL_PROGRAM_ERROR_STRING_ARB',
'GL_PROGRAM_ERROR_STRING_NV',
'GL_PROGRAM_FORMAT_ARB',
'GL_PROGRAM_FORMAT_ASCII_ARB',
'GL_PROGRAM_INSTRUCTIONS_ARB',
'GL_PROGRAM_LENGTH_ARB',
'GL_PROGRAM_LENGTH_NV',
'GL_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB',
'GL_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB',
'GL_PROGRAM_NATIVE_ATTRIBS_ARB',
'GL_PROGRAM_NATIVE_INSTRUCTIONS_ARB',
'GL_PROGRAM_NATIVE_PARAMETERS_ARB',
'GL_PROGRAM_NATIVE_TEMPORARIES_ARB',
'GL_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB',
'GL_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB',
'GL_PROGRAM_OBJECT_ARB',
'GL_PROGRAM_PARAMETERS_ARB',
'GL_PROGRAM_PARAMETER_NV',
'GL_PROGRAM_RESIDENT_NV',
'GL_PROGRAM_STRING_ARB',
'GL_PROGRAM_STRING_NV',
'GL_PROGRAM_TARGET_NV',
'GL_PROGRAM_TEMPORARIES_ARB',
'GL_PROGRAM_TEX_INDIRECTIONS_ARB',
'GL_PROGRAM_TEX_INSTRUCTIONS_ARB',
'GL_PROGRAM_UNDER_NATIVE_LIMITS_ARB',
'GL_PROJECTION',
'GL_PROJECTION_MATRIX',
'GL_PROJECTION_STACK_DEPTH',
'GL_PROXY_COLOR_TABLE',
'GL_PROXY_COLOR_TABLE_SGI',
'GL_PROXY_HISTOGRAM',
'GL_PROXY_HISTOGRAM_EXT',
'GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE',
'GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE_SGI',
'GL_PROXY_POST_CONVOLUTION_COLOR_TABLE',
'GL_PROXY_POST_CONVOLUTION_COLOR_TABLE_SGI',
'GL_PROXY_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP',
'GL_PROXY_TEXTURE_1D',
'GL_PROXY_TEXTURE_1D_EXT',
'GL_PROXY_TEXTURE_2D',
'GL_PROXY_TEXTURE_2D_EXT',
'GL_PROXY_TEXTURE_3D',
'GL_PROXY_TEXTURE_3D_EXT',
'GL_PROXY_TEXTURE_4D_SGIS',
'GL_PROXY_TEXTURE_COLOR_TABLE_SGI',
'GL_PROXY_TEXTURE_CUBE_MAP',
'GL_PROXY_TEXTURE_CUBE_MAP_ARB',
'GL_PROXY_TEXTURE_CUBE_MAP_EXT',
'GL_PROXY_TEXTURE_RECTANGLE_ARB',
'GL_PROXY_TEXTURE_RECTANGLE_NV',
'GL_Q',
'GL_QUADRATIC_ATTENUATION',
'GL_QUADS',
'GL_QUAD_ALPHA4_SGIS',
'GL_QUAD_ALPHA8_SGIS',
'GL_QUAD_INTENSITY4_SGIS',
'GL_QUAD_INTENSITY8_SGIS',
'GL_QUAD_LUMINANCE4_SGIS',
'GL_QUAD_LUMINANCE8_SGIS',
'GL_QUAD_MESH_SUN',
'GL_QUAD_STRIP',
'GL_QUAD_TEXTURE_SELECT_SGIS',
'GL_QUARTER_BIT_ATI',
'GL_QUERY_COUNTER_BITS',
'GL_QUERY_COUNTER_BITS_ARB',
'GL_QUERY_RESULT',
'GL_QUERY_RESULT_ARB',
'GL_QUERY_RESULT_AVAILABLE',
'GL_QUERY_RESULT_AVAILABLE_ARB',
'GL_R',
'GL_R1UI_C3F_V3F_SUN',
'GL_R1UI_C4F_N3F_V3F_SUN',
'GL_R1UI_C4UB_V3F_SUN',
'GL_R1UI_N3F_V3F_SUN',
'GL_R1UI_T2F_C4F_N3F_V3F_SUN',
'GL_R1UI_T2F_N3F_V3F_SUN',
'GL_R1UI_T2F_V3F_SUN',
'GL_R1UI_V3F_SUN',
'GL_R3_G3_B2',
'GL_RASTER_POSITION_UNCLIPPED_IBM',
'GL_READ_BUFFER',
'GL_READ_ONLY',
'GL_READ_ONLY_ARB',
'GL_READ_PIXEL_DATA_RANGE_LENGTH_NV',
'GL_READ_PIXEL_DATA_RANGE_NV',
'GL_READ_PIXEL_DATA_RANGE_POINTER_NV',
'GL_READ_WRITE',
'GL_READ_WRITE_ARB',
'GL_RECLAIM_MEMORY_HINT_PGI',
'GL_RED',
'GL_REDUCE',
'GL_REDUCE_EXT',
'GL_RED_BIAS',
'GL_RED_BITS',
'GL_RED_BIT_ATI',
'GL_RED_MAX_CLAMP_INGR',
'GL_RED_MIN_CLAMP_INGR',
'GL_RED_SCALE',
'GL_REFERENCE_PLANE_EQUATION_SGIX',
'GL_REFERENCE_PLANE_SGIX',
'GL_REFLECTION_MAP',
'GL_REFLECTION_MAP_ARB',
'GL_REFLECTION_MAP_EXT',
'GL_REFLECTION_MAP_NV',
'GL_REGISTER_COMBINERS_NV',
'GL_REG_0_ATI',
'GL_REG_10_ATI',
'GL_REG_11_ATI',
'GL_REG_12_ATI',
'GL_REG_13_ATI',
'GL_REG_14_ATI',
'GL_REG_15_ATI',
'GL_REG_16_ATI',
'GL_REG_17_ATI',
'GL_REG_18_ATI',
'GL_REG_19_ATI',
'GL_REG_1_ATI',
'GL_REG_20_ATI',
'GL_REG_21_ATI',
'GL_REG_22_ATI',
'GL_REG_23_ATI',
'GL_REG_24_ATI',
'GL_REG_25_ATI',
'GL_REG_26_ATI',
'GL_REG_27_ATI',
'GL_REG_28_ATI',
'GL_REG_29_ATI',
'GL_REG_2_ATI',
'GL_REG_30_ATI',
'GL_REG_31_ATI',
'GL_REG_3_ATI',
'GL_REG_4_ATI',
'GL_REG_5_ATI',
'GL_REG_6_ATI',
'GL_REG_7_ATI',
'GL_REG_8_ATI',
'GL_REG_9_ATI',
'GL_RENDER',
'GL_RENDERBUFFER_ALPHA_SIZE_EXT',
'GL_RENDERBUFFER_BINDING_EXT',
'GL_RENDERBUFFER_BLUE_SIZE_EXT',
'GL_RENDERBUFFER_DEPTH_SIZE_EXT',
'GL_RENDERBUFFER_EXT',
'GL_RENDERBUFFER_GREEN_SIZE_EXT',
'GL_RENDERBUFFER_HEIGHT_EXT',
'GL_RENDERBUFFER_INTERNAL_FORMAT_EXT',
'GL_RENDERBUFFER_RED_SIZE_EXT',
'GL_RENDERBUFFER_STENCIL_SIZE_EXT',
'GL_RENDERBUFFER_WIDTH_EXT',
'GL_RENDERER',
'GL_RENDER_MODE',
'GL_REPEAT',
'GL_REPLACE',
'GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN',
'GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN',
'GL_REPLACEMENT_CODE_ARRAY_SUN',
'GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN',
'GL_REPLACEMENT_CODE_SUN',
'GL_REPLACE_EXT',
'GL_REPLACE_MIDDLE_SUN',
'GL_REPLACE_OLDEST_SUN',
'GL_REPLICATE_BORDER',
'GL_REPLICATE_BORDER_HP',
'GL_RESAMPLE_AVERAGE_OML',
'GL_RESAMPLE_DECIMATE_OML',
'GL_RESAMPLE_DECIMATE_SGIX',
'GL_RESAMPLE_REPLICATE_OML',
'GL_RESAMPLE_REPLICATE_SGIX',
'GL_RESAMPLE_ZERO_FILL_OML',
'GL_RESAMPLE_ZERO_FILL_SGIX',
'GL_RESCALE_NORMAL',
'GL_RESCALE_NORMAL_EXT',
'GL_RESTART_SUN',
'GL_RETURN',
'GL_RGB',
'GL_RGB10',
'GL_RGB10_A2',
'GL_RGB10_A2_EXT',
'GL_RGB10_EXT',
'GL_RGB12',
'GL_RGB12_EXT',
'GL_RGB16',
'GL_RGB16F_ARB',
'GL_RGB16_EXT',
'GL_RGB2_EXT',
'GL_RGB32F_ARB',
'GL_RGB4',
'GL_RGB4_EXT',
'GL_RGB4_S3TC',
'GL_RGB5',
'GL_RGB5_A1',
'GL_RGB5_A1_EXT',
'GL_RGB5_EXT',
'GL_RGB8',
'GL_RGB8_EXT',
'GL_RGBA',
'GL_RGBA12',
'GL_RGBA12_EXT',
'GL_RGBA16',
'GL_RGBA16F_ARB',
'GL_RGBA16_EXT',
'GL_RGBA2',
'GL_RGBA2_EXT',
'GL_RGBA32F_ARB',
'GL_RGBA4',
'GL_RGBA4_EXT',
'GL_RGBA4_S3TC',
'GL_RGBA8',
'GL_RGBA8_EXT',
'GL_RGBA_FLOAT16_ATI',
'GL_RGBA_FLOAT32_ATI',
'GL_RGBA_FLOAT_MODE_ARB',
'GL_RGBA_MODE',
'GL_RGBA_S3TC',
'GL_RGBA_UNSIGNED_DOT_PRODUCT_MAPPING_NV',
'GL_RGB_FLOAT16_ATI',
'GL_RGB_FLOAT32_ATI',
'GL_RGB_S3TC',
'GL_RGB_SCALE',
'GL_RGB_SCALE_ARB',
'GL_RGB_SCALE_EXT',
'GL_RIGHT',
'GL_S',
'GL_SAMPLER_1D',
'GL_SAMPLER_1D_ARB',
'GL_SAMPLER_1D_SHADOW',
'GL_SAMPLER_1D_SHADOW_ARB',
'GL_SAMPLER_2D',
'GL_SAMPLER_2D_ARB',
'GL_SAMPLER_2D_RECT_ARB',
'GL_SAMPLER_2D_RECT_SHADOW_ARB',
'GL_SAMPLER_2D_SHADOW',
'GL_SAMPLER_2D_SHADOW_ARB',
'GL_SAMPLER_3D',
'GL_SAMPLER_3D_ARB',
'GL_SAMPLER_CUBE',
'GL_SAMPLER_CUBE_ARB',
'GL_SAMPLES',
'GL_SAMPLES_3DFX',
'GL_SAMPLES_ARB',
'GL_SAMPLES_EXT',
'GL_SAMPLES_PASSED',
'GL_SAMPLES_PASSED_ARB',
'GL_SAMPLES_SGIS',
'GL_SAMPLE_ALPHA_TO_COVERAGE',
'GL_SAMPLE_ALPHA_TO_COVERAGE_ARB',
'GL_SAMPLE_ALPHA_TO_MASK_EXT',
'GL_SAMPLE_ALPHA_TO_MASK_SGIS',
'GL_SAMPLE_ALPHA_TO_ONE',
'GL_SAMPLE_ALPHA_TO_ONE_ARB',
'GL_SAMPLE_ALPHA_TO_ONE_EXT',
'GL_SAMPLE_ALPHA_TO_ONE_SGIS',
'GL_SAMPLE_BUFFERS',
'GL_SAMPLE_BUFFERS_3DFX',
'GL_SAMPLE_BUFFERS_ARB',
'GL_SAMPLE_BUFFERS_EXT',
'GL_SAMPLE_BUFFERS_SGIS',
'GL_SAMPLE_COVERAGE',
'GL_SAMPLE_COVERAGE_ARB',
'GL_SAMPLE_COVERAGE_INVERT',
'GL_SAMPLE_COVERAGE_INVERT_ARB',
'GL_SAMPLE_COVERAGE_VALUE',
'GL_SAMPLE_COVERAGE_VALUE_ARB',
'GL_SAMPLE_MASK_EXT',
'GL_SAMPLE_MASK_INVERT_EXT',
'GL_SAMPLE_MASK_INVERT_SGIS',
'GL_SAMPLE_MASK_SGIS',
'GL_SAMPLE_MASK_VALUE_EXT',
'GL_SAMPLE_MASK_VALUE_SGIS',
'GL_SAMPLE_PATTERN_EXT',
'GL_SAMPLE_PATTERN_SGIS',
'GL_SATURATE_BIT_ATI',
'GL_SCALAR_EXT',
'GL_SCALEBIAS_HINT_SGIX',
'GL_SCALE_BY_FOUR_NV',
'GL_SCALE_BY_ONE_HALF_NV',
'GL_SCALE_BY_TWO_NV',
'GL_SCISSOR_BIT',
'GL_SCISSOR_BOX',
'GL_SCISSOR_TEST',
'GL_SCREEN_COORDINATES_REND',
'GL_SECONDARY_COLOR_ARRAY',
'GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING',
'GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB',
'GL_SECONDARY_COLOR_ARRAY_EXT',
'GL_SECONDARY_COLOR_ARRAY_LIST_IBM',
'GL_SECONDARY_COLOR_ARRAY_LIST_STRIDE_IBM',
'GL_SECONDARY_COLOR_ARRAY_POINTER',
'GL_SECONDARY_COLOR_ARRAY_POINTER_EXT',
'GL_SECONDARY_COLOR_ARRAY_SIZE',
'GL_SECONDARY_COLOR_ARRAY_SIZE_EXT',
'GL_SECONDARY_COLOR_ARRAY_STRIDE',
'GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT',
'GL_SECONDARY_COLOR_ARRAY_TYPE',
'GL_SECONDARY_COLOR_ARRAY_TYPE_EXT',
'GL_SECONDARY_COLOR_NV',
'GL_SECONDARY_INTERPOLATOR_ATI',
'GL_SELECT',
'GL_SELECTION_BUFFER_POINTER',
'GL_SELECTION_BUFFER_SIZE',
'GL_SEPARABLE_2D',
'GL_SEPARABLE_2D_EXT',
'GL_SEPARATE_SPECULAR_COLOR',
'GL_SEPARATE_SPECULAR_COLOR_EXT',
'GL_SET',
'GL_SHADER_CONSISTENT_NV',
'GL_SHADER_OBJECT_ARB',
'GL_SHADER_OPERATION_NV',
'GL_SHADER_SOURCE_LENGTH',
'GL_SHADER_TYPE',
'GL_SHADE_MODEL',
'GL_SHADING_LANGUAGE_VERSION',
'GL_SHADING_LANGUAGE_VERSION_ARB',
'GL_SHADOW_AMBIENT_SGIX',
'GL_SHADOW_ATTENUATION_EXT',
'GL_SHARED_TEXTURE_PALETTE_EXT',
'GL_SHARPEN_TEXTURE_FUNC_POINTS_SGIS',
'GL_SHININESS',
'GL_SHORT',
'GL_SIGNED_ALPHA8_NV',
'GL_SIGNED_ALPHA_NV',
'GL_SIGNED_HILO16_NV',
'GL_SIGNED_HILO8_NV',
'GL_SIGNED_HILO_NV',
'GL_SIGNED_IDENTITY_NV',
'GL_SIGNED_INTENSITY8_NV',
'GL_SIGNED_INTENSITY_NV',
'GL_SIGNED_LUMINANCE8_ALPHA8_NV',
'GL_SIGNED_LUMINANCE8_NV',
'GL_SIGNED_LUMINANCE_ALPHA_NV',
'GL_SIGNED_LUMINANCE_NV',
'GL_SIGNED_NEGATE_NV',
'GL_SIGNED_RGB8_NV',
'GL_SIGNED_RGB8_UNSIGNED_ALPHA8_NV',
'GL_SIGNED_RGBA8_NV',
'GL_SIGNED_RGBA_NV',
'GL_SIGNED_RGB_NV',
'GL_SIGNED_RGB_UNSIGNED_ALPHA_NV',
'GL_SINGLE_COLOR',
'GL_SINGLE_COLOR_EXT',
'GL_SLICE_ACCUM_SUN',
'GL_SMOOTH',
'GL_SMOOTH_LINE_WIDTH_GRANULARITY',
'GL_SMOOTH_LINE_WIDTH_RANGE',
'GL_SMOOTH_POINT_SIZE_GRANULARITY',
'GL_SMOOTH_POINT_SIZE_RANGE',
'GL_SOURCE0_ALPHA',
'GL_SOURCE0_ALPHA_ARB',
'GL_SOURCE0_ALPHA_EXT',
'GL_SOURCE0_RGB',
'GL_SOURCE0_RGB_ARB',
'GL_SOURCE0_RGB_EXT',
'GL_SOURCE1_ALPHA',
'GL_SOURCE1_ALPHA_ARB',
'GL_SOURCE1_ALPHA_EXT',
'GL_SOURCE1_RGB',
'GL_SOURCE1_RGB_ARB',
'GL_SOURCE1_RGB_EXT',
'GL_SOURCE2_ALPHA',
'GL_SOURCE2_ALPHA_ARB',
'GL_SOURCE2_ALPHA_EXT',
'GL_SOURCE2_RGB',
'GL_SOURCE2_RGB_ARB',
'GL_SOURCE2_RGB_EXT',
'GL_SOURCE3_ALPHA_NV',
'GL_SOURCE3_RGB_NV',
'GL_SPARE0_NV',
'GL_SPARE0_PLUS_SECONDARY_COLOR_NV',
'GL_SPARE1_NV',
'GL_SPECULAR',
'GL_SPHERE_MAP',
'GL_SPOT_CUTOFF',
'GL_SPOT_DIRECTION',
'GL_SPOT_EXPONENT',
'GL_SPRITE_AXIAL_SGIX',
'GL_SPRITE_AXIS_SGIX',
'GL_SPRITE_EYE_ALIGNED_SGIX',
'GL_SPRITE_MODE_SGIX',
'GL_SPRITE_OBJECT_ALIGNED_SGIX',
'GL_SPRITE_SGIX',
'GL_SPRITE_TRANSLATION_SGIX',
'GL_SRC0_ALPHA',
'GL_SRC0_RGB',
'GL_SRC1_ALPHA',
'GL_SRC1_RGB',
'GL_SRC2_ALPHA',
'GL_SRC2_RGB',
'GL_SRC_ALPHA',
'GL_SRC_ALPHA_SATURATE',
'GL_SRC_COLOR',
'GL_STACK_OVERFLOW',
'GL_STACK_UNDERFLOW',
'GL_STATIC_ATI',
'GL_STATIC_COPY',
'GL_STATIC_COPY_ARB',
'GL_STATIC_DRAW',
'GL_STATIC_DRAW_ARB',
'GL_STATIC_READ',
'GL_STATIC_READ_ARB',
'GL_STENCIL',
'GL_STENCIL_ATTACHMENT_EXT',
'GL_STENCIL_BACK_FAIL',
'GL_STENCIL_BACK_FAIL_ATI',
'GL_STENCIL_BACK_FUNC',
'GL_STENCIL_BACK_FUNC_ATI',
'GL_STENCIL_BACK_PASS_DEPTH_FAIL',
'GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI',
'GL_STENCIL_BACK_PASS_DEPTH_PASS',
'GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI',
'GL_STENCIL_BACK_REF',
'GL_STENCIL_BACK_VALUE_MASK',
'GL_STENCIL_BACK_WRITEMASK',
'GL_STENCIL_BITS',
'GL_STENCIL_BUFFER_BIT',
'GL_STENCIL_CLEAR_VALUE',
'GL_STENCIL_FAIL',
'GL_STENCIL_FUNC',
'GL_STENCIL_INDEX',
'GL_STENCIL_INDEX16_EXT',
'GL_STENCIL_INDEX1_EXT',
'GL_STENCIL_INDEX4_EXT',
'GL_STENCIL_INDEX8_EXT',
'GL_STENCIL_PASS_DEPTH_FAIL',
'GL_STENCIL_PASS_DEPTH_PASS',
'GL_STENCIL_REF',
'GL_STENCIL_TEST',
'GL_STENCIL_TEST_TWO_SIDE_EXT',
'GL_STENCIL_VALUE_MASK',
'GL_STENCIL_WRITEMASK',
'GL_STEREO',
'GL_STORAGE_CACHED_APPLE',
'GL_STORAGE_SHARED_APPLE',
'GL_STREAM_COPY',
'GL_STREAM_COPY_ARB',
'GL_STREAM_DRAW',
'GL_STREAM_DRAW_ARB',
'GL_STREAM_READ',
'GL_STREAM_READ_ARB',
'GL_STRICT_DEPTHFUNC_HINT_PGI',
'GL_STRICT_LIGHTING_HINT_PGI',
'GL_STRICT_SCISSOR_HINT_PGI',
'GL_SUBPIXEL_BITS',
'GL_SUBTRACT',
'GL_SUBTRACT_ARB',
'GL_SUB_ATI',
'GL_SWIZZLE_STQ_ATI',
'GL_SWIZZLE_STQ_DQ_ATI',
'GL_SWIZZLE_STRQ_ATI',
'GL_SWIZZLE_STRQ_DQ_ATI',
'GL_SWIZZLE_STR_ATI',
'GL_SWIZZLE_STR_DR_ATI',
'GL_T',
'GL_T2F_C3F_V3F',
'GL_T2F_C4F_N3F_V3F',
'GL_T2F_C4UB_V3F',
'GL_T2F_IUI_N3F_V2F_EXT',
'GL_T2F_IUI_N3F_V3F_EXT',
'GL_T2F_IUI_V2F_EXT',
'GL_T2F_IUI_V3F_EXT',
'GL_T2F_N3F_V3F',
'GL_T2F_V3F',
'GL_T4F_C4F_N3F_V4F',
'GL_T4F_V4F',
'GL_TABLE_TOO_LARGE',
'GL_TABLE_TOO_LARGE_EXT',
'GL_TANGENT_ARRAY_EXT',
'GL_TANGENT_ARRAY_POINTER_EXT',
'GL_TANGENT_ARRAY_STRIDE_EXT',
'GL_TANGENT_ARRAY_TYPE_EXT',
'GL_TEXCOORD1_BIT_PGI',
'GL_TEXCOORD2_BIT_PGI',
'GL_TEXCOORD3_BIT_PGI',
'GL_TEXCOORD4_BIT_PGI',
'GL_TEXTURE',
'GL_TEXTURE0',
'GL_TEXTURE0_ARB',
'GL_TEXTURE1',
'GL_TEXTURE10',
'GL_TEXTURE10_ARB',
'GL_TEXTURE11',
'GL_TEXTURE11_ARB',
'GL_TEXTURE12',
'GL_TEXTURE12_ARB',
'GL_TEXTURE13',
'GL_TEXTURE13_ARB',
'GL_TEXTURE14',
'GL_TEXTURE14_ARB',
'GL_TEXTURE15',
'GL_TEXTURE15_ARB',
'GL_TEXTURE16',
'GL_TEXTURE16_ARB',
'GL_TEXTURE17',
'GL_TEXTURE17_ARB',
'GL_TEXTURE18',
'GL_TEXTURE18_ARB',
'GL_TEXTURE19',
'GL_TEXTURE19_ARB',
'GL_TEXTURE1_ARB',
'GL_TEXTURE2',
'GL_TEXTURE20',
'GL_TEXTURE20_ARB',
'GL_TEXTURE21',
'GL_TEXTURE21_ARB',
'GL_TEXTURE22',
'GL_TEXTURE22_ARB',
'GL_TEXTURE23',
'GL_TEXTURE23_ARB',
'GL_TEXTURE24',
'GL_TEXTURE24_ARB',
'GL_TEXTURE25',
'GL_TEXTURE25_ARB',
'GL_TEXTURE26',
'GL_TEXTURE26_ARB',
'GL_TEXTURE27',
'GL_TEXTURE27_ARB',
'GL_TEXTURE28',
'GL_TEXTURE28_ARB',
'GL_TEXTURE29',
'GL_TEXTURE29_ARB',
'GL_TEXTURE2_ARB',
'GL_TEXTURE3',
'GL_TEXTURE30',
'GL_TEXTURE30_ARB',
'GL_TEXTURE31',
'GL_TEXTURE31_ARB',
'GL_TEXTURE3_ARB',
'GL_TEXTURE4',
'GL_TEXTURE4_ARB',
'GL_TEXTURE5',
'GL_TEXTURE5_ARB',
'GL_TEXTURE6',
'GL_TEXTURE6_ARB',
'GL_TEXTURE7',
'GL_TEXTURE7_ARB',
'GL_TEXTURE8',
'GL_TEXTURE8_ARB',
'GL_TEXTURE9',
'GL_TEXTURE9_ARB',
'GL_TEXTURE_1D',
'GL_TEXTURE_1D_BINDING_EXT',
'GL_TEXTURE_2D',
'GL_TEXTURE_2D_BINDING_EXT',
'GL_TEXTURE_3D',
'GL_TEXTURE_3D_BINDING_EXT',
'GL_TEXTURE_3D_EXT',
'GL_TEXTURE_4DSIZE_SGIS',
'GL_TEXTURE_4D_BINDING_SGIS',
'GL_TEXTURE_4D_SGIS',
'GL_TEXTURE_ALPHA_SIZE',
'GL_TEXTURE_ALPHA_SIZE_EXT',
'GL_TEXTURE_ALPHA_TYPE_ARB',
'GL_TEXTURE_APPLICATION_MODE_EXT',
'GL_TEXTURE_BASE_LEVEL',
'GL_TEXTURE_BASE_LEVEL_SGIS',
'GL_TEXTURE_BINDING_1D',
'GL_TEXTURE_BINDING_2D',
'GL_TEXTURE_BINDING_3D',
'GL_TEXTURE_BINDING_CUBE_MAP',
'GL_TEXTURE_BINDING_CUBE_MAP_ARB',
'GL_TEXTURE_BINDING_CUBE_MAP_EXT',
'GL_TEXTURE_BINDING_RECTANGLE_ARB',
'GL_TEXTURE_BINDING_RECTANGLE_NV',
'GL_TEXTURE_BIT',
'GL_TEXTURE_BLUE_SIZE',
'GL_TEXTURE_BLUE_SIZE_EXT',
'GL_TEXTURE_BLUE_TYPE_ARB',
'GL_TEXTURE_BORDER',
'GL_TEXTURE_BORDER_COLOR',
'GL_TEXTURE_BORDER_VALUES_NV',
'GL_TEXTURE_CLIPMAP_CENTER_SGIX',
'GL_TEXTURE_CLIPMAP_DEPTH_SGIX',
'GL_TEXTURE_CLIPMAP_FRAME_SGIX',
'GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX',
'GL_TEXTURE_CLIPMAP_OFFSET_SGIX',
'GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX',
'GL_TEXTURE_COLOR_TABLE_SGI',
'GL_TEXTURE_COLOR_WRITEMASK_SGIS',
'GL_TEXTURE_COMPARE_FAIL_VALUE_ARB',
'GL_TEXTURE_COMPARE_FUNC',
'GL_TEXTURE_COMPARE_FUNC_ARB',
'GL_TEXTURE_COMPARE_MODE',
'GL_TEXTURE_COMPARE_MODE_ARB',
'GL_TEXTURE_COMPARE_OPERATOR_SGIX',
'GL_TEXTURE_COMPARE_SGIX',
'GL_TEXTURE_COMPONENTS',
'GL_TEXTURE_COMPRESSED',
'GL_TEXTURE_COMPRESSED_ARB',
'GL_TEXTURE_COMPRESSED_IMAGE_SIZE',
'GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB',
'GL_TEXTURE_COMPRESSION_HINT',
'GL_TEXTURE_COMPRESSION_HINT_ARB',
'GL_TEXTURE_CONSTANT_DATA_SUNX',
'GL_TEXTURE_COORD_ARRAY',
'GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING',
'GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB',
'GL_TEXTURE_COORD_ARRAY_COUNT_EXT',
'GL_TEXTURE_COORD_ARRAY_EXT',
'GL_TEXTURE_COORD_ARRAY_LIST_IBM',
'GL_TEXTURE_COORD_ARRAY_LIST_STRIDE_IBM',
'GL_TEXTURE_COORD_ARRAY_PARALLEL_POINTERS_INTEL',
'GL_TEXTURE_COORD_ARRAY_POINTER',
'GL_TEXTURE_COORD_ARRAY_POINTER_EXT',
'GL_TEXTURE_COORD_ARRAY_SIZE',
'GL_TEXTURE_COORD_ARRAY_SIZE_EXT',
'GL_TEXTURE_COORD_ARRAY_STRIDE',
'GL_TEXTURE_COORD_ARRAY_STRIDE_EXT',
'GL_TEXTURE_COORD_ARRAY_TYPE',
'GL_TEXTURE_COORD_ARRAY_TYPE_EXT',
'GL_TEXTURE_CUBE_MAP',
'GL_TEXTURE_CUBE_MAP_ARB',
'GL_TEXTURE_CUBE_MAP_EXT',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_X',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_X_EXT',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_EXT',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_EXT',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X_EXT',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y_EXT',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z_EXT',
'GL_TEXTURE_DEFORMATION_BIT_SGIX',
'GL_TEXTURE_DEFORMATION_SGIX',
'GL_TEXTURE_DEPTH',
'GL_TEXTURE_DEPTH_EXT',
'GL_TEXTURE_DEPTH_SIZE',
'GL_TEXTURE_DEPTH_SIZE_ARB',
'GL_TEXTURE_DEPTH_TYPE_ARB',
'GL_TEXTURE_DS_SIZE_NV',
'GL_TEXTURE_DT_SIZE_NV',
'GL_TEXTURE_ENV',
'GL_TEXTURE_ENV_BIAS_SGIX',
'GL_TEXTURE_ENV_COLOR',
'GL_TEXTURE_ENV_MODE',
'GL_TEXTURE_FILTER4_SIZE_SGIS',
'GL_TEXTURE_FILTER_CONTROL',
'GL_TEXTURE_FILTER_CONTROL_EXT',
'GL_TEXTURE_FLOAT_COMPONENTS_NV',
'GL_TEXTURE_GEN_MODE',
'GL_TEXTURE_GEN_Q',
'GL_TEXTURE_GEN_R',
'GL_TEXTURE_GEN_S',
'GL_TEXTURE_GEN_T',
'GL_TEXTURE_GEQUAL_R_SGIX',
'GL_TEXTURE_GREEN_SIZE',
'GL_TEXTURE_GREEN_SIZE_EXT',
'GL_TEXTURE_GREEN_TYPE_ARB',
'GL_TEXTURE_HEIGHT',
'GL_TEXTURE_HI_SIZE_NV',
'GL_TEXTURE_INDEX_SIZE_EXT',
'GL_TEXTURE_INTENSITY_SIZE',
'GL_TEXTURE_INTENSITY_SIZE_EXT',
'GL_TEXTURE_INTENSITY_TYPE_ARB',
'GL_TEXTURE_INTERNAL_FORMAT',
'GL_TEXTURE_LEQUAL_R_SGIX',
'GL_TEXTURE_LIGHTING_MODE_HP',
'GL_TEXTURE_LIGHT_EXT',
'GL_TEXTURE_LOD_BIAS',
'GL_TEXTURE_LOD_BIAS_EXT',
'GL_TEXTURE_LOD_BIAS_R_SGIX',
'GL_TEXTURE_LOD_BIAS_S_SGIX',
'GL_TEXTURE_LOD_BIAS_T_SGIX',
'GL_TEXTURE_LO_SIZE_NV',
'GL_TEXTURE_LUMINANCE_SIZE',
'GL_TEXTURE_LUMINANCE_SIZE_EXT',
'GL_TEXTURE_LUMINANCE_TYPE_ARB',
'GL_TEXTURE_MAG_FILTER',
'GL_TEXTURE_MAG_SIZE_NV',
'GL_TEXTURE_MATERIAL_FACE_EXT',
'GL_TEXTURE_MATERIAL_PARAMETER_EXT',
'GL_TEXTURE_MATRIX',
'GL_TEXTURE_MAX_ANISOTROPY_EXT',
'GL_TEXTURE_MAX_CLAMP_R_SGIX',
'GL_TEXTURE_MAX_CLAMP_S_SGIX',
'GL_TEXTURE_MAX_CLAMP_T_SGIX',
'GL_TEXTURE_MAX_LEVEL',
'GL_TEXTURE_MAX_LEVEL_SGIS',
'GL_TEXTURE_MAX_LOD',
'GL_TEXTURE_MAX_LOD_SGIS',
'GL_TEXTURE_MIN_FILTER',
'GL_TEXTURE_MIN_LOD',
'GL_TEXTURE_MIN_LOD_SGIS',
'GL_TEXTURE_MULTI_BUFFER_HINT_SGIX',
'GL_TEXTURE_NORMAL_EXT',
'GL_TEXTURE_POST_SPECULAR_HP',
'GL_TEXTURE_PRE_SPECULAR_HP',
'GL_TEXTURE_PRIORITY',
'GL_TEXTURE_PRIORITY_EXT',
'GL_TEXTURE_RECTANGLE_ARB',
'GL_TEXTURE_RECTANGLE_NV',
'GL_TEXTURE_RED_SIZE',
'GL_TEXTURE_RED_SIZE_EXT',
'GL_TEXTURE_RED_TYPE_ARB',
'GL_TEXTURE_RESIDENT',
'GL_TEXTURE_RESIDENT_EXT',
'GL_TEXTURE_SHADER_NV',
'GL_TEXTURE_STACK_DEPTH',
'GL_TEXTURE_TOO_LARGE_EXT',
'GL_TEXTURE_UNSIGNED_REMAP_MODE_NV',
'GL_TEXTURE_WIDTH',
'GL_TEXTURE_WRAP_Q_SGIS',
'GL_TEXTURE_WRAP_R',
'GL_TEXTURE_WRAP_R_EXT',
'GL_TEXTURE_WRAP_S',
'GL_TEXTURE_WRAP_T',
'GL_TEXT_FRAGMENT_SHADER_ATI',
'GL_TRACK_MATRIX_NV',
'GL_TRACK_MATRIX_TRANSFORM_NV',
'GL_TRANSFORM_BIT',
'GL_TRANSFORM_HINT_APPLE',
'GL_TRANSPOSE_COLOR_MATRIX',
'GL_TRANSPOSE_COLOR_MATRIX_ARB',
'GL_TRANSPOSE_CURRENT_MATRIX_ARB',
'GL_TRANSPOSE_MODELVIEW_MATRIX',
'GL_TRANSPOSE_MODELVIEW_MATRIX_ARB',
'GL_TRANSPOSE_NV',
'GL_TRANSPOSE_PROJECTION_MATRIX',
'GL_TRANSPOSE_PROJECTION_MATRIX_ARB',
'GL_TRANSPOSE_TEXTURE_MATRIX',
'GL_TRANSPOSE_TEXTURE_MATRIX_ARB',
'GL_TRIANGLES',
'GL_TRIANGLE_FAN',
'GL_TRIANGLE_LIST_SUN',
'GL_TRIANGLE_MESH_SUN',
'GL_TRIANGLE_STRIP',
'GL_TRUE',
'GL_TYPE_RGBA_FLOAT_ATI',
'GL_UNPACK_ALIGNMENT',
'GL_UNPACK_CLIENT_STORAGE_APPLE',
'GL_UNPACK_CMYK_HINT_EXT',
'GL_UNPACK_CONSTANT_DATA_SUNX',
'GL_UNPACK_IMAGE_DEPTH_SGIS',
'GL_UNPACK_IMAGE_HEIGHT',
'GL_UNPACK_IMAGE_HEIGHT_EXT',
'GL_UNPACK_LSB_FIRST',
'GL_UNPACK_RESAMPLE_OML',
'GL_UNPACK_RESAMPLE_SGIX',
'GL_UNPACK_ROW_LENGTH',
'GL_UNPACK_SKIP_IMAGES',
'GL_UNPACK_SKIP_IMAGES_EXT',
'GL_UNPACK_SKIP_PIXELS',
'GL_UNPACK_SKIP_ROWS',
'GL_UNPACK_SKIP_VOLUMES_SGIS',
'GL_UNPACK_SUBSAMPLE_RATE_SGIX',
'GL_UNPACK_SWAP_BYTES',
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_BYTE_2_3_3_REV',
'GL_UNSIGNED_BYTE_3_3_2',
'GL_UNSIGNED_BYTE_3_3_2_EXT',
'GL_UNSIGNED_IDENTITY_NV',
'GL_UNSIGNED_INT',
'GL_UNSIGNED_INT_10_10_10_2',
'GL_UNSIGNED_INT_10_10_10_2_EXT',
'GL_UNSIGNED_INT_24_8_NV',
'GL_UNSIGNED_INT_2_10_10_10_REV',
'GL_UNSIGNED_INT_8_8_8_8',
'GL_UNSIGNED_INT_8_8_8_8_EXT',
'GL_UNSIGNED_INT_8_8_8_8_REV',
'GL_UNSIGNED_INT_8_8_S8_S8_REV_NV',
'GL_UNSIGNED_INT_S8_S8_8_8_NV',
'GL_UNSIGNED_INVERT_NV',
'GL_UNSIGNED_NORMALIZED_ARB',
'GL_UNSIGNED_SHORT',
'GL_UNSIGNED_SHORT_1_5_5_5_REV',
'GL_UNSIGNED_SHORT_4_4_4_4',
'GL_UNSIGNED_SHORT_4_4_4_4_EXT',
'GL_UNSIGNED_SHORT_4_4_4_4_REV',
'GL_UNSIGNED_SHORT_5_5_5_1',
'GL_UNSIGNED_SHORT_5_5_5_1_EXT',
'GL_UNSIGNED_SHORT_5_6_5',
'GL_UNSIGNED_SHORT_5_6_5_REV',
'GL_UNSIGNED_SHORT_8_8_APPLE',
'GL_UNSIGNED_SHORT_8_8_MESA',
'GL_UNSIGNED_SHORT_8_8_REV_APPLE',
'GL_UNSIGNED_SHORT_8_8_REV_MESA',
'GL_UPPER_LEFT',
'GL_V2F',
'GL_V3F',
'GL_VALIDATE_STATUS',
'GL_VARIABLE_A_NV',
'GL_VARIABLE_B_NV',
'GL_VARIABLE_C_NV',
'GL_VARIABLE_D_NV',
'GL_VARIABLE_E_NV',
'GL_VARIABLE_F_NV',
'GL_VARIABLE_G_NV',
'GL_VARIANT_ARRAY_EXT',
'GL_VARIANT_ARRAY_POINTER_EXT',
'GL_VARIANT_ARRAY_STRIDE_EXT',
'GL_VARIANT_ARRAY_TYPE_EXT',
'GL_VARIANT_DATATYPE_EXT',
'GL_VARIANT_EXT',
'GL_VARIANT_VALUE_EXT',
'GL_VECTOR_EXT',
'GL_VENDOR',
'GL_VERSION',
'GL_VERSION_1_1',
'GL_VERSION_1_2',
'GL_VERSION_1_3',
'GL_VERSION_1_4',
'GL_VERSION_1_5',
'GL_VERSION_2_0',
'GL_VERTEX23_BIT_PGI',
'GL_VERTEX4_BIT_PGI',
'GL_VERTEX_ARRAY',
'GL_VERTEX_ARRAY_BINDING_APPLE',
'GL_VERTEX_ARRAY_BUFFER_BINDING',
'GL_VERTEX_ARRAY_BUFFER_BINDING_ARB',
'GL_VERTEX_ARRAY_COUNT_EXT',
'GL_VERTEX_ARRAY_EXT',
'GL_VERTEX_ARRAY_LIST_IBM',
'GL_VERTEX_ARRAY_LIST_STRIDE_IBM',
'GL_VERTEX_ARRAY_PARALLEL_POINTERS_INTEL',
'GL_VERTEX_ARRAY_POINTER',
'GL_VERTEX_ARRAY_POINTER_EXT',
'GL_VERTEX_ARRAY_RANGE_APPLE',
'GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE',
'GL_VERTEX_ARRAY_RANGE_LENGTH_NV',
'GL_VERTEX_ARRAY_RANGE_NV',
'GL_VERTEX_ARRAY_RANGE_POINTER_APPLE',
'GL_VERTEX_ARRAY_RANGE_POINTER_NV',
'GL_VERTEX_ARRAY_RANGE_VALID_NV',
'GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV',
'GL_VERTEX_ARRAY_SIZE',
'GL_VERTEX_ARRAY_SIZE_EXT',
'GL_VERTEX_ARRAY_STORAGE_HINT_APPLE',
'GL_VERTEX_ARRAY_STRIDE',
'GL_VERTEX_ARRAY_STRIDE_EXT',
'GL_VERTEX_ARRAY_TYPE',
'GL_VERTEX_ARRAY_TYPE_EXT',
'GL_VERTEX_ATTRIB_ARRAY0_NV',
'GL_VERTEX_ATTRIB_ARRAY10_NV',
'GL_VERTEX_ATTRIB_ARRAY11_NV',
'GL_VERTEX_ATTRIB_ARRAY12_NV',
'GL_VERTEX_ATTRIB_ARRAY13_NV',
'GL_VERTEX_ATTRIB_ARRAY14_NV',
'GL_VERTEX_ATTRIB_ARRAY15_NV',
'GL_VERTEX_ATTRIB_ARRAY1_NV',
'GL_VERTEX_ATTRIB_ARRAY2_NV',
'GL_VERTEX_ATTRIB_ARRAY3_NV',
'GL_VERTEX_ATTRIB_ARRAY4_NV',
'GL_VERTEX_ATTRIB_ARRAY5_NV',
'GL_VERTEX_ATTRIB_ARRAY6_NV',
'GL_VERTEX_ATTRIB_ARRAY7_NV',
'GL_VERTEX_ATTRIB_ARRAY8_NV',
'GL_VERTEX_ATTRIB_ARRAY9_NV',
'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING',
'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING_ARB',
'GL_VERTEX_ATTRIB_ARRAY_ENABLED',
'GL_VERTEX_ATTRIB_ARRAY_ENABLED_ARB',
'GL_VERTEX_ATTRIB_ARRAY_NORMALIZED',
'GL_VERTEX_ATTRIB_ARRAY_NORMALIZED_ARB',
'GL_VERTEX_ATTRIB_ARRAY_POINTER',
'GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB',
'GL_VERTEX_ATTRIB_ARRAY_SIZE',
'GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB',
'GL_VERTEX_ATTRIB_ARRAY_STRIDE',
'GL_VERTEX_ATTRIB_ARRAY_STRIDE_ARB',
'GL_VERTEX_ATTRIB_ARRAY_TYPE',
'GL_VERTEX_ATTRIB_ARRAY_TYPE_ARB',
'GL_VERTEX_BLEND_ARB',
'GL_VERTEX_CONSISTENT_HINT_PGI',
'GL_VERTEX_DATA_HINT_PGI',
'GL_VERTEX_PRECLIP_HINT_SGIX',
'GL_VERTEX_PRECLIP_SGIX',
'GL_VERTEX_PROGRAM_ARB',
'GL_VERTEX_PROGRAM_BINDING_NV',
'GL_VERTEX_PROGRAM_NV',
'GL_VERTEX_PROGRAM_POINT_SIZE',
'GL_VERTEX_PROGRAM_POINT_SIZE_ARB',
'GL_VERTEX_PROGRAM_POINT_SIZE_NV',
'GL_VERTEX_PROGRAM_TWO_SIDE',
'GL_VERTEX_PROGRAM_TWO_SIDE_ARB',
'GL_VERTEX_PROGRAM_TWO_SIDE_NV',
'GL_VERTEX_SHADER',
'GL_VERTEX_SHADER_ARB',
'GL_VERTEX_SHADER_BINDING_EXT',
'GL_VERTEX_SHADER_EXT',
'GL_VERTEX_SHADER_INSTRUCTIONS_EXT',
'GL_VERTEX_SHADER_INVARIANTS_EXT',
'GL_VERTEX_SHADER_LOCALS_EXT',
'GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',
'GL_VERTEX_SHADER_OPTIMIZED_EXT',
'GL_VERTEX_SHADER_VARIANTS_EXT',
'GL_VERTEX_SOURCE_ATI',
'GL_VERTEX_STATE_PROGRAM_NV',
'GL_VERTEX_STREAM0_ATI',
'GL_VERTEX_STREAM1_ATI',
'GL_VERTEX_STREAM2_ATI',
'GL_VERTEX_STREAM3_ATI',
'GL_VERTEX_STREAM4_ATI',
'GL_VERTEX_STREAM5_ATI',
'GL_VERTEX_STREAM6_ATI',
'GL_VERTEX_STREAM7_ATI',
'GL_VERTEX_WEIGHTING_EXT',
'GL_VERTEX_WEIGHT_ARRAY_EXT',
'GL_VERTEX_WEIGHT_ARRAY_POINTER_EXT',
'GL_VERTEX_WEIGHT_ARRAY_SIZE_EXT',
'GL_VERTEX_WEIGHT_ARRAY_STRIDE_EXT',
'GL_VERTEX_WEIGHT_ARRAY_TYPE_EXT',
'GL_VIBRANCE_BIAS_NV',
'GL_VIBRANCE_SCALE_NV',
'GL_VIEWPORT',
'GL_VIEWPORT_BIT',
'GL_WEIGHT_ARRAY_ARB',
'GL_WEIGHT_ARRAY_BUFFER_BINDING',
'GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB',
'GL_WEIGHT_ARRAY_POINTER_ARB',
'GL_WEIGHT_ARRAY_SIZE_ARB',
'GL_WEIGHT_ARRAY_STRIDE_ARB',
'GL_WEIGHT_ARRAY_TYPE_ARB',
'GL_WEIGHT_SUM_UNITY_ARB',
'GL_WIDE_LINE_HINT_PGI',
'GL_WRAP_BORDER_SUN',
'GL_WRITE_ONLY',
'GL_WRITE_ONLY_ARB',
'GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV',
'GL_WRITE_PIXEL_DATA_RANGE_NV',
'GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV',
'GL_W_EXT',
'GL_XOR',
'GL_X_EXT',
'GL_YCBCR_422_APPLE',
'GL_YCBCR_MESA',
'GL_YCRCBA_SGIX',
'GL_YCRCB_422_SGIX',
'GL_YCRCB_444_SGIX',
'GL_YCRCB_SGIX',
'GL_Y_EXT',
'GL_ZERO',
'GL_ZERO_EXT',
'GL_ZOOM_X',
'GL_ZOOM_Y',
'GL_Z_EXT',
'GLbitfield',
'GLboolean',
'GLbyte',
'GLclampd',
'GLclampf',
'GLdouble',
'GLenum',
'GLfloat',
'GLint',
'GLshort',
'GLsizei',
'GLubyte',
'GLuint',
'GLushort',
'GLvoid',
'glAccum',
'glAlphaFunc',
'glAreTexturesResident',
'glArrayElement',
'glBegin',
'glBindTexture',
'glBitmap',
'glBlendFunc',
'glCallList',
'glCallLists',
'glClear',
'glClearAccum',
'glClearColor',
'glClearDepth',
'glClearIndex',
'glClearStencil',
'glClipPlane',
'glColor3b',
'glColor3bv',
'glColor3d',
'glColor3dv',
'glColor3f',
'glColor3fv',
'glColor3i',
'glColor3iv',
'glColor3s',
'glColor3sv',
'glColor3ub',
'glColor3ubv',
'glColor3ui',
'glColor3uiv',
'glColor3us',
'glColor3usv',
'glColor4b',
'glColor4bv',
'glColor4d',
'glColor4dv',
'glColor4f',
'glColor4fv',
'glColor4i',
'glColor4iv',
'glColor4s',
'glColor4sv',
'glColor4ub',
'glColor4ubv',
'glColor4ui',
'glColor4uiv',
'glColor4us',
'glColor4usv',
'glColorMask',
'glColorMaterial',
'glColorPointer',
'glCopyPixels',
'glCopyTexImage1D',
'glCopyTexImage2D',
'glCopyTexSubImage1D',
'glCopyTexSubImage2D',
'glCullFace',
'glDeleteLists',
'glDeleteTextures',
'glDepthFunc',
'glDepthMask',
'glDepthRange',
'glDisable',
'glDisableClientState',
'glDrawArrays',
'glDrawBuffer',
'glDrawElements',
'glDrawPixels',
'glEdgeFlag',
'glEdgeFlagPointer',
'glEdgeFlagv',
'glEnable',
'glEnableClientState',
'glEnd',
'glEndList',
'glEvalCoord1d',
'glEvalCoord1dv',
'glEvalCoord1f',
'glEvalCoord1fv',
'glEvalCoord2d',
'glEvalCoord2dv',
'glEvalCoord2f',
'glEvalCoord2fv',
'glEvalMesh1',
'glEvalMesh2',
'glEvalPoint1',
'glEvalPoint2',
'glFeedbackBuffer',
'glFinish',
'glFlush',
'glFogf',
'glFogfv',
'glFogi',
'glFogiv',
'glFrontFace',
'glFrustum',
'glGenLists',
'glGenTextures',
'glGetBooleanv',
'glGetClipPlane',
'glGetDoublev',
'glGetError',
'glGetFloatv',
'glGetIntegerv',
'glGetLightfv',
'glGetLightiv',
'glGetMapdv',
'glGetMapfv',
'glGetMapiv',
'glGetMaterialfv',
'glGetMaterialiv',
'glGetPixelMapfv',
'glGetPixelMapuiv',
'glGetPixelMapusv',
'glGetPointerv',
'glGetPolygonStipple',
'glGetString',
'glGetTexEnvfv',
'glGetTexEnviv',
'glGetTexGendv',
'glGetTexGenfv',
'glGetTexGeniv',
'glGetTexImage',
'glGetTexLevelParameterfv',
'glGetTexLevelParameteriv',
'glGetTexParameterfv',
'glGetTexParameteriv',
'glHint',
'glIndexMask',
'glIndexPointer',
'glIndexd',
'glIndexdv',
'glIndexf',
'glIndexfv',
'glIndexi',
'glIndexiv',
'glIndexs',
'glIndexsv',
'glIndexub',
'glIndexubv',
'glInitNames',
'glInterleavedArrays',
'glIsEnabled',
'glIsList',
'glIsTexture',
'glLightModelf',
'glLightModelfv',
'glLightModeli',
'glLightModeliv',
'glLightf',
'glLightfv',
'glLighti',
'glLightiv',
'glLineStipple',
'glLineWidth',
'glListBase',
'glLoadIdentity',
'glLoadMatrixd',
'glLoadMatrixf',
'glLoadName',
'glLogicOp',
'glMap1d',
'glMap1f',
'glMap2d',
'glMap2f',
'glMapGrid1d',
'glMapGrid1f',
'glMapGrid2d',
'glMapGrid2f',
'glMaterialf',
'glMaterialfv',
'glMateriali',
'glMaterialiv',
'glMatrixMode',
'glMultMatrixd',
'glMultMatrixf',
'glNewList',
'glNormal3b',
'glNormal3bv',
'glNormal3d',
'glNormal3dv',
'glNormal3f',
'glNormal3fv',
'glNormal3i',
'glNormal3iv',
'glNormal3s',
'glNormal3sv',
'glNormalPointer',
'glOrtho',
'glPassThrough',
'glPixelMapfv',
'glPixelMapuiv',
'glPixelMapusv',
'glPixelStoref',
'glPixelStorei',
'glPixelTransferf',
'glPixelTransferi',
'glPixelZoom',
'glPointSize',
'glPolygonMode',
'glPolygonOffset',
'glPolygonStipple',
'glPopAttrib',
'glPopClientAttrib',
'glPopMatrix',
'glPopName',
'glPrioritizeTextures',
'glPushAttrib',
'glPushClientAttrib',
'glPushMatrix',
'glPushName',
'glRasterPos2d',
'glRasterPos2dv',
'glRasterPos2f',
'glRasterPos2fv',
'glRasterPos2i',
'glRasterPos2iv',
'glRasterPos2s',
'glRasterPos2sv',
'glRasterPos3d',
'glRasterPos3dv',
'glRasterPos3f',
'glRasterPos3fv',
'glRasterPos3i',
'glRasterPos3iv',
'glRasterPos3s',
'glRasterPos3sv',
'glRasterPos4d',
'glRasterPos4dv',
'glRasterPos4f',
'glRasterPos4fv',
'glRasterPos4i',
'glRasterPos4iv',
'glRasterPos4s',
'glRasterPos4sv',
'glReadBuffer',
'glReadPixels',
'glRectd',
'glRectdv',
'glRectf',
'glRectfv',
'glRecti',
'glRectiv',
'glRects',
'glRectsv',
'glRenderMode',
'glRotated',
'glRotatef',
'glScaled',
'glScalef',
'glScissor',
'glSelectBuffer',
'glShadeModel',
'glStencilFunc',
'glStencilMask',
'glStencilOp',
'glTexCoord1d',
'glTexCoord1dv',
'glTexCoord1f',
'glTexCoord1fv',
'glTexCoord1i',
'glTexCoord1iv',
'glTexCoord1s',
'glTexCoord1sv',
'glTexCoord2d',
'glTexCoord2dv',
'glTexCoord2f',
'glTexCoord2fv',
'glTexCoord2i',
'glTexCoord2iv',
'glTexCoord2s',
'glTexCoord2sv',
'glTexCoord3d',
'glTexCoord3dv',
'glTexCoord3f',
'glTexCoord3fv',
'glTexCoord3i',
'glTexCoord3iv',
'glTexCoord3s',
'glTexCoord3sv',
'glTexCoord4d',
'glTexCoord4dv',
'glTexCoord4f',
'glTexCoord4fv',
'glTexCoord4i',
'glTexCoord4iv',
'glTexCoord4s',
'glTexCoord4sv',
'glTexCoordPointer',
'glTexEnvf',
'glTexEnvfv',
'glTexEnvi',
'glTexEnviv',
'glTexGend',
'glTexGendv',
'glTexGenf',
'glTexGenfv',
'glTexGeni',
'glTexGeniv',
'glTexImage1D',
'glTexImage2D',
'glTexParameterf',
'glTexParameterfv',
'glTexParameteri',
'glTexParameteriv',
'glTexSubImage1D',
'glTexSubImage2D',
'glTranslated',
'glTranslatef',
'glVertex2d',
'glVertex2dv',
'glVertex2f',
'glVertex2fv',
'glVertex2i',
'glVertex2iv',
'glVertex2s',
'glVertex2sv',
'glVertex3d',
'glVertex3dv',
'glVertex3f',
'glVertex3fv',
'glVertex3i',
'glVertex3iv',
'glVertex3s',
'glVertex3sv',
'glVertex4d',
'glVertex4dv',
'glVertex4f',
'glVertex4fv',
'glVertex4i',
'glVertex4iv',
'glVertex4s',
'glVertex4sv',
'glVertexPointer',
'glViewport'
]
| agpl-3.0 |
pozdnyakov/chromium-crosswalk | tools/telemetry/telemetry/core/chrome/inspector_console.py | 29 | 1991 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
class InspectorConsole(object):
  """Listens to the DevTools Console domain of an inspector backend.

  Console messages are formatted and forwarded to an optional file-like
  output stream; the Console domain is only enabled on the backend while
  a stream is attached, so there is no notification traffic otherwise.
  """
  def __init__(self, inspector_backend):
    self._inspector_backend = inspector_backend
    # Route all Console.* notifications from the backend to this object.
    self._inspector_backend.RegisterDomain(
        'Console',
        self._OnNotification,
        self._OnClose)
    self._message_output_stream = None
    # Most recent formatted message; re-emitted when the backend reports a
    # repeat-count update instead of sending the message text again.
    self._last_message = None
    self._console_enabled = False

  def _OnNotification(self, msg):
    """Handles a Console domain notification from the backend."""
    logging.debug('Notification: %s', json.dumps(msg, indent=2))
    if msg['method'] == 'Console.messageAdded':
      # Messages originating from the New Tab page are noise; drop them.
      if msg['params']['message']['url'] == 'chrome://newtab/':
        return
      self._last_message = 'At %s:%i: %s' % (
        msg['params']['message']['url'],
        msg['params']['message']['line'],
        msg['params']['message']['text'])
      if self._message_output_stream:
        self._message_output_stream.write(
          '%s\n' % self._last_message)

    elif msg['method'] == 'Console.messageRepeatCountUpdated':
      if self._message_output_stream:
        # Repeat notifications carry no text; replay the last message.
        self._message_output_stream.write(
          '%s\n' % self._last_message)

  def _OnClose(self):
    pass

  # False positive in PyLint 0.25.1: http://www.logilab.org/89092
  @property
  def message_output_stream(self):  # pylint: disable=E0202
    """File-like object console messages are written to, or None."""
    return self._message_output_stream

  @message_output_stream.setter
  def message_output_stream(self, stream):  # pylint: disable=E0202
    self._message_output_stream = stream
    self._UpdateConsoleEnabledState()

  def _UpdateConsoleEnabledState(self):
    # Enable the Console domain on the backend only while someone is
    # listening; disable it again when the stream is detached.
    # BUGFIX: compare against None with 'is not', not '!=' (a stream type
    # could overload __ne__ and break the check).
    enabled = self._message_output_stream is not None
    if enabled == self._console_enabled:
      return

    if enabled:
      method_name = 'enable'
    else:
      method_name = 'disable'
    self._inspector_backend.SyncRequest({
        'method': 'Console.%s' % method_name
        })
    self._console_enabled = enabled
| bsd-3-clause |
IllusionRom-deprecated/android_platform_external_chromium_org | third_party/tlslite/tlslite/utils/Cryptlib_AES.py | 359 | 1364 | """Cryptlib AES implementation."""
from cryptomath import *
from AES import *
if cryptlibpyLoaded:

    def new(key, mode, IV):
        """Factory returning a cryptlib-backed AES cipher object."""
        return Cryptlib_AES(key, mode, IV)

    class Cryptlib_AES(AES):
        """AES-CBC cipher that delegates the actual work to cryptlib."""

        def __init__(self, key, mode, IV):
            AES.__init__(self, key, mode, IV, "cryptlib")
            # Create a cryptlib encryption context and configure it for
            # CBC-mode AES with the supplied key and IV.
            ctx = cryptlib_py.cryptCreateContext(
                cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_AES)
            cryptlib_py.cryptSetAttribute(
                ctx, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC)
            cryptlib_py.cryptSetAttribute(
                ctx, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key))
            cryptlib_py.cryptSetAttributeString(
                ctx, cryptlib_py.CRYPT_CTXINFO_KEY, key)
            cryptlib_py.cryptSetAttributeString(
                ctx, cryptlib_py.CRYPT_CTXINFO_IV, IV)
            self.context = ctx

        def __del__(self):
            # Release the underlying cryptlib context with the object.
            cryptlib_py.cryptDestroyContext(self.context)

        def encrypt(self, plaintext):
            AES.encrypt(self, plaintext)
            # cryptlib encrypts in place on a mutable byte buffer.
            buf = stringToBytes(plaintext)
            cryptlib_py.cryptEncrypt(self.context, buf)
            return bytesToString(buf)

        def decrypt(self, ciphertext):
            AES.decrypt(self, ciphertext)
            # cryptlib decrypts in place on a mutable byte buffer.
            buf = stringToBytes(ciphertext)
            cryptlib_py.cryptDecrypt(self.context, buf)
            return bytesToString(buf)
| bsd-3-clause |
saurabh6790/medsynaptic-app | accounts/report/gross_profit/gross_profit.py | 29 | 4693 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt
from stock.utils import get_buying_amount, get_sales_bom_buying_amount
def execute(filters=None):
    """Entry point of the Gross Profit report.

    Builds one data row per sold item (from Delivery Notes and stock-updating
    Sales Invoices), valuing each row's buying amount from the stock ledger,
    and returns (columns, data) in the report framework's expected format.
    """
    if not filters: filters = {}

    stock_ledger_entries = get_stock_ledger_entries(filters)
    source = get_source_data(filters)
    item_sales_bom = get_item_sales_bom()

    columns = ["Delivery Note/Sales Invoice::120", "Link::30", "Posting Date:Date", "Posting Time",
        "Item Code:Link/Item", "Item Name", "Description", "Warehouse:Link/Warehouse",
        "Qty:Float", "Selling Rate:Currency", "Avg. Buying Rate:Currency",
        "Selling Amount:Currency", "Buying Amount:Currency",
        "Gross Profit:Currency", "Gross Profit %:Percent", "Project:Link/Project"]
    data = []
    for row in source:
        selling_amount = flt(row.amount)

        item_sales_bom_map = item_sales_bom.get(row.parenttype, {}).get(row.name, webnotes._dict())

        # A "sales BOM" (packed item) is valued from its component items;
        # ordinary items are valued straight from their stock ledger entries.
        if item_sales_bom_map.get(row.item_code):
            buying_amount = get_sales_bom_buying_amount(row.item_code, row.warehouse,
                row.parenttype, row.name, row.item_row, stock_ledger_entries, item_sales_bom_map)
        else:
            buying_amount = get_buying_amount(row.parenttype, row.name, row.item_row,
                stock_ledger_entries.get((row.item_code, row.warehouse), []))

        # Clamp negative valuations to zero before computing profit.
        buying_amount = buying_amount > 0 and buying_amount or 0
        gross_profit = selling_amount - buying_amount
        if selling_amount:
            gross_profit_percent = (gross_profit / selling_amount) * 100.0
        else:
            gross_profit_percent = 0.0

        # Clickable icon linking back to the source document's form view.
        icon = """<a href="%s"><i class="icon icon-share" style="cursor: pointer;"></i></a>""" \
            % ("/".join(["#Form", row.parenttype, row.name]),)
        data.append([row.name, icon, row.posting_date, row.posting_time, row.item_code, row.item_name,
            row.description, row.warehouse, row.qty, row.basic_rate,
            row.qty and (buying_amount / row.qty) or 0, row.amount, buying_amount,
            gross_profit, gross_profit_percent, row.project])

    return columns, data
def get_stock_ledger_entries(filters):
    """Fetch stock ledger entries grouped as {(item_code, warehouse): [rows]}.

    Rows within each group are ordered newest-first (by posting date/time),
    which is what the buying-amount helpers expect.
    """
    query = """select item_code, voucher_type, voucher_no,
        voucher_detail_no, posting_date, posting_time, stock_value,
        warehouse, actual_qty as qty
        from `tabStock Ledger Entry`"""

    if filters.get("company"):
        query += """ where company=%(company)s"""

    query += " order by item_code desc, warehouse desc, posting_date desc, posting_time desc, name desc"

    grouped = {}
    for sle in webnotes.conn.sql(query, filters, as_dict=True):
        grouped.setdefault((sle.item_code, sle.warehouse), []).append(sle)

    return grouped
def get_item_sales_bom():
    """Map packed ("sales BOM") items by document.

    Returns {parenttype: {parent: {parent_item: [packed item rows]}}} built
    from all submitted Packed Item rows, with quantities negated (total_qty).
    """
    sales_bom = {}
    rows = webnotes.conn.sql("""select parenttype, parent, parent_item,
        item_code, warehouse, -1*qty as total_qty, parent_detail_docname
        from `tabPacked Item` where docstatus=1""", as_dict=True)
    for row in rows:
        by_parent = sales_bom.setdefault(row.parenttype, webnotes._dict())
        by_item = by_parent.setdefault(row.parent, webnotes._dict())
        by_item.setdefault(row.parent_item, []).append(row)

    return sales_bom
def get_source_data(filters):
    """Fetch the item rows feeding the report.

    Returns submitted Delivery Note items plus Sales Invoice items (only
    invoices with update_stock enabled), merged and sorted newest-first by
    posting datetime. Optional company/from_date/to_date filters narrow
    both queries.
    """
    conditions = ""
    if filters.get("company"):
        conditions += " and company=%(company)s"
    if filters.get("from_date"):
        conditions += " and posting_date>=%(from_date)s"
    if filters.get("to_date"):
        conditions += " and posting_date<=%(to_date)s"

    delivery_note_items = webnotes.conn.sql("""select item.parenttype, dn.name,
        dn.posting_date, dn.posting_time, dn.project_name,
        item.item_code, item.item_name, item.description, item.warehouse,
        item.qty, item.basic_rate, item.amount, item.name as "item_row",
        timestamp(dn.posting_date, dn.posting_time) as posting_datetime
        from `tabDelivery Note` dn, `tabDelivery Note Item` item
        where item.parent = dn.name and dn.docstatus = 1 %s
        order by dn.posting_date desc, dn.posting_time desc""" % (conditions,), filters, as_dict=1)

    sales_invoice_items = webnotes.conn.sql("""select item.parenttype, si.name,
        si.posting_date, si.posting_time, si.project_name,
        item.item_code, item.item_name, item.description, item.warehouse,
        item.qty, item.basic_rate, item.amount, item.name as "item_row",
        timestamp(si.posting_date, si.posting_time) as posting_datetime
        from `tabSales Invoice` si, `tabSales Invoice Item` item
        where item.parent = si.name and si.docstatus = 1 %s
        and si.update_stock = 1
        order by si.posting_date desc, si.posting_time desc""" % (conditions,), filters, as_dict=1)

    source = delivery_note_items + sales_invoice_items
    # Each query is already sorted; a re-sort of the merged list is only
    # needed when invoice rows were actually appended after the DN rows.
    if len(source) > len(delivery_note_items):
        source.sort(key=lambda d: d.posting_datetime, reverse=True)

    return source
openstack/nova | nova/virt/hyperv/constants.py | 2 | 2669 | # Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Constants used in ops classes
"""
from os_win import constants
from oslo_utils import units
from nova.compute import power_state
from nova.objects import fields as obj_fields
# Map os_win Hyper-V VM states to Nova power states, used when reporting
# instance power state back to the compute manager.
HYPERV_POWER_STATE = {
    constants.HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN,
    constants.HYPERV_VM_STATE_SHUTTING_DOWN: power_state.SHUTDOWN,
    constants.HYPERV_VM_STATE_ENABLED: power_state.RUNNING,
    constants.HYPERV_VM_STATE_PAUSED: power_state.PAUSED,
    constants.HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED
}

# Map os_win processor architecture constants (from WMI's Win32_Processor)
# to Nova's architecture field values.
WMI_WIN32_PROCESSOR_ARCHITECTURE = {
    constants.ARCH_I686: obj_fields.Architecture.I686,
    constants.ARCH_MIPS: obj_fields.Architecture.MIPS,
    constants.ARCH_ALPHA: obj_fields.Architecture.ALPHA,
    constants.ARCH_PPC: obj_fields.Architecture.PPC,
    constants.ARCH_ARMV7: obj_fields.Architecture.ARMV7,
    constants.ARCH_IA64: obj_fields.Architecture.IA64,
    constants.ARCH_X86_64: obj_fields.Architecture.X86_64,
}

# Disk controller types available on Hyper-V VMs.
CTRL_TYPE_IDE = "IDE"
CTRL_TYPE_SCSI = "SCSI"

# Drive/resource types and their associated image formats.
DISK = "VHD"
DISK_FORMAT = DISK
DVD = "DVD"
DVD_FORMAT = "ISO"
VOLUME = "VOLUME"

# Lookup from lowercase image format to drive type.
DISK_FORMAT_MAP = {
    DISK_FORMAT.lower(): DISK,
    DVD_FORMAT.lower(): DVD
}

# Block-device-info device type -> drive type.
BDI_DEVICE_TYPE_TO_DRIVE_TYPE = {'disk': DISK}

DISK_FORMAT_VHD = "VHD"
DISK_FORMAT_VHDX = "VHDX"

# Power actions accepted by the host ops API.
HOST_POWER_ACTION_SHUTDOWN = "shutdown"
HOST_POWER_ACTION_REBOOT = "reboot"
HOST_POWER_ACTION_STARTUP = "startup"

# Flavor extra-spec / image property keys controlling VM generation and
# secure boot.
FLAVOR_SPEC_SECURE_BOOT = "os:secure_boot"

IMAGE_PROP_VM_GEN_1 = "hyperv-gen1"
IMAGE_PROP_VM_GEN_2 = "hyperv-gen2"

VM_GEN_1 = 1
VM_GEN_2 = 2

# Serial console settings: ring-buffer size and port semantics
# (read-only log port vs. read-write interactive port).
SERIAL_CONSOLE_BUFFER_SIZE = 4 * units.Ki
SERIAL_PORT_TYPE_RO = 'ro'
SERIAL_PORT_TYPE_RW = 'rw'

# The default serial console port number used for
# logging and interactive sessions.
DEFAULT_SERIAL_CONSOLE_PORT = 1

# Flavor extra-spec keys for RemoteFX graphics configuration.
FLAVOR_ESPEC_REMOTEFX_RES = 'os:resolution'
FLAVOR_ESPEC_REMOTEFX_MONITORS = 'os:monitors'
FLAVOR_ESPEC_REMOTEFX_VRAM = 'os:vram'

# Normalization base for IOPS QoS values (8 KiB per IO).
IOPS_BASE_SIZE = 8 * units.Ki

# Volume attachment protocols supported by the driver.
STORAGE_PROTOCOL_ISCSI = 'iscsi'
STORAGE_PROTOCOL_FC = 'fibre_channel'
STORAGE_PROTOCOL_SMBFS = 'smbfs'
STORAGE_PROTOCOL_RBD = 'rbd'
| apache-2.0 |
aurelijusb/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32com/demos/outlookAddin.py | 34 | 4637 | # A demo plugin for Microsoft Outlook (NOT Outlook Express)
#
# This addin simply adds a new button to the main Outlook toolbar,
# and displays a message box when clicked. Thus, it demonstrates
# how to plug in to Outlook itself, and hook outlook events.
#
# Additionally, each time a new message arrives in the Inbox, a message
# is printed with the subject of the message.
#
# To register the addin, simply execute:
# outlookAddin.py
# This will install the COM server, and write the necessary
# AddIn key to Outlook
#
# To unregister completely:
# outlookAddin.py --unregister
#
# To debug, execute:
# outlookAddin.py --debug
#
# Then open Pythonwin, and select "Tools->Trace Collector Debugging Tool"
# Restart Outlook, and you should see some output generated.
#
# NOTE: If the AddIn fails with an error, Outlook will re-register
# the addin to not automatically load next time Outlook starts. To
# correct this, simply re-register the addin (see above)
from win32com import universal
from win32com.server.exception import COMException
from win32com.client import gencache, DispatchWithEvents
import winerror
import pythoncom
from win32com.client import constants
import sys
# Support for COM objects we use.
gencache.EnsureModule('{00062FFF-0000-0000-C000-000000000046}', 0, 9, 0, bForDemand=True) # Outlook 9
gencache.EnsureModule('{2DF8D04C-5BFA-101B-BDE5-00AA0044DE52}', 0, 2, 1, bForDemand=True) # Office 9
# The TLB defiining the interfaces we implement
universal.RegisterInterfaces('{AC0714F2-3D04-11D1-AE7D-00A0C90F26F4}', 0, 1, 0, ["_IDTExtensibility2"])
class ButtonEvent:
    """Event sink for the toolbar button added by the add-in."""
    def OnClick(self, button, cancel):
        # Fired when the user clicks the "Python" toolbar button.
        import win32ui # Possible, but not necessary, to use a Pythonwin GUI
        win32ui.MessageBox("Hello from Python")
        return cancel
class FolderEvent:
    """Event sink for the Inbox's Items collection."""
    def OnItemAdd(self, item):
        # Fired for each item that arrives in the hooked folder.
        try:
            print "An item was added to the inbox with subject:", item.Subject
        except AttributeError:
            # Not every Outlook item type exposes a Subject property.
            print "An item was added to the inbox, but it has no subject! - ", repr(item)
class OutlookAddin:
    """COM add-in implementing Outlook's _IDTExtensibility2 interface.

    On connection it adds a temporary "Python" button to the Standard
    toolbar and hooks the Inbox so newly received messages are reported.
    """
    _com_interfaces_ = ['_IDTExtensibility2']
    _public_methods_ = []
    _reg_clsctx_ = pythoncom.CLSCTX_INPROC_SERVER
    # CLSID and ProgID under which the COM server is registered.
    _reg_clsid_ = "{0F47D9F3-598B-4d24-B7E3-92AC15ED27E2}"
    _reg_progid_ = "Python.Test.OutlookAddin"
    # EventHandlerPolicy dispatches COM events to the On* methods below.
    _reg_policy_spec_ = "win32com.server.policy.EventHandlerPolicy"

    def OnConnection(self, application, connectMode, addin, custom):
        # Called by Outlook when the add-in is loaded.
        print "OnConnection", application, connectMode, addin, custom
        # ActiveExplorer may be none when started without a UI (eg, WinCE synchronisation)
        activeExplorer = application.ActiveExplorer()
        if activeExplorer is not None:
            bars = activeExplorer.CommandBars
            toolbar = bars.Item("Standard")
            item = toolbar.Controls.Add(Type=constants.msoControlButton, Temporary=True)
            # Hook events for the item
            item = self.toolbarButton = DispatchWithEvents(item, ButtonEvent)
            item.Caption="Python"
            item.TooltipText = "Click for Python"
            item.Enabled = True

        # And now, for the sake of demonstration, setup a hook for all new messages
        inbox = application.Session.GetDefaultFolder(constants.olFolderInbox)
        # Keep a reference so the event hook stays alive for the session.
        self.inboxItems = DispatchWithEvents(inbox.Items, FolderEvent)

    def OnDisconnection(self, mode, custom):
        print "OnDisconnection"

    def OnAddInsUpdate(self, custom):
        print "OnAddInsUpdate", custom

    def OnStartupComplete(self, custom):
        print "OnStartupComplete", custom

    def OnBeginShutdown(self, custom):
        print "OnBeginShutdown", custom
def RegisterAddin(klass):
    """Write the registry entries that make Outlook auto-load this add-in."""
    import _winreg
    addins_key = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Office\\Outlook\\Addins")
    entry_key = _winreg.CreateKey(addins_key, klass._reg_progid_)
    # LoadBehavior 3 means "load at startup"; CommandLineSafe 0 means the
    # add-in may present UI.
    for value_name, value_type, value in (
            ("CommandLineSafe", _winreg.REG_DWORD, 0),
            ("LoadBehavior", _winreg.REG_DWORD, 3),
            ("Description", _winreg.REG_SZ, klass._reg_progid_),
            ("FriendlyName", _winreg.REG_SZ, klass._reg_progid_)):
        _winreg.SetValueEx(entry_key, value_name, 0, value_type, value)
def UnregisterAddin(klass):
    """Remove the add-in's registry entry; silently succeed if absent."""
    import _winreg
    key_path = "Software\\Microsoft\\Office\\Outlook\\Addins\\" + klass._reg_progid_
    try:
        _winreg.DeleteKey(_winreg.HKEY_CURRENT_USER, key_path)
    except WindowsError:
        # Never registered (or already unregistered) - nothing to do.
        pass
if __name__ == '__main__':
    # Register (or, with --unregister, remove) the COM server itself.
    import win32com.server.register
    win32com.server.register.UseCommandLine(OutlookAddin)
    # Then mirror the action on the Outlook AddIns registry entries.
    if "--unregister" in sys.argv:
        UnregisterAddin(OutlookAddin)
    else:
        RegisterAddin(OutlookAddin)
| apache-2.0 |
mousebrains/kayak | scripts/MyLogger.py | 1 | 1526 | #
# Construct a logger given a standard set of command line arguments
#
# Oct-2019, Pat Welch, pat@mousebrains.com
import logging
import logging.handlers
import argparse
def addArgs(parser: argparse.ArgumentParser):
    """ Add my options to an argparse object """
    group = parser.add_argument_group('Log related options')
    group.add_argument('--logfile', type=str, metavar='filename', help='Log filename')
    group.add_argument('--verbose', action='store_true', help='Enable debug messages')
    group.add_argument('--maxlogsize', type=int, default=10000000, metavar='size',
                       help='Maximum logfile size')
    group.add_argument('--backupcount', type=int, default=7, metavar='count',
                       help='Number of logfiles to keep')
def mkLogger(args: argparse.Namespace, name: str,
             fmt: str = None) -> logging.Logger:
    """Make a logging object based on the options in args.

    BUGFIX: the ``args`` annotation previously claimed
    ``argparse.ArgumentParser``; the function actually receives the parsed
    ``argparse.Namespace`` (it reads ``args.logfile`` etc.).

    :param args: parsed options as produced by a parser set up via addArgs()
    :param name: logger name (passed to logging.getLogger)
    :param fmt: optional log record format; a default with timestamp,
                thread name and level is used when None
    :return: the configured logging.Logger

    NOTE(review): calling this twice with the same name stacks a second
    handler on the logger, duplicating output - callers appear to invoke it
    once per process; confirm before reuse.
    """
    logger = logging.getLogger(name)
    # The logger itself accepts everything; the handler's level (below)
    # decides what is actually emitted.
    logger.setLevel(logging.DEBUG)

    if args.logfile is None:
        ch = logging.StreamHandler()  # log to stderr when no file given
    else:
        ch = logging.handlers.RotatingFileHandler(args.logfile,
                                                  maxBytes=args.maxlogsize,
                                                  backupCount=args.backupcount)

    ch.setLevel(logging.DEBUG if args.verbose else logging.INFO)

    if fmt is None:
        fmt = '%(asctime)s: %(threadName)s:%(levelname)s - %(message)s'
    formatter = logging.Formatter(fmt)

    ch.setFormatter(formatter)
    logger.addHandler(ch)
    return logger
| gpl-3.0 |
ahu-odoo/odoo | addons/crm/wizard/crm_partner_binding.py | 177 | 4544 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_partner_binding(osv.osv_memory):
    """
    Handle the partner binding or generation in any CRM wizard that requires
    such feature, like the lead2opportunity wizard, or the
    phonecall2opportunity wizard. Try to find a matching partner from the
    CRM model's information (name, email, phone number, etc) or create a new
    one on the fly.
    Use it like a mixin with the wizard of your choice.
    """
    _name = 'crm.partner.binding'
    _description = 'Handle partner binding or generation in CRM wizards.'
    _columns = {
        'action': fields.selection([
                ('exist', 'Link to an existing customer'),
                ('create', 'Create a new customer'),
                ('nothing', 'Do not link to a customer')
            ], 'Related Customer', required=True),
        'partner_id': fields.many2one('res.partner', 'Customer'),
    }

    def _find_matching_partner(self, cr, uid, context=None):
        """
        Try to find a matching partner regarding the active model data, like
        the customer's name, email, phone number, etc.

        :return int partner_id if any, False otherwise
        """
        if context is None:
            context = {}
        partner_id = False
        partner_obj = self.pool.get('res.partner')

        # The active model has to be a lead or a phonecall.
        # BUGFIX: active_model must be initialized here; it was previously
        # only assigned inside the two branches below, so any other active
        # model in the context raised NameError at the "if active_model"
        # check instead of simply returning no match.
        active_model = None
        if (context.get('active_model') == 'crm.lead') and context.get('active_id'):
            active_model = self.pool.get('crm.lead').browse(cr, uid, context.get('active_id'), context=context)
        elif (context.get('active_model') == 'crm.phonecall') and context.get('active_id'):
            active_model = self.pool.get('crm.phonecall').browse(cr, uid, context.get('active_id'), context=context)

        # Find the best matching partner for the active model
        if active_model:
            # A partner is set already
            if active_model.partner_id:
                partner_id = active_model.partner_id.id
            # Search through the existing partners based on the lead's email
            elif active_model.email_from:
                partner_ids = partner_obj.search(cr, uid, [('email', '=', active_model.email_from)], context=context)
                if partner_ids:
                    partner_id = partner_ids[0]
            # Search through the existing partners based on the lead's partner or contact name
            elif active_model.partner_name:
                partner_ids = partner_obj.search(cr, uid, [('name', 'ilike', '%'+active_model.partner_name+'%')], context=context)
                if partner_ids:
                    partner_id = partner_ids[0]
            elif active_model.contact_name:
                partner_ids = partner_obj.search(cr, uid, [
                        ('name', 'ilike', '%'+active_model.contact_name+'%')], context=context)
                if partner_ids:
                    partner_id = partner_ids[0]

        return partner_id

    def default_get(self, cr, uid, fields, context=None):
        """Pre-select a matching partner and the corresponding action."""
        res = super(crm_partner_binding, self).default_get(cr, uid, fields, context=context)
        partner_id = self._find_matching_partner(cr, uid, context=context)

        if 'action' in fields:
            # Link to the found partner, otherwise propose creating one.
            res['action'] = partner_id and 'exist' or 'create'
        if 'partner_id' in fields:
            res['partner_id'] = partner_id

        return res
| agpl-3.0 |
csrocha/OpenUpgrade | addons/base_import/models.py | 90 | 13824 | import csv
import itertools
import logging
import operator
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import psycopg2
from openerp.osv import orm, fields
from openerp.tools.translate import _
FIELDS_RECURSION_LIMIT = 2
ERROR_PREVIEW_BYTES = 200
_logger = logging.getLogger(__name__)
class ir_import(orm.TransientModel):
    """Transient wizard backing the CSV import screen.

    Holds the uploaded file, previews it and matches its columns against
    the target model's importable fields, then feeds the converted rows
    to ``Model.load()``.  NOTE: this is Python 2 code (``iteritems``,
    ``itertools.imap``/``ifilter``, ``except Exception, e``).
    """
    _name = 'base_import.import'
    # allow imports to survive for 12h in case user is slow
    _transient_max_hours = 12.0
    _columns = {
        'res_model': fields.char('Model'),
        'file': fields.binary(
            'File', help="File to check and/or import, raw binary (not base64)"),
        'file_name': fields.char('File Name'),
        'file_type': fields.char('File Type'),
    }
    def get_fields(self, cr, uid, model, context=None,
                   depth=FIELDS_RECURSION_LIMIT):
        """ Recursively get fields for the provided model (through
        fields_get) and filter them according to importability
        The output format is a list of ``Field``, with ``Field``
        defined as:
        .. class:: Field
            .. attribute:: id (str)
                A non-unique identifier for the field, used to compute
                the span of the ``required`` attribute: if multiple
                ``required`` fields have the same id, only one of them
                is necessary.
            .. attribute:: name (str)
                The field's logical (Odoo) name within the scope of
                its parent.
            .. attribute:: string (str)
                The field's human-readable name (``@string``)
            .. attribute:: required (bool)
                Whether the field is marked as required in the
                model. Clients must provide non-empty import values
                for all required fields or the import will error out.
            .. attribute:: fields (list(Field))
                The current field's subfields. The database and
                external identifiers for m2o and m2m fields; a
                filtered and transformed fields_get for o2m fields (to
                a variable depth defined by ``depth``).
                Fields with no sub-fields will have an empty list of
                sub-fields.
        :param str model: name of the model to get fields form
        :param int depth: depth of recursion into o2m fields
        """
        model_obj = self.pool[model]
        # 'id' (external identifier) is always importable
        fields = [{
            'id': 'id',
            'name': 'id',
            'string': _("External ID"),
            'required': False,
            'fields': [],
        }]
        fields_got = model_obj.fields_get(cr, uid, context=context)
        blacklist = orm.MAGIC_COLUMNS + [model_obj.CONCURRENCY_CHECK_FIELD]
        for name, field in fields_got.iteritems():
            if name in blacklist:
                continue
            # an empty string means the field is deprecated, @deprecated must
            # be absent or False to mean not-deprecated
            if field.get('deprecated', False) is not False:
                continue
            if field.get('readonly'):
                states = field.get('states')
                if not states:
                    continue
                # states = {state: [(attr, value), (attr2, value2)], state2:...}
                if not any(attr == 'readonly' and value is False
                           for attr, value in itertools.chain.from_iterable(
                                states.itervalues())):
                    continue
            f = {
                'id': name,
                'name': name,
                'string': field['string'],
                # Y U NO ALWAYS HAS REQUIRED
                'required': bool(field.get('required')),
                'fields': [],
            }
            if field['type'] in ('many2many', 'many2one'):
                # relational targets can be addressed by external or database id
                f['fields'] = [
                    dict(f, name='id', string=_("External ID")),
                    dict(f, name='.id', string=_("Database ID")),
                ]
            elif field['type'] == 'one2many' and depth:
                f['fields'] = self.get_fields(
                    cr, uid, field['relation'], context=context, depth=depth-1)
                if self.pool['res.users'].has_group(cr, uid, 'base.group_no_one'):
                    f['fields'].append({'id' : '.id', 'name': '.id', 'string': _("Database ID"), 'required': False, 'fields': []})
            fields.append(f)
        # TODO: cache on model?
        return fields
    def _read_csv(self, record, options):
        """ Returns a CSV-parsed iterator of all non-empty lines in the file
        :throws csv.Error: if an error is detected during CSV parsing
        :throws UnicodeDecodeError: if ``options.encoding`` is incorrect
        """
        csv_iterator = csv.reader(
            StringIO(record.file),
            quotechar=str(options['quoting']),
            delimiter=str(options['separator']))
        # drop rows that parsed to an empty list
        csv_nonempty = itertools.ifilter(None, csv_iterator)
        # TODO: guess encoding with chardet? Or https://github.com/aadsm/jschardet
        encoding = options.get('encoding', 'utf-8')
        # decode each cell lazily; decoding errors may surface only when
        # the returned iterator is consumed
        return itertools.imap(
            lambda row: [item.decode(encoding) for item in row],
            csv_nonempty)
    def _match_header(self, header, fields, options):
        """ Attempts to match a given header to a field of the
        imported model.
        :param str header: header name from the CSV file
        :param fields:
        :param dict options:
        :returns: an empty list if the header couldn't be matched, or
                  all the fields to traverse
        :rtype: list(Field)
        """
        for field in fields:
            # FIXME: should match all translations & original
            # TODO: use string distance (levenshtein? hamming?)
            if header == field['name'] \
                    or header.lower() == field['string'].lower():
                return [field]
        if '/' not in header:
            return []
        # relational field path
        traversal = []
        subfields = fields
        # Iteratively dive into fields tree
        for section in header.split('/'):
            # Strip section in case spaces are added around '/' for
            # readability of paths
            match = self._match_header(section.strip(), subfields, options)
            # Any match failure, exit
            if not match: return []
            # prep subfields for next iteration within match[0]
            field = match[0]
            subfields = field['fields']
            traversal.append(field)
        return traversal
    def _match_headers(self, rows, fields, options):
        """ Attempts to match the imported model's fields to the
        titles of the parsed CSV file, if the file is supposed to have
        headers.
        Will consume the first line of the ``rows`` iterator.
        Returns a pair of (None, None) if headers were not requested
        or the list of headers and a dict mapping cell indices
        to key paths in the ``fields`` tree
        :param Iterator rows:
        :param dict fields:
        :param dict options:
        :rtype: (None, None) | (list(str), dict(int: list(str)))
        """
        if not options.get('headers'):
            return None, None
        headers = next(rows)
        # unmatched columns map to None so the client can show them as ignored
        return headers, dict(
            (index, [field['name'] for field in self._match_header(header, fields, options)] or None)
            for index, header in enumerate(headers)
        )
    def parse_preview(self, cr, uid, id, options, count=10, context=None):
        """ Generates a preview of the uploaded files, and performs
        fields-matching between the import's file data and the model's
        columns.
        If the headers are not requested (not options.headers),
        ``matches`` and ``headers`` are both ``False``.
        :param id: identifier of the import
        :param int count: number of preview lines to generate
        :param options: format-specific options.
                        CSV: {encoding, quoting, separator, headers}
        :type options: {str, str, str, bool}
        :returns: {fields, matches, headers, preview} | {error, preview}
        :rtype: {dict(str: dict(...)), dict(int, list(str)), list(str), list(list(str))} | {str, str}
        """
        (record,) = self.browse(cr, uid, [id], context=context)
        fields = self.get_fields(cr, uid, record.res_model, context=context)
        try:
            rows = self._read_csv(record, options)
            headers, matches = self._match_headers(rows, fields, options)
            # Match should have consumed the first row (iif headers), get
            # the ``count`` next rows for preview
            preview = list(itertools.islice(rows, count))
            assert preview, "CSV file seems to have no content"
            return {
                'fields': fields,
                'matches': matches or False,
                'headers': headers or False,
                'preview': preview,
            }
        except Exception, e:
            # Due to lazy generators, UnicodeDecodeError (for
            # instance) may only be raised when serializing the
            # preview to a list in the return.
            _logger.debug("Error during CSV parsing preview", exc_info=True)
            return {
                'error': str(e),
                # iso-8859-1 ensures decoding will always succeed,
                # even if it yields non-printable characters. This is
                # in case of UnicodeDecodeError (or csv.Error
                # compounded with UnicodeDecodeError)
                'preview': record.file[:ERROR_PREVIEW_BYTES]
                                .decode( 'iso-8859-1'),
            }
    def _convert_import_data(self, record, fields, options, context=None):
        """ Extracts the input browse_record and fields list (with
        ``False``-y placeholders for fields to *not* import) into a
        format Model.import_data can use: a fields list without holes
        and the precisely matching data matrix
        :param browse_record record:
        :param list(str|bool): fields
        :returns: (data, fields)
        :rtype: (list(list(str)), list(str))
        :raises ValueError: in case the import data could not be converted
        """
        # Get indices for non-empty fields
        indices = [index for index, field in enumerate(fields) if field]
        if not indices:
            raise ValueError(_("You must configure at least one field to import"))
        # If only one index, itemgetter will return an atom rather
        # than a 1-tuple
        if len(indices) == 1: mapper = lambda row: [row[indices[0]]]
        else: mapper = operator.itemgetter(*indices)
        # Get only list of actually imported fields
        import_fields = filter(None, fields)
        rows_to_import = self._read_csv(record, options)
        if options.get('headers'):
            # skip the header row before importing
            rows_to_import = itertools.islice(
                rows_to_import, 1, None)
        data = [
            row for row in itertools.imap(mapper, rows_to_import)
            # don't try inserting completely empty rows (e.g. from
            # filtering out o2m fields)
            if any(row)
        ]
        return data, import_fields
    def do(self, cr, uid, id, fields, options, dryrun=False, context=None):
        """ Actual execution of the import
        :param fields: import mapping: maps each column to a field,
                       ``False`` for the columns to ignore
        :type fields: list(str|bool)
        :param dict options:
        :param bool dryrun: performs all import operations (and
                            validations) but rollbacks writes, allows
                            getting as much errors as possible without
                            the risk of clobbering the database.
        :returns: A list of errors. If the list is empty the import
                  executed fully and correctly. If the list is
                  non-empty it contains dicts with 3 keys ``type`` the
                  type of error (``error|warning``); ``message`` the
                  error message associated with the error (a string)
                  and ``record`` the data which failed to import (or
                  ``false`` if that data isn't available or provided)
        :rtype: list({type, message, record})
        """
        # savepoint lets a dryrun roll everything back below
        cr.execute('SAVEPOINT import')
        (record,) = self.browse(cr, uid, [id], context=context)
        try:
            data, import_fields = self._convert_import_data(
                record, fields, options, context=context)
        except ValueError, e:
            return [{
                'type': 'error',
                'message': unicode(e),
                'record': False,
            }]
        _logger.info('importing %d rows...', len(data))
        import_result = self.pool[record.res_model].load(
            cr, uid, import_fields, data, context=context)
        _logger.info('done')
        # If transaction aborted, RELEASE SAVEPOINT is going to raise
        # an InternalError (ROLLBACK should work, maybe). Ignore that.
        # TODO: to handle multiple errors, create savepoint around
        #       write and release it in case of write error (after
        #       adding error to errors array) => can keep on trying to
        #       import stuff, and rollback at the end if there is any
        #       error in the results.
        try:
            if dryrun:
                cr.execute('ROLLBACK TO SAVEPOINT import')
            else:
                cr.execute('RELEASE SAVEPOINT import')
        except psycopg2.InternalError:
            pass
        return import_result['messages']
| agpl-3.0 |
meraki/provisioning-lib | python-3.5-api-module/sample-createbulkimport.py | 3 | 3116 | import merakiapi
import os
import re
from vars import org, apikey
# bulk network import filename
bncfile = 'bncimport.csv'
# API doesn't provide DNS information for device, these variables allow defining DNS manually
dns1 = '8.8.8.8'
dns2 = '8.8.4.4'
# API doesn't provide network mask information for device, this variable allows defining mask manually
netmask = '255.255.255.0'
# API doesn't provide VLAN ID information for device, this variable allows defining VLAN ID manually
vlan = '#'
# API doesn't provide notes information for device, this variable allows defining notes manually
notes = 'This text will be inserted in the notes field of all devices'
# API doesn't provide gateway information for devices, this variable allows defining gateway manually
gateway = '###.###.###.###'
# Check for existing bulk import file, if it doesn't exist create new file with header line
if os.path.isfile(bncfile):
    writefile = open(bncfile, 'a+')
else:
    writefile = open(bncfile, 'w+')
    print('Network name,Serial,Network tags,Name,Tags,Address,Notes,Static IP,Netmask,Gateway,DNS1,DNS2,VLAN',
          file=writefile)
# Walk every network in the org and emit one CSV row per device
orgnetworks = merakiapi.getnetworklist(apikey, org, suppressprint=True)
for network in orgnetworks:
    networkname = network['name']
    if 'tags' not in network:
        networktags = ''
    else:
        networktags = network['tags']
    devicelist = merakiapi.getnetworkdevices(apikey, network['id'], suppressprint=True)
    for device in devicelist:
        # Each optional API key falls back to an empty string when absent
        if 'serial' not in device:
            serialnum = ''
        else:
            serialnum = device['serial']
        if 'name' not in device:
            devicename = ''
        else:
            devicename = device['name']
        # NOTE(review): 'devicemodel' is collected but never written to the
        # CSV row below — confirm whether the model should be exported
        if 'model' not in device:
            devicemodel = ''
        else:
            devicemodel = device['model']
        if 'lanIp' not in device:
            deviceip = ''
        else:
            deviceip = device['lanIp']
        if 'tags' not in device:
            devicetags = ''
        else:
            devicetags = device['tags']
        if 'address' not in device:
            deviceaddr = ''
        else:
            deviceaddr = device['address']
        # Remove commas from address information as bulk network creator doesn't accept commas in field data
        deviceaddr = re.sub(',', ' ', deviceaddr)
        deviceaddr = re.sub('\n', ' ', deviceaddr)
        # Column order matches the header row written above
        print('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12}'.format(str(networkname), str(serialnum),
                                                                              str(networktags), str(devicename),
                                                                              str(devicetags), str(deviceaddr),
                                                                              str(notes), str(deviceip), str(netmask),
                                                                              str(gateway), str(dns1), str(dns2),
                                                                              str(vlan)), file=writefile)
writefile.close()
| gpl-3.0 |
CN-UPB/upb-son-editor-backend | src/son_editor/tests/utils.py | 2 | 6214 | import son_editor.impl.functionsimpl
import son_editor.impl.projectsimpl
import son_editor.impl.servicesimpl
import son_editor.impl.workspaceimpl
import son_editor.impl.cataloguesimpl
import os
from son_editor.app.database import db_session
from son_editor.impl.usermanagement import get_user
from son_editor.models.user import User
from son_editor.models.workspace import Workspace
from son_editor.util import constants
from son_editor.util.requestutil import get_config
def _get_header():
    """Return the default JSON content-type header used by the test HTTP calls."""
    headers = {}
    headers['Content-Type'] = 'application/json'
    return headers
def get_sample_vnf(name: str, vendor: str, version: str):
    """
    Build a minimal, validation-passing VNF descriptor.

    :param name: The VNF name
    :param vendor: The VNF vendor
    :param version: The VNF version
    :return: a dictionary with a valid vnf descriptor that will pass validation
    """
    # single VDU with the smallest resource footprint accepted by validation
    vdu = {
        "id": "vdu_id",
        "resource_requirements": {
            "cpu": {"vcpus": 1},
            "memory": {"size": 1},
        },
    }
    descriptor = {
        "vendor": vendor,
        "name": name,
        "version": version,
        "descriptor_version": "0.1",
        "virtual_deployment_units": [vdu],
    }
    return {'descriptor': descriptor}
def create_vnf(wsid: int, pjid: int, name: str, vendor: str, version: str) -> str:
    """
    Creates a function with given name, vendor and version in the given project returns the id
    :param wsid: ID of the workspace
    :param pjid: ID of the project
    :param name: Name for the function to create
    :param vendor: Vendor name for the function to create
    :param version: Version name for the function to create
    :returns: ID of the created function
    """
    # delegate to the functions implementation with a minimal sample descriptor
    result = son_editor.impl.functionsimpl.create_function(wsid, pjid, get_sample_vnf(name, vendor, version))
    return result['id']
def get_sample_ns(name: str, vendor: str, version: str) -> dict:
    """
    Build a minimal valid network-service descriptor.

    :param name: The name of the service
    :param vendor: the vendor of the service
    :param version: the version of the service
    :return: A dict containing a descriptor that will pass validation and a
             metadata object for the service
    """
    descriptor = {"descriptor_version": "0.1"}
    descriptor['name'] = name
    descriptor['vendor'] = vendor
    descriptor['version'] = version
    # metadata starts with no saved editor positions
    return {'descriptor': descriptor, 'meta': {'positions': []}}
def create_ns(wsid: int, pjid: int, name: str, vendor: str, version: str) -> int:
    """
    Creates a service with given name, vendor and version in the given project and returns the id
    :param wsid: ID of the workspace
    :param pjid: ID of the project
    :param name: Name for the service to create
    :param vendor: Vendor name for the service to create
    :param version: Version name for the service to create
    :returns: ID of the created service
    """
    # delegate to the services implementation with a minimal sample descriptor
    result = son_editor.impl.servicesimpl.create_service(wsid, pjid, get_sample_ns(name, vendor, version))
    return result['id']
def create_workspace(user: User, ws_name: str) -> int:
    """
    Creates a workspace
    :param user: the user for which to insert the workspace
    :param ws_name: Name of the workspace that gets created
    :return: ID of the created workspace
    """
    # single 'sonEmu' platform pointing at the test instance from the config
    ws_data = {'name': ws_name,
               'platforms': [
                   {'name': 'sonEmu',
                    'url': get_config()['test']['platform-instance']}
               ]}
    workspace_data = son_editor.impl.workspaceimpl.create_workspace(user.name, ws_data)
    return workspace_data['id']
def create_private_catalogue_descriptor(ws: "Workspace", vendor: str, name: str, version: str, isVNF: bool):
    """
    Create a minimal descriptor file inside a workspace's private catalogue.

    Builds ``<ws.path>/catalogues/<type>/<vendor>/<name>/<version>/`` and
    writes a ``descriptor.yml`` containing the vendor, name and version.

    :param ws: workspace whose ``path`` attribute points at the workspace root
    :param vendor: descriptor vendor
    :param name: descriptor name
    :param version: descriptor version
    :param isVNF: True for the vnf_catalogue, False for the ns_catalogue
    """
    catalogue_type = "vnf_catalogue" if isVNF else "ns_catalogue"
    path = ws.path + "/catalogues/{}/{}/{}/{}/".format(catalogue_type, vendor, name, version)
    os.makedirs(path)
    # context manager guarantees the handle is closed even if write() raises
    # (original opened/closed manually and leaked the handle on error)
    with open(path + "descriptor.yml", 'a') as descriptor_file:
        descriptor_file.write('vendor: "{}"\n'.format(vendor) +
                              'name: "{}"\n'.format(name) +
                              'version: "{}"'.format(version))
def create_catalogue(wsid: int, name: str, url: str):
    """Create a catalogue with the given name and URL in the workspace and return its ID."""
    return son_editor.impl.cataloguesimpl.create_catalogue(wsid, {'name': name, 'url': url})['id']
def create_logged_in_user(app, user_name, access_token='fake_access_token') -> User:
    """
    Creates a user with database record and session
    :param app: Test context / app
    :param user_name: User name
    :param access_token: Token stored in the test session (fake by default)
    :return: Model instance
    """
    # Add some session stuff ( need for finding the user's workspace )
    # Add some dummy objects
    user = User(name=user_name, email=user_name + "@bar.com")
    session = db_session()
    session.add(user)
    session.commit()
    # NOTE: 'session' is re-bound below from DB session to the Flask test
    # session transaction
    with app as c:
        with c.session_transaction() as session:
            session['access_token'] = access_token
            session['user_data'] = {'login': user_name, 'email': user.email}
    return user
def delete_workspace(testcase, ws_id: int):
    """
    Deletes a workspace
    :param testcase: Testcase instance to call HTTP requests
    :param ws_id: The workspace id which gets deleted
    :return: True, if successful (HTTP 200)
    """
    response = testcase.app.delete("/" + constants.WORKSPACES + "/" + str(ws_id) + "/",
                                   headers=_get_header())
    return response.status_code == 200
def create_project(ws_id: int, project_name: str) -> str:
    """
    Creates a project
    :param ws_id: The workspace where the project gets created
    :param project_name: Name of the project that gets created
    :return: ID of the created project
    """
    return son_editor.impl.projectsimpl.create_project(ws_id, {'name': project_name})['id']
| apache-2.0 |
songmonit/CTTMSONLINE_V8 | addons/hr_timesheet_sheet/__init__.py | 434 | 1127 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_sheet
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
alanljj/oca_server-tools | users_ldap_mail/users_ldap_model.py | 51 | 2783 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Daniel Reis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
import logging
_log = logging.getLogger(__name__)
class CompanyLDAP(orm.Model):
    """Extends res.company.ldap with configurable name/mail LDAP attributes."""
    _inherit = 'res.company.ldap'
    _columns = {
        'name_attribute': fields.char(
            'Name Attribute', size=64,
            help="By default 'cn' is used. "
                 "For ActiveDirectory you might use 'displayName' instead."),
        'mail_attribute': fields.char(
            'E-mail attribute', size=64,
            help="LDAP attribute to use to retrieve em-mail address."),
    }
    _defaults = {
        'name_attribute': 'cn',
        'mail_attribute': 'mail',
    }
    def get_ldap_dicts(self, cr, ids=None):
        """
        Copy of auth_ldap's function, changing only the SQL, so that it returns
        all fields in the table.
        """
        if ids:
            # psycopg2 adapts the tuple argument for the IN (%s) placeholder
            id_clause = 'AND id IN (%s)'
            args = [tuple(ids)]
        else:
            id_clause = ''
            args = []
        cr.execute("""
            SELECT *
            FROM res_company_ldap
            WHERE ldap_server != '' """ + id_clause + """ ORDER BY sequence
        """, args)
        return cr.dictfetchall()
    def map_ldap_attributes(self, cr, uid, conf, login, ldap_entry):
        """Augment the standard mapping with the configured name/email attributes."""
        values = super(CompanyLDAP, self).map_ldap_attributes(
            cr, uid, conf, login, ldap_entry)
        # (res.users field, configuration column holding the LDAP attribute)
        mapping = [
            ('name', 'name_attribute'),
            ('email', 'mail_attribute'),
        ]
        for value_key, conf_name in mapping:
            try:
                if conf[conf_name]:
                    values[value_key] = ldap_entry[1][conf[conf_name]][0]
            except KeyError:
                # missing attribute in the LDAP entry: keep the default value
                _log.warning('No LDAP attribute "%s" found for login "%s"' % (
                    conf.get(conf_name), values.get('login')))
        return values
| agpl-3.0 |
wraiden/spacewalk | backend/satellite_tools/exporter/exportLib.py | 3 | 53510 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import time
from spacewalk.common.usix import StringType
from spacewalk.common import rhnLib
from spacewalk.common.rhnLog import log_debug
from spacewalk.server import rhnSQL
class ArrayIterator:
    """Adapter exposing a plain list through the cursor-like
    ``fetchone_dict`` interface used by the dumpers."""

    def __init__(self, arr):
        self._arr = arr
        # _pos is None once iteration is exhausted (or the array is empty)
        self._pos = 0 if arr else None

    def get_array(self):
        """Return the wrapped array unchanged."""
        return self._arr

    def fetchone_dict(self):
        """Return the next element, or None when the array is exhausted."""
        if self._pos is None:
            return None
        item = self._arr[self._pos]
        self._pos += 1
        if self._pos >= len(self._arr):
            self._pos = None
        return item
class BaseDumper:
    """Base class for XML dumpers: writes one tag whose subelements come
    from an iterator with a ``fetchone_dict`` interface."""
    # tag_name has to be set in subclasses
    def __init__(self, writer, data_iterator=None):
        self._writer = writer
        self._attributes = {}
        self._iterator = data_iterator
    # Generic timing function
    @staticmethod
    def timer(debug_level, message, function, *args, **kwargs):
        # run function(*args, **kwargs) and log its wall-clock duration
        start = time.time()
        result = function(*args, **kwargs)
        log_debug(debug_level, message, "timing: %.3f" % (time.time() - start))
        return result
    def set_attributes(self):
        # hook: subclasses return the tag's XML attributes
        return self._attributes
    def set_iterator(self):
        # hook: subclasses return the data source for subelements
        return self._iterator
    def dump(self):
        """Write the tag: empty when there is no data, otherwise open it,
        emit each datum as a subelement (or raw XML string) and close it."""
        if not hasattr(self, "tag_name"):
            raise Exception("Programmer error: subclass did not set tag_name")
        tag_name = getattr(self, "tag_name")
        self._attributes = self.set_attributes() or {}
        self._iterator = self.timer(5, "set_iterator", self.set_iterator)
        if not self._iterator:
            self._writer.empty_tag(tag_name, attributes=self._attributes)
            return
        data_found = 0
        while 1:
            data = self.timer(6, "fetchone_dict", self._iterator.fetchone_dict)
            if not data:
                break
            if not data_found:
                # open the tag lazily, only once the first datum shows up
                data_found = 1
                self._writer.open_tag(tag_name, attributes=self._attributes)
            if isinstance(data, StringType):
                # The iterator produced some XML dump, just write it
                self._writer.stream.write(data)
            else:
                self.timer(6, "dump_subelement", self.dump_subelement, data)
        if data_found:
            self._writer.close_tag(tag_name)
        else:
            self._writer.empty_tag(tag_name, attributes=self._attributes)
    def dump_subelement(self, data):
        # pylint: disable=R0201
        # default: nested dumpers dump themselves; others are ignored
        if isinstance(data, BaseDumper):
            data.dump()
    def get_writer(self):
        return self._writer
    def set_writer(self, writer):
        self._writer = writer
class EmptyDumper(BaseDumper):
    """Dumper that always writes a single empty tag with fixed attributes."""
    def __init__(self, writer, tag_name, attributes=None):
        self.tag_name = tag_name
        self.attributes = attributes or {}
        BaseDumper.__init__(self, writer)
    def dump(self):
        self._writer.empty_tag(self.tag_name, attributes=self.attributes)
class SimpleDumper(BaseDumper):
    """Dumper for a scalar value: <tag>value</tag>, or <tag><rhn-null/></tag>
    when the value is None."""
    def __init__(self, writer, tag_name, value, max_value_bytes=None):
        self.tag_name = tag_name
        self._value = value
        # max number of bytes satellite can handle in the matching db row
        self._max_value_bytes = max_value_bytes
        BaseDumper.__init__(self, writer)
    def dump(self):
        self._writer.open_tag(self.tag_name)
        if self._value is None:
            # explicit NULL marker so the importer can distinguish from ''
            self._writer.empty_tag('rhn-null')
        else:
            self._writer.data(self._value)
        self._writer.close_tag(self.tag_name)
class BaseRowDumper(BaseDumper):
    """BaseDumper specialized for dumping a single database row (dict)."""
    def __init__(self, writer, row):
        BaseDumper.__init__(self, writer)
        self._row = row
class BaseChecksumRowDumper(BaseRowDumper):
    """Row dumper whose only subelement is the row's checksum
    (checksum_type/checksum columns)."""
    def set_iterator(self):
        # checksums
        checksum_arr = [{'type': self._row['checksum_type'],
                         'value': self._row['checksum']}]
        arr = [_ChecksumDumper(self._writer, data_iterator=ArrayIterator(checksum_arr))]
        return ArrayIterator(arr)
class BaseQueryDumper(BaseDumper):
    """Dumper whose iterator comes from executing ``iterator_query``
    unless an explicit iterator was supplied."""
    iterator_query = None
    def set_iterator(self):
        if self._iterator:
            return self._iterator
        h = rhnSQL.prepare(self.iterator_query)
        h.execute()
        return h
class BaseSubelementDumper(BaseDumper):
    """Dumper delegating each datum to ``subelement_dumper_class``."""
    # pylint: disable=E1101
    subelement_dumper_class = object
    def dump_subelement(self, data):
        d = self.subelement_dumper_class(self._writer, data)
        d.dump()
####
class ExportTypeDumper(BaseDumper):
    """Writes <export-type> ('incremental' when a start date is given,
    'full' otherwise) plus optional start/end date elements."""
    def __init__(self, writer, start_date=None, end_date=None):
        if start_date:
            self.type = 'incremental'
        else:
            self.type = 'full'
        self.start_date = start_date
        if end_date:
            self.end_date = end_date
        else:
            # default end date: now, in YYYYMMDDHHMMSS form
            self.end_date = time.strftime("%Y%m%d%H%M%S")
        BaseDumper.__init__(self, writer)
    def dump(self):
        self._writer.open_tag('export-type')
        self._writer.stream.write(self.type)
        self._writer.close_tag('export-type')
        if self.start_date:
            self._writer.open_tag('export-start-date')
            self._writer.stream.write(self.start_date)
            self._writer.close_tag('export-start-date')
        if self.end_date:
            self._writer.open_tag('export-end-date')
            self._writer.stream.write(self.end_date)
            self._writer.close_tag('export-end-date')
class SatelliteDumper(BaseDumper):
    """Top-level <rhn-satellite> element wrapping the supplied dumpers."""
    tag_name = 'rhn-satellite'
    def __init__(self, writer, *dumpers):
        BaseDumper.__init__(self, writer)
        self._dumpers = dumpers
    def set_attributes(self):
        # version attribute is a literal placeholder here
        return {
            'version': 'x.y',
        }
    def set_iterator(self):
        return ArrayIterator(self._dumpers)
class _OrgTrustDumper(BaseDumper):
    """Dumps <rhn-org-trusts> with one empty <rhn-org-trust> per trusted org."""
    tag_name = 'rhn-org-trusts'
    def dump_subelement(self, data):
        c = EmptyDumper(self._writer, 'rhn-org-trust', attributes={
            'org-id': data['org_trust_id'],
        })
        c.dump()
class _OrgDumper(BaseDumper):
    """Dumps one <rhn-org> element (id/name attributes) with its trusts."""
    tag_name = 'rhn-org'
    def __init__(self, writer, org):
        self.org = org
        BaseDumper.__init__(self, writer)
    _query_org_trusts = """
        select rto.org_trust_id
        from rhnTrustedOrgs rto
        where rto.org_id = :org_id
    """
    def set_iterator(self):
        # trusts
        h = rhnSQL.prepare(self._query_org_trusts)
        h.execute(org_id=self.org['id'])
        return ArrayIterator([_OrgTrustDumper(self._writer, data_iterator=h)])
    def set_attributes(self):
        attributes = {
            'id': self.org['id'],
            'name': self.org['name'],
        }
        return attributes
class OrgsDumper(BaseDumper):
    """Dumps <rhn-orgs>: one <rhn-org> per row of the data iterator."""
    tag_name = 'rhn-orgs'
    def __init__(self, writer, data_iterator=None):
        BaseDumper.__init__(self, writer, data_iterator)
    def dump_subelement(self, data):
        org = _OrgDumper(self._writer, data)
        org.dump()
class ChannelTrustedOrgsDumper(BaseDumper):
    """Dumps <rhn-channel-trusted-orgs>: one empty element per trusted org."""
    tag_name = 'rhn-channel-trusted-orgs'
    def dump_subelement(self, data):
        d = EmptyDumper(self._writer, 'rhn-channel-trusted-org',
                        attributes={'org-id': data['org_trust_id']})
        d.dump()
class _ChannelDumper(BaseRowDumper):
tag_name = 'rhn-channel'
def __init__(self, writer, row, start_date=None, end_date=None, use_rhn_date=True, whole_errata=False):
BaseRowDumper.__init__(self, writer, row)
self.start_date = start_date
self.end_date = end_date
self.use_rhn_date = use_rhn_date
self.whole_errata = whole_errata
def set_attributes(self):
channel_id = self._row['id']
packages = ["rhn-package-%s" % x for x in self._get_package_ids()]
# XXX channel-errata is deprecated and should go away in dump version
# 3 or higher - we now dump that information in its own subelement
# rhn-channel-errata
errata = ["rhn-erratum-%s" % x for x in self._get_errata_ids()]
ks_trees = self._get_kickstartable_trees()
return {
'channel-id': 'rhn-channel-%s' % channel_id,
'label': self._row['label'],
'org_id': self._row['org_id'] or "",
'channel-arch': self._row['channel_arch'],
'packages': ' '.join(packages),
'channel-errata': ' '.join(errata),
'kickstartable-trees': ' '.join(ks_trees),
'sharing': self._row['channel_access'],
}
_query_channel_families = rhnSQL.Statement("""
select cf.id, cf.label
from rhnChannelFamily cf, rhnChannelFamilyMembers cfm
where cfm.channel_family_id = cf.id
and cfm.channel_id = :channel_id
""")
_query_dist_channel_map = rhnSQL.Statement("""
select dcm.os, dcm.release, ca.label channel_arch
from rhnDistChannelMap dcm, rhnChannelArch ca
where dcm.channel_id = :channel_id
and dcm.channel_arch_id = ca.id
and dcm.org_id is null
""")
_query_get_channel_trusts = rhnSQL.Statement("""
select org_trust_id
from rhnChannelTrust
where channel_id = :channel_id
""")
def set_iterator(self):
channel_id = self._row['id']
arr = []
mappings = [
('rhn-channel-parent-channel', 'parent_channel'),
('rhn-channel-basedir', 'basedir'),
('rhn-channel-name', 'name'),
('rhn-channel-summary', 'summary'),
('rhn-channel-description', 'description'),
('rhn-channel-gpg-key-url', 'gpg_key_url'),
('rhn-channel-checksum-type', 'checksum_type'),
]
for k, v in mappings:
arr.append(SimpleDumper(self._writer, k, self._row[v]))
arr.append(SimpleDumper(self._writer, 'rhn-channel-last-modified',
_dbtime2timestamp(self._row['last_modified'])))
channel_product_details = self._get_channel_product_details()
arr.append(SimpleDumper(self._writer, 'rhn-channel-product-name',
channel_product_details[0]))
arr.append(SimpleDumper(self._writer, 'rhn-channel-product-version',
channel_product_details[1]))
arr.append(SimpleDumper(self._writer, 'rhn-channel-product-beta',
channel_product_details[2]))
comp_last_modified = self._channel_comps_last_modified()
if comp_last_modified is not None:
arr.append(SimpleDumper(self._writer, 'rhn-channel-comps-last-modified',
_dbtime2timestamp(comp_last_modified[0])))
h = rhnSQL.prepare(self._query_get_channel_trusts)
h.execute(channel_id=channel_id)
arr.append(ChannelTrustedOrgsDumper(self._writer, data_iterator=h))
h = rhnSQL.prepare(self._query_channel_families)
h.execute(channel_id=channel_id)
arr.append(ChannelFamiliesDumper(self._writer, data_iterator=h,
ignore_subelements=1))
h = rhnSQL.prepare(self._query_dist_channel_map)
h.execute(channel_id=channel_id)
arr.append(DistsDumper(self._writer, h))
# Source package information (with timestamps)
h = self._get_cursor_source_packages()
arr.append(ChannelSourcePackagesDumper(self._writer, h))
# Errata information (with timestamps)
query_args = {'channel_id': channel_id}
if self.start_date:
if self.use_rhn_date:
query = self._query__get_errata_ids_by_rhnlimits
else:
query = self._query__get_errata_ids_by_limits
query_args.update({'lower_limit': self.start_date,
'upper_limit': self.end_date})
else:
query = self._query__get_errata_ids
h = rhnSQL.prepare(query)
h.execute(**query_args)
arr.append(ChannelErrataDumper(self._writer, h))
arr.append(ExportTypeDumper(self._writer, self.start_date, self.end_date))
return ArrayIterator(arr)
_query_get_package_ids = rhnSQL.Statement("""
select package_id as id
from rhnChannelPackage
where channel_id = :channel_id
""")
_query_get_package_ids_by_date_limits = rhnSQL.Statement("""
select package_id as id
from rhnChannelPackage rcp
where rcp.channel_id = :channel_id
and rcp.modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
and rcp.modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
""")
_query_get_package_ids_by_rhndate_limits = rhnSQL.Statement("""
select package_id as id
from rhnPackage rp, rhnChannelPackage rcp
where rcp.channel_id = :channel_id
and rcp.package_id = rp.id
and rp.last_modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
and rp.last_modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
""")
_query_pkgids_by_date_whole_errata = rhnSQL.Statement("""
select rcp.package_id as id
from rhnChannelPackage rcp, rhnPackage rp
left join rhnErrataPackage rep on rp.id = rep.package_id
left join rhnErrata re on rep.errata_id = re.id
where rcp.channel_id = :channel_id
and rcp.package_id = rp.id
and ((re.modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
and re.modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
) or (rep.package_id is NULL
and rcp.modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
and rcp.modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS'))
)
""")
_query_get_pkgids_by_rhndate_whole_errata = rhnSQL.Statement("""
select rcp.package_id as id
from rhnChannelPackage rcp, rhnPackage rp
left join rhnErrataPackage rep on rp.id = rep.package_id
left join rhnErrata re on rep.errata_id = re.id
where rcp.channel_id = :channel_id
and rcp.package_id = rp.id
and ((re.last_modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
and re.last_modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
) or (rep.package_id is NULL
and rp.last_modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
and rp.last_modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS'))
)
""")
# Things that can be overwriten in subclasses
def _get_package_ids(self):
    """Return the ids of the packages to export for this channel."""
    # Date-limited exports of whole errata use the erratum-based queries;
    # everything else falls through to the plain date-limit queries.
    if self.start_date and self.whole_errata:
        return self._get_ids(self._query_pkgids_by_date_whole_errata,
                             self._query_get_pkgids_by_rhndate_whole_errata,
                             self._query_get_package_ids)
    return self._get_ids(self._query_get_package_ids_by_date_limits,
                         self._query_get_package_ids_by_rhndate_limits,
                         self._query_get_package_ids)
def _get_ids(self, query_with_limit, query_with_rhnlimit, query_no_limits):
    """Run one of three id-selection queries for this channel.

    With a start date set, picks the RHN-date or DB-date limited query
    (per self.use_rhn_date) and binds the date window; otherwise runs the
    unlimited query.  Returns the list of 'id' column values.
    """
    params = {'channel_id': self._row['id']}
    if not self.start_date:
        chosen = query_no_limits
    else:
        if self.use_rhn_date:
            chosen = query_with_rhnlimit
        else:
            chosen = query_with_limit
        params['lower_limit'] = self.start_date
        params['upper_limit'] = self.end_date
    cursor = rhnSQL.prepare(chosen)
    cursor.execute(**params)
    rows = cursor.fetchall_dict() or []
    return [row['id'] for row in rows]
# Source packages (SRPMs) reachable from the channel's binary packages,
# matched on source_rpm_id and on org ownership (both NULL or equal).
_query_get_source_package_ids = rhnSQL.Statement("""
    select distinct ps.id, sr.name source_rpm,
           TO_CHAR(ps.last_modified, 'YYYYMMDDHH24MISS') last_modified
      from rhnChannelPackage cp, rhnPackage p, rhnPackageSource ps,
           rhnSourceRPM sr
     where cp.channel_id = :channel_id
       and cp.package_id = p.id
       and p.source_rpm_id = ps.source_rpm_id
       and ((p.org_id is null and ps.org_id is null) or
           p.org_id = ps.org_id)
       and ps.source_rpm_id = sr.id
""")

def _get_cursor_source_packages(self):
    # Returns the executed cursor; the caller iterates the rows.
    channel_id = self._row['id']
    h = rhnSQL.prepare(self._query_get_source_package_ids)
    h.execute(channel_id=channel_id)
    return h
# All errata linked to the channel, with advisory name and a
# 'YYYYMMDDHH24MISS'-formatted last-modified stamp.
_query__get_errata_ids = rhnSQL.Statement("""
    select ce.errata_id as id, e.advisory_name,
           TO_CHAR(e.last_modified, 'YYYYMMDDHH24MISS') last_modified
      from rhnChannelErrata ce, rhnErrata e
     where ce.channel_id = :channel_id
       and ce.errata_id = e.id
""")

# Date-limited variants are built by appending window conditions to the
# base statement above (DB 'modified' vs 'last_modified' "RHN date").
_query__get_errata_ids_by_limits = rhnSQL.Statement("""
    %s
    and ce.modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
    and ce.modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
""" % _query__get_errata_ids)

_query__get_errata_ids_by_rhnlimits = rhnSQL.Statement("""
    %s
    and e.last_modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
    and e.last_modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
""" % _query__get_errata_ids)

def _get_errata_ids(self):
    # _get_ids picks the proper query from self.start_date/use_rhn_date.
    return self._get_ids(self._query__get_errata_ids_by_limits,
                         self._query__get_errata_ids_by_rhnlimits,
                         self._query__get_errata_ids)
_query_get_kickstartable_trees = rhnSQL.Statement("""
select kt.label as id
from rhnKickstartableTree kt
where kt.channel_id = :channel_id
and kt.org_id is null
""")
_query_get_kickstartable_trees_by_rhnlimits = rhnSQL.Statement("""
%s
and kt.last_modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
and kt.last_modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
""" % _query_get_kickstartable_trees)
_query_get_kickstartable_trees_by_limits = rhnSQL.Statement("""
%s
and kt.modified >= TO_TIMESTAMP(:lower_limit, 'YYYYMMDDHH24MISS')
and kt.modified <= TO_TIMESTAMP(:upper_limit, 'YYYYMMDDHH24MISS')
""" % _query_get_kickstartable_trees)
def _get_kickstartable_trees(self):
    """Return this channel's public kickstart-tree labels, sorted."""
    return sorted(self._get_ids(
        self._query_get_kickstartable_trees_by_limits,
        self._query_get_kickstartable_trees_by_rhnlimits,
        self._query_get_kickstartable_trees))
_query_get_channel_product_details = rhnSQL.Statement("""
select cp.product as name,
cp.version as version,
cp.beta
from rhnChannel c,
rhnChannelProduct cp
where c.id = :channel_id
and c.channel_product_id = cp.id
""")
def _get_channel_product_details(self):
    """
    Export rhnChannelProduct table content through ChannelDumper.

    Returns a (product name, product version, beta status) tuple, or
    (None, None, None) when the channel has no product record.
    """
    cursor = rhnSQL.prepare(self._query_get_channel_product_details)
    cursor.execute(channel_id=self._row['id'])
    row = cursor.fetchone_dict()
    if row:
        return (row['name'], row['version'], row['beta'])
    return (None, None, None)
_query_channel_comps_last_modified = rhnSQL.Statement("""
select to_char(last_modified, 'YYYYMMDDHH24MISS') as comps_last_modified
from rhnChannelComps
where channel_id = :channel_id
order by id desc
""")
def _channel_comps_last_modified(self):
    """Fetch the newest comps last-modified row for this channel (or None)."""
    cursor = rhnSQL.prepare(self._query_channel_comps_last_modified)
    cursor.execute(channel_id=self._row['id'])
    return cursor.fetchone()
class ChannelsDumper(BaseSubelementDumper):
    """<rhn-channels> container; children are rendered by _ChannelDumper."""
    tag_name = 'rhn-channels'
    subelement_dumper_class = _ChannelDumper

    def __init__(self, writer, channels=()):
        BaseSubelementDumper.__init__(self, writer)
        self._channels = channels

    def set_iterator(self):
        if self._channels:
            raise NotImplementedError("To be overridden in a child class")
        # Nothing to do: no channels were selected.
        return
class ChannelDumper(_ChannelDumper):
    """Channel dumper that extends the base channel element with the
    rhn-channel-receiving-updates child element."""
    # pylint: disable=W0231,W0233

    def __init__(self, writer, row):
        BaseRowDumper.__init__(self, writer, row)

    #_query_release_channel_map = rhnSQL.Statement("""
    #    select dcm.os product, dcm.release version,
    #           dcm.eus_release release, ca.label channel_arch,
    #           dcm.is_default is_default
    #      from rhnDistChannelMap dcm, rhnChannelArch ca
    #     where dcm.channel_id = :channel_id
    #       and dcm.channel_arch_id = ca.id
    #       and dcm.is_eus = 'Y'
    #""")

    def set_iterator(self):
        # Bug fix: the parent implementation was invoked unbound without
        # `self` (_ChannelDumper.set_iterator()), which raises a TypeError
        # at runtime; pass the instance explicitly.
        arrayiterator = _ChannelDumper.set_iterator(self)
        arr = arrayiterator.get_array()
        # Extra child elements specific to this dumper.
        mappings = [
            ('rhn-channel-receiving-updates', 'receiving_updates'),
        ]
        for k, v in mappings:
            arr.append(SimpleDumper(self._writer, k, self._row[v]))
        #channel_id = self._row['id']
        # Add EUS info
        #h = rhnSQL.prepare(self._query_release_channel_map)
        # h.execute(channel_id=channel_id)
        #arr.append(ReleaseDumper(self._writer, h))
        return arrayiterator
# class ReleaseDumper(BaseDumper):
# tag_name = 'rhn-release'
#
# def dump_subelement(self, data):
# d = _ReleaseDumper(self._writer, data)
# d.dump()
#
# class _ReleaseDumper(BaseRowDumper):
# tag_name = 'rhn-release'
#
# def set_attributes(self):
# return {
# 'product' : self._row['product'],
# 'version' : self._row['version'],
# 'release' : self._row['release'],
# 'channel-arch' : self._row['channel_arch'],
# 'is-default' : self._row['is_default'],
# }
class _ChannelSourcePackageDumper(BaseRowDumper):
    """Render one <source-package> element: id, srpm name, timestamp."""
    tag_name = 'source-package'

    def set_attributes(self):
        row = self._row
        attrs = {'id': 'rhn-source-package-%s' % row['id']}
        attrs['source-rpm'] = row['source_rpm']
        attrs['last-modified'] = _dbtime2timestamp(row['last_modified'])
        return attrs


class ChannelSourcePackagesDumper(BaseSubelementDumper):
    """<source-packages> container: id and last-modified per source
    package in this channel."""
    tag_name = 'source-packages'
    subelement_dumper_class = _ChannelSourcePackageDumper
class _ChannelErratumDumper(BaseRowDumper):
    """Render one <erratum> element: id, advisory name, timestamp."""
    tag_name = 'erratum'

    def set_attributes(self):
        row = self._row
        attrs = {'id': 'rhn-erratum-%s' % row['id']}
        attrs['advisory-name'] = row['advisory_name']
        attrs['last-modified'] = _dbtime2timestamp(row['last_modified'])
        return attrs


class ChannelErrataDumper(BaseSubelementDumper):
    """<rhn-channel-errata> container: erratum id and last-modified for
    each erratum in this channel."""
    tag_name = 'rhn-channel-errata'
    subelement_dumper_class = _ChannelErratumDumper
class _DistDumper(BaseRowDumper):
    """Render one <rhn-dist> element from a dist-channel-map row."""
    tag_name = 'rhn-dist'

    def set_attributes(self):
        mapping = (('os', 'os'),
                   ('release', 'release'),
                   ('channel-arch', 'channel_arch'))
        return dict((attr, self._row[col]) for attr, col in mapping)


class DistsDumper(BaseSubelementDumper):
    """<rhn-dists> container element."""
    tag_name = 'rhn-dists'
    subelement_dumper_class = _DistDumper
class ChannelFamiliesDumper(BaseQueryDumper):
    """<rhn-channel-families> container; flags are forwarded to each
    per-row _ChannelFamilyDumper."""
    tag_name = 'rhn-channel-families'
    iterator_query = 'select cf.* from rhnChannelFamily'

    def __init__(self, writer, data_iterator=None, ignore_subelements=0,
                 null_max_members=1):
        BaseQueryDumper.__init__(self, writer, data_iterator=data_iterator)
        self._ignore_subelements = ignore_subelements
        self._null_max_members = null_max_members

    def dump_subelement(self, data):
        _ChannelFamilyDumper(
            self._writer, data,
            ignore_subelements=self._ignore_subelements,
            null_max_members=self._null_max_members).dump()
class _ChannelFamilyDumper(BaseRowDumper):
    """Render one <rhn-channel-family> element.

    With ignore_subelements set, only attributes are emitted (used when
    the family appears nested inside another dump).
    """
    tag_name = 'rhn-channel-family'

    def __init__(self, writer, row, ignore_subelements=0, null_max_members=1):
        BaseRowDumper.__init__(self, writer, row)
        self._ignore_subelements = ignore_subelements  # skip child elements
        self._null_max_members = null_max_members      # force max-members to 0

    def set_iterator(self):
        # No child elements for nested dumps.
        if self._ignore_subelements:
            return None
        arr = []
        mappings = [
            ('rhn-channel-family-name', 'name'),
            ('rhn-channel-family-product-url', 'product_url'),
        ]
        for k, v in mappings:
            arr.append(SimpleDumper(self._writer, k, self._row[v]))
        return ArrayIterator(arr)

    # Labels of all channels belonging to this family.
    _query_get_channel_family_channels = rhnSQL.Statement("""
        select c.label
          from rhnChannelFamilyMembers cfm, rhnChannel c
         where cfm.channel_family_id = :channel_family_id
           and cfm.channel_id = c.id
    """)

    def set_attributes(self):
        # Get all channels that are part of this channel family
        h = rhnSQL.prepare(self._query_get_channel_family_channels)
        channel_family_id = self._row['id']
        h.execute(channel_family_id=channel_family_id)
        channels = [x['label'] for x in h.fetchall_dict() or []]
        attributes = {
            'id': "rhn-channel-family-%s" % channel_family_id,
            'label': self._row['label'],
            'channel-labels': ' '.join(channels),
        }
        if self._ignore_subelements:
            return attributes
        # max-members is emitted only for top-level dumps and never for the
        # 'rh-public' family; null_max_members zeroes it out, otherwise the
        # row value is used when present and truthy.
        if self._row['label'] != 'rh-public':
            if self._null_max_members:
                attributes['max-members'] = 0
            elif ('max_members' in self._row) and self._row['max_members']:
                attributes['max-members'] = self._row['max_members']
        return attributes
##
class _PackageDumper(BaseRowDumper):
    """Render one full <rhn-package> element: NEVRA/size attributes plus
    nested summary/description, checksums, changelog, dependency lists
    and the package file list."""
    tag_name = 'rhn-package'

    def set_attributes(self):
        # Row columns copied verbatim (underscores become dashes).
        attrs = ["name", "version", "release", "package_arch",
                 "package_group", "rpm_version", "package_size", "payload_size",
                 "installed_size", "build_host", "source_rpm", "payload_format",
                 "compat"]
        attr_dict = {
            'id': "rhn-package-%s" % self._row['id'],
            'org_id': self._row['org_id'] or "",
            'epoch': self._row['epoch'] or "",
            'cookie': self._row['cookie'] or "",
            'build-time': _dbtime2timestamp(self._row['build_time']),
            'last-modified': _dbtime2timestamp(self._row['last_modified']),
        }
        for attr in attrs:
            attr_dict[attr.replace('_', '-')] = self._row[attr]
        if self._row['checksum_type'] == 'md5':
            # compatibility with older satellite
            attr_dict['md5sum'] = self._row['checksum']
        return attr_dict

    def set_iterator(self):
        """Build the ordered list of child-element dumpers."""
        arr = []
        # Simple text children copied straight from the row.
        mappings = [
            ('rhn-package-summary', 'summary'),
            ('rhn-package-description', 'description'),
            ('rhn-package-vendor', 'vendor'),
            ('rhn-package-copyright', 'copyright'),
            ('rhn-package-header-sig', 'header_sig'),
            ('rhn-package-header-start', 'header_start'),
            ('rhn-package-header-end', 'header_end')
        ]
        for k, v in mappings:
            arr.append(SimpleDumper(self._writer, k, self._row[v]))
        # checksums
        checksum_arr = [{'type': self._row['checksum_type'],
                         'value': self._row['checksum']}]
        arr.append(_ChecksumDumper(self._writer,
                                   data_iterator=ArrayIterator(checksum_arr)))
        # Changelog entries with formatted timestamps.
        h = rhnSQL.prepare("""
            select
                name, text,
                TO_CHAR(time, 'YYYYMMDDHH24MISS') as time
            from rhnPackageChangeLog
            where package_id = :package_id
        """)
        h.execute(package_id=self._row['id'])
        arr.append(_ChangelogDumper(self._writer, data_iterator=h))
        # Dependency information
        # One (table, container tag, entry tag) triple per dependency kind;
        # each shares the same capability-join query shape.
        mappings = [
            ['rhnPackageRequires', 'rhn-package-requires', 'rhn-package-requires-entry'],
            ['rhnPackageProvides', 'rhn-package-provides', 'rhn-package-provides-entry'],
            ['rhnPackageConflicts', 'rhn-package-conflicts', 'rhn-package-conflicts-entry'],
            ['rhnPackageObsoletes', 'rhn-package-obsoletes', 'rhn-package-obsoletes-entry'],
            ['rhnPackageRecommends', 'rhn-package-recommends', 'rhn-package-recommends-entry'],
            ['rhnPackageSuggests', 'rhn-package-suggests', 'rhn-package-suggests-entry'],
            ['rhnPackageSupplements', 'rhn-package-supplements', 'rhn-package-supplements-entry'],
            ['rhnPackageEnhances', 'rhn-package-enhances', 'rhn-package-enhances-entry'],
            ['rhnPackageBreaks', 'rhn-package-breaks', 'rhn-package-breaks-entry'],
            ['rhnPackagePredepends', 'rhn-package-predepends', 'rhn-package-predepends-entry'],
        ]
        for table_name, container_name, entry_name in mappings:
            h = rhnSQL.prepare("""
                select pc.name, pc.version, pd.sense
                from %s pd, rhnPackageCapability pc
                where pd.capability_id = pc.id
                and pd.package_id = :package_id
            """ % table_name)
            h.execute(package_id=self._row['id'])
            arr.append(_DependencyDumper(self._writer, data_iterator=h,
                                         container_name=container_name,
                                         entry_name=entry_name))
        # Files
        h = rhnSQL.prepare("""
            select
                pc.name, pf.device, pf.inode, pf.file_mode, pf.username,
                pf.groupname, pf.rdev, pf.file_size,
                TO_CHAR(mtime, 'YYYYMMDDHH24MISS') mtime,
                c.checksum_type as "checksum-type",
                c.checksum, pf.linkto, pf.flags, pf.verifyflags, pf.lang
            from rhnPackageFile pf
            left join rhnChecksumView c
            on pf.checksum_id = c.id,
            rhnPackageCapability pc
            where pf.capability_id = pc.id
            and pf.package_id = :package_id
        """)
        h.execute(package_id=self._row['id'])
        arr.append(_PackageFilesDumper(self._writer, data_iterator=h))
        return ArrayIterator(arr)
class PackagesDumper(BaseSubelementDumper, BaseQueryDumper):
    """<rhn-packages> container; rows come from a SQL iterator and are
    rendered by _PackageDumper."""
    tag_name = 'rhn-packages'
    subelement_dumper_class = _PackageDumper

    def set_iterator(self):
        # Disambiguate the diamond: use the query-based iterator.
        return BaseQueryDumper.set_iterator(self)
##
class ShortPackageEntryDumper(BaseChecksumRowDumper):
    """Render one <rhn-package-short> element (NEVRA, size, timestamps)."""
    tag_name = 'rhn-package-short'

    def set_attributes(self):
        row = self._row
        attr = {
            'id': "rhn-package-%s" % row['id'],
            'name': row['name'],
            'version': row['version'],
            'release': row['release'],
            'epoch': row['epoch'] or "",
            'package-arch': row['package_arch'],
            'package-size': row['package_size'],
            'last-modified': _dbtime2timestamp(row['last_modified']),
            'org-id': row['org_id'] or "",
        }
        # compatibility with older satellite
        if row['checksum_type'] == 'md5':
            attr['md5sum'] = row['checksum']
        return attr


class ShortPackagesDumper(BaseSubelementDumper, BaseQueryDumper):
    """<rhn-packages-short> container driven by a SQL iterator."""
    tag_name = 'rhn-packages-short'
    subelement_dumper_class = ShortPackageEntryDumper

    def set_iterator(self):
        # Disambiguate the diamond: use the query-based iterator.
        return BaseQueryDumper.set_iterator(self)
##
class SourcePackagesDumper(BaseQueryDumper):
    """<rhn-source-packages> container; one empty <rhn-source-package>
    element per cursor row."""
    tag_name = 'rhn-source-packages'

    def dump_subelement(self, data):
        attributes = {}
        # Columns copied verbatim (underscores become dashes).  Note the
        # plain 'id' copied here is overwritten with the prefixed form
        # a few lines below.
        attrs = [
            "id", "source_rpm", "package_group", "rpm_version",
            "payload_size", "build_host", "sigchecksum_type", "sigchecksum", "vendor",
            "cookie", "package_size", "checksum_type", "checksum"
        ]
        for attr in attrs:
            attributes[attr.replace('_', '-')] = data[attr]
        attributes['id'] = "rhn-source-package-%s" % data['id']
        attributes['build-time'] = _dbtime2timestamp(data['build_time'])
        attributes['last-modified'] = _dbtime2timestamp(data['last_modified'])
        d = EmptyDumper(self._writer, 'rhn-source-package',
                        attributes=attributes)
        d.dump()
##
class _ChecksumDumper(BaseDumper):
    """<checksums> container; one <checksum type=... value=...> per row."""
    tag_name = 'checksums'

    def dump_subelement(self, data):
        attrs = {'type': data['type'], 'value': data['value']}
        EmptyDumper(self._writer, 'checksum', attributes=attrs).dump()
##
class _ChangelogEntryDumper(BaseRowDumper):
    """Render one <rhn-package-changelog-entry>: name, text and time."""
    tag_name = 'rhn-package-changelog-entry'

    def set_iterator(self):
        writer = self._writer
        entries = [
            SimpleDumper(writer, 'rhn-package-changelog-entry-name',
                         self._row['name']),
            SimpleDumper(writer, 'rhn-package-changelog-entry-text',
                         self._row['text']),
            SimpleDumper(writer, 'rhn-package-changelog-entry-time',
                         _dbtime2timestamp(self._row['time'])),
        ]
        return ArrayIterator(entries)


class _ChangelogDumper(BaseSubelementDumper):
    """<rhn-package-changelog> container element."""
    tag_name = 'rhn-package-changelog'
    subelement_dumper_class = _ChangelogEntryDumper
##
class _DependencyDumper(BaseDumper):
    """Generic dumper for package dependency lists.

    One class serves every dependency table: the container tag and the
    per-entry tag are supplied by the caller.
    """

    def __init__(self, writer, data_iterator, container_name, entry_name):
        self.tag_name = container_name
        self.entry_name = entry_name
        BaseDumper.__init__(self, writer, data_iterator=data_iterator)

    def dump_subelement(self, data):
        attrs = {
            'name': data['name'],
            'version': data['version'] or "",
            'sense': data['sense'],
        }
        EmptyDumper(self._writer, self.entry_name, attributes=attrs).dump()
# Files
class _PackageFilesDumper(BaseDumper):
    """<rhn-package-files> container; one <rhn-package-file> per row.

    Normalizes the row in place (NULLs become empty strings, mtime becomes
    a timestamp) and then uses the row dict directly as attributes.
    """
    tag_name = 'rhn-package-files'

    def dump_subelement(self, data):
        data['mtime'] = _dbtime2timestamp(data['mtime'])
        for key in ('checksum-type', 'checksum', 'linkto', 'lang'):
            data[key] = data[key] or ""
        if data['checksum-type'] in ('md5', ''):
            # generate md5="..." attribute
            # for compatibility with older satellites
            data['md5'] = data['checksum']
        EmptyDumper(self._writer, 'rhn-package-file',
                    attributes=data).dump()
# Errata
class _ErratumDumper(BaseRowDumper):
    """Render one full <rhn-erratum> element: cross-references to channels,
    packages and CVEs as attributes, plus advisory text fields, dates,
    keywords, buglist and errata files as children."""
    tag_name = 'rhn-erratum'

    def set_attributes(self):
        # Channels this erratum is published in.
        h = rhnSQL.prepare("""
            select c.label
            from rhnChannelErrata ec, rhnChannel c
            where ec.channel_id = c.id
            and ec.errata_id = :errata_id
        """)
        h.execute(errata_id=self._row['id'])
        channels = [x['label'] for x in h.fetchall_dict() or []]
        # Packages shipped by the erratum, referenced by exported id.
        h = rhnSQL.prepare("""
            select ep.package_id
            from rhnErrataPackage ep
            where ep.errata_id = :errata_id
        """)
        h.execute(errata_id=self._row['id'])
        packages = ["rhn-package-%s" % x['package_id'] for x in
                    h.fetchall_dict() or []]
        # CVE names attached to the erratum.
        h = rhnSQL.prepare("""
            select c.name cve
            from rhnErrataCVE ec, rhnCVE c
            where ec.errata_id = :errata_id
            and ec.cve_id = c.id
        """)
        h.execute(errata_id=self._row['id'])
        cves = [x['cve'] for x in h.fetchall_dict() or []]
        return {
            'id': 'rhn-erratum-%s' % self._row['id'],
            'org_id': self._row['org_id'] or "",
            'advisory': self._row['advisory'],
            'channels': ' '.join(channels),
            'packages': ' '.join(packages),
            'cve-names': ' '.join(cves),
        }

    # Extra SQL appended to the errata-file query in set_iterator below;
    # presumably overridden in subclasses to narrow by file type - TODO
    # confirm against the subclass definitions.
    type_id_column = ""

    def set_iterator(self):
        arr = []
        # Text children: (tag, row column, max length) triples.
        mappings = [
            ('rhn-erratum-advisory-name', 'advisory_name', 100),
            ('rhn-erratum-advisory-rel', 'advisory_rel', 32),
            ('rhn-erratum-advisory-type', 'advisory_type', 32),
            ('rhn-erratum-product', 'product', 64),
            ('rhn-erratum-description', 'description', 4000),
            ('rhn-erratum-synopsis', 'synopsis', 4000),
            ('rhn-erratum-topic', 'topic', 4000),
            ('rhn-erratum-solution', 'solution', 4000),
            ('rhn-erratum-refers-to', 'refers_to', 4000),
            ('rhn-erratum-notes', 'notes', 4000),
            ('rhn-erratum-errata-from', 'errata_from', 127),
        ]
        for k, v, b in mappings:
            arr.append(SimpleDumper(self._writer, k, self._row[v] or "", b))
        # Date children, converted to timestamps.
        arr.append(SimpleDumper(self._writer, 'rhn-erratum-issue-date',
                                _dbtime2timestamp(self._row['issue_date'])))
        arr.append(SimpleDumper(self._writer, 'rhn-erratum-update-date',
                                _dbtime2timestamp(self._row['update_date'])))
        arr.append(SimpleDumper(self._writer, 'rhn-erratum-last-modified',
                                _dbtime2timestamp(self._row['last_modified'])))
        # Keywords.
        h = rhnSQL.prepare("""
            select keyword
            from rhnErrataKeyword
            where errata_id = :errata_id
        """)
        h.execute(errata_id=self._row['id'])
        arr.append(_ErratumKeywordDumper(self._writer, data_iterator=h))
        # Bug list.
        h = rhnSQL.prepare("""
            select bug_id, summary, href
            from rhnErrataBuglist
            where errata_id = :errata_id
        """)
        h.execute(errata_id=self._row['id'])
        arr.append(_ErratumBuglistDumper(self._writer, data_iterator=h))
        # Errata files; %s receives self.type_id_column (empty by default).
        _query_errata_file_info = """
            select ef.id errata_file_id, c.checksum_type, c.checksum,
                ef.filename, eft.label as type,
                efp.package_id, efps.package_id as source_package_id
            from rhnErrataFile ef left outer join rhnErrataFilePackage efp on ef.id = efp.errata_file_id
                left outer join rhnErrataFilePackageSource efps on ef.id = efps.errata_file_id,
                rhnErrataFileType eft, rhnChecksumView c
            where ef.errata_id = :errata_id
            and ef.type = eft.id
            and ef.checksum_id = c.id
            %s
        """
        h = rhnSQL.prepare(_query_errata_file_info % self.type_id_column)
        h.execute(errata_id=self._row['id'])
        arr.append(_ErratumFilesDumper(self._writer, data_iterator=h))
        return ArrayIterator(arr)
class ErrataDumper(BaseSubelementDumper):
    """<rhn-errata> container; children rendered by _ErratumDumper."""
    tag_name = 'rhn-errata'
    subelement_dumper_class = _ErratumDumper

    def set_iterator(self):
        if not self._iterator:
            raise NotImplementedError("To be overridden in a child class")
        return self._iterator
class _ErratumKeywordDumper(BaseDumper):
    """<rhn-erratum-keywords> container; one text child per keyword."""
    tag_name = 'rhn-erratum-keywords'

    def dump_subelement(self, data):
        SimpleDumper(self._writer, 'rhn-erratum-keyword',
                     data['keyword']).dump()


class _ErratumBugDumper(BaseRowDumper):
    """Render one <rhn-erratum-bug>: id, summary and link."""
    tag_name = 'rhn-erratum-bug'

    def set_iterator(self):
        writer = self._writer
        return ArrayIterator([
            SimpleDumper(writer, 'rhn-erratum-bug-id', self._row['bug_id']),
            SimpleDumper(writer, 'rhn-erratum-bug-summary',
                         self._row['summary'] or ""),
            SimpleDumper(writer, 'rhn-erratum-bug-href', self._row['href']),
        ])


class _ErratumBuglistDumper(BaseSubelementDumper):
    """<rhn-erratum-bugs> container element."""
    tag_name = 'rhn-erratum-bugs'
    subelement_dumper_class = _ErratumBugDumper
class _ErratumFileEntryDumper(BaseChecksumRowDumper):
    """Render one <rhn-erratum-file>: filename, type, channels and the
    package (or source package) it ships."""
    tag_name = 'rhn-erratum-file'

    def set_attributes(self):
        attributes = {
            # Capped at 4000 chars to match the column width used elsewhere.
            'filename': self._row['filename'][:4000],
            'type': self._row['type'],
        }
        # Older satellites expect an md5sum attribute when applicable.
        if self._row['checksum_type'] == 'md5':
            attributes['md5sum'] = self._row['checksum']
        # Compute the channels for this file
        h = rhnSQL.prepare("""
            select c.label
            from rhnErrataFileChannel efc, rhnChannel c
            where efc.errata_file_id = :errata_file_id
            and efc.channel_id = c.id
        """)
        h.execute(errata_file_id=self._row['errata_file_id'])
        channels = ' '.join(
            [x['label'] for x in h.fetchall_dict() or []])
        if channels:
            attributes['channels'] = channels
        # Get the package id or source_package_id
        if self._row['type'] == 'RPM':
            package_id = self._row['package_id']
            if package_id is not None:
                attributes['package'] = 'rhn-package-%s' % package_id
        elif self._row['type'] == 'SRPM':
            package_id = self._row['source_package_id']
            if package_id is not None:
                # NOTE(review): prefix here is 'rhn-package-source-%s' while
                # source packages elsewhere use 'rhn-source-package-%s' -
                # confirm against the export DTD before changing.
                attributes['source-package'] = 'rhn-package-source-%s' % package_id
        return attributes


class _ErratumFilesDumper(BaseSubelementDumper):
    """<rhn-erratum-files> container element."""
    tag_name = 'rhn-erratum-files'
    subelement_dumper_class = _ErratumFileEntryDumper
# Arches
class BaseArchesDumper(BaseDumper):
    """Base for architecture-table dumpers: iterates (id, label, name)
    rows of `table_name` and emits one empty `subelement_tag` element per
    row.  Subclasses override the two class attributes."""
    table_name = 'foo'
    subelement_tag = 'foo'

    def set_iterator(self):
        cursor = rhnSQL.prepare("""
            select id, label, name
            from %s
        """ % self.table_name)
        cursor.execute()
        return cursor

    def dump_subelement(self, data):
        attrs = {
            'id': "%s-id-%s" % (self.subelement_tag, data['id']),
            'label': data['label'],
            'name': data['name'],
        }
        EmptyDumper(self._writer, self.subelement_tag, attrs).dump()
class RestrictedArchesDumper(BaseArchesDumper):
    """Arch dumper joined with rhnArchType; optionally restricted to the
    'rpm' arch type."""

    def __init__(self, writer, data_iterator=None, rpm_arch_type_only=0):
        BaseArchesDumper.__init__(self, writer=writer,
                                  data_iterator=data_iterator)
        self.rpm_arch_type_only = rpm_arch_type_only

    def set_iterator(self):
        query_templ = """
            select aa.id, aa.label, aa.name,
                at.label arch_type_label, at.name arch_type_name
            from %s aa,
                rhnArchType at
            where aa.arch_type_id = at.id
            %s
        """
        if self.rpm_arch_type_only:
            restriction = "and at.label = 'rpm'"
        else:
            restriction = ""
        cursor = rhnSQL.prepare(query_templ % (self.table_name, restriction))
        cursor.execute()
        return cursor

    def dump_subelement(self, data):
        attrs = {
            'id': "%s-id-%s" % (self.subelement_tag, data['id']),
            'label': data['label'],
            'name': data['name'],
            'arch-type-label': data['arch_type_label'],
            'arch-type-name': data['arch_type_name'],
        }
        EmptyDumper(self._writer, self.subelement_tag, attrs).dump()
# Concrete arch dumpers: each just binds the container tag, the per-row
# tag and the backing table.
class ChannelArchesDumper(RestrictedArchesDumper):
    tag_name = 'rhn-channel-arches'
    subelement_tag = 'rhn-channel-arch'
    table_name = 'rhnChannelArch'


class PackageArchesDumper(RestrictedArchesDumper):
    tag_name = 'rhn-package-arches'
    subelement_tag = 'rhn-package-arch'
    table_name = 'rhnPackageArch'


class ServerArchesDumper(RestrictedArchesDumper):
    tag_name = 'rhn-server-arches'
    subelement_tag = 'rhn-server-arch'
    table_name = 'rhnServerArch'


# CPU arches have no arch type, hence the plain (unrestricted) base class.
class CPUArchesDumper(BaseArchesDumper):
    tag_name = 'rhn-cpu-arches'
    subelement_tag = 'rhn-cpu-arch'
    table_name = 'rhnCPUArch'
class RestrictedArchCompatDumper(BaseArchesDumper):
    """Base for arch-compatibility map dumpers.

    Subclasses provide the two queries and the per-row tag.  For the
    server-group map only, the queries contain a %s placeholder that
    receives an optional virt filter.
    """
    _query_rpm_arch_type_only = ""
    _query_arch_type_all = ""
    _subelement_tag = ""

    def __init__(self, writer, data_iterator=None, rpm_arch_type_only=0, virt_filter=0):
        BaseArchesDumper.__init__(self, writer=writer,
                                  data_iterator=data_iterator)
        self.rpm_arch_type_only = rpm_arch_type_only
        self.virt_filter = virt_filter

    def set_iterator(self):
        if self.rpm_arch_type_only:
            query = self._query_rpm_arch_type_only
        else:
            query = self._query_arch_type_all
        # Only the server-group map's query templates take the virt-filter
        # substitution; the other subclasses use complete statements.
        if self._subelement_tag == 'rhn-server-group-server-arch-compat':
            _virt_filter_sql = ""
            if self.virt_filter:
                _virt_filter_sql = """and sgt.label not like 'virt%'"""
            query = query % _virt_filter_sql
        cursor = rhnSQL.prepare(query)
        cursor.execute()
        return cursor

    def dump_subelement(self, data):
        # The row dict maps directly onto element attributes.
        EmptyDumper(self._writer, self._subelement_tag, data).dump()
class ServerPackageArchCompatDumper(RestrictedArchCompatDumper):
    """Server-arch / package-arch compatibility map (with preference)."""
    tag_name = 'rhn-server-package-arch-compatibility-map'
    _subelement_tag = 'rhn-server-package-arch-compat'

    # Variant restricted to arches of type 'rpm' on both sides.
    _query_rpm_arch_type_only = rhnSQL.Statement("""
        select sa.label "server-arch",
               pa.label "package-arch",
               spac.preference
          from rhnServerPackageArchCompat spac,
               rhnServerArch sa,
               rhnPackageArch pa,
               rhnArchType aas,
               rhnArchType aap
         where spac.server_arch_id = sa.id
           and spac.package_arch_id = pa.id
           and sa.arch_type_id = aas.id
           and aas.label = 'rpm'
           and pa.arch_type_id = aap.id
           and aap.label = 'rpm'
    """)

    # Unrestricted variant.
    _query_arch_type_all = rhnSQL.Statement("""
        select sa.label "server-arch",
               pa.label "package-arch",
               spac.preference
          from rhnServerPackageArchCompat spac,
               rhnServerArch sa,
               rhnPackageArch pa
         where spac.server_arch_id = sa.id
           and spac.package_arch_id = pa.id
    """)
class ServerChannelArchCompatDumper(RestrictedArchCompatDumper):
    """Server-arch / channel-arch compatibility map."""
    tag_name = 'rhn-server-channel-arch-compatibility-map'
    _subelement_tag = 'rhn-server-channel-arch-compat'

    # Variant restricted to arches of type 'rpm' on both sides.
    _query_rpm_arch_type_only = rhnSQL.Statement("""
        select sa.label "server-arch",
               ca.label "channel-arch"
          from rhnServerChannelArchCompat scac,
               rhnServerArch sa,
               rhnChannelArch ca,
               rhnArchType aas,
               rhnArchType aac
         where scac.server_arch_id = sa.id
           and scac.channel_arch_id = ca.id
           and sa.arch_type_id = aas.id
           and aas.label = 'rpm'
           and ca.arch_type_id = aac.id
           and aac.label = 'rpm'
    """)

    # Unrestricted variant.
    _query_arch_type_all = rhnSQL.Statement("""
        select sa.label "server-arch",
               ca.label "channel-arch"
          from rhnServerChannelArchCompat scac,
               rhnServerArch sa,
               rhnChannelArch ca
         where scac.server_arch_id = sa.id
           and scac.channel_arch_id = ca.id
    """)
class ChannelPackageArchCompatDumper(RestrictedArchCompatDumper):
    """Channel-arch / package-arch compatibility map."""
    tag_name = 'rhn-channel-package-arch-compatibility-map'
    _subelement_tag = 'rhn-channel-package-arch-compat'

    # Variant restricted to arches of type 'rpm' on both sides.
    _query_rpm_arch_type_only = rhnSQL.Statement("""
        select ca.label "channel-arch",
               pa.label "package-arch"
          from rhnChannelPackageArchCompat cpac,
               rhnChannelArch ca,
               rhnPackageArch pa,
               rhnArchType aac,
               rhnArchType aap
         where cpac.channel_arch_id = ca.id
           and cpac.package_arch_id = pa.id
           and ca.arch_type_id = aac.id
           and aac.label = 'rpm'
           and pa.arch_type_id = aap.id
           and aap.label = 'rpm'
    """)

    # Unrestricted variant.
    _query_arch_type_all = rhnSQL.Statement("""
        select ca.label "channel-arch",
               pa.label "package-arch"
          from rhnChannelPackageArchCompat cpac,
               rhnChannelArch ca,
               rhnPackageArch pa
         where cpac.channel_arch_id = ca.id
           and cpac.package_arch_id = pa.id
    """)
class ServerGroupTypeServerArchCompatDumper(RestrictedArchCompatDumper):
    """Server-group-type / server-arch compatibility map.

    Unlike its siblings, the queries here are plain strings (not
    rhnSQL.Statement) because the base class substitutes the optional
    virt-filter clause into the trailing %s placeholder.
    """
    tag_name = 'rhn-server-group-server-arch-compatibility-map'
    _subelement_tag = 'rhn-server-group-server-arch-compat'

    # Variant restricted to server arches of type 'rpm'.
    _query_rpm_arch_type_only = """
        select sgt.label "server-group-type",
               sa.label "server-arch"
          from rhnServerGroupType sgt,
               rhnServerArch sa,
               rhnArchType aas,
               rhnServerServerGroupArchCompat ssgac
         where ssgac.server_arch_id = sa.id
           and sa.arch_type_id = aas.id
           and aas.label = 'rpm'
           and ssgac.server_group_type = sgt.id
        %s
    """

    #_query_arch_type_all = rhnSQL.Statement("""
    # Unrestricted variant.
    _query_arch_type_all = """
        select sgt.label "server-group-type",
               sa.label "server-arch"
          from rhnServerGroupType sgt,
               rhnServerArch sa,
               rhnServerServerGroupArchCompat ssgac
         where ssgac.server_arch_id = sa.id
           and ssgac.server_group_type = sgt.id
        %s
    """
class BlacklistObsoletesDumper(BaseDumper):
    """Emit a deliberately empty <rhn-blacklist-obsoletes/> element, with
    an explanatory XML comment, for backward compatibility."""
    tag_name = 'rhn-blacklist-obsoletes'

    def dump(self):
        note = """\n<!-- This file is intentionally left empty.
Older Satellites and Spacewalks require this file to exist in the dump. -->\n"""
        stream = self._writer.stream
        stream.write(note)
        self._writer.empty_tag(self.tag_name)
class _KickstartableTreeDumper(BaseRowDumper):
    """Render one <rhn-kickstartable-tree> element plus its file list."""
    tag_name = 'rhn-kickstartable-tree'

    def set_attributes(self):
        # Every selected column except the numeric id becomes an attribute;
        # last-modified is converted to a Unix timestamp first.
        row_dict = self._row.copy()
        del row_dict['id']
        last_modified = row_dict['last-modified']
        row_dict['last-modified'] = _dbtime2timestamp(last_modified)
        return row_dict

    def set_iterator(self):
        """Return an iterator yielding the nested files dumper."""
        kstree_id = self._row['id']
        h = rhnSQL.prepare("""
            select relative_filename,
                c.checksum_type,
                c.checksum,
                file_size,
                TO_CHAR(last_modified, 'YYYYMMDDHH24MISS') "last-modified"
            from rhnKSTreeFile, rhnChecksumView c
            where kstree_id = :kstree_id
            and checksum_id = c.id
        """)
        h.execute(kstree_id=kstree_id)
        return ArrayIterator([_KickstartFilesDumper(self._writer, h)])
class KickstartableTreesDumper(BaseSubelementDumper, BaseQueryDumper):
    """<rhn-kickstartable-trees> container: every public (org-less)
    kickstart tree, joined with its channel and type/install labels."""
    tag_name = 'rhn-kickstartable-trees'
    subelement_dumper_class = _KickstartableTreeDumper
    iterator_query = """
        select kt.id,
               c.label channel,
               kt.base_path "base-path",
               kt.label,
               kt.boot_image "boot-image",
               ktt.name "kstree-type-name",
               ktt.label "kstree-type-label",
               kit.name "install-type-name",
               kit.label "install-type-label",
               TO_CHAR(kt.last_modified, 'YYYYMMDDHH24MISS') "last-modified"
          from rhnKickstartableTree kt,
               rhnKSTreeType ktt,
               rhnKSInstallType kit,
               rhnChannel c
         where kt.channel_id = c.id
           and ktt.id = kt.kstree_type
           and kit.id = kt.install_type
           and kt.org_id is NULL
    """

    def set_iterator(self):
        # Disambiguate the diamond: use the query-based iterator.
        return BaseQueryDumper.set_iterator(self)
class _KickstartFileEntryDumper(BaseChecksumRowDumper):
    """Render one <rhn-kickstart-file>: path, size, mtime and checksum."""
    tag_name = 'rhn-kickstart-file'

    def set_attributes(self):
        row = self._row
        attr = {
            'relative-path': row['relative_filename'],
            'file-size': row['file_size'],
            'last-modified': _dbtime2timestamp(row['last-modified']),
        }
        # Older satellites expect an explicit md5sum attribute.
        if row['checksum_type'] == 'md5':
            attr['md5sum'] = row['checksum']
        return attr


class _KickstartFilesDumper(BaseSubelementDumper):
    """<rhn-kickstart-files> container element."""
    tag_name = 'rhn-kickstart-files'
    subelement_dumper_class = _KickstartFileEntryDumper
def _dbtime2timestamp(val):
    """Convert a DB 'YYYYMMDDHH24MISS' time string to an integer timestamp."""
    return int(rhnLib.timestamp(val))
class ProductNamesDumper(BaseDumper):
    """<rhn-product-names> container: one empty element per product row."""
    tag_name = "rhn-product-names"
    iterator_query = 'select label, name from rhnProductName'

    def dump_subelement(self, data):
        # The (label, name) row dict maps directly onto element attributes.
        EmptyDumper(self._writer, 'rhn-product-name', data).dump()
| gpl-2.0 |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/OneLogin/Roles/ListAll.py | 5 | 2799 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListAll
# Retrieves a list of all roles.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListAll(Choreography):
    def __init__(self, temboo_session):
        """
        Create a new instance of the ListAll Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(ListAll, self).__init__(temboo_session, '/Library/OneLogin/Roles/ListAll')

    def new_input_set(self):
        # Factory for this Choreo's input container.
        return ListAllInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result set.
        return ListAllResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle used to poll an asynchronous Choreo execution.
        return ListAllChoreographyExecution(session, exec_id, path)
class ListAllInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ListAll
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by OneLogin.)
        """
        # Delegates storage to the generic InputSet keyed by input name.
        super(ListAllInputSet, self)._set_input('APIKey', value)
class ListAllResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the ListAll Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE: the parameter name shadows the builtin ``str``; kept as-is
        # because this wrapper is code-generated and callers may rely on it.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from OneLogin.)
        """
        # Returns None when the execution produced no "Response" output.
        return self._output.get('Response', None)
class ListAllChoreographyExecution(ChoreographyExecution):
    # Execution handle: builds the matching result set when results arrive.
    def _make_result_set(self, response, path):
        return ListAllResultSet(response, path)
| apache-2.0 |
BaladiDogGames/baladidoggames.github.io | mingw/bin/lib/distutils/command/install_scripts.py | 241 | 2068 | """distutils.command.install_scripts
Implements the Distutils 'install_scripts' command, for installing
Python scripts."""
# contributed by Bastian Kleineidam
__revision__ = "$Id$"
import os
from distutils.core import Command
from distutils import log
from stat import ST_MODE
class install_scripts (Command):
    """Distutils command: copy built scripts into the install directory.

    NOTE: this is Python 2 source (``0555``/``07777`` octal literals).
    """

    description = "install scripts (Python or otherwise)"

    user_options = [
        ('install-dir=', 'd', "directory to install scripts to"),
        ('build-dir=','b', "build directory (where to install from)"),
        ('force', 'f', "force installation (overwrite existing files)"),
        ('skip-build', None, "skip the build steps"),
    ]

    boolean_options = ['force', 'skip-build']

    def initialize_options (self):
        # All options default to None/0 and are resolved in finalize_options.
        self.install_dir = None
        self.force = 0
        self.build_dir = None
        self.skip_build = None

    def finalize_options (self):
        # Inherit unset values from the 'build' and 'install' commands.
        self.set_undefined_options('build', ('build_scripts', 'build_dir'))
        self.set_undefined_options('install',
                                   ('install_scripts', 'install_dir'),
                                   ('force', 'force'),
                                   ('skip_build', 'skip_build'),
                                  )

    def run (self):
        """Build (unless skipped), copy the scripts, and mark them executable."""
        if not self.skip_build:
            self.run_command('build_scripts')
        self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
        if os.name == 'posix':
            # Set the executable bits (owner, group, and world) on
            # all the scripts we just installed.
            for file in self.get_outputs():
                if self.dry_run:
                    log.info("changing mode of %s", file)
                else:
                    # Add r-x for all, then mask to permission bits only
                    # (Python 2 octal literals).
                    mode = ((os.stat(file)[ST_MODE]) | 0555) & 07777
                    log.info("changing mode of %s to %o", file, mode)
                    os.chmod(file, mode)

    def get_inputs (self):
        # Scripts declared in setup(); empty list when none.
        return self.distribution.scripts or []

    def get_outputs(self):
        # Files produced by run(); empty list before run() executes.
        return self.outfiles or []

# class install_scripts
| mit |
uwafsl/MissionPlanner | Lib/lib2to3/fixes/fix_map.py | 61 | 3153 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
exists a 'from future_builtins import map' statement in the top-level
namespace.
As a special case, map(None, X) is changed into list(X). (This is
necessary because the semantics are changed in this case -- the new
map(None, X) is equivalent to [(x,) for x in X].)
We avoid the transformation (except for the special case mentioned
above) if the map() call is directly contained in iter(<>), list(<>),
tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
NOTE: This is still not correct if the original code was depending on
map(F, X, Y, ...) to go on until the longest argument is exhausted,
substituting None for missing values -- like zip(), it now stops as
soon as the shortest argument is exhausted.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
from ..pygram import python_symbols as syms
class FixMap(fixer_base.ConditionalFix):
    """2to3 fixer: wrap map(...) in list(...) where the py3 iterator
    semantics would change behavior (see module docstring)."""
    BM_compatible = True

    # Three alternatives: map(None, X); map(lambda ...: ..., it); generic map(...).
    PATTERN = """
    map_none=power<
        'map'
        trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
    >
    |
    map_lambda=power<
        'map'
        trailer<
            '('
            arglist<
                lambdef< 'lambda'
                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
                >
                ','
                it=any
            >
            ')'
        >
    >
    |
    power<
        'map' trailer< '(' [arglist=any] ')' >
    >
    """

    # Skip files that did `from future_builtins import map`.
    skip_on = 'future_builtins.map'

    def transform(self, node, results):
        if self.should_skip(node):
            return
        if node.parent.type == syms.simple_stmt:
            # A bare map(...) statement relies on eager evaluation for its
            # side effects; wrapping in list() preserves that, with a warning.
            self.warning(node, "You should use a for loop here")
            new = node.clone()
            new.prefix = u""
            new = Call(Name(u"list"), [new])
        elif "map_lambda" in results:
            # map(lambda p: expr, it)  ->  [expr for p in it]
            new = ListComp(results["xp"].clone(),
                           results["fp"].clone(),
                           results["it"].clone())
        else:
            if "map_none" in results:
                # map(None, X)  ->  list(X)  (single-argument case only)
                new = results["arg"].clone()
            else:
                if "arglist" in results:
                    args = results["arglist"]
                    if args.type == syms.arglist and \
                       args.children[0].type == token.NAME and \
                       args.children[0].value == "None":
                        # map(None, X, Y, ...) is not convertible: py3 zip-style
                        # truncation differs from py2 None-padding.
                        self.warning(node, "cannot convert map(None, ...) "
                                     "with multiple arguments because map() "
                                     "now truncates to the shortest sequence")
                        return
                if in_special_context(node):
                    # Already consumed by iter()/list()/for/... -> no wrap needed.
                    return None
                new = node.clone()
                new.prefix = u""
                new = Call(Name(u"list"), [new])
        new.prefix = node.prefix
        return new
| gpl-3.0 |
xs2maverick/adhocracy3.mercator | src/adhocracy_core/adhocracy_core/messaging/test_init.py | 2 | 11153 | from unittest.mock import Mock
from pytest import fixture
from pytest import mark
from pytest import raises
from pyramid import testing
@fixture
def integration(config):
    """Wire up the packages the messaging integration tests need."""
    config.include('pyramid_mailer.testing')
    config.include('pyramid_mako')
    config.include('adhocracy_core.content')
    config.include('adhocracy_core.sheets.metadata')
    config.include('adhocracy_core.messaging')
@fixture
def request_(registry):
    """A DummyRequest bound to the test registry."""
    request = testing.DummyRequest()
    request.registry = registry
    return request
@mark.usefixtures('integration')
class TestSendMail:
    """Exercise Messenger.send_mail against the pyramid_mailer test outbox."""

    def test_send_mail_with_body_and_html(self, registry, request_):
        mailer = registry.messenger.mailer
        registry.messenger.send_mail(subject='Test mail',
                                     recipients=['user@example.org'],
                                     sender='admin@example.com',
                                     body='Blah!',
                                     request=request_,
                                     html='<p>Bäh!</p>')
        msg = str(mailer.outbox[0].to_message())
        assert 'Test mail' in msg
        assert 'Blah' in msg
        # Supplying html= must add a text/html MIME part.
        assert 'Content-Type: text/html' in msg

    def test_send_mail_with_body(self, registry, request_):
        mailer = registry.messenger.mailer
        registry.messenger.send_mail(subject='Test mail',
                                     recipients=['user@example.org'],
                                     sender='admin@example.com',
                                     request=request_,
                                     body='Blah!')
        msg = str(mailer.outbox[0].to_message())
        # Plain-body mail must not contain an HTML part.
        assert 'Content-Type: text/html' not in msg

    def test_send_mail_without_body_and_html(self, registry, request_):
        # At least one of body/html is required.
        with raises(ValueError):
            registry.messenger.send_mail(subject='Test mail',
                                         recipients=['user@example.org'],
                                         request=request_,
                                         sender='admin@example.com')

    def test_send_mail_no_recipient(self, registry, request_):
        # Recipients are mandatory as well.
        with raises(ValueError):
            registry.messenger.send_mail(subject='Test mail',
                                         recipients=None,
                                         sender='admin@example.com',
                                         body='Blah!',
                                         request=request_,
                                         html='<p>Bäh!</p>')
class TestSendMailToQueue:
    def test_send_mail_to_queue(self, config, registry, request_):
        """With adhocracy.use_mail_queue enabled, mail lands in the queue
        instead of being delivered to the outbox directly."""
        config.include('pyramid_mailer.testing')
        config.include('adhocracy_core.content')
        # Must be set before adhocracy_core.messaging is included so the
        # messenger is created in queue mode.
        registry.settings['adhocracy.use_mail_queue'] = 'true'
        config.include('adhocracy_core.messaging')
        mailer = registry.messenger.mailer
        registry.messenger.send_mail(subject='Test mail',
                                     recipients=['user@example.org'],
                                     sender='admin@example.com',
                                     body='Blah!',
                                     request=request_,
                                     html='<p>Bäh!</p>')
        assert len(mailer.queue) == 1
        assert len(mailer.outbox) == 0
def _msg_to_str(msg):
"""Convert an email message into a string."""
# The DummyMailer is too stupid to use a default sender, hence we add
# one manually, if necessary
if msg.sender is None:
msg.sender = 'support@unconfigured.domain'
msgtext = str(msg.to_message())
# Undo quoted-printable encoding of spaces for convenient testing
return msgtext.replace('=20', ' ')
def mock_get_sheet_field(context, sheet, field_name, registry):
    """Test stand-in for ``get_sheet_field``: plain attribute access.

    ``sheet`` and ``registry`` are accepted only for signature
    compatibility and are ignored.
    """
    return getattr(context, field_name)
@mark.usefixtures('integration')
class TestSendAbuseComplaint:
    """Abuse complaints go to the configured abuse handler address."""

    def test_send_abuse_complaint_with_user(self, monkeypatch, registry,
                                            request_):
        from adhocracy_core import messaging
        from adhocracy_core.resources.principal import IUser
        # Replace sheet lookups with plain attribute access on the mock.
        monkeypatch.setattr(messaging, 'get_sheet_field', mock_get_sheet_field)
        user = Mock(spec=IUser)
        user.name = 'Alex User'
        mailer = registry.messenger.mailer
        messenger = registry.messenger
        messenger.abuse_handler_mail = 'abuse_handler@unconfigured.domain'
        url = 'http://localhost/blablah'
        remark = 'Too much blah!'
        messenger.send_abuse_complaint(url=url, remark=remark, user=user,
                                       request=request_)
        msgtext = _msg_to_str(mailer.outbox[0])
        assert messenger.abuse_handler_mail in msgtext
        assert url in msgtext
        assert remark in msgtext
        # Named complainants are identified in the mail body.
        assert 'sent by user Alex User' in msgtext

    def test_send_abuse_complaint_without_user(self, registry, request_):
        mailer = registry.messenger.mailer
        messenger = registry.messenger
        messenger.abuse_handler_mail = 'abuse_handler@unconfigured.domain'
        url = 'http://localhost/blablah'
        remark = 'Too much blah!'
        messenger.send_abuse_complaint(url=url, remark=remark, user=None,
                                       request=request_)
        msgtext = _msg_to_str(mailer.outbox[0])
        # Anonymous complaints are labelled as such.
        assert 'sent by an anonymous user' in msgtext
@mark.usefixtures('integration')
class TestSendMessageToUser:
    def test_send_message_to_user(self, monkeypatch, registry, request_):
        """A user-to-user message carries the sender's address and a
        subject built from the sender's name and the given title."""
        from adhocracy_core import messaging
        from adhocracy_core.resources.principal import IUser
        recipient = Mock(spec=IUser)
        recipient.email = 'recipient@example.org'
        sender = Mock(spec=IUser)
        sender.name = 'username'
        sender.email = 'sender@example.org'
        # Second assignment deliberately overrides the first name value.
        sender.name = 'Sandy Sender'
        monkeypatch.setattr(messaging, 'get_sheet_field', mock_get_sheet_field)
        mailer = registry.messenger.mailer
        messenger = registry.messenger
        messenger.send_message_to_user(
            recipient=recipient,
            title='Important Adhocracy notice',
            text='Surprisingly enough, all is well.',
            request=request_,
            from_user=sender)
        msgtext = _msg_to_str(mailer.outbox[0])
        assert 'From: sender@example.org' in msgtext
        assert 'Subject: [Adhocracy] Message from Sandy Sender: Important Adhocracy notice' in msgtext
        assert 'To: recipient@example.org' in msgtext
class TestSendRegistrationMail:
    """Registration mail contains the activation path."""

    @fixture
    def registry(self, config):
        config.include('pyramid_mailer.testing')
        return config.registry

    @fixture
    def inst(self, registry):
        from . import Messenger
        return Messenger(registry)

    @fixture
    def user(self):
        user = testing.DummyResource(name = 'Anna Müller',
                                     email = 'anna@example.org')
        return user

    def test_send_registration_mail(self, inst, registry, user, request_):
        mailer = inst.mailer
        inst.send_registration_mail(user, '/activate/91X', request=request_)
        msg = mailer.outbox[0]
        # The DummyMailer is too stupid to use a default sender, hence we add
        # one manually
        msg.sender = 'support@unconfigured.domain'
        text = str(msg.to_message())
        # The activation path must appear in the rendered mail body.
        assert '/activate/91X' in text
class TestSendPasswordResetMail:
    """Password-reset mail uses the translation strings and a frontend
    reset URL built from the reset resource's name."""

    @fixture
    def registry(self, config):
        config.include('pyramid_mailer.testing')
        config.registry.settings['adhocracy.site_name'] = 'sitename'
        config.registry.settings['adhocracy.frontend_url'] = 'http://front.end'
        return config.registry

    @fixture
    def inst(self, registry):
        from . import Messenger
        return Messenger(registry)

    def test_send_password_reset_mail(self, inst, request_):
        # send_mail is mocked so only its call arguments are inspected.
        inst.send_mail = Mock()
        user = testing.DummyResource(name='Anna', email='anna@example.org')
        reset = testing.DummyResource(__name__='/reset')
        inst.send_password_reset_mail(user, reset, request=request_)
        assert inst.send_mail.call_args[1]['recipients'] == ['anna@example.org']
        assert inst.send_mail.call_args[1]['subject'] ==\
            'mail_reset_password_subject'
        assert inst.send_mail.call_args[1]['body'] == \
            'mail_reset_password_body_txt'
        assert inst.send_mail.call_args[1]['body'].mapping ==\
            {'user_name': 'Anna',
             'site_name': 'sitename',
             'reset_url': 'http://front.end/password_reset/?path=%252Freset'}
        assert inst.send_mail.call_args[1]['request'] == request_
class TestSendInvitationMail:
    """Invitation mail: default translation strings plus optional custom
    mako templates for subject and body."""

    @fixture
    def registry(self, config):
        config.include('pyramid_mako')
        config.include('pyramid_mailer.testing')
        config.registry.settings['adhocracy.site_name'] = 'sitename'
        config.registry.settings['adhocracy.frontend_url'] = 'http://front.end'
        return config.registry

    @fixture
    def inst(self, registry):
        from . import Messenger
        return Messenger(registry)

    def test_send_mail_with_password_reset_link(self, inst, request_):
        inst.send_mail = Mock()
        user = testing.DummyResource(name='Anna', email='anna@example.org')
        reset = testing.DummyResource(__name__='/reset')
        inst.send_invitation_mail(user, reset, request=request_)
        assert inst.send_mail.call_args[1]['recipients'] == ['anna@example.org']
        assert inst.send_mail.call_args[1]['subject'] ==\
            'mail_invitation_subject'
        assert inst.send_mail.call_args[1]['body'] == \
            'mail_invitation_body_txt'
        assert inst.send_mail.call_args[1]['body'].mapping ==\
            {'user_name': 'Anna',
             'site_name': 'sitename',
             'email': 'anna@example.org',
             'reset_url': 'http://front.end/password_reset/?path=%252Freset'}
        assert inst.send_mail.call_args[1]['request'] == request_

    def test_render_custom_subject(self, inst, request_):
        inst.send_mail = Mock()
        user = testing.DummyResource(name='Anna', email='anna@example.org')
        reset = testing.DummyResource(__name__='/reset')
        # A custom subject template is rendered instead of the default string.
        inst.send_invitation_mail(user, reset, request=request_,
                                  subject_tmpl='adhocracy_core:templates/invite_subject_sample.txt.mako')
        assert inst.send_mail.call_args[1]['subject'] == 'Welcome Anna to sitename.'

    def test_render_custom_body(self, inst, request_):
        inst.send_mail = Mock()
        user = testing.DummyResource(name='Anna', email='anna@example.org')
        reset = testing.DummyResource(__name__='/reset')
        # A custom body template is rendered instead of the default string.
        inst.send_invitation_mail(user, reset, request=request_,
                                  body_tmpl='adhocracy_core:templates/invite_body_sample.txt.mako')
        assert inst.send_mail.call_args[1]['body'] ==\
            'Hi Anna,\n'\
            'please reset your password here http://front.end/password_reset/?path=%252Freset to join sitename.'
| agpl-3.0 |
broferek/ansible | test/units/modules/network/fortios/test_fortios_spamfilter_mheader.py | 21 | 7959 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_spamfilter_mheader
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Patch the module's Connection class for every test in this file."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_spamfilter_mheader.Connection')
    return connection_class_mock
# Module-level handler shared by all tests; note this receives the fixture
# *function* object — the real Connection is patched per-test by the
# autouse fixture above, and the tests mock FortiOSHandler's methods anyway.
fos_instance = FortiOSHandler(connection_mock)
def test_spamfilter_mheader_creation(mocker):
    """state=present with a successful backend 'set' reports changed."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'spamfilter_mheader': {
            'comment': 'Optional comments.',
            'id': '4',
            'name': 'default_name_5'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_spamfilter_mheader.fortios_spamfilter(input_data, fos_instance)
    # The module must forward exactly these fields to the API.
    expected_data = {
        'comment': 'Optional comments.',
        'id': '4',
        'name': 'default_name_5'
    }
    set_method_mock.assert_called_with('spamfilter', 'mheader', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_spamfilter_mheader_creation_fails(mocker):
    """A backend error on 'set' is surfaced as is_error without a change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'spamfilter_mheader': {
            'comment': 'Optional comments.',
            'id': '4',
            'name': 'default_name_5'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_spamfilter_mheader.fortios_spamfilter(input_data, fos_instance)
    expected_data = {
        'comment': 'Optional comments.',
        'id': '4',
        'name': 'default_name_5'
    }
    set_method_mock.assert_called_with('spamfilter', 'mheader', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_spamfilter_mheader_removal(mocker):
    """state=absent with a successful backend 'delete' reports changed."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'spamfilter_mheader': {
            'comment': 'Optional comments.',
            'id': '4',
            'name': 'default_name_5'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_spamfilter_mheader.fortios_spamfilter(input_data, fos_instance)
    # mkey is derived inside the module, so only its presence is asserted.
    delete_method_mock.assert_called_with('spamfilter', 'mheader', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_spamfilter_mheader_deletion_fails(mocker):
    """A backend error on 'delete' is surfaced as is_error without a change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'spamfilter_mheader': {
            'comment': 'Optional comments.',
            'id': '4',
            'name': 'default_name_5'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_spamfilter_mheader.fortios_spamfilter(input_data, fos_instance)
    delete_method_mock.assert_called_with('spamfilter', 'mheader', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_spamfilter_mheader_idempotent(mocker):
    """A 404 from the backend means 'already in desired state': no error,
    no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'spamfilter_mheader': {
            'comment': 'Optional comments.',
            'id': '4',
            'name': 'default_name_5'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_spamfilter_mheader.fortios_spamfilter(input_data, fos_instance)
    expected_data = {
        'comment': 'Optional comments.',
        'id': '4',
        'name': 'default_name_5'
    }
    set_method_mock.assert_called_with('spamfilter', 'mheader', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_spamfilter_mheader_filter_foreign_attributes(mocker):
    """Attributes not in the module schema are stripped before the API call."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'spamfilter_mheader': {
            'random_attribute_not_valid': 'tag',
            'comment': 'Optional comments.',
            'id': '4',
            'name': 'default_name_5'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_spamfilter_mheader.fortios_spamfilter(input_data, fos_instance)
    # 'random_attribute_not_valid' must not reach the API payload.
    expected_data = {
        'comment': 'Optional comments.',
        'id': '4',
        'name': 'default_name_5'
    }
    set_method_mock.assert_called_with('spamfilter', 'mheader', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
AndreasHeger/alignlib | python/tests/test_Weightor.py | 1 | 2035 | # alignlib - a library for aligning protein sequences
#
# $Id: test_Alignment.py,v 1.3 2004/01/23 17:34:58 aheger Exp $
#
# Copyright (C) 2004 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import unittest, sys
from alignlib import *
class WeightorCase( unittest.TestCase ):
    """Base case: a trivial three-sequence all-'A' alignment used to
    exercise Weightor creation and weighting."""
    def setUp( self ):
        self.mWeightorFactory = makeWeightor
        self.mMali = makeMultipleAlignment()
        self.mMali.add( makeAlignatum("AAAAAAAAAAAAAAAAAAAA") )
        self.mMali.add( makeAlignatum("AAAAAAAAAAAAAAAAAAAA") )
        self.mMali.add( makeAlignatum("AAAAAAAAAAAAAAAAAAAA") )
    def testSetDefault(self):
        # Only checks that installing a fresh default weightor succeeds.
        setDefaultWeightor( self.mWeightorFactory() )
    def testWeight(self):
        weightor = getDefaultWeightor()
        # weightor.calculateWeights( self.mMali )
"""
class WeightorHenikoffCase( WeightorCase ):
def setUp( self ):
self.mWeightorFactory = makeWeightorHenikoff
class WeightorHenikoffKimmenCase( WeightorCase ):
def setUp( self ):
self.mWeightorFactory = makeWeightorHenikoffKimmen
"""
def suite():
    """Build the unittest suite for this module.

    Fix: the original added the undefined name ``WeightorTestCase`` (the
    class defined above is ``WeightorCase``), and passed a class where
    ``addTest`` expects a test — calling suite() raised ``NameError``.
    Load the case's tests explicitly instead.
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(WeightorCase))
    #suite.addTest(unittest.TestLoader().loadTestsFromTestCase(WeightorHenikoffCase))
    #suite.addTest(unittest.TestLoader().loadTestsFromTestCase(WeightorHenikoffKimmenCase))
    return suite
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
spacetelescope/stsci.tools | doc/source/conf.py | 1 | 7012 | # -*- coding: utf-8 -*-
#
# stsci.tools documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 7 13:09:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from stsci.tools import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions enabled for this documentation build.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.imgmath',
              'sphinx.ext.napoleon',
              'sphinx.ext.intersphinx',
              'sphinx.ext.autosummary',
              'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'stsci.tools'
copyright = u'2020, STScI'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
#html_static_path = ['_static']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = ['py-modindex']
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'stsci.toolsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
#latex_documents = [
# ('index', 'stsci.tools.tex', u'stsci.tools Documentation',
# u'SSB', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Cross-project reference targets for sphinx.ext.intersphinx.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
    # matplotlib lists a fallback inventory mirror hosted by astropy.
    'matplotlib': ('https://matplotlib.org/',
                   (None, 'http://data.astropy.org/intersphinx/matplotlib.inv')),
    'astropy': ('https://docs.astropy.org/en/stable/', None)
}
| bsd-3-clause |
aldryn/aldryn-mailchimp | aldryn_mailchimp/south_migrations/0002_auto__chg_field_subscriptionplugin_list_id.py | 2 | 2877 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Migrate list_id from a positive integer to a 20-char string."""
        # Changing field 'SubscriptionPlugin.list_id'
        db.alter_column('cmsplugin_subscriptionplugin', 'list_id', self.gf('django.db.models.fields.CharField')(max_length=20))
def backwards(self, orm):
# Changing field 'SubscriptionPlugin.list_id'
db.alter_column('cmsplugin_subscriptionplugin', 'list_id', self.gf('django.db.models.fields.PositiveIntegerField')())
models = {
'aldryn_mailchimp.subscriptionplugin': {
'Meta': {'object_name': 'SubscriptionPlugin', 'db_table': "'cmsplugin_subscriptionplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'list_id': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['aldryn_mailchimp'] | bsd-3-clause |
sysadminmatmoz/ingadhoc | account_interests/interest.py | 9 | 2049 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
class account_account_interest(models.Model):
    """Interest-rate configuration attached to an account.

    Each record binds an interest rate (valid within an optional date
    range) to an account, plus the account where computed interest is
    posted. Queried by ``account_account.get_active_interest_data``.
    """
    _name = "account.account.interest"
    _description = 'Account Account Interest'

    # Account the rate applies to; rows are removed with the account.
    account_id = fields.Many2one(
        'account.account',
        'Account',
        required=True,
        ondelete="cascade")
    # Account where the computed interest amount is booked.
    interest_account_id = fields.Many2one(
        'account.account',
        'Interest Account',
        required=True,
        domain=[('type', '!=', 'view')])
    # Optional analytic account for the interest posting.
    analytic_account_id = fields.Many2one(
        'account.analytic.account',
        'Analytic account',
        domain=[('type', '!=', 'view')])
    # Rate value; 4 decimal places (e.g. 0.0150 = 1.5%) — TODO confirm units.
    interest_rate = fields.Float(
        'Interest',
        required=True,
        digits=(7, 4))
    # Validity window; an empty date_to means "open-ended".
    date_from = fields.Date(
        'Date From',
        required=True)
    date_to = fields.Date('Date To')
class account_account(models.Model):
    """Extend accounts with their configured interest rates."""
    _inherit = "account.account"

    account_account_interest_ids = fields.One2many(
        'account.account.interest',
        'account_id',
        'Interest Rates')

    # Old-style (v7) API method: takes cr/uid explicitly.
    def get_active_interest_data(self, cr, uid, ids, dt_from, dt_to, context=None):
        """Return {account_id: account.account.interest browse record} for
        each account in *ids* that has an interest definition covering the
        period [dt_from, dt_to].

        A definition matches when it starts on/before *dt_from* and either
        ends on/after *dt_to* or has no end date. Only the first match per
        account (search order) is returned; accounts without a match are
        omitted from the result.
        """
        if context is None:
            context = {}
        interest_obj = self.pool.get('account.account.interest')
        res = {}
        for record_id in ids:
            # '|' makes the last two terms a disjunction: open-ended
            # definitions (date_to == False) also qualify.
            interest_domain = [
                ('account_id.id', '=', record_id),
                ('date_from', '<=', dt_from),
                '|', ('date_to', '>=', dt_to),
                ('date_to', '=', False)]
            interest_ids = interest_obj.search(
                cr, uid, interest_domain, context=context)
            if interest_ids:
                res[record_id] = interest_obj.browse(
                    cr, uid, interest_ids[0], context=context)
        return res
| agpl-3.0 |
Agent007/deepchem | examples/qm7/qm7b_DTNN.py | 1 | 1237 | from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
# NOTE(review): this import appears unused below and 'trans' is probably
# meant to be 'deepchem.trans' — verify before relying on it.
from trans import undo_transforms
# Fix seeds for reproducible runs.
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
# Load the QM7b dataset (quantum-mechanics regression benchmark).
tasks, datasets, transformers = dc.molnet.load_qm7b_from_mat()
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
# Batch size of models
batch_size = 50
# DTNN hyperparameters: embedding width, number of distance bins and the
# distance range they cover (in the dataset's units), hidden-layer width.
n_embedding = 30
n_distance = 51
distance_min = -1.
distance_max = 9.2
n_hidden = 15
model = dc.models.DTNNModel(
    len(tasks),
    n_embedding=n_embedding,
    n_hidden=n_hidden,
    n_distance=n_distance,
    distance_min=distance_min,
    distance_max=distance_max,
    output_activation=False,
    batch_size=batch_size,
    learning_rate=0.0001,
    use_queue=False,
    mode="regression")

# Fit trained model (single epoch — example script, not a full training run).
model.fit(train_dataset, nb_epoch=1)

print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| mit |
apache/incubator-airflow | tests/utils/test_task_group.py | 5 | 20614 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pendulum
import pytest
from airflow.models import DAG
from airflow.operators.dummy import DummyOperator
from airflow.utils.task_group import TaskGroup
from airflow.www.views import dag_edges, task_group_to_dict
EXPECTED_JSON = {
'id': None,
'value': {
'label': None,
'labelStyle': 'fill:#000;',
'style': 'fill:CornflowerBlue',
'rx': 5,
'ry': 5,
'clusterLabelPos': 'top',
},
'tooltip': '',
'children': [
{
'id': 'group234',
'value': {
'label': 'group234',
'labelStyle': 'fill:#000;',
'style': 'fill:CornflowerBlue',
'rx': 5,
'ry': 5,
'clusterLabelPos': 'top',
},
'tooltip': '',
'children': [
{
'id': 'group234.group34',
'value': {
'label': 'group34',
'labelStyle': 'fill:#000;',
'style': 'fill:CornflowerBlue',
'rx': 5,
'ry': 5,
'clusterLabelPos': 'top',
},
'tooltip': '',
'children': [
{
'id': 'group234.group34.task3',
'value': {
'label': 'task3',
'labelStyle': 'fill:#000;',
'style': 'fill:#e8f7e4;',
'rx': 5,
'ry': 5,
},
},
{
'id': 'group234.group34.task4',
'value': {
'label': 'task4',
'labelStyle': 'fill:#000;',
'style': 'fill:#e8f7e4;',
'rx': 5,
'ry': 5,
},
},
{
'id': 'group234.group34.downstream_join_id',
'value': {
'label': '',
'labelStyle': 'fill:#000;',
'style': 'fill:CornflowerBlue;',
'shape': 'circle',
},
},
],
},
{
'id': 'group234.task2',
'value': {
'label': 'task2',
'labelStyle': 'fill:#000;',
'style': 'fill:#e8f7e4;',
'rx': 5,
'ry': 5,
},
},
{
'id': 'group234.upstream_join_id',
'value': {
'label': '',
'labelStyle': 'fill:#000;',
'style': 'fill:CornflowerBlue;',
'shape': 'circle',
},
},
],
},
{
'id': 'task1',
'value': {
'label': 'task1',
'labelStyle': 'fill:#000;',
'style': 'fill:#e8f7e4;',
'rx': 5,
'ry': 5,
},
},
{
'id': 'task5',
'value': {
'label': 'task5',
'labelStyle': 'fill:#000;',
'style': 'fill:#e8f7e4;',
'rx': 5,
'ry': 5,
},
},
],
}
def test_build_task_group_context_manager():
    """TaskGroups created via ``with`` blocks nest correctly, prefix their
    children's task_ids, and render the expected graph JSON."""
    start = pendulum.parse("20200101")
    with DAG("test_build_task_group_context_manager", start_date=start) as dag:
        head = DummyOperator(task_id="task1")
        with TaskGroup("group234") as outer_group:
            _ = DummyOperator(task_id="task2")
            with TaskGroup("group34") as inner_group:
                _ = DummyOperator(task_id="task3")
                _ = DummyOperator(task_id="task4")
        tail = DummyOperator(task_id="task5")
        head >> outer_group
        inner_group >> tail

    inner_ids = {"group234.group34.task3", "group234.group34.task4"}
    # head fans out to everything directly inside group234.
    assert head.get_direct_relative_ids(upstream=False) == inner_ids | {"group234.task2"}
    # tail is fed only by the inner group's leaves.
    assert tail.get_direct_relative_ids(upstream=True) == inner_ids

    root = dag.task_group
    assert root.group_id is None
    assert root.is_root
    assert set(root.children.keys()) == {"task1", "group234", "task5"}
    assert inner_group.group_id == "group234.group34"
    assert task_group_to_dict(root) == EXPECTED_JSON
def test_build_task_group():
    """Building TaskGroups with explicit ``dag=``/``task_group=`` keyword
    arguments (no context managers) must produce the same structure as the
    context-manager syntax."""
    start = pendulum.parse("20200101")
    dag = DAG("test_build_task_group", start_date=start)

    entry = DummyOperator(task_id="task1", dag=dag)
    outer = TaskGroup("group234", dag=dag)
    _ = DummyOperator(task_id="task2", dag=dag, task_group=outer)
    inner = TaskGroup("group34", dag=dag, parent_group=outer)
    _ = DummyOperator(task_id="task3", dag=dag, task_group=inner)
    _ = DummyOperator(task_id="task4", dag=dag, task_group=inner)
    exit_task = DummyOperator(task_id="task5", dag=dag)

    entry >> outer
    inner >> exit_task

    assert task_group_to_dict(dag.task_group) == EXPECTED_JSON
def extract_node_id(node, include_label=False):
    """Recursively project a task-group node dict down to its ``id`` (and,
    when *include_label* is set, its ``label``), applying the same
    projection to any ``children``."""
    slim = {"id": node["id"]}
    if include_label:
        slim["label"] = node["value"]["label"]
    if "children" in node:
        slim["children"] = [
            extract_node_id(child, include_label=include_label)
            for child in node["children"]
        ]
    return slim
def test_build_task_group_with_prefix():
    """
    Tests that prefix_group_id turns on/off prefixing of task_id with group_id.
    """
    execution_date = pendulum.parse("20200101")
    with DAG("test_build_task_group_with_prefix", start_date=execution_date) as dag:
        task1 = DummyOperator(task_id="task1")
        # group234 opts out of prefixing, so its children keep bare ids and
        # nested groups restart the prefix chain from their own name.
        with TaskGroup("group234", prefix_group_id=False) as group234:
            task2 = DummyOperator(task_id="task2")
            with TaskGroup("group34") as group34:
                task3 = DummyOperator(task_id="task3")
                with TaskGroup("group4", prefix_group_id=False) as group4:
                    task4 = DummyOperator(task_id="task4")
        task5 = DummyOperator(task_id="task5")
        task1 >> group234
        group34 >> task5

    # Each id reflects the nearest prefixing ancestor only.
    assert task2.task_id == "task2"
    assert group34.group_id == "group34"
    assert task3.task_id == "group34.task3"
    assert group4.group_id == "group34.group4"
    assert task4.task_id == "task4"
    assert task5.task_id == "task5"
    # get_child_by_label resolves by the un-prefixed label.
    assert group234.get_child_by_label("task2") == task2
    assert group234.get_child_by_label("group34") == group34
    assert group4.get_child_by_label("task4") == task4

    assert extract_node_id(task_group_to_dict(dag.task_group), include_label=True) == {
        'id': None,
        'label': None,
        'children': [
            {
                'id': 'group234',
                'label': 'group234',
                'children': [
                    {
                        'id': 'group34',
                        'label': 'group34',
                        'children': [
                            {
                                'id': 'group34.group4',
                                'label': 'group4',
                                'children': [{'id': 'task4', 'label': 'task4'}],
                            },
                            {'id': 'group34.task3', 'label': 'task3'},
                            {'id': 'group34.downstream_join_id', 'label': ''},
                        ],
                    },
                    {'id': 'task2', 'label': 'task2'},
                    {'id': 'group234.upstream_join_id', 'label': ''},
                ],
            },
            {'id': 'task1', 'label': 'task1'},
            {'id': 'task5', 'label': 'task5'},
        ],
    }
def test_build_task_group_with_task_decorator():
    """
    Test that TaskGroup can be used with the @task decorator.
    """
    from airflow.operators.python import task

    @task
    def task_1():
        print("task_1")

    @task
    def task_2():
        return "task_2"

    @task
    def task_3():
        return "task_3"

    @task
    def task_4(task_2_output, task_3_output):
        print(task_2_output, task_3_output)

    @task
    def task_5():
        print("task_5")

    execution_date = pendulum.parse("20200101")
    with DAG("test_build_task_group_with_task_decorator", start_date=execution_date) as dag:
        tsk_1 = task_1()
        with TaskGroup("group234") as group234:
            tsk_2 = task_2()
            tsk_3 = task_3()
            # task_4 consumes the XComArgs of task_2/task_3, which also
            # creates the in-group dependencies checked below.
            tsk_4 = task_4(tsk_2, tsk_3)
        tsk_5 = task_5()
        tsk_1 >> group234 >> tsk_5

    # pylint: disable=no-member
    assert tsk_1.operator in tsk_2.operator.upstream_list
    assert tsk_1.operator in tsk_3.operator.upstream_list
    assert tsk_5.operator in tsk_4.operator.downstream_list
    # pylint: enable=no-member

    assert extract_node_id(task_group_to_dict(dag.task_group)) == {
        'id': None,
        'children': [
            {
                'id': 'group234',
                'children': [
                    {'id': 'group234.task_2'},
                    {'id': 'group234.task_3'},
                    {'id': 'group234.task_4'},
                    {'id': 'group234.upstream_join_id'},
                    {'id': 'group234.downstream_join_id'},
                ],
            },
            {'id': 'task_1'},
            {'id': 'task_5'},
        ],
    }

    # Group-level deps are routed through the synthetic join nodes.
    edges = dag_edges(dag)
    assert sorted((e["source_id"], e["target_id"]) for e in edges) == [
        ('group234.downstream_join_id', 'task_5'),
        ('group234.task_2', 'group234.task_4'),
        ('group234.task_3', 'group234.task_4'),
        ('group234.task_4', 'group234.downstream_join_id'),
        ('group234.upstream_join_id', 'group234.task_2'),
        ('group234.upstream_join_id', 'group234.task_3'),
        ('task_1', 'group234.upstream_join_id'),
    ]
def test_sub_dag_task_group():
    """
    Tests dag.sub_dag() updates task_group correctly.
    """
    execution_date = pendulum.parse("20200101")
    with DAG("test_test_task_group_sub_dag", start_date=execution_date) as dag:
        task1 = DummyOperator(task_id="task1")
        with TaskGroup("group234") as group234:
            _ = DummyOperator(task_id="task2")
            with TaskGroup("group34") as group34:
                _ = DummyOperator(task_id="task3")
                _ = DummyOperator(task_id="task4")
        with TaskGroup("group6") as group6:
            _ = DummyOperator(task_id="task6")
        task7 = DummyOperator(task_id="task7")
        task5 = DummyOperator(task_id="task5")

        task1 >> group234
        group34 >> task5
        group234 >> group6
        group234 >> task7

    # Keep only task5 and its upstream chain; group6/task7 must vanish.
    subdag = dag.sub_dag(task_ids_or_regex="task5", include_upstream=True, include_downstream=False)

    assert extract_node_id(task_group_to_dict(subdag.task_group)) == {
        'id': None,
        'children': [
            {
                'id': 'group234',
                'children': [
                    {
                        'id': 'group234.group34',
                        'children': [
                            {'id': 'group234.group34.task3'},
                            {'id': 'group234.group34.task4'},
                            {'id': 'group234.group34.downstream_join_id'},
                        ],
                    },
                    {'id': 'group234.upstream_join_id'},
                ],
            },
            {'id': 'task1'},
            {'id': 'task5'},
        ],
    }

    edges = dag_edges(subdag)
    assert sorted((e["source_id"], e["target_id"]) for e in edges) == [
        ('group234.group34.downstream_join_id', 'task5'),
        ('group234.group34.task3', 'group234.group34.downstream_join_id'),
        ('group234.group34.task4', 'group234.group34.downstream_join_id'),
        ('group234.upstream_join_id', 'group234.group34.task3'),
        ('group234.upstream_join_id', 'group234.group34.task4'),
        ('task1', 'group234.upstream_join_id'),
    ]

    subdag_task_groups = subdag.task_group.get_task_group_dict()
    assert subdag_task_groups.keys() == {None, "group234", "group234.group34"}

    # No group or task in the sub-DAG may reference anything pruned away.
    included_group_ids = {"group234", "group234.group34"}
    included_task_ids = {'group234.group34.task3', 'group234.group34.task4', 'task1', 'task5'}

    for task_group in subdag_task_groups.values():
        assert task_group.upstream_group_ids.issubset(included_group_ids)
        assert task_group.downstream_group_ids.issubset(included_group_ids)
        assert task_group.upstream_task_ids.issubset(included_task_ids)
        assert task_group.downstream_task_ids.issubset(included_task_ids)

    for task in subdag.task_group:
        assert task.upstream_task_ids.issubset(included_task_ids)
        assert task.downstream_task_ids.issubset(included_task_ids)
def test_dag_edges():
    """Exercise dag_edges() on a DAG mixing nested groups, group-to-group
    and group-to-task dependencies in both >> and << directions, and check
    the synthetic upstream/downstream join nodes in the resulting edges."""
    execution_date = pendulum.parse("20200101")
    with DAG("test_dag_edges", start_date=execution_date) as dag:
        task1 = DummyOperator(task_id="task1")
        with TaskGroup("group_a") as group_a:
            with TaskGroup("group_b") as group_b:
                task2 = DummyOperator(task_id="task2")
                task3 = DummyOperator(task_id="task3")
                task4 = DummyOperator(task_id="task4")
                task2 >> [task3, task4]
            task5 = DummyOperator(task_id="task5")
            task5 << group_b
        task1 >> group_a
        with TaskGroup("group_c") as group_c:
            task6 = DummyOperator(task_id="task6")
            task7 = DummyOperator(task_id="task7")
            task8 = DummyOperator(task_id="task8")
            [task6, task7] >> task8
        group_a >> group_c
        task5 >> task8
        task9 = DummyOperator(task_id="task9")
        task10 = DummyOperator(task_id="task10")
        group_c >> [task9, task10]
        with TaskGroup("group_d") as group_d:
            task11 = DummyOperator(task_id="task11")
            task12 = DummyOperator(task_id="task12")
            task11 >> task12
        group_d << group_c

    nodes = task_group_to_dict(dag.task_group)
    edges = dag_edges(dag)

    assert extract_node_id(nodes) == {
        'id': None,
        'children': [
            {
                'id': 'group_a',
                'children': [
                    {
                        'id': 'group_a.group_b',
                        'children': [
                            {'id': 'group_a.group_b.task2'},
                            {'id': 'group_a.group_b.task3'},
                            {'id': 'group_a.group_b.task4'},
                            {'id': 'group_a.group_b.downstream_join_id'},
                        ],
                    },
                    {'id': 'group_a.task5'},
                    {'id': 'group_a.upstream_join_id'},
                    {'id': 'group_a.downstream_join_id'},
                ],
            },
            {
                'id': 'group_c',
                'children': [
                    {'id': 'group_c.task6'},
                    {'id': 'group_c.task7'},
                    {'id': 'group_c.task8'},
                    {'id': 'group_c.upstream_join_id'},
                    {'id': 'group_c.downstream_join_id'},
                ],
            },
            {
                'id': 'group_d',
                'children': [
                    {'id': 'group_d.task11'},
                    {'id': 'group_d.task12'},
                    {'id': 'group_d.upstream_join_id'},
                ],
            },
            {'id': 'task1'},
            {'id': 'task10'},
            {'id': 'task9'},
        ],
    }

    # Direct task5 >> task8 stays a plain edge; group-level deps go through
    # join nodes (group_d only has an upstream join: nothing leaves it).
    assert sorted((e["source_id"], e["target_id"]) for e in edges) == [
        ('group_a.downstream_join_id', 'group_c.upstream_join_id'),
        ('group_a.group_b.downstream_join_id', 'group_a.task5'),
        ('group_a.group_b.task2', 'group_a.group_b.task3'),
        ('group_a.group_b.task2', 'group_a.group_b.task4'),
        ('group_a.group_b.task3', 'group_a.group_b.downstream_join_id'),
        ('group_a.group_b.task4', 'group_a.group_b.downstream_join_id'),
        ('group_a.task5', 'group_a.downstream_join_id'),
        ('group_a.task5', 'group_c.task8'),
        ('group_a.upstream_join_id', 'group_a.group_b.task2'),
        ('group_c.downstream_join_id', 'group_d.upstream_join_id'),
        ('group_c.downstream_join_id', 'task10'),
        ('group_c.downstream_join_id', 'task9'),
        ('group_c.task6', 'group_c.task8'),
        ('group_c.task7', 'group_c.task8'),
        ('group_c.task8', 'group_c.downstream_join_id'),
        ('group_c.upstream_join_id', 'group_c.task6'),
        ('group_c.upstream_join_id', 'group_c.task7'),
        ('group_d.task11', 'group_d.task12'),
        ('group_d.upstream_join_id', 'group_d.task11'),
        ('task1', 'group_a.upstream_join_id'),
    ]
def test_duplicate_group_id():
    """Any id collision — task vs group, group vs group, or a user task_id
    clashing with a group's synthetic join-node ids — must raise
    DuplicateTaskIdFound."""
    from airflow.exceptions import DuplicateTaskIdFound

    execution_date = pendulum.parse("20200101")

    # Group id colliding with an existing task id.
    with pytest.raises(DuplicateTaskIdFound, match=r".* 'task1' .*"):
        with DAG("test_duplicate_group_id", start_date=execution_date):
            _ = DummyOperator(task_id="task1")
            with TaskGroup("task1"):
                pass

    # Nested group reusing its (un-prefixed) parent's id.
    with pytest.raises(DuplicateTaskIdFound, match=r".* 'group1' .*"):
        with DAG("test_duplicate_group_id", start_date=execution_date):
            _ = DummyOperator(task_id="task1")
            with TaskGroup("group1", prefix_group_id=False):
                with TaskGroup("group1"):
                    pass

    # Task id colliding with the enclosing (un-prefixed) group id.
    with pytest.raises(DuplicateTaskIdFound, match=r".* 'group1' .*"):
        with DAG("test_duplicate_group_id", start_date=execution_date):
            with TaskGroup("group1", prefix_group_id=False):
                _ = DummyOperator(task_id="group1")

    # Task ids colliding with the group's reserved join-node ids.
    with pytest.raises(DuplicateTaskIdFound, match=r".* 'group1.downstream_join_id' .*"):
        with DAG("test_duplicate_group_id", start_date=execution_date):
            _ = DummyOperator(task_id="task1")
            with TaskGroup("group1"):
                _ = DummyOperator(task_id="downstream_join_id")

    with pytest.raises(DuplicateTaskIdFound, match=r".* 'group1.upstream_join_id' .*"):
        with DAG("test_duplicate_group_id", start_date=execution_date):
            _ = DummyOperator(task_id="task1")
            with TaskGroup("group1"):
                _ = DummyOperator(task_id="upstream_join_id")
def test_task_without_dag():
    """A task created without a DAG must be adopted into the root TaskGroup
    of the DAG owned by a task it gets wired to — in either direction."""
    dag = DAG(dag_id='test_task_without_dag', start_date=pendulum.parse("20200101"))
    anchored = DummyOperator(task_id='op1', dag=dag)
    floating = DummyOperator(task_id='op2')
    stray = DummyOperator(task_id="op3")

    anchored >> floating  # downstream of a DAG-bound task
    stray >> floating     # upstream wiring adopts the DAG too

    assert anchored.dag == floating.dag == stray.dag
    root_children = dag.task_group.children.keys()
    assert root_children == {"op1", "op2", "op3"}
    assert root_children == dag.task_dict.keys()
| apache-2.0 |
glenjarvis/Lemonade | src/tests/line_entry_tests.py | 1 | 4202 | #!/usr/bin/env python
# pylint: disable=R0904,W0142,C0103
"""Unit Tests for the core data structures"""
import datetime
import decimal
import unittest
try:
# Help bootstrap (If PYTHONPATH isn't set); pylint: disable=W0611
from bootstrap import lemonade
except ImportError:
pass
from lemonade import models
class LineEntryTestCase(unittest.TestCase):
    """LineEntry Tests

    A LineEntry is a single line and the smallest atomic unit of a
    JournalEntry.
    """

    def setUp(self):
        # Baseline valid constructor arguments; individual tests mutate
        # self.args to probe one argument at a time.
        self.date = '2013-01-01'
        self.description = "Daily Deposit"
        self.journal_entry = models.JournalEntry(
            date=self.date,
            description=self.description)
        self.args = {
            'journal_entry': self.journal_entry,
            'amount_type': models.LineEntry.DEBIT,
            'amount': 7169.98,
            'post_ref': 1020,
        }

    def test_shouldnt_initialize_wo_args(self):
        """It shouldn't initialize without arguments"""
        self.assertRaises(TypeError, models.LineEntry)

    def test_should_have_valid_journal_entry_arg(self):
        """It should have valid journal entry argument"""
        del self.args['journal_entry']
        self.assertRaises(TypeError, models.LineEntry, **self.args)

        self.args['journal_entry'] = "InvalidObject"
        self.assertRaises(TypeError, models.LineEntry, **self.args)

        self.args['journal_entry'] = models.JournalEntry(
            date=self.date,
            description=self.description)
        line_entry = models.LineEntry(**self.args)
        self.assertTrue(isinstance(line_entry.journal_entry,
                                   models.JournalEntry))

    def test_should_have_date_through_journal_entry(self):
        """It should have valid date from journal entry"""
        line_entry = models.LineEntry(**self.args)
        self.assertEqual(line_entry.journal_entry.date,
                         datetime.date(2013, 1, 1))

    def test_should_have_valid_amount_type_arg(self):
        """It should have valid amount_type argument"""
        del self.args['amount_type']
        self.assertRaises(TypeError, models.LineEntry, **self.args)

        self.args['amount_type'] = 'Meaningless Entry'
        self.assertRaises(TypeError, models.LineEntry, **self.args)

        self.args['amount_type'] = models.LineEntry.DEBIT
        line_entry = models.LineEntry(**self.args)
        self.assertEqual(line_entry.amount_type, models.LineEntry.DEBIT)

    def test_should_have_valid_amount_arg(self):
        """It should have valid amount argument"""
        del self.args['amount']
        self.assertRaises(TypeError, models.LineEntry, **self.args)

        self.args['amount'] = 'Meaningless Amount'
        self.assertRaises(TypeError, models.LineEntry, **self.args)

        # Both string and float inputs must normalize to Decimal.
        self.args['amount'] = '7169.98'
        line_entry = models.LineEntry(**self.args)
        self.assertTrue(isinstance(line_entry.amount, decimal.Decimal))
        self.assertEqual(line_entry.amount, decimal.Decimal('7169.98'))

        self.args['amount'] = 7169.980000
        line_entry = models.LineEntry(**self.args)
        self.assertTrue(isinstance(line_entry.amount, decimal.Decimal))
        self.assertEqual(line_entry.amount, decimal.Decimal('7169.98'))

    def test_should_have_valid_post_ref(self):
        """It should have valid post_ref argument

        Note: A LineItem object is not aware of the Chart Of Accounts.
        So, the PostRef is only validated to be a positive integer. The
        existence of that positive integer as an account number in the
        Chart Of Accounts is not validated on this level.
        """
        del self.args['post_ref']
        self.assertRaises(TypeError, models.LineEntry, **self.args)

        self.args['post_ref'] = 'Invalid PostRef'
        self.assertRaises(TypeError, models.LineEntry, **self.args)

        self.args['post_ref'] = 0
        self.assertRaises(TypeError, models.LineEntry, **self.args)

        # Fixed: use assertEqual — assertEquals is a deprecated alias
        # (removed in Python 3.12).
        self.args['post_ref'] = '1020'
        line_entry = models.LineEntry(**self.args)
        self.assertEqual(line_entry.post_ref, 1020)
# Allow running this test module directly: python line_entry_tests.py
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
alexanderturner/ansible | lib/ansible/modules/packaging/os/pulp_repo.py | 21 | 23737 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Joe Adams <@sysadmind>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity/support metadata consumed by Ansible's plugin tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: pulp_repo
author: "Joe Adams (@sysadmind)"
short_description: Add or remove Pulp repos from a remote host.
description:
- Add or remove Pulp repos from a remote host.
version_added: "2.3"
requirements: []
options:
add_export_distributor:
description:
- Whether or not to add the export distributor to new C(rpm) repositories.
required: false
default: false
feed:
description:
- Upstream feed URL to receive updates from.
required: false
default: null
force_basic_auth:
description:
- httplib2, the library used by the M(uri) module only sends
authentication information when a webservice responds to an initial
request with a 401 status. Since some basic auth services do not
properly send a 401, logins will fail. This option forces the sending of
the Basic authentication header upon initial request.
required: false
choices: [ "yes", "no" ]
default: "no"
importer_ssl_ca_cert:
description:
- CA certificate string used to validate the feed source SSL certificate.
This can be the file content or the path to the file.
required: false
default: null
importer_ssl_client_cert:
description:
- Certificate used as the client certificate when synchronizing the
repository. This is used to communicate authentication information to
the feed source. The value to this option must be the full path to the
certificate. The specified file may be the certificate itself or a
single file containing both the certificate and private key. This can be
the file content or the path to the file.
required: false
default: null
importer_ssl_client_key:
description:
- Private key to the certificate specified in I(importer_ssl_client_cert),
assuming it is not included in the certificate file itself. This can be
the file content or the path to the file.
required: false
default: null
name:
description:
- Name of the repo to add or remove. This correlates to repo-id in Pulp.
required: true
proxy_host:
description:
- Proxy url setting for the pulp repository importer. This is in the
format scheme://host.
required: false
default: null
proxy_port:
description:
- Proxy port setting for the pulp repository importer.
required: false
default: null
publish_distributor:
description:
- Distributor to use when state is C(publish). The default is to
publish all distributors.
required: false
pulp_host:
description:
- URL of the pulp server to connect to.
default: http://127.0.0.1
relative_url:
description:
- Relative URL for the local repository.
required: true
default: null
repo_type:
description:
- Repo plugin type to use (i.e. C(rpm), C(docker)).
default: rpm
serve_http:
description:
- Make the repo available over HTTP.
required: false
default: false
serve_https:
description:
- Make the repo available over HTTPS.
required: false
default: true
state:
description:
- The repo state. A state of C(sync) will queue a sync of the repo.
This is asynchronous but not delayed like a scheduled sync. A state of
C(publish) will use the repository's distributor to publish the content.
required: false
default: present
choices: [ "present", "absent", "sync", "publish" ]
url_password:
description:
- The password for use in HTTP basic authentication to the pulp API.
If the I(url_username) parameter is not specified, the I(url_password)
parameter will not be used.
required: false
url_username:
description:
- The username for use in HTTP basic authentication to the pulp API.
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
used on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: [ "yes", "no" ]
wait_for_completion:
description:
- Wait for asynchronous tasks to complete before returning.
required: false
default: 'no'
choices: [ "yes", "no" ]
notes:
- This module can currently only create distributors and importers on rpm
repositories. Contributions to support other repo types are welcome.
'''
EXAMPLES = '''
- name: Create a new repo with name 'my_repo'
pulp_repo:
name: my_repo
relative_url: my/repo
state: present
- name: Create a repo with a feed and a relative URL
pulp_repo:
name: my_centos_updates
repo_type: rpm
feed: http://mirror.centos.org/centos/6/updates/x86_64/
relative_url: centos/6/updates
url_username: admin
url_password: admin
force_basic_auth: yes
state: present
- name: Remove a repo from the pulp server
pulp_repo:
name: my_old_repo
repo_type: rpm
state: absent
'''
RETURN = '''
repo:
description: Name of the repo that the action was performed on.
returned: success
type: string
sample: my_repo
'''
import json
import os
from time import sleep
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.urls import url_argument_spec
class pulp_server(object):
"""
Class to interact with a Pulp server
"""
    def __init__(self, module, pulp_host, repo_type, wait_for_completion=False):
        """Bind an AnsibleModule and connection settings.

        module: the AnsibleModule instance (used for fetch_url/fail_json).
        pulp_host: base URL of the Pulp server, e.g. "http://127.0.0.1".
        repo_type: repo plugin type ("rpm", "docker", ...).
        wait_for_completion: block on Pulp's async tasks when True.

        NOTE(review): ``self.repo_list`` is read by get_repo_config_by_id
        but never set here — presumably assigned by the caller before use;
        confirm against main().
        """
        self.module = module
        self.host = pulp_host
        self.repo_type = repo_type
        self.repo_cache = dict()
        self.wait_for_completion = wait_for_completion
def check_repo_exists(self, repo_id):
try:
self.get_repo_config_by_id(repo_id)
except IndexError:
return False
else:
return True
def compare_repo_distributor_config(self, repo_id, **kwargs):
repo_config = self.get_repo_config_by_id(repo_id)
for distributor in repo_config['distributors']:
for key, value in kwargs.items():
if not distributor['config'][key] == value:
return False
return True
def compare_repo_importer_config(self, repo_id, **kwargs):
repo_config = self.get_repo_config_by_id(repo_id)
for importer in repo_config['importers']:
for key, value in kwargs.items():
if value is not None:
if key not in importer['config'].keys():
return False
if not importer['config'][key] == value:
return False
return True
    def create_repo(
            self,
            repo_id,
            relative_url,
            feed=None,
            serve_http=False,
            serve_https=True,
            proxy_host=None,
            proxy_port=None,
            ssl_ca_cert=None,
            ssl_client_cert=None,
            ssl_client_key=None,
            add_export_distributor=False
    ):
        """Create a new repository on the Pulp server.

        Only the ``rpm`` repo type gets distributors/importer configured
        here (see the module DOCUMENTATION note); other types are created
        with an empty distributor list. Fails the Ansible module unless
        the API answers 201 (created); returns True on success.
        """
        url = "%s/pulp/api/v2/repositories/" % self.host
        data = dict()
        data['id'] = repo_id
        data['distributors'] = []

        if self.repo_type == 'rpm':
            # Serving distributor: publishes automatically on sync.
            yum_distributor = dict()
            yum_distributor['distributor_id'] = "yum_distributor"
            yum_distributor['distributor_type_id'] = "yum_distributor"
            yum_distributor['auto_publish'] = True
            yum_distributor['distributor_config'] = dict()
            yum_distributor['distributor_config']['http'] = serve_http
            yum_distributor['distributor_config']['https'] = serve_https
            yum_distributor['distributor_config']['relative_url'] = relative_url
            data['distributors'].append(yum_distributor)

            # Optional export distributor: manual publish only.
            if add_export_distributor:
                export_distributor = dict()
                export_distributor['distributor_id'] = "export_distributor"
                export_distributor['distributor_type_id'] = "export_distributor"
                export_distributor['auto_publish'] = False
                export_distributor['distributor_config'] = dict()
                export_distributor['distributor_config']['http'] = serve_http
                export_distributor['distributor_config']['https'] = serve_https
                export_distributor['distributor_config']['relative_url'] = relative_url
                data['distributors'].append(export_distributor)

            # Importer options are only added when actually provided, so
            # Pulp's defaults apply otherwise.
            data['importer_type_id'] = "yum_importer"
            data['importer_config'] = dict()

            if feed:
                data['importer_config']['feed'] = feed

            if proxy_host:
                data['importer_config']['proxy_host'] = proxy_host

            if proxy_port:
                data['importer_config']['proxy_port'] = proxy_port

            if ssl_ca_cert:
                data['importer_config']['ssl_ca_cert'] = ssl_ca_cert

            if ssl_client_cert:
                data['importer_config']['ssl_client_cert'] = ssl_client_cert

            if ssl_client_key:
                data['importer_config']['ssl_client_key'] = ssl_client_key

            data['notes'] = {
                "_repo-type": "rpm-repo"
            }

        response, info = fetch_url(
            self.module,
            url,
            data=json.dumps(data),
            method='POST')

        if info['status'] != 201:
            self.module.fail_json(
                msg="Failed to create repo.",
                status_code=info['status'],
                response=info['msg'],
                url=url)
        else:
            return True
def delete_repo(self, repo_id):
    """Delete the repository `repo_id` via the Pulp v2 REST API.

    Fails the Ansible module on any status other than 202 (accepted).
    When `wait_for_completion` is set, blocks until the spawned delete
    tasks have finished.
    """
    url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id)
    response, info = fetch_url(self.module, url, data='', method='DELETE')
    status = info['status']
    if status != 202:
        self.module.fail_json(
            msg="Failed to delete repo.",
            status_code=status,
            response=info['msg'],
            url=url)
    if self.wait_for_completion:
        self.verify_tasks_completed(json.load(response))
    return True
def get_repo_config_by_id(self, repo_id):
    """Return the full configuration dict for repository `repo_id`.

    Results are memoized in `self.repo_cache`, so the (potentially large)
    `self.repo_list` is only scanned once per repository id.

    Raises:
        IndexError: if `repo_id` is not present in `self.repo_list`.
    """
    # Membership test on the dict itself; the previous `.keys()` call was
    # a needless intermediate.
    if repo_id not in self.repo_cache:
        repo_array = [x for x in self.repo_list if x['id'] == repo_id]
        self.repo_cache[repo_id] = repo_array[0]
    return self.repo_cache[repo_id]
def publish_repo(self, repo_id, publish_distributor):
    """Trigger a publish of repository `repo_id`.

    If `publish_distributor` is None, every distributor configured on the
    repository is published in turn; otherwise only the named distributor
    is published.  Fails the module on any status other than 202
    (accepted).
    """
    url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id)
    # If there's no distributor specified, we will publish them all
    if publish_distributor is None:
        repo_config = self.get_repo_config_by_id(repo_id)
        for distributor in repo_config['distributors']:
            data = dict()
            data['id'] = distributor['id']
            response, info = fetch_url(
                self.module,
                url,
                data=json.dumps(data),
                method='POST')
            if info['status'] != 202:
                self.module.fail_json(
                    msg="Failed to publish the repo.",
                    status_code=info['status'],
                    response=info['msg'],
                    url=url,
                    distributor=distributor['id'])
    else:
        data = dict()
        data['id'] = publish_distributor
        response, info = fetch_url(
            self.module,
            url,
            data=json.dumps(data),
            method='POST')
        if info['status'] != 202:
            self.module.fail_json(
                msg="Failed to publish the repo",
                status_code=info['status'],
                response=info['msg'],
                url=url,
                distributor=publish_distributor)
    if self.wait_for_completion:
        # NOTE(review): only the `response` of the most recent publish call
        # is inspected here, so in the publish-all branch the tasks spawned
        # by earlier distributors are not awaited -- confirm whether that
        # is intended.
        self.verify_tasks_completed(json.load(response))
    return True
def sync_repo(self, repo_id):
    """Schedule a sync of repository `repo_id` via the Pulp v2 REST API.

    Fails the Ansible module unless Pulp answers 202 (task accepted);
    optionally waits for the spawned sync tasks to finish.
    """
    sync_url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id)
    response, info = fetch_url(self.module, sync_url, data='', method='POST')
    if info['status'] != 202:
        self.module.fail_json(
            msg="Failed to schedule a sync of the repo.",
            status_code=info['status'],
            response=info['msg'],
            url=sync_url)
    if self.wait_for_completion:
        self.verify_tasks_completed(json.load(response))
    return True
def update_repo_distributor_config(self, repo_id, **kwargs):
    """Apply `kwargs` as distributor-config updates on every distributor
    of repository `repo_id`.

    Each key/value pair (e.g. ``relative_url``, ``http``, ``https``) is
    PUT to each distributor endpoint of the repo.  Fails the module on
    any status other than 202 (accepted).
    """
    url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id)
    repo_config = self.get_repo_config_by_id(repo_id)
    for distributor in repo_config['distributors']:
        distributor_url = "%s%s/" % (url, distributor['id'])
        data = dict()
        # A shallow copy of kwargs is the whole payload.
        data['distributor_config'] = dict(kwargs)
        response, info = fetch_url(
            self.module,
            distributor_url,
            data=json.dumps(data),
            method='PUT')
        if info['status'] != 202:
            # BUGFIX: report the exact distributor endpoint that failed
            # (previously the collection URL was reported), and use a
            # message that is not hard-wired to relative_url -- this
            # method updates any distributor setting.
            self.module.fail_json(
                msg="Failed to update the distributor configuration for the repository.",
                status_code=info['status'],
                response=info['msg'],
                url=distributor_url)
def update_repo_importer_config(self, repo_id, **kwargs):
    """Replace the importer configuration of `repo_id` with the non-None
    entries of `kwargs`.

    Pulp overwrites (rather than merges) the importer config, so callers
    must pass the complete desired configuration.  Fails the module
    unless Pulp answers 202 (accepted).
    """
    url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id)
    # Drop unset options; Pulp treats the posted config as authoritative.
    importer_config = {key: value for key, value in kwargs.items() if value is not None}
    data = dict()
    data['importer_config'] = importer_config
    if self.repo_type == 'rpm':
        data['importer_type_id'] = "yum_importer"
    response, info = fetch_url(
        self.module,
        url,
        data=json.dumps(data),
        method='POST')
    if info['status'] != 202:
        self.module.fail_json(
            msg="Failed to set the repo importer configuration",
            status_code=info['status'],
            response=info['msg'],
            importer_config=importer_config,
            url=url)
def set_repo_list(self):
    """Fetch the detailed repository list from Pulp and cache it on
    `self.repo_list`.

    Fails the module unless the API answers 200.
    """
    url = "%s/pulp/api/v2/repositories/?details=true" % self.host
    response, info = fetch_url(self.module, url, method='GET')
    status = info['status']
    if status != 200:
        self.module.fail_json(
            msg="Request failed",
            status_code=status,
            response=info['msg'],
            url=url)
    self.repo_list = json.load(response)
def verify_tasks_completed(self, response_dict):
    """Poll every task spawned by an asynchronous Pulp call until done.

    Args:
        response_dict: Parsed Pulp response containing 'spawned_tasks'.

    Returns:
        True once *all* spawned tasks have reached the 'finished' state.
        Fails the module if a status poll errors out or a task ends in
        the 'error' state.
    """
    for task in response_dict['spawned_tasks']:
        task_url = "%s%s" % (self.host, task['_href'])
        while True:
            response, info = fetch_url(
                self.module,
                task_url,
                data='',
                method='GET')
            if info['status'] != 200:
                self.module.fail_json(
                    msg="Failed to check async task status.",
                    status_code=info['status'],
                    response=info['msg'],
                    url=task_url)
            task_dict = json.load(response)
            if task_dict['state'] == 'finished':
                # BUGFIX: previously this returned True here, which skipped
                # verification of any remaining spawned tasks.  Move on to
                # the next task instead.
                break
            if task_dict['state'] == 'error':
                self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error'])
            sleep(2)
    return True
def _file_contents_or_value(value):
    """If `value` names an existing file, return that file's contents;
    otherwise return `value` unchanged (it is assumed to already be the
    certificate/key material).  `None` passes through untouched."""
    if value is None:
        return None
    file_path = os.path.abspath(value)
    if os.path.isfile(file_path):
        file_object = open(file_path, 'r')
        try:
            return file_object.read()
        finally:
            file_object.close()
    return value


def main():
    """Ansible entry point: create/delete/sync/publish a Pulp repository."""
    argument_spec = url_argument_spec()
    argument_spec.update(
        add_export_distributor=dict(default=False, type='bool'),
        feed=dict(),
        importer_ssl_ca_cert=dict(),
        importer_ssl_client_cert=dict(),
        importer_ssl_client_key=dict(),
        name=dict(required=True, aliases=['repo']),
        proxy_host=dict(),
        proxy_port=dict(),
        publish_distributor=dict(),
        pulp_host=dict(default="https://127.0.0.1"),
        relative_url=dict(),
        repo_type=dict(default="rpm"),
        serve_http=dict(default=False, type='bool'),
        serve_https=dict(default=True, type='bool'),
        state=dict(
            default="present",
            choices=['absent', 'present', 'sync', 'publish']),
        wait_for_completion=dict(default=False, type="bool"))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)
    add_export_distributor = module.params['add_export_distributor']
    feed = module.params['feed']
    importer_ssl_ca_cert = module.params['importer_ssl_ca_cert']
    importer_ssl_client_cert = module.params['importer_ssl_client_cert']
    importer_ssl_client_key = module.params['importer_ssl_client_key']
    proxy_host = module.params['proxy_host']
    proxy_port = module.params['proxy_port']
    publish_distributor = module.params['publish_distributor']
    pulp_host = module.params['pulp_host']
    relative_url = module.params['relative_url']
    repo = module.params['name']
    repo_type = module.params['repo_type']
    serve_http = module.params['serve_http']
    serve_https = module.params['serve_https']
    state = module.params['state']
    wait_for_completion = module.params['wait_for_completion']
    if (state == 'present') and (not relative_url):
        module.fail_json(msg="When state is present, relative_url is required.")
    # Ensure that the importer_ssl_* values are the certificate contents and
    # not file paths.  The three parameters are handled identically, so the
    # previously triplicated read-if-file logic lives in one helper now.
    importer_ssl_ca_cert = _file_contents_or_value(importer_ssl_ca_cert)
    importer_ssl_client_cert = _file_contents_or_value(importer_ssl_client_cert)
    importer_ssl_client_key = _file_contents_or_value(importer_ssl_client_key)
    server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion)
    server.set_repo_list()
    repo_exists = server.check_repo_exists(repo)
    changed = False
    if state == 'absent' and repo_exists:
        if not module.check_mode:
            server.delete_repo(repo)
        changed = True
    if state == 'sync':
        if not repo_exists:
            module.fail_json(msg="Repository was not found. The repository can not be synced.")
        if not module.check_mode:
            server.sync_repo(repo)
        changed = True
    if state == 'publish':
        if not repo_exists:
            module.fail_json(msg="Repository was not found. The repository can not be published.")
        if not module.check_mode:
            server.publish_repo(repo, publish_distributor)
        changed = True
    if state == 'present':
        if not repo_exists:
            if not module.check_mode:
                server.create_repo(
                    repo_id=repo,
                    relative_url=relative_url,
                    feed=feed,
                    serve_http=serve_http,
                    serve_https=serve_https,
                    proxy_host=proxy_host,
                    proxy_port=proxy_port,
                    ssl_ca_cert=importer_ssl_ca_cert,
                    ssl_client_cert=importer_ssl_client_cert,
                    ssl_client_key=importer_ssl_client_key,
                    add_export_distributor=add_export_distributor)
            changed = True
        else:
            # Check to make sure all the settings are correct
            # The importer config gets overwritten on set and not updated, so
            # we set the whole config at the same time.
            if not server.compare_repo_importer_config(
                repo,
                feed=feed,
                proxy_host=proxy_host,
                proxy_port=proxy_port,
                ssl_ca_cert=importer_ssl_ca_cert,
                ssl_client_cert=importer_ssl_client_cert,
                ssl_client_key=importer_ssl_client_key
            ):
                if not module.check_mode:
                    server.update_repo_importer_config(
                        repo,
                        feed=feed,
                        proxy_host=proxy_host,
                        proxy_port=proxy_port,
                        ssl_ca_cert=importer_ssl_ca_cert,
                        ssl_client_cert=importer_ssl_client_cert,
                        ssl_client_key=importer_ssl_client_key)
                changed = True
            if relative_url is not None:
                if not server.compare_repo_distributor_config(
                    repo,
                    relative_url=relative_url
                ):
                    if not module.check_mode:
                        server.update_repo_distributor_config(
                            repo,
                            relative_url=relative_url)
                    changed = True
            if not server.compare_repo_distributor_config(repo, http=serve_http):
                if not module.check_mode:
                    server.update_repo_distributor_config(repo, http=serve_http)
                changed = True
            if not server.compare_repo_distributor_config(repo, https=serve_https):
                if not module.check_mode:
                    server.update_repo_distributor_config(repo, https=serve_https)
                changed = True
    module.exit_json(changed=changed, repo=repo)


if __name__ == '__main__':
    main()
| gpl-3.0 |
reinaH/osf.io | scripts/migrate_inconsistent_file_keys.py | 64 | 2950 | #!/usr/bin/env python
# encoding: utf-8
"""Find all nodes with different sets of keys for `files_current` and
`files_versions`, and ensure that all keys present in the former are also
present in the latter.
NOTE: This is a one-time migration.
Log:
Run by sloria on production on 2014-10-16 at 16:00 EST. 15 nodes were migrated
which include only the RPP and forks of the RPP, as expected. Verified that the
affected files are now accessible.
"""
from website.models import Node
from website.app import init_app
def find_file_mismatch_nodes():
    """Find nodes with inconsistent `files_current` and `files_versions` field
    keys.
    """
    mismatched = []
    for node in Node.find():
        # Iterating a dict yields its keys, so set(...) compares key sets.
        if set(node.files_versions) != set(node.files_current):
            mismatched.append(node)
    return mismatched
def migrate_node(node):
    """Ensure that all keys present in `files_current` are also present in
    `files_versions`.

    For each (key, file_id) pair in `node.files_current`, guarantee that
    `node.files_versions[key]` exists and contains `file_id`.  The node is
    saved once at the end.
    """
    # `items()` instead of the Python-2-only `iteritems()` keeps this
    # working on both Python 2 and Python 3 with identical behavior.
    for key, file_id in node.files_current.items():
        if key not in node.files_versions:
            node.files_versions[key] = [file_id]
        elif file_id not in node.files_versions[key]:
            node.files_versions[key].append(file_id)
    node.save()
def main(dry_run=True):
    """Report (and, unless `dry_run`, repair) nodes whose `files_current`
    and `files_versions` key sets disagree."""
    init_app()
    nodes = find_file_mismatch_nodes()
    print('Migrating {0} nodes'.format(len(nodes)))
    if dry_run:
        # Dry run: only report how many nodes would be touched.
        return
    for node in nodes:
        migrate_node(node)
if __name__ == '__main__':
    import sys
    # Any occurrence of the literal argument "dry" selects dry-run mode.
    dry_run = 'dry' in sys.argv
    main(dry_run=dry_run)
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from framework.auth import Auth
class TestMigrateFiles(OsfTestCase):
    """Regression tests for the one-time files_current/files_versions
    consistency migration."""
    def clear(self):
        # Drop all nodes so each test starts from a clean collection.
        Node.remove()
    def setUp(self):
        """Create three nodes with one file each; wipe `files_versions` on
        the last one so exactly one node is inconsistent."""
        super(TestMigrateFiles, self).setUp()
        self.clear()
        self.nodes = []
        for idx in range(3):
            node = ProjectFactory()
            node.add_file(
                Auth(user=node.creator),
                'name',
                'contents',
                len('contents'),
                'text/plain',
            )
            self.nodes.append(node)
        self.nodes[-1].files_versions = {}
        self.nodes[-1].save()
        # Sanity check
        assert_in('name', self.nodes[-1].files_current)
        assert_not_in('name', self.nodes[-1].files_versions)
    def tearDown(self):
        super(TestMigrateFiles, self).tearDown()
        self.clear()
    def test_get_targets(self):
        # Only the node whose files_versions was emptied should be found.
        targets = find_file_mismatch_nodes()
        assert_equal(len(targets), 1)
        assert_equal(targets[0], self.nodes[-1])
    def test_migrate(self):
        # A real (non-dry) run must repair the inconsistent node.
        main(dry_run=False)
        assert_equal(len(find_file_mismatch_nodes()), 0)
        assert_in('name', self.nodes[-1].files_versions)
        assert_equal(
            self.nodes[-1].files_current['name'],
            self.nodes[-1].files_versions['name'][0],
        )
| apache-2.0 |
hehongliang/tensorflow | tensorflow/contrib/testing/python/framework/util_test.py | 198 | 4128 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python.training import summary_io
def assert_summary(expected_tags, expected_simple_values, summary_proto):
    """Check that `summary_proto` carries exactly `expected_tags` and that
    the values listed in `expected_simple_values` match to 2 decimals.

    Args:
      expected_tags: All tags in summary.
      expected_simple_values: Simple values for some tags.
      summary_proto: Summary to validate.

    Raises:
      ValueError: if expectations are not met.
    """
    seen_tags = set()
    for value in summary_proto.value:
        seen_tags.add(value.tag)
        if value.tag in expected_simple_values:
            np.testing.assert_almost_equal(
                value.simple_value,
                expected_simple_values[value.tag],
                decimal=2,
                err_msg=value.tag)
    if set(expected_tags) != seen_tags:
        raise ValueError('Expected tags %s, got %s.' % (set(expected_tags), seen_tags))
def to_summary_proto(summary_str):
    """Deserialize a summary proto from its wire format.

    Args:
      summary_str: Serialized summary.

    Returns:
      summary_pb2.Summary.

    Raises:
      ValueError: if tensor is not a valid summary tensor.
    """
    proto = summary_pb2.Summary()
    proto.ParseFromString(summary_str)
    return proto
# TODO(ptucker): Move to a non-test package?
def latest_event_file(base_dir):
    """Find latest event file in `base_dir`.

    "Latest" is determined by lexicographic order of the file names, which
    matches the timestamped `events.*` naming scheme.

    Args:
      base_dir: Base directory in which TF event files are stored.

    Returns:
      File path, or `None` if none exists.
    """
    candidates = sorted(glob.glob(os.path.join(base_dir, 'events.*')))
    return candidates[-1] if candidates else None
def latest_events(base_dir):
    """Parse events from the latest event file in `base_dir`.

    Args:
      base_dir: Base directory in which TF event files are stored.

    Returns:
      Iterable of event protos (empty list when no event file exists).
    """
    file_path = latest_event_file(base_dir)
    if not file_path:
        return []
    return summary_io.summary_iterator(file_path)
def latest_summaries(base_dir):
    """Parse summary events from the latest event file in `base_dir`.

    Args:
      base_dir: Base directory in which TF event files are stored.

    Returns:
      List of event protos that carry a `summary` field.
    """
    return [event for event in latest_events(base_dir)
            if event.HasField('summary')]
def simple_values_from_events(events, tags):
    """Parse summaries from events with simple_value.

    Args:
      events: List of tensorflow.Event protos.
      tags: List of string event tags corresponding to simple_value summaries.

    Returns:
      dict of tag:value, keeping for each tag the value from the highest
      step observed (events are mostly sorted by step, but this is checked
      explicitly).

    Raises:
      ValueError: if a summary with a specified tag does not contain
        simple_value.
    """
    latest_step = {}
    latest_value = {}
    for event in events:
        if not event.HasField('summary'):
            continue
        for value in event.summary.value:
            if value.tag not in tags:
                continue
            if not value.HasField('simple_value'):
                raise ValueError('Summary for %s is not a simple_value.' % value.tag)
            # Keep only the value from the highest step seen for each tag.
            if value.tag not in latest_step or event.step > latest_step[value.tag]:
                latest_step[value.tag] = event.step
                latest_value[value.tag] = value.simple_value
    return latest_value
| apache-2.0 |
GwangJin/gwangmoney-core | qa/rpc-tests/listtransactions.py | 164 | 4718 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    """
    num_matched = 0
    for item in object_array:
        # Skip items that do not satisfy every to_match constraint.
        if any(item[key] != value for key, value in to_match.items()):
            continue
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
        num_matched += 1
    if num_matched == 0:
        raise AssertionError("No objects matched %s"%(str(to_match)))
class ListTransactionsTest(BitcoinTestFramework):
    """Functional test exercising the `listtransactions` RPC across two
    wallet nodes: plain sends, confirmation counting, send-to-self, and
    `sendmany` fan-out with named accounts."""
    def run_test(self):
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        # Sender sees a "send", receiver a "receive"; both still unconfirmed.
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].setgenerate(True, 1)
        self.sync_all()
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self: the same txid shows up as both a send and a receive.
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"send"},
                           {"amount":Decimal("-0.2")})
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"receive"},
                           {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        # Each output is checked from both the sending and (where visible)
        # the receiving wallet, including the account labels.
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.33")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.33")},
                           {"txid":txid, "account" : "from1"} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.44")},
                           {"txid":txid, "account" : ""} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.44")},
                           {"txid":txid, "account" : "toself"} )
if __name__ == '__main__':
    ListTransactionsTest().main()
| mit |
samimoftheworld/google-appengine-wx-launcher | launcher/taskthread.py | 26 | 5287 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import subprocess
import time
import threading
import wx
import launcher
# TODO(jrg): rename this file task_thread.py
class TaskThread(threading.Thread):
    """A TaskThread is a thread for managing a task (subprocess).

    This thread creates a subprocess and directs the subprocess output
    to the task controller for display.  All tasks have an associated
    project.

    All callbacks initiated from this class (e.g. DisplayProjectOutput,
    _TaskWillStart) are called on the main thread with wx.CallAfter().
    """
    def __init__(self, controller, project, cmd, stdin=None):
        """Initialize a new TaskThread.

        Args:
          controller: A TaskController (or any controller that responds
              that has a callable AppendText attribute) which
              accepts stdout.
          project: The App Engine project (application) related to this task.
          cmd: A list of executable and args; the command to run in a
              subprocess which starts the app.
          stdin: The file used for stdin of our subprocess.
        """
        super(TaskThread, self).__init__()
        self._controller = controller
        self._project = project
        self._cmd = cmd
        self._stdin = stdin
        self.process = None  # the subprocess.Popen handle while running
    # Override of threading.Thread method so NotToBeCamelCased
    def run(self):
        """Thread body: spawn the subprocess and pump its output until EOF."""
        self._TaskWillStart()
        self.LogOutput('Running command: \"%s\"\n' % str(self._cmd), date=True)
        # stderr is merged into stdout so the controller sees one stream.
        self.process = subprocess.Popen(self._cmd,
                                        stdin=self._stdin,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
        try:
            started = False
            while True:
                line = self.process.stdout.readline()
                if not line:
                    # EOF: the subprocess closed stdout (it is exiting).
                    break
                self.LogOutput(line)
                if not started:
                    # Don't declare ourselves as 'started' until we see the subprocess
                    # announce that it is ready.
                    if self._IsLaunchCompletedLogLine(line):
                        self._TaskDidStart()
                        started = True
        except IOError:
            # The pipe can break when the process is killed; treat as EOF.
            pass
        # if we get here: process died (or is about to), so thread can die.
        code = self.process.wait()
        self.LogOutput('(Process exited with code %d)\n\n' % code, date=True)
        self._TaskDidStop(code)
        self.process = None
    def _IsLaunchCompletedLogLine(self, line):
        """Is the line that was logged the "hey, we've started!" value?

        Args:
          line: a string, presumably a log line from the subprocess

        Returns:
          True if the line is a special line that indicates that the subprocess
          as started.  False otherwise.
        """
        # Matches the dev server's "Running application ... http://host:port"
        # ready announcement.
        if re.match('.*Running application.*http://[^:]+:[0-9]+', line):
            return True
        return False
    # Override of threading.Thread method so NotToBeCamelCased
    def stop(self):
        """Kill the subprocess (if any) via the platform-specific helper."""
        if not self.process:
            return
        platform = self._PlatformObject()
        platform.KillProcess(self.process)
    def _PlatformObject(self):
        """Return a platform object.

        Split out for easier unit testing
        """
        return launcher.Platform()
    def LogOutput(self, line, date=False):
        """Display a given line (typically process output) in the Logs window.

        Args:
          line: a line of text to display for this subprocess / App Engine app
          date: if True, prefix with date.
        """
        if date:
            line = time.strftime("%Y-%m-%d %X") + ' ' + line
        # Marshal to the main thread; wx UI calls are not thread-safe.
        wx.CallAfter(self._controller.DisplayProjectOutput, self._project, line)
    def _TaskWillStart(self):
        """If our controller has a _TaskWillStart, call it on the main thread.

        The controller's property is called with our project as an arg.
        This method is called right before the task is started."""
        attr = getattr(self._controller, '_TaskWillStart', None)
        if attr and callable(attr):
            wx.CallAfter(attr, self.project)
    def _TaskDidStart(self):
        """If our controller has a _TaskDidStart, call it on the main thread.

        The controller's property is called with our project as an arg.
        This method is called right after the task is started."""
        attr = getattr(self._controller, '_TaskDidStart', None)
        if attr and callable(attr):
            wx.CallAfter(attr, self.project)
    def _TaskDidStop(self, code):
        """If our controller has a _TaskDidStop, call it on the main thread.

        The controller's property is called with our project and the
        task result code as arguments.
        This method is called right after the task has stopped."""
        attr = getattr(self._controller, '_TaskDidStop', None)
        if attr and callable(attr):
            wx.CallAfter(attr, self.project, code)
    @property
    def project(self):
        """A taskthread's project is read-only."""
        return self._project
| apache-2.0 |
krasin/omim | tools/testlog_to_xml_converter.py | 36 | 7619 | #!/usr/bin/env python
'''
This script generates jUnit-style xml files from the log written by our tests.
This xml file is used in Jenkins to show the state of the test execution.
Created on May 13, 2015
@author: t.danshin
'''
from __future__ import print_function
import sys
import xml.etree.ElementTree as ElementTree
from optparse import OptionParser
import re
class PrefixesInLog:
    """String prefixes that mark significant lines in the test log."""
    OK = "OK"                 # a passing test's result line
    FAILED = "FAILED"         # a failing test's result line (may carry details)
    BEGIN = "BEGIN: "         # start of one test executable's output
    END = "END: "             # end of one test executable's output
    RUNNING = "Running "      # start of an individual test case
    TEST_TOOK = "Test took "  # duration line closing an individual test case
    RESULT = "result: "       # exit-code part of an END line
class TestInfo:
    """Mutable record of a single test case, rendered to jUnit xml via xml()."""
    EXE = "UNKNOWN_COMPILED_FILE"  # placeholder suite (executable) name
    NAME = "UNKNOWN_CPP_FILE"      # placeholder test-case name
    FAILED = "FAILED"
    PASSED = "PASSED"
    def __init__(self):
        self.test_name = TestInfo.NAME
        self.test_suite = TestInfo.EXE
        self.test_comment = None               # accumulated log output for the case
        self.test_result = TestInfo.FAILED     # pessimistic default until an OK line is seen
        self.test_duration = 0.0               # seconds
    def set_name(self, test_name):
        # C++-style "Suite::Case" becomes jUnit-style "Suite.Case".
        self.test_name = test_name.replace("::", ".")
    def set_exe_name(self, exe):
        # Falsy exe (None/empty) falls back to the placeholder suite name.
        self.test_suite = exe if exe else TestInfo.EXE
    def set_duration(self, milis):
        # The log reports milliseconds; jUnit expects seconds.
        self.test_duration = float(milis) / 1000
    def set_test_result(self, result_string):
        """Mark the test FAILED or PASSED based on its result line; a FAILED
        line's trailing text is appended to the comment."""
        if result_string.startswith(PrefixesInLog.FAILED):
            self.test_result = TestInfo.FAILED
            self.append_comment(string_after_prefix(result_string, PrefixesInLog.FAILED))
        elif result_string.startswith(PrefixesInLog.OK):
            self.test_result = TestInfo.PASSED
    def append_comment(self, comment):
        """Append a log line to the comment, ignoring leading empty lines."""
        if not self.test_comment:
            if comment.strip(): # if we don't have a comment to test yet, and the line we got is not an empty string
                self.test_comment = comment
        else:
            try:
                self.test_comment = u"{old_comment}\n{comment}".format(old_comment=self.test_comment, comment=comment)
            except Exception as ex:
                # NOTE(review): this hard exit aborts the whole conversion on
                # a formatting/encoding failure, losing the xml produced so
                # far -- confirm that is intended.
                print(comment)
                print(type(ex))
                sys.exit(2)
    def is_empty(self):
        # NOTE(review): despite the name, this also requires a non-empty
        # comment (the trailing `and self.test_comment`), and it returns the
        # comment itself rather than a bool -- confirm intent.
        return self.test_name == TestInfo.NAME and self.test_suite == TestInfo.EXE and self.test_comment
    def __repr__(self):
        local_comment = self.test_comment if self.test_comment else str()
        return "{suite}::{name}: {comment} -> {result}\n".format(suite=self.test_suite,
                                                                 name=self.test_name,
                                                                 comment=local_comment,
                                                                 result=self.test_result)
    def xml(self):
        """Render this test case as a jUnit <testcase> element."""
        d = ElementTree.Element("testcase", {"name":self.test_name,
                                             "classname":self.test_suite,
                                             "time":str(self.test_duration)})
        if self.test_comment:
            b = ElementTree.SubElement(d, "system-err")
            b.text = self.test_comment
        if self.test_result == TestInfo.FAILED:
            fail = ElementTree.SubElement(d, "failure")
            if self.test_comment:
                fail.text = self.test_comment
        return d
class Parser:
    """Stateful log-to-xml converter.

    Routes every log line through a chain of check_* handlers (see
    parse_log_file) and accumulates jUnit <testcase> elements under one
    <testsuite> root.
    """
    def __init__(self, logfile, xml_file):
        self.logfile = logfile
        self.xml_file = xml_file
        self.current_exe = None            # executable whose output we are inside of
        self.test_info = TestInfo()        # test case currently being assembled
        self.var_should_pass = False       # True while skipping an executable's summary tail
        self.root = ElementTree.Element("testsuite")
    def write_xml_file(self):
        """Serialize the accumulated <testsuite> tree to self.xml_file."""
        ElementTree.ElementTree(self.root).write(self.xml_file, encoding="UTF-8")
    def parse_log_file(self):
        """Read the log and route each line through the handler chain.

        Order matters: executable boundaries first, then test-case
        boundaries, then result lines; whatever is left becomes comment
        text (or is swallowed once a summary line was seen).
        """
        with open(self.logfile) as f:
            PipeEach(f.readlines()).through_functions(
                self.check_for_exe_boundaries,
                self.check_for_testcase_boundaries,
                self.check_test_result,
                self.should_pass,
                self.append_to_comment
            )
    def should_pass(self, line):
        # Consumes (ignores) lines once a summary line has been seen;
        # see append_to_comment, which sets var_should_pass.
        return self.var_should_pass
    def check_for_exe_boundaries(self, line):
        """Handle BEGIN/END lines delimiting one test executable's output.

        Returns True when the line was consumed."""
        if line.startswith(PrefixesInLog.BEGIN):
            if self.current_exe: #if we never had an End to a Beginning
                self.test_info = TestInfo()
                self.append_to_xml()
            self.var_should_pass = False
            self.current_exe = string_after_prefix(line, PrefixesInLog.BEGIN)
            return True
        elif line.startswith(PrefixesInLog.END):
            self.var_should_pass = False
            # END lines look like "END: <exe> | result: <code>".
            parts = line.split(" | ")
            end_exe = string_after_prefix(parts[0], PrefixesInLog.END)
            result = int(string_after_prefix(parts[1], PrefixesInLog.RESULT))
            if result != 0:
                # Non-zero exit code: emit a synthetic FAILED case so the
                # crash is visible in the report.
                if not self.test_info:
                    self.test_info = TestInfo()
                self.test_info.set_exe_name(end_exe)
                self.test_info.set_test_result(TestInfo.FAILED)
                self.append_to_xml()
            self.current_exe = None
            return True
        return False
    def check_for_testcase_boundaries(self, line):
        """Handle "Running <name>" / "Test took <N> ms" lines that bracket
        an individual test case.  Returns True when the line was consumed."""
        if line.startswith(PrefixesInLog.RUNNING):
            if not self.test_info:
                self.test_info = TestInfo()
            self.test_info.set_name(string_after_prefix(line, PrefixesInLog.RUNNING))
            self.test_info.set_exe_name(self.current_exe)
            return True
        elif line.startswith(PrefixesInLog.TEST_TOOK):
            # end=-3 strips the trailing " ms" from the duration field.
            self.test_info.set_duration(string_after_prefix(line, PrefixesInLog.TEST_TOOK, end=-3))
            self.append_to_xml()
            self.test_info = None
            return True
        return False
    def check_test_result(self, line):
        # A bare "OK" or a line starting with "FAILED" is a result line.
        if line == PrefixesInLog.OK or line.startswith(PrefixesInLog.FAILED):
            self.test_info.set_test_result(line)
            return True
        return False
    def append_to_xml(self):
        """Flush the current TestInfo (if any) into the xml tree."""
        if self.test_info:
            self.test_info.set_exe_name(self.current_exe)
            self.root.append(self.test_info.xml())
    def append_to_comment(self, line):
        """Fallback handler: accumulate the line as test-case comment text.

        A summary line ("All tests passed." or "N tests failed") switches
        var_should_pass on so the remainder of the executable's output is
        ignored.  Always returns False."""
        if self.test_info:
            if line == "All tests passed." or re.match("\d{1,} tests failed", line, re.IGNORECASE):
                self.var_should_pass = True
                return False
            self.test_info.append_comment(line)
        return False
class PipeEach:
    """Feed each decoded line of an iterable through a chain of handlers."""

    def __init__(self, iterable_param):
        self.iterable_param = iterable_param

    def through_functions(self, *fns):
        """For every item: rstrip, decode as UTF-8, then offer the result to
        each handler in order until one returns a truthy value."""
        for raw in self.iterable_param:
            line = raw.rstrip().decode('utf-8')
            for handler in fns:
                if handler(line):
                    break
def string_after_prefix(line, prefix, end=None):
    """Return `line` with `prefix` removed; a truthy `end` truncates the
    result at that index of the original string (e.g. -3 drops " ms")."""
    start = len(prefix)
    if end:
        return line[start:end]
    return line[start:]
def read_cl_options():
    """Parse the command line.

    Returns:
        optparse options carrying `.input` (log file to read) and
        `.output` (xml file to write), both with defaults.
    """
    parser = OptionParser()
    parser.add_option("-o", "--output", dest="output", default="test_results.xml", help="resulting log file. Default testlog.log")
    parser.add_option("-i", "--include", dest="input", default="testlog.log", help="The path to the original log file to parse")
    options, _ = parser.parse_args()
    return options
def main():
    """Entry point: convert the log named on the command line to xUnit XML."""
    options = read_cl_options()
    log_parser = Parser(options.input, options.output)
    log_parser.parse_log_file()
    log_parser.write_xml_file()
    print("\nFinished writing the xUnit-style xml file\n")


if __name__ == '__main__':
    main()
| apache-2.0 |
zadgroup/edx-platform | lms/djangoapps/certificates/management/commands/resubmit_error_certificates.py | 120 | 4248 | """Management command for re-submitting certificates with an error status.
Certificates may have "error" status for a variety of reasons,
but the most likely is that the course was misconfigured
in the certificates worker.
This management command identifies certificate tasks
that have an error status and re-resubmits them.
Example usage:
# Re-submit certificates for *all* courses
$ ./manage.py lms resubmit_error_certificates
# Re-submit certificates for particular courses
$ ./manage.py lms resubmit_error_certificates -c edX/DemoX/Fall_2015 -c edX/DemoX/Spring_2016
"""
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from certificates import api as certs_api
from certificates.models import GeneratedCertificate, CertificateStatuses
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
    """Resubmit certificates with error status. """
    # Standard Django management-command options plus a repeatable
    # -c/--course flag restricting resubmission to specific courses.
    option_list = BaseCommand.option_list + (
        make_option(
            '-c', '--course',
            metavar='COURSE_KEY',
            dest='course_key_list',
            action='append',
            default=[],
            help='Only re-submit certificates for these courses.'
        ),
    )
def handle(self, *args, **options):
"""Resubmit certificates with status 'error'.
Arguments:
username (unicode): Identifier for the certificate's user.
Keyword Arguments:
course_key_list (list): List of course key strings.
Raises:
CommandError
"""
only_course_keys = []
for course_key_str in options.get('course_key_list', []):
try:
only_course_keys.append(CourseKey.from_string(course_key_str))
except InvalidKeyError:
raise CommandError(
'"{course_key_str}" is not a valid course key.'.format(
course_key_str=course_key_str
)
)
if only_course_keys:
LOGGER.info(
(
u'Starting to re-submit certificates with status "error" '
u'in these courses: %s'
), ", ".join([unicode(key) for key in only_course_keys])
)
else:
LOGGER.info(u'Starting to re-submit certificates with status "error".')
# Retrieve the IDs of generated certificates with
# error status in the set of courses we're considering.
queryset = (
GeneratedCertificate.objects.select_related('user')
).filter(status=CertificateStatuses.error)
if only_course_keys:
queryset = queryset.filter(course_id__in=only_course_keys)
resubmit_list = [(cert.user, cert.course_id) for cert in queryset]
course_cache = {}
resubmit_count = 0
for user, course_key in resubmit_list:
course = self._load_course_with_cache(course_key, course_cache)
if course is not None:
certs_api.generate_user_certificates(user, course_key, course=course)
resubmit_count += 1
LOGGER.info(
(
u"Re-submitted certificate for user %s "
u"in course '%s'"
), user.id, course_key
)
else:
LOGGER.error(
(
u"Could not find course for course key '%s'. "
u"Certificate for user %s will not be resubmitted."
), course_key, user.id
)
LOGGER.info("Finished resubmitting %s certificate tasks", resubmit_count)
def _load_course_with_cache(self, course_key, course_cache):
"""Retrieve the course, then cache it to avoid Mongo queries. """
course = (
course_cache[course_key] if course_key in course_cache
else modulestore().get_course(course_key, depth=0)
)
course_cache[course_key] = course
return course
| agpl-3.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/nbformat/v2/rwbase.py | 12 | 5997 | """Base classes and utilities for readers and writers.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from base64 import encodestring, decodestring
import pprint
from ipython_genutils.py3compat import str_to_bytes, unicode_type, string_types
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def restore_bytes(nb):
    """Restore bytes of image data from unicode-only formats.

    Base64 encoding is handled elsewhere. Bytes objects in the notebook are
    always b64-encoded. We DO NOT encode/decode around file formats.
    """
    for worksheet in nb.worksheets:
        for cell in worksheet.cells:
            if cell.cell_type != 'code':
                continue
            for output in cell.outputs:
                # Outputs are dict-like with attribute access, so item
                # assignment is equivalent to setting the attribute.
                for key in ('png', 'jpeg'):
                    if key in output:
                        output[key] = str_to_bytes(output[key], 'ascii')
    return nb
# Output dict keys whose values are likely to be multiline text.
# split_lines() turns these into lists of lines (VCS-friendly JSON) and
# rejoin_lines() joins them back into single strings.
_multiline_outputs = ['text', 'html', 'svg', 'latex', 'javascript', 'json']
def rejoin_lines(nb):
    """Join multiline text lists back into single strings.

    Reverses the effect of ``split_lines(nb)``.  Only values that are
    currently lists are joined, so text that was never split passes
    through unchanged.  Used when reading JSON files that may have been
    passed through split_lines.
    """
    for worksheet in nb.worksheets:
        for cell in worksheet.cells:
            if cell.cell_type == 'code':
                if 'input' in cell and isinstance(cell.input, list):
                    cell.input = u'\n'.join(cell.input)
                for output in cell.outputs:
                    for key in _multiline_outputs:
                        value = output.get(key, None)
                        if isinstance(value, list):
                            output[key] = u'\n'.join(value)
            else:
                # Text cell: only source/rendered may have been split.
                for key in ('source', 'rendered'):
                    value = cell.get(key, None)
                    if isinstance(value, list):
                        cell[key] = u'\n'.join(value)
    return nb
def split_lines(nb):
    """Split likely-multiline text into lists of strings.

    Produces file output that is friendlier to line-based VCS.
    ``rejoin_lines(nb)`` reverses the effect.  Used when writing JSON
    files.
    """
    for worksheet in nb.worksheets:
        for cell in worksheet.cells:
            if cell.cell_type == 'code':
                if 'input' in cell and isinstance(cell.input, string_types):
                    cell.input = cell.input.splitlines()
                for output in cell.outputs:
                    for key in _multiline_outputs:
                        value = output.get(key, None)
                        if isinstance(value, string_types):
                            output[key] = value.splitlines()
            else:
                # Text cell: split the source and rendered text.
                for key in ('source', 'rendered'):
                    value = cell.get(key, None)
                    if isinstance(value, string_types):
                        cell[key] = value.splitlines()
    return nb
# b64 encode/decode are never actually used, because all bytes objects in
# the notebook are already b64-encoded, and we don't need/want to double-encode
def base64_decode(nb):
    """Restore all bytes objects in the notebook from base64-encoded strings.

    Note: This is never used
    """
    # ``base64.decodestring`` was removed in Python 3.9; ``decodebytes``
    # is the long-standing equivalent alias.  Imported locally so the
    # (deprecated) module-level import is untouched for Python 2 callers.
    from base64 import decodebytes

    for ws in nb.worksheets:
        for cell in ws.cells:
            if cell.cell_type == 'code':
                for output in cell.outputs:
                    if 'png' in output:
                        # Normalize unicode to ascii bytes before decoding.
                        if isinstance(output.png, unicode_type):
                            output.png = output.png.encode('ascii')
                        output.png = decodebytes(output.png)
                    if 'jpeg' in output:
                        if isinstance(output.jpeg, unicode_type):
                            output.jpeg = output.jpeg.encode('ascii')
                        output.jpeg = decodebytes(output.jpeg)
    return nb
def base64_encode(nb):
    """Base64 encode all bytes objects in the notebook.

    These will be b64-encoded unicode strings

    Note: This is never used
    """
    # ``base64.encodestring`` was removed in Python 3.9; ``encodebytes``
    # is the long-standing equivalent alias.  Imported locally so the
    # (deprecated) module-level import is untouched for Python 2 callers.
    from base64 import encodebytes

    for ws in nb.worksheets:
        for cell in ws.cells:
            if cell.cell_type == 'code':
                for output in cell.outputs:
                    if 'png' in output:
                        output.png = encodebytes(output.png).decode('ascii')
                    if 'jpeg' in output:
                        output.jpeg = encodebytes(output.jpeg).decode('ascii')
    return nb
class NotebookReader(object):
    """A class for reading notebooks.

    Subclasses must implement :meth:`reads`; :meth:`read` is derived
    from it.
    """

    def reads(self, s, **kwargs):
        """Read a notebook from a string."""
        # BUG FIX: the message previously said "loads".
        raise NotImplementedError("reads must be implemented in a subclass")

    def read(self, fp, **kwargs):
        """Read a notebook from a file like object"""
        # BUG FIX: this previously called self.read (infinite recursion);
        # it must delegate to reads() with the file's contents.
        return self.reads(fp.read(), **kwargs)
class NotebookWriter(object):
    """A class for writing notebooks.

    Subclasses must implement :meth:`writes`; :meth:`write` is derived
    from it.
    """

    def writes(self, nb, **kwargs):
        """Write a notebook to a string."""
        # BUG FIX: the message previously said "loads".
        raise NotImplementedError("writes must be implemented in a subclass")

    def write(self, nb, fp, **kwargs):
        """Write a notebook to a file like object"""
        return fp.write(self.writes(nb, **kwargs))
| mit |
baoboa/pyqt5 | examples/qtdemo/headingitem.py | 3 | 4037 | #############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial Usage
## Licensees holding valid Qt Commercial licenses may use this file in
## accordance with the Qt Commercial License Agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Nokia.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import Qt
from PyQt5.QtGui import (QColor, QFontMetrics, QImage, QLinearGradient,
QPainter, QPen)
from colors import Colors
from demoitem import DemoItem
class HeadingItem(DemoItem):
    """Demo item that renders heading text with a drop shadow and a
    gradient fill, cached as a pre-scaled image."""

    def __init__(self, text, parent=None):
        super(HeadingItem, self).__init__(parent)
        # The heading string to render.
        self.text = text
        # Sub-pixel positioning is off while the item is static; it is
        # toggled by the animation callbacks below (flag presumably read
        # by the DemoItem framework -- TODO confirm).
        self.noSubPixeling = True

    def createImage(self, transform):
        """Render the heading into a QImage sized for *transform*.

        The image is scaled by the transform's diagonal factors so the
        cached rendering stays sharp at the current zoom level.
        """
        sx = min(transform.m11(), transform.m22())
        sy = max(transform.m22(), sx)
        fm = QFontMetrics(Colors.headingFont())
        # +1 guards against clipping the last column of the text.
        w = fm.width(self.text) + 1
        h = fm.height()
        # Offset (in unscaled units) between the text and its shadow.
        xShadow = 3.0
        yShadow = 3.0
        # Allocate room for the text plus the shadow offset, then start
        # fully transparent.
        image = QImage(int((w + xShadow) * sx), int((h + yShadow) * sy),
            QImage.Format_ARGB32_Premultiplied)
        image.fill(QColor(0, 0, 0, 0).rgba())
        painter = QPainter(image)
        painter.setFont(Colors.headingFont())
        painter.scale(sx, sy)

        # Draw shadow.
        brush_shadow = QLinearGradient(xShadow, yShadow, w, yShadow)
        brush_shadow.setSpread(QLinearGradient.PadSpread)
        if Colors.useEightBitPalette:
            # 8-bit palette: use a fully opaque shadow color.
            brush_shadow.setColorAt(0.0, QColor(0, 0, 0))
        else:
            # Otherwise a translucent black (alpha 100) shadow.
            brush_shadow.setColorAt(0.0, QColor(0, 0, 0, 100))
        pen_shadow = QPen()
        pen_shadow.setBrush(brush_shadow)
        painter.setPen(pen_shadow)
        painter.drawText(int(xShadow), int(yShadow), int(w), int(h),
            Qt.AlignLeft, self.text)

        # Draw text with a white-to-grey gradient fill.
        brush_text = QLinearGradient(0, 0, w, w)
        brush_text.setSpread(QLinearGradient.PadSpread)
        brush_text.setColorAt(0.0, QColor(255, 255, 255))
        brush_text.setColorAt(0.2, QColor(255, 255, 255))
        brush_text.setColorAt(0.5, QColor(190, 190, 190))
        pen_text = QPen()
        pen_text.setBrush(brush_text)
        painter.setPen(pen_text)
        painter.drawText(0, 0, int(w), int(h), Qt.AlignLeft, self.text)
        return image

    def animationStarted(self, id=0):
        # Enable sub-pixel positioning while the item is animating.
        self.noSubPixeling = False

    def animationStopped(self, id=0):
        # Back to pixel-aligned rendering once the animation ends.
        self.noSubPixeling = True
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.